f2fs: Don't overwrite all types of node to keep node chain
/*
 * fs/f2fs/segment.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/prefetch.h>
#include <linux/kthread.h>
#include <linux/swap.h>
#include <linux/timer.h>
#include <linux/freezer.h>
#include <linux/sched/signal.h>

#include "f2fs.h"
#include "segment.h"
#include "node.h"
#include "gc.h"
#include "trace.h"
#include <trace/events/f2fs.h>

#define __reverse_ffz(x) __reverse_ffs(~(x))

static struct kmem_cache *discard_entry_slab;
static struct kmem_cache *discard_cmd_slab;
static struct kmem_cache *sit_entry_set_slab;
static struct kmem_cache *inmem_entry_slab;

static unsigned long __reverse_ulong(unsigned char *str)
{
	unsigned long tmp = 0;
	int shift = 24, idx = 0;

#if BITS_PER_LONG == 64
	shift = 56;
#endif
	while (shift >= 0) {
		tmp |= (unsigned long)str[idx++] << shift;
		shift -= BITS_PER_BYTE;
	}
	return tmp;
}

/*
 * __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h since
 * MSB and LSB are reversed in a byte by f2fs_set_bit.
 */
static inline unsigned long __reverse_ffs(unsigned long word)
{
	int num = 0;

#if BITS_PER_LONG == 64
	if ((word & 0xffffffff00000000UL) == 0)
		num += 32;
	else
		word >>= 32;
#endif
	if ((word & 0xffff0000) == 0)
		num += 16;
	else
		word >>= 16;

	if ((word & 0xff00) == 0)
		num += 8;
	else
		word >>= 8;

	if ((word & 0xf0) == 0)
		num += 4;
	else
		word >>= 4;

	if ((word & 0xc) == 0)
		num += 2;
	else
		word >>= 2;

	if ((word & 0x2) == 0)
		num += 1;
	return num;
}

/*
 * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because
 * f2fs_set_bit makes MSB and LSB reversed in a byte.
 * @size must be an integral multiple of BITS_PER_LONG.
 * Example:
 *                             MSB <--> LSB
 *   f2fs_set_bit(0, bitmap) => 1000 0000
 *   f2fs_set_bit(7, bitmap) => 0000 0001
 */
static unsigned long __find_rev_next_bit(const unsigned long *addr,
			unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + BIT_WORD(offset);
	unsigned long result = size;
	unsigned long tmp;

	if (offset >= size)
		return size;

	size -= (offset & ~(BITS_PER_LONG - 1));
	offset %= BITS_PER_LONG;

	while (1) {
		if (*p == 0)
			goto pass;

		tmp = __reverse_ulong((unsigned char *)p);

		tmp &= ~0UL >> offset;
		if (size < BITS_PER_LONG)
			tmp &= (~0UL << (BITS_PER_LONG - size));
		if (tmp)
			goto found;
pass:
		if (size <= BITS_PER_LONG)
			break;
		size -= BITS_PER_LONG;
		offset = 0;
		p++;
	}
	return result;
found:
	return result - size + __reverse_ffs(tmp);
}

static unsigned long __find_rev_next_zero_bit(const unsigned long *addr,
			unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + BIT_WORD(offset);
	unsigned long result = size;
	unsigned long tmp;

	if (offset >= size)
		return size;

	size -= (offset & ~(BITS_PER_LONG - 1));
	offset %= BITS_PER_LONG;

	while (1) {
		if (*p == ~0UL)
			goto pass;

		tmp = __reverse_ulong((unsigned char *)p);

		if (offset)
			tmp |= ~0UL << (BITS_PER_LONG - offset);
		if (size < BITS_PER_LONG)
			tmp |= ~0UL >> size;
		if (tmp != ~0UL)
			goto found;
pass:
		if (size <= BITS_PER_LONG)
			break;
		size -= BITS_PER_LONG;
		offset = 0;
		p++;
	}
	return result;
found:
	return result - size + __reverse_ffz(tmp);
}

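/*
 * Illustrative check of the reversed layout used by the two helpers above
 * (one byte shown): after f2fs_set_bit(2, bitmap) the byte reads 0010 0000,
 * so __find_rev_next_bit(bitmap, 8, 0) returns 2 and
 * __find_rev_next_zero_bit(bitmap, 8, 2) returns 3.
 */
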
bool need_SSR(struct f2fs_sb_info *sbi)
{
	int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
	int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
	int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);

	if (test_opt(sbi, LFS))
		return false;
	if (sbi->gc_thread && sbi->gc_thread->gc_urgent)
		return true;

	return free_sections(sbi) <= (node_secs + 2 * dent_secs + imeta_secs +
			SM_I(sbi)->min_ssr_sections + reserved_sections(sbi));
}

void register_inmem_page(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct inmem_pages *new;

	f2fs_trace_pid(page);

	set_page_private(page, (unsigned long)ATOMIC_WRITTEN_PAGE);
	SetPagePrivate(page);

	new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS);

	/* add atomic page indices to the list */
	new->page = page;
	INIT_LIST_HEAD(&new->list);

	/* increase reference count with clean state */
	mutex_lock(&fi->inmem_lock);
	get_page(page);
	list_add_tail(&new->list, &fi->inmem_pages);
	spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
	if (list_empty(&fi->inmem_ilist))
		list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]);
	spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
	inc_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
	mutex_unlock(&fi->inmem_lock);

	trace_f2fs_register_inmem_page(page, INMEM);
}

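/*
 * Note: __revoke_inmem_pages() below serves two modes: with @drop it simply
 * discards the registered pages; with @recover it walks the recorded
 * cur->old_addr values and restores each page's previous on-disk block.
 */
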
static int __revoke_inmem_pages(struct inode *inode,
				struct list_head *head, bool drop, bool recover)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct inmem_pages *cur, *tmp;
	int err = 0;

	list_for_each_entry_safe(cur, tmp, head, list) {
		struct page *page = cur->page;

		if (drop)
			trace_f2fs_commit_inmem_page(page, INMEM_DROP);

		lock_page(page);

		if (recover) {
			struct dnode_of_data dn;
			struct node_info ni;

			trace_f2fs_commit_inmem_page(page, INMEM_REVOKE);
retry:
			set_new_dnode(&dn, inode, NULL, NULL, 0);
			err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
			if (err) {
				if (err == -ENOMEM) {
					congestion_wait(BLK_RW_ASYNC, HZ/50);
					cond_resched();
					goto retry;
				}
				err = -EAGAIN;
				goto next;
			}
			get_node_info(sbi, dn.nid, &ni);
			if (cur->old_addr == NEW_ADDR) {
				invalidate_blocks(sbi, dn.data_blkaddr);
				f2fs_update_data_blkaddr(&dn, NEW_ADDR);
			} else
				f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
					cur->old_addr, ni.version, true, true);
			f2fs_put_dnode(&dn);
		}
next:
		/* we don't need to invalidate this in the successful status */
		if (drop || recover)
			ClearPageUptodate(page);
		set_page_private(page, 0);
		ClearPagePrivate(page);
		f2fs_put_page(page, 1);

		list_del(&cur->list);
		kmem_cache_free(inmem_entry_slab, cur);
		dec_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
	}
	return err;
}

void drop_inmem_pages_all(struct f2fs_sb_info *sbi)
{
	struct list_head *head = &sbi->inode_list[ATOMIC_FILE];
	struct inode *inode;
	struct f2fs_inode_info *fi;
next:
	spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
	if (list_empty(head)) {
		spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
		return;
	}
	fi = list_first_entry(head, struct f2fs_inode_info, inmem_ilist);
	inode = igrab(&fi->vfs_inode);
	spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);

	if (inode) {
		drop_inmem_pages(inode);
		iput(inode);
	}
	congestion_wait(BLK_RW_ASYNC, HZ/50);
	cond_resched();
	goto next;
}

void drop_inmem_pages(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);

	mutex_lock(&fi->inmem_lock);
	__revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
	spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
	if (!list_empty(&fi->inmem_ilist))
		list_del_init(&fi->inmem_ilist);
	spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
	mutex_unlock(&fi->inmem_lock);

	clear_inode_flag(inode, FI_ATOMIC_FILE);
	clear_inode_flag(inode, FI_HOT_DATA);
	stat_dec_atomic_write(inode);
}

void drop_inmem_page(struct inode *inode, struct page *page)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct list_head *head = &fi->inmem_pages;
	struct inmem_pages *cur = NULL;

	f2fs_bug_on(sbi, !IS_ATOMIC_WRITTEN_PAGE(page));

	mutex_lock(&fi->inmem_lock);
	list_for_each_entry(cur, head, list) {
		if (cur->page == page)
			break;
	}

	f2fs_bug_on(sbi, !cur || cur->page != page);
	list_del(&cur->list);
	mutex_unlock(&fi->inmem_lock);

	dec_page_count(sbi, F2FS_INMEM_PAGES);
	kmem_cache_free(inmem_entry_slab, cur);

	ClearPageUptodate(page);
	set_page_private(page, 0);
	ClearPagePrivate(page);
	f2fs_put_page(page, 0);

	trace_f2fs_commit_inmem_page(page, INMEM_INVALIDATE);
}

static int __commit_inmem_pages(struct inode *inode,
					struct list_head *revoke_list)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct inmem_pages *cur, *tmp;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = inode->i_ino,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = REQ_SYNC | REQ_PRIO,
		.io_type = FS_DATA_IO,
	};
	pgoff_t last_idx = ULONG_MAX;
	int err = 0;

	list_for_each_entry_safe(cur, tmp, &fi->inmem_pages, list) {
		struct page *page = cur->page;

		lock_page(page);
		if (page->mapping == inode->i_mapping) {
			trace_f2fs_commit_inmem_page(page, INMEM);

			set_page_dirty(page);
			f2fs_wait_on_page_writeback(page, DATA, true);
			if (clear_page_dirty_for_io(page)) {
				inode_dec_dirty_pages(inode);
				remove_dirty_inode(inode);
			}
retry:
			fio.page = page;
			fio.old_blkaddr = NULL_ADDR;
			fio.encrypted_page = NULL;
			fio.need_lock = LOCK_DONE;
			err = do_write_data_page(&fio);
			if (err) {
				if (err == -ENOMEM) {
					congestion_wait(BLK_RW_ASYNC, HZ/50);
					cond_resched();
					goto retry;
				}
				unlock_page(page);
				break;
			}
			/* record old blkaddr for revoking */
			cur->old_addr = fio.old_blkaddr;
			last_idx = page->index;
		}
		unlock_page(page);
		list_move_tail(&cur->list, revoke_list);
	}

	if (last_idx != ULONG_MAX)
		f2fs_submit_merged_write_cond(sbi, inode, 0, last_idx, DATA);

	if (!err)
		__revoke_inmem_pages(inode, revoke_list, false, false);

	return err;
}

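/*
 * Atomic commit summary: commit_inmem_pages() writes every registered page
 * under f2fs_lock_op(); if any write fails, the pages already written are
 * revoked through __revoke_inmem_pages() so the commit stays all-or-nothing.
 */
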
int commit_inmem_pages(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct list_head revoke_list;
	int err;

	INIT_LIST_HEAD(&revoke_list);
	f2fs_balance_fs(sbi, true);
	f2fs_lock_op(sbi);

	set_inode_flag(inode, FI_ATOMIC_COMMIT);

	mutex_lock(&fi->inmem_lock);
	err = __commit_inmem_pages(inode, &revoke_list);
	if (err) {
		int ret;
		/*
		 * Try to revoke all committed pages. This can still fail,
		 * e.g. due to lack of memory; in that case EAGAIN is
		 * returned, meaning the transaction has lost its integrity
		 * and the caller should use a journal to recover, or rewrite
		 * and commit the last transaction. For any other error
		 * number, the revoking was done by the filesystem itself.
		 */
		ret = __revoke_inmem_pages(inode, &revoke_list, false, true);
		if (ret)
			err = ret;

		/* drop all uncommitted pages */
		__revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
	}
	spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
	if (!list_empty(&fi->inmem_ilist))
		list_del_init(&fi->inmem_ilist);
	spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
	mutex_unlock(&fi->inmem_lock);

	clear_inode_flag(inode, FI_ATOMIC_COMMIT);

	f2fs_unlock_op(sbi);
	return err;
}

/*
 * This function balances dirty node and dentry pages.
 * In addition, it controls garbage collection.
 */
void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
{
#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
		f2fs_show_injection_info(FAULT_CHECKPOINT);
		f2fs_stop_checkpoint(sbi, false);
	}
#endif

	/* balance_fs_bg() may still be pending */
	if (need && excess_cached_nats(sbi))
		f2fs_balance_fs_bg(sbi);

	/*
	 * We should do GC, or end up with a checkpoint, if there are
	 * too many dirty dir/node pages without enough free segments.
	 */
	if (has_not_enough_free_secs(sbi, 0, 0)) {
		mutex_lock(&sbi->gc_mutex);
		f2fs_gc(sbi, false, false, NULL_SEGNO);
	}
}

void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
{
	/* try to shrink the extent cache when there is not enough memory */
	if (!available_free_memory(sbi, EXTENT_CACHE))
		f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER);

	/* check the # of cached NAT entries */
	if (!available_free_memory(sbi, NAT_ENTRIES))
		try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);

	if (!available_free_memory(sbi, FREE_NIDS))
		try_to_free_nids(sbi, MAX_FREE_NIDS);
	else
		build_free_nids(sbi, false, false);

	if (!is_idle(sbi) && !excess_dirty_nats(sbi))
		return;

	/* checkpoint is the only way to shrink partially cached entries */
	if (!available_free_memory(sbi, NAT_ENTRIES) ||
			!available_free_memory(sbi, INO_ENTRIES) ||
			excess_prefree_segs(sbi) ||
			excess_dirty_nats(sbi) ||
			f2fs_time_over(sbi, CP_TIME)) {
		if (test_opt(sbi, DATA_FLUSH)) {
			struct blk_plug plug;

			blk_start_plug(&plug);
			sync_dirty_inodes(sbi, FILE_INODE);
			blk_finish_plug(&plug);
		}
		f2fs_sync_fs(sbi->sb, true);
		stat_inc_bg_cp_count(sbi->stat_info);
	}
}

static int __submit_flush_wait(struct f2fs_sb_info *sbi,
				struct block_device *bdev)
{
	struct bio *bio = f2fs_bio_alloc(sbi, 0, true);
	int ret;

	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
	bio_set_dev(bio, bdev);
	ret = submit_bio_wait(bio);
	bio_put(bio);

	trace_f2fs_issue_flush(bdev, test_opt(sbi, NOBARRIER),
				test_opt(sbi, FLUSH_MERGE), ret);
	return ret;
}

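/* For multi-device mounts, flush only the devices dirtied for @ino. */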
static int submit_flush_wait(struct f2fs_sb_info *sbi, nid_t ino)
{
	int ret = 0;
	int i;

	if (!sbi->s_ndevs)
		return __submit_flush_wait(sbi, sbi->sb->s_bdev);

	for (i = 0; i < sbi->s_ndevs; i++) {
		if (!is_dirty_device(sbi, ino, i, FLUSH_INO))
			continue;
		ret = __submit_flush_wait(sbi, FDEV(i).bdev);
		if (ret)
			break;
	}
	return ret;
}

static int issue_flush_thread(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
	wait_queue_head_t *q = &fcc->flush_wait_queue;
repeat:
	if (kthread_should_stop())
		return 0;

	sb_start_intwrite(sbi->sb);

	if (!llist_empty(&fcc->issue_list)) {
		struct flush_cmd *cmd, *next;
		int ret;

		fcc->dispatch_list = llist_del_all(&fcc->issue_list);
		fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);

		cmd = llist_entry(fcc->dispatch_list, struct flush_cmd, llnode);

		ret = submit_flush_wait(sbi, cmd->ino);
		atomic_inc(&fcc->issued_flush);

		llist_for_each_entry_safe(cmd, next,
					  fcc->dispatch_list, llnode) {
			cmd->ret = ret;
			complete(&cmd->wait);
		}
		fcc->dispatch_list = NULL;
	}

	sb_end_intwrite(sbi->sb);

	wait_event_interruptible(*q,
		kthread_should_stop() || !llist_empty(&fcc->issue_list));
	goto repeat;
}

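/*
 * Flush merging summary: concurrent callers queue a flush_cmd on
 * fcc->issue_list, and the flush thread completes the whole batch with a
 * single preflush, so one cache flush can serve many fsync() callers.
 */
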
int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
	struct flush_cmd cmd;
	int ret;

	if (test_opt(sbi, NOBARRIER))
		return 0;

	if (!test_opt(sbi, FLUSH_MERGE)) {
		ret = submit_flush_wait(sbi, ino);
		atomic_inc(&fcc->issued_flush);
		return ret;
	}

	if (atomic_inc_return(&fcc->issing_flush) == 1 || sbi->s_ndevs > 1) {
		ret = submit_flush_wait(sbi, ino);
		atomic_dec(&fcc->issing_flush);

		atomic_inc(&fcc->issued_flush);
		return ret;
	}

	cmd.ino = ino;
	init_completion(&cmd.wait);

	llist_add(&cmd.llnode, &fcc->issue_list);

	/* update issue_list before we wake up the issue_flush thread */
	smp_mb();

	if (waitqueue_active(&fcc->flush_wait_queue))
		wake_up(&fcc->flush_wait_queue);

	if (fcc->f2fs_issue_flush) {
		wait_for_completion(&cmd.wait);
		atomic_dec(&fcc->issing_flush);
	} else {
		struct llist_node *list;

		list = llist_del_all(&fcc->issue_list);
		if (!list) {
			wait_for_completion(&cmd.wait);
			atomic_dec(&fcc->issing_flush);
		} else {
			struct flush_cmd *tmp, *next;

			ret = submit_flush_wait(sbi, ino);

			llist_for_each_entry_safe(tmp, next, list, llnode) {
				if (tmp == &cmd) {
					cmd.ret = ret;
					atomic_dec(&fcc->issing_flush);
					continue;
				}
				tmp->ret = ret;
				complete(&tmp->wait);
			}
		}
	}

	return cmd.ret;
}

int create_flush_cmd_control(struct f2fs_sb_info *sbi)
{
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	struct flush_cmd_control *fcc;
	int err = 0;

	if (SM_I(sbi)->fcc_info) {
		fcc = SM_I(sbi)->fcc_info;
		if (fcc->f2fs_issue_flush)
			return err;
		goto init_thread;
	}

	fcc = f2fs_kzalloc(sbi, sizeof(struct flush_cmd_control), GFP_KERNEL);
	if (!fcc)
		return -ENOMEM;
	atomic_set(&fcc->issued_flush, 0);
	atomic_set(&fcc->issing_flush, 0);
	init_waitqueue_head(&fcc->flush_wait_queue);
	init_llist_head(&fcc->issue_list);
	SM_I(sbi)->fcc_info = fcc;
	if (!test_opt(sbi, FLUSH_MERGE))
		return err;

init_thread:
	fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
				"f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(fcc->f2fs_issue_flush)) {
		err = PTR_ERR(fcc->f2fs_issue_flush);
		kfree(fcc);
		SM_I(sbi)->fcc_info = NULL;
		return err;
	}

	return err;
}

void destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free)
{
	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;

	if (fcc && fcc->f2fs_issue_flush) {
		struct task_struct *flush_thread = fcc->f2fs_issue_flush;

		fcc->f2fs_issue_flush = NULL;
		kthread_stop(flush_thread);
	}
	if (free) {
		kfree(fcc);
		SM_I(sbi)->fcc_info = NULL;
	}
}

int f2fs_flush_device_cache(struct f2fs_sb_info *sbi)
{
	int ret = 0, i;

	if (!sbi->s_ndevs)
		return 0;

	for (i = 1; i < sbi->s_ndevs; i++) {
		if (!f2fs_test_bit(i, (char *)&sbi->dirty_device))
			continue;
		ret = __submit_flush_wait(sbi, FDEV(i).bdev);
		if (ret)
			break;

		spin_lock(&sbi->dev_lock);
		f2fs_clear_bit(i, (char *)&sbi->dirty_device);
		spin_unlock(&sbi->dev_lock);
	}

	return ret;
}

static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
						enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	/* need not be added */
	if (IS_CURSEG(sbi, segno))
		return;

	if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]++;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		if (unlikely(t >= DIRTY)) {
			f2fs_bug_on(sbi, 1);
			return;
		}
		if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]++;
	}
}

static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
						enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]--;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]--;

		if (get_valid_blocks(sbi, segno, true) == 0)
			clear_bit(GET_SEC_FROM_SEG(sbi, segno),
						dirty_i->victim_secmap);
	}
}

/*
 * Errors such as -ENOMEM should not occur here.
 * Adding a dirty entry into the seglist is not a critical operation.
 * If a given segment is one of the current working segments, it won't be added.
 */
static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned short valid_blocks;

	if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
		return;

	mutex_lock(&dirty_i->seglist_lock);

	valid_blocks = get_valid_blocks(sbi, segno, false);

	if (valid_blocks == 0) {
		__locate_dirty_segment(sbi, segno, PRE);
		__remove_dirty_segment(sbi, segno, DIRTY);
	} else if (valid_blocks < sbi->blocks_per_seg) {
		__locate_dirty_segment(sbi, segno, DIRTY);
	} else {
		/* Recovery routine with SSR needs this */
		__remove_dirty_segment(sbi, segno, DIRTY);
	}

	mutex_unlock(&dirty_i->seglist_lock);
}

static struct discard_cmd *__create_discard_cmd(struct f2fs_sb_info *sbi,
		struct block_device *bdev, block_t lstart,
		block_t start, block_t len)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *pend_list;
	struct discard_cmd *dc;

	f2fs_bug_on(sbi, !len);

	pend_list = &dcc->pend_list[plist_idx(len)];

	dc = f2fs_kmem_cache_alloc(discard_cmd_slab, GFP_NOFS);
	INIT_LIST_HEAD(&dc->list);
	dc->bdev = bdev;
	dc->lstart = lstart;
	dc->start = start;
	dc->len = len;
	dc->ref = 0;
	dc->state = D_PREP;
	dc->error = 0;
	init_completion(&dc->wait);
	list_add_tail(&dc->list, pend_list);
	atomic_inc(&dcc->discard_cmd_cnt);
	dcc->undiscard_blks += len;

	return dc;
}

static struct discard_cmd *__attach_discard_cmd(struct f2fs_sb_info *sbi,
				struct block_device *bdev, block_t lstart,
				block_t start, block_t len,
				struct rb_node *parent, struct rb_node **p)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_cmd *dc;

	dc = __create_discard_cmd(sbi, bdev, lstart, start, len);

	rb_link_node(&dc->rb_node, parent, p);
	rb_insert_color(&dc->rb_node, &dcc->root);

	return dc;
}

static void __detach_discard_cmd(struct discard_cmd_control *dcc,
							struct discard_cmd *dc)
{
	if (dc->state == D_DONE)
		atomic_dec(&dcc->issing_discard);

	list_del(&dc->list);
	rb_erase(&dc->rb_node, &dcc->root);
	dcc->undiscard_blks -= dc->len;

	kmem_cache_free(discard_cmd_slab, dc);

	atomic_dec(&dcc->discard_cmd_cnt);
}

static void __remove_discard_cmd(struct f2fs_sb_info *sbi,
							struct discard_cmd *dc)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;

	trace_f2fs_remove_discard(dc->bdev, dc->start, dc->len);

	f2fs_bug_on(sbi, dc->ref);

	if (dc->error == -EOPNOTSUPP)
		dc->error = 0;

	if (dc->error)
		f2fs_msg(sbi->sb, KERN_INFO,
			"Issue discard(%u, %u, %u) failed, ret: %d",
			dc->lstart, dc->start, dc->len, dc->error);
	__detach_discard_cmd(dcc, dc);
}

static void f2fs_submit_discard_endio(struct bio *bio)
{
	struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private;

	dc->error = blk_status_to_errno(bio->bi_status);
	dc->state = D_DONE;
	complete_all(&dc->wait);
	bio_put(bio);
}

static void __check_sit_bitmap(struct f2fs_sb_info *sbi,
				block_t start, block_t end)
{
#ifdef CONFIG_F2FS_CHECK_FS
	struct seg_entry *sentry;
	unsigned int segno;
	block_t blk = start;
	unsigned long offset, size, max_blocks = sbi->blocks_per_seg;
	unsigned long *map;

	while (blk < end) {
		segno = GET_SEGNO(sbi, blk);
		sentry = get_seg_entry(sbi, segno);
		offset = GET_BLKOFF_FROM_SEG0(sbi, blk);

		if (end < START_BLOCK(sbi, segno + 1))
			size = GET_BLKOFF_FROM_SEG0(sbi, end);
		else
			size = max_blocks;
		map = (unsigned long *)(sentry->cur_valid_map);
		offset = __find_rev_next_bit(map, size, offset);
		f2fs_bug_on(sbi, offset != size);
		blk = START_BLOCK(sbi, segno + 1);
	}
#endif
}

/* this function is copied from blkdev_issue_discard from block/blk-lib.c */
static void __submit_discard_cmd(struct f2fs_sb_info *sbi,
						struct discard_policy *dpolicy,
						struct discard_cmd *dc)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ?
					&(dcc->fstrim_list) : &(dcc->wait_list);
	struct bio *bio = NULL;
	int flag = dpolicy->sync ? REQ_SYNC : 0;

	if (dc->state != D_PREP)
		return;

	trace_f2fs_issue_discard(dc->bdev, dc->start, dc->len);

	dc->error = __blkdev_issue_discard(dc->bdev,
				SECTOR_FROM_BLOCK(dc->start),
				SECTOR_FROM_BLOCK(dc->len),
				GFP_NOFS, 0, &bio);
	if (!dc->error) {
		/* should keep before submission to avoid D_DONE right away */
		dc->state = D_SUBMIT;
		atomic_inc(&dcc->issued_discard);
		atomic_inc(&dcc->issing_discard);
		if (bio) {
			bio->bi_private = dc;
			bio->bi_end_io = f2fs_submit_discard_endio;
			bio->bi_opf |= flag;
			submit_bio(bio);
			list_move_tail(&dc->list, wait_list);
			__check_sit_bitmap(sbi, dc->start, dc->start + dc->len);

			f2fs_update_iostat(sbi, FS_DISCARD, 1);
		}
	} else {
		__remove_discard_cmd(sbi, dc);
	}
}

static struct discard_cmd *__insert_discard_tree(struct f2fs_sb_info *sbi,
				struct block_device *bdev, block_t lstart,
				block_t start, block_t len,
				struct rb_node **insert_p,
				struct rb_node *insert_parent)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct discard_cmd *dc = NULL;

	if (insert_p && insert_parent) {
		parent = insert_parent;
		p = insert_p;
		goto do_insert;
	}

	p = __lookup_rb_tree_for_insert(sbi, &dcc->root, &parent, lstart);
do_insert:
	dc = __attach_discard_cmd(sbi, bdev, lstart, start, len, parent, p);
	if (!dc)
		return NULL;

	return dc;
}

static void __relocate_discard_cmd(struct discard_cmd_control *dcc,
						struct discard_cmd *dc)
{
	list_move_tail(&dc->list, &dcc->pend_list[plist_idx(dc->len)]);
}

static void __punch_discard_cmd(struct f2fs_sb_info *sbi,
				struct discard_cmd *dc, block_t blkaddr)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_info di = dc->di;
	bool modified = false;

	if (dc->state == D_DONE || dc->len == 1) {
		__remove_discard_cmd(sbi, dc);
		return;
	}

	dcc->undiscard_blks -= di.len;

	if (blkaddr > di.lstart) {
		dc->len = blkaddr - dc->lstart;
		dcc->undiscard_blks += dc->len;
		__relocate_discard_cmd(dcc, dc);
		modified = true;
	}

	if (blkaddr < di.lstart + di.len - 1) {
		if (modified) {
			__insert_discard_tree(sbi, dc->bdev, blkaddr + 1,
					di.start + blkaddr + 1 - di.lstart,
					di.lstart + di.len - 1 - blkaddr,
					NULL, NULL);
		} else {
			dc->lstart++;
			dc->len--;
			dc->start++;
			dcc->undiscard_blks += dc->len;
			__relocate_discard_cmd(dcc, dc);
		}
	}
}

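/*
 * Pending discards are kept in an rb-tree ordered by logical start block.
 * A new range is merged with any adjacent D_PREP command on the same
 * device, so neighbouring discards collapse into one larger command.
 */
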
static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
				struct block_device *bdev, block_t lstart,
				block_t start, block_t len)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
	struct discard_cmd *dc;
	struct discard_info di = {0};
	struct rb_node **insert_p = NULL, *insert_parent = NULL;
	block_t end = lstart + len;

	mutex_lock(&dcc->cmd_lock);

	dc = (struct discard_cmd *)__lookup_rb_tree_ret(&dcc->root,
					NULL, lstart,
					(struct rb_entry **)&prev_dc,
					(struct rb_entry **)&next_dc,
					&insert_p, &insert_parent, true);
	if (dc)
		prev_dc = dc;

	if (!prev_dc) {
		di.lstart = lstart;
		di.len = next_dc ? next_dc->lstart - lstart : len;
		di.len = min(di.len, len);
		di.start = start;
	}

	while (1) {
		struct rb_node *node;
		bool merged = false;
		struct discard_cmd *tdc = NULL;

		if (prev_dc) {
			di.lstart = prev_dc->lstart + prev_dc->len;
			if (di.lstart < lstart)
				di.lstart = lstart;
			if (di.lstart >= end)
				break;

			if (!next_dc || next_dc->lstart > end)
				di.len = end - di.lstart;
			else
				di.len = next_dc->lstart - di.lstart;
			di.start = start + di.lstart - lstart;
		}

		if (!di.len)
			goto next;

		if (prev_dc && prev_dc->state == D_PREP &&
			prev_dc->bdev == bdev &&
			__is_discard_back_mergeable(&di, &prev_dc->di)) {
			prev_dc->di.len += di.len;
			dcc->undiscard_blks += di.len;
			__relocate_discard_cmd(dcc, prev_dc);
			di = prev_dc->di;
			tdc = prev_dc;
			merged = true;
		}

		if (next_dc && next_dc->state == D_PREP &&
			next_dc->bdev == bdev &&
			__is_discard_front_mergeable(&di, &next_dc->di)) {
			next_dc->di.lstart = di.lstart;
			next_dc->di.len += di.len;
			next_dc->di.start = di.start;
			dcc->undiscard_blks += di.len;
			__relocate_discard_cmd(dcc, next_dc);
			if (tdc)
				__remove_discard_cmd(sbi, tdc);
			merged = true;
		}

		if (!merged) {
			__insert_discard_tree(sbi, bdev, di.lstart, di.start,
							di.len, NULL, NULL);
		}
 next:
		prev_dc = next_dc;
		if (!prev_dc)
			break;

		node = rb_next(&prev_dc->rb_node);
		next_dc = rb_entry_safe(node, struct discard_cmd, rb_node);
	}

	mutex_unlock(&dcc->cmd_lock);
}

1116
1117static int __queue_discard_cmd(struct f2fs_sb_info *sbi,
1118 struct block_device *bdev, block_t blkstart, block_t blklen)
1119{
1120 block_t lblkstart = blkstart;
1121
0243a5f9 1122 trace_f2fs_queue_discard(bdev, blkstart, blklen);
004b6862
CY
1123
1124 if (sbi->s_ndevs) {
1125 int devi = f2fs_target_device_index(sbi, blkstart);
1126
1127 blkstart -= FDEV(devi).start_blk;
1128 }
1129 __update_discard_tree_range(sbi, bdev, lblkstart, blkstart, blklen);
004b6862
CY
1130 return 0;
1131}
1132
static void __issue_discard_cmd_range(struct f2fs_sb_info *sbi,
					struct discard_policy *dpolicy,
					unsigned int start, unsigned int end)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
	struct rb_node **insert_p = NULL, *insert_parent = NULL;
	struct discard_cmd *dc;
	struct blk_plug plug;
	int issued;

next:
	issued = 0;

	mutex_lock(&dcc->cmd_lock);
	f2fs_bug_on(sbi, !__check_rb_tree_consistence(sbi, &dcc->root));

	dc = (struct discard_cmd *)__lookup_rb_tree_ret(&dcc->root,
					NULL, start,
					(struct rb_entry **)&prev_dc,
					(struct rb_entry **)&next_dc,
					&insert_p, &insert_parent, true);
	if (!dc)
		dc = next_dc;

	blk_start_plug(&plug);

	while (dc && dc->lstart <= end) {
		struct rb_node *node;

		if (dc->len < dpolicy->granularity)
			goto skip;

		if (dc->state != D_PREP) {
			list_move_tail(&dc->list, &dcc->fstrim_list);
			goto skip;
		}

		__submit_discard_cmd(sbi, dpolicy, dc);

		if (++issued >= dpolicy->max_requests) {
			start = dc->lstart + dc->len;

			blk_finish_plug(&plug);
			mutex_unlock(&dcc->cmd_lock);

			schedule();

			goto next;
		}
skip:
		node = rb_next(&dc->rb_node);
		dc = rb_entry_safe(node, struct discard_cmd, rb_node);

		if (fatal_signal_pending(current))
			break;
	}

	blk_finish_plug(&plug);
	mutex_unlock(&dcc->cmd_lock);
}

static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
					struct discard_policy *dpolicy)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *pend_list;
	struct discard_cmd *dc, *tmp;
	struct blk_plug plug;
	int i, iter = 0, issued = 0;
	bool io_interrupted = false;

	for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
		if (i + 1 < dpolicy->granularity)
			break;
		pend_list = &dcc->pend_list[i];

		mutex_lock(&dcc->cmd_lock);
		if (list_empty(pend_list))
			goto next;
		f2fs_bug_on(sbi, !__check_rb_tree_consistence(sbi, &dcc->root));
		blk_start_plug(&plug);
		list_for_each_entry_safe(dc, tmp, pend_list, list) {
			f2fs_bug_on(sbi, dc->state != D_PREP);

			if (dpolicy->io_aware && i < dpolicy->io_aware_gran &&
								!is_idle(sbi)) {
				io_interrupted = true;
				goto skip;
			}

			__submit_discard_cmd(sbi, dpolicy, dc);
			issued++;
skip:
			if (++iter >= dpolicy->max_requests)
				break;
		}
		blk_finish_plug(&plug);
next:
		mutex_unlock(&dcc->cmd_lock);

		if (iter >= dpolicy->max_requests)
			break;
	}

	if (!issued && io_interrupted)
		issued = -1;

	return issued;
}

static bool __drop_discard_cmd(struct f2fs_sb_info *sbi)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *pend_list;
	struct discard_cmd *dc, *tmp;
	int i;
	bool dropped = false;

	mutex_lock(&dcc->cmd_lock);
	for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
		pend_list = &dcc->pend_list[i];
		list_for_each_entry_safe(dc, tmp, pend_list, list) {
			f2fs_bug_on(sbi, dc->state != D_PREP);
			__remove_discard_cmd(sbi, dc);
			dropped = true;
		}
	}
	mutex_unlock(&dcc->cmd_lock);

	return dropped;
}

void drop_discard_cmd(struct f2fs_sb_info *sbi)
{
	__drop_discard_cmd(sbi);
}

static unsigned int __wait_one_discard_bio(struct f2fs_sb_info *sbi,
							struct discard_cmd *dc)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	unsigned int len = 0;

	wait_for_completion_io(&dc->wait);
	mutex_lock(&dcc->cmd_lock);
	f2fs_bug_on(sbi, dc->state != D_DONE);
	dc->ref--;
	if (!dc->ref) {
		if (!dc->error)
			len = dc->len;
		__remove_discard_cmd(sbi, dc);
	}
	mutex_unlock(&dcc->cmd_lock);

	return len;
}

static unsigned int __wait_discard_cmd_range(struct f2fs_sb_info *sbi,
						struct discard_policy *dpolicy,
						block_t start, block_t end)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ?
					&(dcc->fstrim_list) : &(dcc->wait_list);
	struct discard_cmd *dc, *tmp;
	bool need_wait;
	unsigned int trimmed = 0;

next:
	need_wait = false;

	mutex_lock(&dcc->cmd_lock);
	list_for_each_entry_safe(dc, tmp, wait_list, list) {
		if (dc->lstart + dc->len <= start || end <= dc->lstart)
			continue;
		if (dc->len < dpolicy->granularity)
			continue;
		if (dc->state == D_DONE && !dc->ref) {
			wait_for_completion_io(&dc->wait);
			if (!dc->error)
				trimmed += dc->len;
			__remove_discard_cmd(sbi, dc);
		} else {
			dc->ref++;
			need_wait = true;
			break;
		}
	}
	mutex_unlock(&dcc->cmd_lock);

	if (need_wait) {
		trimmed += __wait_one_discard_bio(sbi, dc);
		goto next;
	}

	return trimmed;
}

static void __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
						struct discard_policy *dpolicy)
{
	__wait_discard_cmd_range(sbi, dpolicy, 0, UINT_MAX);
}

/* This should be covered by the global mutex, &sit_i->sentry_lock */
static void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_cmd *dc;
	bool need_wait = false;

	mutex_lock(&dcc->cmd_lock);
	dc = (struct discard_cmd *)__lookup_rb_tree(&dcc->root, NULL, blkaddr);
	if (dc) {
		if (dc->state == D_PREP) {
			__punch_discard_cmd(sbi, dc, blkaddr);
		} else {
			dc->ref++;
			need_wait = true;
		}
	}
	mutex_unlock(&dcc->cmd_lock);

	if (need_wait)
		__wait_one_discard_bio(sbi, dc);
}

void stop_discard_thread(struct f2fs_sb_info *sbi)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;

	if (dcc && dcc->f2fs_issue_discard) {
		struct task_struct *discard_thread = dcc->f2fs_issue_discard;

		dcc->f2fs_issue_discard = NULL;
		kthread_stop(discard_thread);
	}
}

/* This comes from f2fs_put_super */
bool f2fs_wait_discard_bios(struct f2fs_sb_info *sbi)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_policy dpolicy;
	bool dropped;

	init_discard_policy(&dpolicy, DPOLICY_UMOUNT, dcc->discard_granularity);
	__issue_discard_cmd(sbi, &dpolicy);
	dropped = __drop_discard_cmd(sbi);
	__wait_all_discard_cmd(sbi, &dpolicy);

	return dropped;
}

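/*
 * Background discard thread: wakes up on a policy-driven interval (or when
 * dcc->discard_wake is set), issues pending commands, and switches to the
 * aggressive DPOLICY_FORCE policy while GC runs in urgent mode.
 */
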
static int issue_discard_thread(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	wait_queue_head_t *q = &dcc->discard_wait_queue;
	struct discard_policy dpolicy;
	unsigned int wait_ms = DEF_MIN_DISCARD_ISSUE_TIME;
	int issued;

	set_freezable();

	do {
		init_discard_policy(&dpolicy, DPOLICY_BG,
					dcc->discard_granularity);

		wait_event_interruptible_timeout(*q,
				kthread_should_stop() || freezing(current) ||
				dcc->discard_wake,
				msecs_to_jiffies(wait_ms));
		if (try_to_freeze())
			continue;
		if (f2fs_readonly(sbi->sb))
			continue;
		if (kthread_should_stop())
			return 0;

		if (dcc->discard_wake)
			dcc->discard_wake = 0;

		if (sbi->gc_thread && sbi->gc_thread->gc_urgent)
			init_discard_policy(&dpolicy, DPOLICY_FORCE, 1);

		sb_start_intwrite(sbi->sb);

		issued = __issue_discard_cmd(sbi, &dpolicy);
		if (issued) {
			__wait_all_discard_cmd(sbi, &dpolicy);
			wait_ms = dpolicy.min_interval;
		} else {
			wait_ms = dpolicy.max_interval;
		}

		sb_end_intwrite(sbi->sb);

	} while (!kthread_should_stop());
	return 0;
}

#ifdef CONFIG_BLK_DEV_ZONED
static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
		struct block_device *bdev, block_t blkstart, block_t blklen)
{
	sector_t sector, nr_sects;
	block_t lblkstart = blkstart;
	int devi = 0;

	if (sbi->s_ndevs) {
		devi = f2fs_target_device_index(sbi, blkstart);
		blkstart -= FDEV(devi).start_blk;
	}

	/*
	 * We need to know the type of the zone: for conventional zones,
	 * use regular discard if the drive supports it. For sequential
	 * zones, reset the zone write pointer.
	 */
	switch (get_blkz_type(sbi, bdev, blkstart)) {

	case BLK_ZONE_TYPE_CONVENTIONAL:
		if (!blk_queue_discard(bdev_get_queue(bdev)))
			return 0;
		return __queue_discard_cmd(sbi, bdev, lblkstart, blklen);
	case BLK_ZONE_TYPE_SEQWRITE_REQ:
	case BLK_ZONE_TYPE_SEQWRITE_PREF:
		sector = SECTOR_FROM_BLOCK(blkstart);
		nr_sects = SECTOR_FROM_BLOCK(blklen);

		if (sector & (bdev_zone_sectors(bdev) - 1) ||
				nr_sects != bdev_zone_sectors(bdev)) {
			f2fs_msg(sbi->sb, KERN_INFO,
				"(%d) %s: Unaligned discard attempted (block %x + %x)",
				devi, sbi->s_ndevs ? FDEV(devi).path : "",
				blkstart, blklen);
			return -EIO;
		}
		trace_f2fs_issue_reset_zone(bdev, blkstart);
		return blkdev_reset_zones(bdev, sector,
					  nr_sects, GFP_NOFS);
	default:
		/* Unknown zone type: broken device? */
		return -EIO;
	}
}
#endif

static int __issue_discard_async(struct f2fs_sb_info *sbi,
		struct block_device *bdev, block_t blkstart, block_t blklen)
{
#ifdef CONFIG_BLK_DEV_ZONED
	if (f2fs_sb_has_blkzoned(sbi->sb) &&
				bdev_zoned_model(bdev) != BLK_ZONED_NONE)
		return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen);
#endif
	return __queue_discard_cmd(sbi, bdev, blkstart, blklen);
}

static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
				block_t blkstart, block_t blklen)
{
	sector_t start = blkstart, len = 0;
	struct block_device *bdev;
	struct seg_entry *se;
	unsigned int offset;
	block_t i;
	int err = 0;

	bdev = f2fs_target_device(sbi, blkstart, NULL);

	for (i = blkstart; i < blkstart + blklen; i++, len++) {
		if (i != start) {
			struct block_device *bdev2 =
				f2fs_target_device(sbi, i, NULL);

			if (bdev2 != bdev) {
				err = __issue_discard_async(sbi, bdev,
						start, len);
				if (err)
					return err;
				bdev = bdev2;
				start = i;
				len = 0;
			}
		}

		se = get_seg_entry(sbi, GET_SEGNO(sbi, i));
		offset = GET_BLKOFF_FROM_SEG0(sbi, i);

		if (!f2fs_test_and_set_bit(offset, se->discard_map))
			sbi->discard_blks--;
	}

	if (len)
		err = __issue_discard_async(sbi, bdev, start, len);
	return err;
}

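/*
 * Discard candidates are blocks that were valid at the last checkpoint but
 * are invalid now: dmap = ckpt_valid_map & ~cur_valid_map, or
 * ~ckpt_valid_map & ~discard_map when forced by FITRIM.
 */
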
static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc,
							bool check_only)
{
	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
	int max_blocks = sbi->blocks_per_seg;
	struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start);
	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
	unsigned long *discard_map = (unsigned long *)se->discard_map;
	unsigned long *dmap = SIT_I(sbi)->tmp_map;
	unsigned int start = 0, end = -1;
	bool force = (cpc->reason & CP_DISCARD);
	struct discard_entry *de = NULL;
	struct list_head *head = &SM_I(sbi)->dcc_info->entry_list;
	int i;

	if (se->valid_blocks == max_blocks || !f2fs_discard_en(sbi))
		return false;

	if (!force) {
		if (!test_opt(sbi, DISCARD) || !se->valid_blocks ||
			SM_I(sbi)->dcc_info->nr_discards >=
				SM_I(sbi)->dcc_info->max_discards)
			return false;
	}

	/* SIT_VBLOCK_MAP_SIZE should be multiple of sizeof(unsigned long) */
	for (i = 0; i < entries; i++)
		dmap[i] = force ? ~ckpt_map[i] & ~discard_map[i] :
				(cur_map[i] ^ ckpt_map[i]) & ckpt_map[i];

	while (force || SM_I(sbi)->dcc_info->nr_discards <=
				SM_I(sbi)->dcc_info->max_discards) {
		start = __find_rev_next_bit(dmap, max_blocks, end + 1);
		if (start >= max_blocks)
			break;

		end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1);
		if (force && start && end != max_blocks
					&& (end - start) < cpc->trim_minlen)
			continue;

		if (check_only)
			return true;

		if (!de) {
			de = f2fs_kmem_cache_alloc(discard_entry_slab,
								GFP_F2FS_ZERO);
			de->start_blkaddr = START_BLOCK(sbi, cpc->trim_start);
			list_add_tail(&de->list, head);
		}

		for (i = start; i < end; i++)
			__set_bit_le(i, (void *)de->discard_map);

		SM_I(sbi)->dcc_info->nr_discards += end - start;
	}
	return false;
}

void release_discard_addrs(struct f2fs_sb_info *sbi)
{
	struct list_head *head = &(SM_I(sbi)->dcc_info->entry_list);
	struct discard_entry *entry, *this;

	/* drop caches */
	list_for_each_entry_safe(entry, this, head, list) {
		list_del(&entry->list);
		kmem_cache_free(discard_entry_slab, entry);
	}
}

/*
 * Should call clear_prefree_segments after checkpoint is done.
 */
static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int segno;

	mutex_lock(&dirty_i->seglist_lock);
	for_each_set_bit(segno, dirty_i->dirty_segmap[PRE], MAIN_SEGS(sbi))
		__set_test_and_free(sbi, segno);
	mutex_unlock(&dirty_i->seglist_lock);
}

void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *head = &dcc->entry_list;
	struct discard_entry *entry, *this;
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
	unsigned int start = 0, end = -1;
	unsigned int secno, start_segno;
	bool force = (cpc->reason & CP_DISCARD);

	mutex_lock(&dirty_i->seglist_lock);

	while (1) {
		int i;
		start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1);
		if (start >= MAIN_SEGS(sbi))
			break;
		end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi),
								start + 1);

		for (i = start; i < end; i++)
			clear_bit(i, prefree_map);

		dirty_i->nr_dirty[PRE] -= end - start;

		if (!test_opt(sbi, DISCARD))
			continue;

		if (force && start >= cpc->trim_start &&
					(end - 1) <= cpc->trim_end)
			continue;

		if (!test_opt(sbi, LFS) || sbi->segs_per_sec == 1) {
			f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
				(end - start) << sbi->log_blocks_per_seg);
			continue;
		}
next:
		secno = GET_SEC_FROM_SEG(sbi, start);
		start_segno = GET_SEG_FROM_SEC(sbi, secno);
		if (!IS_CURSEC(sbi, secno) &&
			!get_valid_blocks(sbi, start, true))
			f2fs_issue_discard(sbi, START_BLOCK(sbi, start_segno),
				sbi->segs_per_sec << sbi->log_blocks_per_seg);

		start = start_segno + sbi->segs_per_sec;
		if (start < end)
			goto next;
		else
			end = start - 1;
	}
	mutex_unlock(&dirty_i->seglist_lock);

	/* send small discards */
	list_for_each_entry_safe(entry, this, head, list) {
		unsigned int cur_pos = 0, next_pos, len, total_len = 0;
		bool is_valid = test_bit_le(0, entry->discard_map);

find_next:
		if (is_valid) {
			next_pos = find_next_zero_bit_le(entry->discard_map,
					sbi->blocks_per_seg, cur_pos);
			len = next_pos - cur_pos;

			if (f2fs_sb_has_blkzoned(sbi->sb) ||
			    (force && len < cpc->trim_minlen))
				goto skip;

			f2fs_issue_discard(sbi, entry->start_blkaddr + cur_pos,
									len);
			total_len += len;
		} else {
			next_pos = find_next_bit_le(entry->discard_map,
					sbi->blocks_per_seg, cur_pos);
		}
skip:
		cur_pos = next_pos;
		is_valid = !is_valid;

		if (cur_pos < sbi->blocks_per_seg)
			goto find_next;

		list_del(&entry->list);
		dcc->nr_discards -= total_len;
		kmem_cache_free(discard_entry_slab, entry);
	}

	wake_up_discard_thread(sbi, false);
}

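/*
 * Policy summary: DPOLICY_BG is the io-aware background default,
 * DPOLICY_FORCE issues aggressively (used for urgent GC), DPOLICY_FSTRIM
 * serves FITRIM requests, and DPOLICY_UMOUNT drains everything at unmount.
 */
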
void init_discard_policy(struct discard_policy *dpolicy,
				int discard_type, unsigned int granularity)
{
	/* common policy */
	dpolicy->type = discard_type;
	dpolicy->sync = true;
	dpolicy->granularity = granularity;

	dpolicy->max_requests = DEF_MAX_DISCARD_REQUEST;
	dpolicy->io_aware_gran = MAX_PLIST_NUM;

	if (discard_type == DPOLICY_BG) {
		dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME;
		dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME;
		dpolicy->io_aware = true;
	} else if (discard_type == DPOLICY_FORCE) {
		dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME;
		dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME;
		dpolicy->io_aware = false;
	} else if (discard_type == DPOLICY_FSTRIM) {
		dpolicy->io_aware = false;
	} else if (discard_type == DPOLICY_UMOUNT) {
		dpolicy->io_aware = false;
	}
}

static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
{
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	struct discard_cmd_control *dcc;
	int err = 0, i;

	if (SM_I(sbi)->dcc_info) {
		dcc = SM_I(sbi)->dcc_info;
		goto init_thread;
	}

	dcc = f2fs_kzalloc(sbi, sizeof(struct discard_cmd_control), GFP_KERNEL);
	if (!dcc)
		return -ENOMEM;

	dcc->discard_granularity = DEFAULT_DISCARD_GRANULARITY;
	INIT_LIST_HEAD(&dcc->entry_list);
	for (i = 0; i < MAX_PLIST_NUM; i++)
		INIT_LIST_HEAD(&dcc->pend_list[i]);
	INIT_LIST_HEAD(&dcc->wait_list);
	INIT_LIST_HEAD(&dcc->fstrim_list);
	mutex_init(&dcc->cmd_lock);
	atomic_set(&dcc->issued_discard, 0);
	atomic_set(&dcc->issing_discard, 0);
	atomic_set(&dcc->discard_cmd_cnt, 0);
	dcc->nr_discards = 0;
	dcc->max_discards = MAIN_SEGS(sbi) << sbi->log_blocks_per_seg;
	dcc->undiscard_blks = 0;
	dcc->root = RB_ROOT;

	init_waitqueue_head(&dcc->discard_wait_queue);
	SM_I(sbi)->dcc_info = dcc;
init_thread:
	dcc->f2fs_issue_discard = kthread_run(issue_discard_thread, sbi,
				"f2fs_discard-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(dcc->f2fs_issue_discard)) {
		err = PTR_ERR(dcc->f2fs_issue_discard);
		kfree(dcc);
		SM_I(sbi)->dcc_info = NULL;
		return err;
	}

	return err;
}

f099405f 1782static void destroy_discard_cmd_control(struct f2fs_sb_info *sbi)
0b54fb84
JK
1783{
1784 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1785
f099405f
CY
1786 if (!dcc)
1787 return;
1788
cce13252 1789 stop_discard_thread(sbi);
f099405f
CY
1790
1791 kfree(dcc);
1792 SM_I(sbi)->dcc_info = NULL;
0b54fb84
JK
1793}
1794
184a5cd2 1795static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
351df4b2
JK
1796{
1797 struct sit_info *sit_i = SIT_I(sbi);
184a5cd2
CY
1798
1799 if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap)) {
351df4b2 1800 sit_i->dirty_sentries++;
184a5cd2
CY
1801 return false;
1802 }
1803
1804 return true;
351df4b2
JK
1805}
1806
1807static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
1808 unsigned int segno, int modified)
1809{
1810 struct seg_entry *se = get_seg_entry(sbi, segno);
1811 se->type = type;
1812 if (modified)
1813 __mark_sit_entry_dirty(sbi, segno);
1814}
1815
1816static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
1817{
1818 struct seg_entry *se;
1819 unsigned int segno, offset;
1820 long int new_vblocks;
6415fedc
YS
1821 bool exist;
1822#ifdef CONFIG_F2FS_CHECK_FS
1823 bool mir_exist;
1824#endif
351df4b2
JK
1825
1826 segno = GET_SEGNO(sbi, blkaddr);
1827
1828 se = get_seg_entry(sbi, segno);
1829 new_vblocks = se->valid_blocks + del;
491c0854 1830 offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
351df4b2 1831
9850cf4a 1832 f2fs_bug_on(sbi, (new_vblocks >> (sizeof(unsigned short) << 3) ||
351df4b2
JK
1833 (new_vblocks > sbi->blocks_per_seg)));
1834
1835 se->valid_blocks = new_vblocks;
1836 se->mtime = get_mtime(sbi);
1837 SIT_I(sbi)->max_mtime = se->mtime;
1838
1839 /* Update valid block bitmap */
1840 if (del > 0) {
6415fedc 1841 exist = f2fs_test_and_set_bit(offset, se->cur_valid_map);
355e7891 1842#ifdef CONFIG_F2FS_CHECK_FS
6415fedc
YS
1843 mir_exist = f2fs_test_and_set_bit(offset,
1844 se->cur_valid_map_mir);
1845 if (unlikely(exist != mir_exist)) {
1846 f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent error "
1847 "when setting bitmap, blk:%u, old bit:%d",
1848 blkaddr, exist);
05796763 1849 f2fs_bug_on(sbi, 1);
6415fedc 1850 }
355e7891 1851#endif
6415fedc
YS
1852 if (unlikely(exist)) {
1853 f2fs_msg(sbi->sb, KERN_ERR,
1854 "Bitmap was wrongly set, blk:%u", blkaddr);
1855 f2fs_bug_on(sbi, 1);
35ee82ca
YS
1856 se->valid_blocks--;
1857 del = 0;
355e7891 1858 }
6415fedc 1859
3e025740
JK
1860 if (f2fs_discard_en(sbi) &&
1861 !f2fs_test_and_set_bit(offset, se->discard_map))
a66cdd98 1862 sbi->discard_blks--;
720037f9
JK
1863
 1864 /* don't let SSR overwrite node blocks, to keep the node chain */
5d7881ca 1865 if (IS_NODESEG(se->type)) {
720037f9
JK
1866 if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map))
1867 se->ckpt_valid_blocks++;
1868 }
351df4b2 1869 } else {
6415fedc 1870 exist = f2fs_test_and_clear_bit(offset, se->cur_valid_map);
355e7891 1871#ifdef CONFIG_F2FS_CHECK_FS
6415fedc
YS
1872 mir_exist = f2fs_test_and_clear_bit(offset,
1873 se->cur_valid_map_mir);
1874 if (unlikely(exist != mir_exist)) {
1875 f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent error "
1876 "when clearing bitmap, blk:%u, old bit:%d",
1877 blkaddr, exist);
05796763 1878 f2fs_bug_on(sbi, 1);
6415fedc 1879 }
355e7891 1880#endif
6415fedc
YS
1881 if (unlikely(!exist)) {
1882 f2fs_msg(sbi->sb, KERN_ERR,
1883 "Bitmap was wrongly cleared, blk:%u", blkaddr);
1884 f2fs_bug_on(sbi, 1);
35ee82ca
YS
1885 se->valid_blocks++;
1886 del = 0;
355e7891 1887 }
6415fedc 1888
3e025740
JK
1889 if (f2fs_discard_en(sbi) &&
1890 f2fs_test_and_clear_bit(offset, se->discard_map))
a66cdd98 1891 sbi->discard_blks++;
351df4b2
JK
1892 }
1893 if (!f2fs_test_bit(offset, se->ckpt_valid_map))
1894 se->ckpt_valid_blocks += del;
1895
1896 __mark_sit_entry_dirty(sbi, segno);
1897
1898 /* update total number of valid blocks to be written in ckpt area */
1899 SIT_I(sbi)->written_valid_blocks += del;
1900
1901 if (sbi->segs_per_sec > 1)
1902 get_sec_entry(sbi, segno)->valid_blocks += del;
1903}
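/*
 * Callers pass del = 1 when a block becomes valid and del = -1 when it
 * is invalidated; e.g. allocate_data_block() below does, under
 * sentry_lock:
 *
 *	update_sit_entry(sbi, *new_blkaddr, 1);
 *	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
 *		update_sit_entry(sbi, old_blkaddr, -1);
 *
 * so the per-segment counters and bitmaps stay consistent.
 */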
1904
351df4b2
JK
1905void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
1906{
1907 unsigned int segno = GET_SEGNO(sbi, addr);
1908 struct sit_info *sit_i = SIT_I(sbi);
1909
9850cf4a 1910 f2fs_bug_on(sbi, addr == NULL_ADDR);
351df4b2
JK
1911 if (addr == NEW_ADDR)
1912 return;
1913
1914 /* add it into sit main buffer */
3d26fa6b 1915 down_write(&sit_i->sentry_lock);
351df4b2
JK
1916
1917 update_sit_entry(sbi, addr, -1);
1918
1919 /* add it into dirty seglist */
1920 locate_dirty_segment(sbi, segno);
1921
3d26fa6b 1922 up_write(&sit_i->sentry_lock);
351df4b2
JK
1923}
1924
6e2c64ad
JK
1925bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr)
1926{
1927 struct sit_info *sit_i = SIT_I(sbi);
1928 unsigned int segno, offset;
1929 struct seg_entry *se;
1930 bool is_cp = false;
1931
1932 if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
1933 return true;
1934
3d26fa6b 1935 down_read(&sit_i->sentry_lock);
6e2c64ad
JK
1936
1937 segno = GET_SEGNO(sbi, blkaddr);
1938 se = get_seg_entry(sbi, segno);
1939 offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
1940
1941 if (f2fs_test_bit(offset, se->ckpt_valid_map))
1942 is_cp = true;
1943
3d26fa6b 1944 up_read(&sit_i->sentry_lock);
6e2c64ad
JK
1945
1946 return is_cp;
1947}
1948
0a8165d7 1949/*
351df4b2
JK
 1950 * This function must be called with the curseg_mutex held
1951 */
1952static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
e79efe3b 1953 struct f2fs_summary *sum)
351df4b2
JK
1954{
1955 struct curseg_info *curseg = CURSEG_I(sbi, type);
1956 void *addr = curseg->sum_blk;
e79efe3b 1957 addr += curseg->next_blkoff * sizeof(struct f2fs_summary);
351df4b2 1958 memcpy(addr, sum, sizeof(struct f2fs_summary));
351df4b2
JK
1959}
1960
0a8165d7 1961/*
351df4b2
JK
1962 * Calculate the number of current summary pages for writing
1963 */
3fa06d7b 1964int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
351df4b2 1965{
351df4b2 1966 int valid_sum_count = 0;
9a47938b 1967 int i, sum_in_page;
351df4b2
JK
1968
1969 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
1970 if (sbi->ckpt->alloc_type[i] == SSR)
1971 valid_sum_count += sbi->blocks_per_seg;
3fa06d7b
CY
1972 else {
1973 if (for_ra)
1974 valid_sum_count += le16_to_cpu(
1975 F2FS_CKPT(sbi)->cur_data_blkoff[i]);
1976 else
1977 valid_sum_count += curseg_blkoff(sbi, i);
1978 }
351df4b2
JK
1979 }
1980
09cbfeaf 1981 sum_in_page = (PAGE_SIZE - 2 * SUM_JOURNAL_SIZE -
9a47938b
FL
1982 SUM_FOOTER_SIZE) / SUMMARY_SIZE;
1983 if (valid_sum_count <= sum_in_page)
351df4b2 1984 return 1;
9a47938b 1985 else if ((valid_sum_count - sum_in_page) <=
09cbfeaf 1986 (PAGE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
351df4b2
JK
1987 return 2;
1988 return 3;
1989}
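/*
 * A rough worked example, assuming the usual on-disk constants
 * (PAGE_SIZE 4096, SUMMARY_SIZE 7, SUM_FOOTER_SIZE 5,
 * SUM_JOURNAL_SIZE 507): the first compacted page holds both journals
 * plus (4096 - 2 * 507 - 5) / 7 = 439 summaries, and each further page
 * holds (4096 - 5) / 7 = 584 more, which yields the 1/2/3 result above.
 */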
1990
0a8165d7 1991/*
351df4b2
JK
1992 * Caller should put this summary page
1993 */
1994struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
1995{
1996 return get_meta_page(sbi, GET_SUM_BLOCK(sbi, segno));
1997}
1998
381722d2 1999void update_meta_page(struct f2fs_sb_info *sbi, void *src, block_t blk_addr)
351df4b2
JK
2000{
2001 struct page *page = grab_meta_page(sbi, blk_addr);
381722d2 2002
0537b811 2003 memcpy(page_address(page), src, PAGE_SIZE);
351df4b2
JK
2004 set_page_dirty(page);
2005 f2fs_put_page(page, 1);
2006}
2007
381722d2
CY
2008static void write_sum_page(struct f2fs_sb_info *sbi,
2009 struct f2fs_summary_block *sum_blk, block_t blk_addr)
2010{
2011 update_meta_page(sbi, (void *)sum_blk, blk_addr);
2012}
2013
b7ad7512
CY
2014static void write_current_sum_page(struct f2fs_sb_info *sbi,
2015 int type, block_t blk_addr)
2016{
2017 struct curseg_info *curseg = CURSEG_I(sbi, type);
2018 struct page *page = grab_meta_page(sbi, blk_addr);
2019 struct f2fs_summary_block *src = curseg->sum_blk;
2020 struct f2fs_summary_block *dst;
2021
2022 dst = (struct f2fs_summary_block *)page_address(page);
2023
2024 mutex_lock(&curseg->curseg_mutex);
2025
2026 down_read(&curseg->journal_rwsem);
2027 memcpy(&dst->journal, curseg->journal, SUM_JOURNAL_SIZE);
2028 up_read(&curseg->journal_rwsem);
2029
2030 memcpy(dst->entries, src->entries, SUM_ENTRY_SIZE);
2031 memcpy(&dst->footer, &src->footer, SUM_FOOTER_SIZE);
2032
2033 mutex_unlock(&curseg->curseg_mutex);
2034
2035 set_page_dirty(page);
2036 f2fs_put_page(page, 1);
2037}
2038
a7881893
JK
2039static int is_next_segment_free(struct f2fs_sb_info *sbi, int type)
2040{
2041 struct curseg_info *curseg = CURSEG_I(sbi, type);
2042 unsigned int segno = curseg->segno + 1;
2043 struct free_segmap_info *free_i = FREE_I(sbi);
2044
2045 if (segno < MAIN_SEGS(sbi) && segno % sbi->segs_per_sec)
2046 return !test_bit(segno, free_i->free_segmap);
2047 return 0;
2048}
2049
0a8165d7 2050/*
351df4b2
JK
 2051 * Find a new segment in the free segment bitmap, in the requested order.
 2052 * This function must succeed; otherwise it BUGs.
2053 */
2054static void get_new_segment(struct f2fs_sb_info *sbi,
2055 unsigned int *newseg, bool new_sec, int dir)
2056{
2057 struct free_segmap_info *free_i = FREE_I(sbi);
351df4b2 2058 unsigned int segno, secno, zoneno;
7cd8558b 2059 unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone;
4ddb1a4d
JK
2060 unsigned int hint = GET_SEC_FROM_SEG(sbi, *newseg);
2061 unsigned int old_zoneno = GET_ZONE_FROM_SEG(sbi, *newseg);
351df4b2
JK
2062 unsigned int left_start = hint;
2063 bool init = true;
2064 int go_left = 0;
2065 int i;
2066
1a118ccf 2067 spin_lock(&free_i->segmap_lock);
351df4b2
JK
2068
2069 if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
2070 segno = find_next_zero_bit(free_i->free_segmap,
4ddb1a4d
JK
2071 GET_SEG_FROM_SEC(sbi, hint + 1), *newseg + 1);
2072 if (segno < GET_SEG_FROM_SEC(sbi, hint + 1))
351df4b2
JK
2073 goto got_it;
2074 }
2075find_other_zone:
7cd8558b
JK
2076 secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
2077 if (secno >= MAIN_SECS(sbi)) {
351df4b2
JK
2078 if (dir == ALLOC_RIGHT) {
2079 secno = find_next_zero_bit(free_i->free_secmap,
7cd8558b
JK
2080 MAIN_SECS(sbi), 0);
2081 f2fs_bug_on(sbi, secno >= MAIN_SECS(sbi));
351df4b2
JK
2082 } else {
2083 go_left = 1;
2084 left_start = hint - 1;
2085 }
2086 }
2087 if (go_left == 0)
2088 goto skip_left;
2089
2090 while (test_bit(left_start, free_i->free_secmap)) {
2091 if (left_start > 0) {
2092 left_start--;
2093 continue;
2094 }
2095 left_start = find_next_zero_bit(free_i->free_secmap,
7cd8558b
JK
2096 MAIN_SECS(sbi), 0);
2097 f2fs_bug_on(sbi, left_start >= MAIN_SECS(sbi));
351df4b2
JK
2098 break;
2099 }
2100 secno = left_start;
2101skip_left:
4ddb1a4d
JK
2102 segno = GET_SEG_FROM_SEC(sbi, secno);
2103 zoneno = GET_ZONE_FROM_SEC(sbi, secno);
351df4b2
JK
2104
2105 /* give up on finding another zone */
2106 if (!init)
2107 goto got_it;
2108 if (sbi->secs_per_zone == 1)
2109 goto got_it;
2110 if (zoneno == old_zoneno)
2111 goto got_it;
2112 if (dir == ALLOC_LEFT) {
2113 if (!go_left && zoneno + 1 >= total_zones)
2114 goto got_it;
2115 if (go_left && zoneno == 0)
2116 goto got_it;
2117 }
2118 for (i = 0; i < NR_CURSEG_TYPE; i++)
2119 if (CURSEG_I(sbi, i)->zone == zoneno)
2120 break;
2121
2122 if (i < NR_CURSEG_TYPE) {
 2123 /* zone is in use, try another */
2124 if (go_left)
2125 hint = zoneno * sbi->secs_per_zone - 1;
2126 else if (zoneno + 1 >= total_zones)
2127 hint = 0;
2128 else
2129 hint = (zoneno + 1) * sbi->secs_per_zone;
2130 init = false;
2131 goto find_other_zone;
2132 }
2133got_it:
2134 /* set it as dirty segment in free segmap */
9850cf4a 2135 f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap));
351df4b2
JK
2136 __set_inuse(sbi, segno);
2137 *newseg = segno;
1a118ccf 2138 spin_unlock(&free_i->segmap_lock);
351df4b2
JK
2139}
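/*
 * Layout reminder: segs_per_sec segments form a section and
 * secs_per_zone sections form a zone, so the search above scans
 * free_secmap at section granularity and then tries to avoid the zones
 * that already host an active curseg.
 */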
2140
2141static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
2142{
2143 struct curseg_info *curseg = CURSEG_I(sbi, type);
2144 struct summary_footer *sum_footer;
2145
2146 curseg->segno = curseg->next_segno;
4ddb1a4d 2147 curseg->zone = GET_ZONE_FROM_SEG(sbi, curseg->segno);
351df4b2
JK
2148 curseg->next_blkoff = 0;
2149 curseg->next_segno = NULL_SEGNO;
2150
2151 sum_footer = &(curseg->sum_blk->footer);
2152 memset(sum_footer, 0, sizeof(struct summary_footer));
2153 if (IS_DATASEG(type))
2154 SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
2155 if (IS_NODESEG(type))
2156 SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
2157 __set_sit_entry_type(sbi, type, curseg->segno, modified);
2158}
2159
7a20b8a6
JK
2160static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
2161{
a7881893
JK
 2162 /* if segs_per_sec is larger than 1, we need to keep the original policy. */
2163 if (sbi->segs_per_sec != 1)
2164 return CURSEG_I(sbi, type)->segno;
2165
b94929d9
YS
2166 if (test_opt(sbi, NOHEAP) &&
2167 (type == CURSEG_HOT_DATA || IS_NODESEG(type)))
7a20b8a6
JK
2168 return 0;
2169
e066b83c
JK
2170 if (SIT_I(sbi)->last_victim[ALLOC_NEXT])
2171 return SIT_I(sbi)->last_victim[ALLOC_NEXT];
07939627
JK
2172
2173 /* find segments from 0 to reuse freed segments */
2174 if (sbi->alloc_mode == ALLOC_MODE_REUSE)
2175 return 0;
2176
7a20b8a6
JK
2177 return CURSEG_I(sbi, type)->segno;
2178}
2179
0a8165d7 2180/*
351df4b2
JK
2181 * Allocate a current working segment.
2182 * This function always allocates a free segment in LFS manner.
2183 */
2184static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
2185{
2186 struct curseg_info *curseg = CURSEG_I(sbi, type);
2187 unsigned int segno = curseg->segno;
2188 int dir = ALLOC_LEFT;
2189
2190 write_sum_page(sbi, curseg->sum_blk,
81fb5e87 2191 GET_SUM_BLOCK(sbi, segno));
351df4b2
JK
2192 if (type == CURSEG_WARM_DATA || type == CURSEG_COLD_DATA)
2193 dir = ALLOC_RIGHT;
2194
2195 if (test_opt(sbi, NOHEAP))
2196 dir = ALLOC_RIGHT;
2197
7a20b8a6 2198 segno = __get_next_segno(sbi, type);
351df4b2
JK
2199 get_new_segment(sbi, &segno, new_sec, dir);
2200 curseg->next_segno = segno;
2201 reset_curseg(sbi, type, 1);
2202 curseg->alloc_type = LFS;
2203}
2204
2205static void __next_free_blkoff(struct f2fs_sb_info *sbi,
2206 struct curseg_info *seg, block_t start)
2207{
2208 struct seg_entry *se = get_seg_entry(sbi, seg->segno);
e81c93cf 2209 int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
60a3b782 2210 unsigned long *target_map = SIT_I(sbi)->tmp_map;
e81c93cf
CL
2211 unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
2212 unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
2213 int i, pos;
2214
2215 for (i = 0; i < entries; i++)
2216 target_map[i] = ckpt_map[i] | cur_map[i];
2217
2218 pos = __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, start);
2219
2220 seg->next_blkoff = pos;
351df4b2
JK
2221}
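/*
 * A minimal sketch of the bitmap math above (MSB-first, see
 * f2fs_set_bit):
 *
 *	cur_valid_map  = 1100....
 *	ckpt_valid_map = 1010....
 *	target_map     = 1110....
 *
 * so the first SSR-writable offset from start 0 is 3: a block must be
 * invalid in both the current and the checkpointed view to be reused.
 */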
2222
0a8165d7 2223/*
351df4b2
JK
 2224 * If a segment is written in LFS manner, the next block offset is simply
 2225 * the current block offset plus one. However, if a segment is written in
 2226 * SSR manner, the next block offset is obtained by calling __next_free_blkoff()
2227 */
2228static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
2229 struct curseg_info *seg)
2230{
2231 if (seg->alloc_type == SSR)
2232 __next_free_blkoff(sbi, seg, seg->next_blkoff + 1);
2233 else
2234 seg->next_blkoff++;
2235}
2236
0a8165d7 2237/*
e1c42045 2238 * This function always allocates a used segment (from the dirty seglist) in
351df4b2
JK
 2239 * SSR manner, so it must restore the existing valid-block information of the segment
2240 */
025d63a4 2241static void change_curseg(struct f2fs_sb_info *sbi, int type)
351df4b2
JK
2242{
2243 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2244 struct curseg_info *curseg = CURSEG_I(sbi, type);
2245 unsigned int new_segno = curseg->next_segno;
2246 struct f2fs_summary_block *sum_node;
2247 struct page *sum_page;
2248
2249 write_sum_page(sbi, curseg->sum_blk,
2250 GET_SUM_BLOCK(sbi, curseg->segno));
2251 __set_test_and_inuse(sbi, new_segno);
2252
2253 mutex_lock(&dirty_i->seglist_lock);
2254 __remove_dirty_segment(sbi, new_segno, PRE);
2255 __remove_dirty_segment(sbi, new_segno, DIRTY);
2256 mutex_unlock(&dirty_i->seglist_lock);
2257
2258 reset_curseg(sbi, type, 1);
2259 curseg->alloc_type = SSR;
2260 __next_free_blkoff(sbi, curseg, 0);
2261
025d63a4
CY
2262 sum_page = get_sum_page(sbi, new_segno);
2263 sum_node = (struct f2fs_summary_block *)page_address(sum_page);
2264 memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
2265 f2fs_put_page(sum_page, 1);
351df4b2
JK
2266}
2267
43727527
JK
2268static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
2269{
2270 struct curseg_info *curseg = CURSEG_I(sbi, type);
2271 const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;
e066b83c 2272 unsigned segno = NULL_SEGNO;
d27c3d89
CY
2273 int i, cnt;
2274 bool reversed = false;
c192f7a4
JK
2275
2276 /* need_SSR() already forces to do this */
e066b83c
JK
2277 if (v_ops->get_victim(sbi, &segno, BG_GC, type, SSR)) {
2278 curseg->next_segno = segno;
c192f7a4 2279 return 1;
e066b83c 2280 }
43727527 2281
70d625cb
JK
2282 /* For node segments, let's do SSR more intensively */
2283 if (IS_NODESEG(type)) {
d27c3d89
CY
2284 if (type >= CURSEG_WARM_NODE) {
2285 reversed = true;
2286 i = CURSEG_COLD_NODE;
2287 } else {
2288 i = CURSEG_HOT_NODE;
2289 }
2290 cnt = NR_CURSEG_NODE_TYPE;
70d625cb 2291 } else {
d27c3d89
CY
2292 if (type >= CURSEG_WARM_DATA) {
2293 reversed = true;
2294 i = CURSEG_COLD_DATA;
2295 } else {
2296 i = CURSEG_HOT_DATA;
2297 }
2298 cnt = NR_CURSEG_DATA_TYPE;
70d625cb 2299 }
43727527 2300
d27c3d89 2301 for (; cnt-- > 0; reversed ? i-- : i++) {
c192f7a4
JK
2302 if (i == type)
2303 continue;
e066b83c
JK
2304 if (v_ops->get_victim(sbi, &segno, BG_GC, i, SSR)) {
2305 curseg->next_segno = segno;
43727527 2306 return 1;
e066b83c 2307 }
c192f7a4 2308 }
43727527
JK
2309 return 0;
2310}
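/*
 * Retry order illustration: after the same-type attempt above,
 * CURSEG_HOT_DATA falls back to WARM_DATA then COLD_DATA, while
 * CURSEG_WARM_NODE (reversed) falls back to COLD_NODE then HOT_NODE.
 */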
2311
351df4b2
JK
2312/*
 2313 * Flush out the current segment and replace it with a new one.
 2314 * This function must succeed; otherwise it BUGs.
2315 */
2316static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
2317 int type, bool force)
2318{
a7881893
JK
2319 struct curseg_info *curseg = CURSEG_I(sbi, type);
2320
7b405275 2321 if (force)
351df4b2 2322 new_curseg(sbi, type, true);
5b6c6be2
JK
2323 else if (!is_set_ckpt_flags(sbi, CP_CRC_RECOVERY_FLAG) &&
2324 type == CURSEG_WARM_NODE)
351df4b2 2325 new_curseg(sbi, type, false);
a7881893
JK
2326 else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type))
2327 new_curseg(sbi, type, false);
351df4b2 2328 else if (need_SSR(sbi) && get_ssr_segment(sbi, type))
025d63a4 2329 change_curseg(sbi, type);
351df4b2
JK
2330 else
2331 new_curseg(sbi, type, false);
dcdfff65 2332
a7881893 2333 stat_inc_seg_type(sbi, curseg);
351df4b2
JK
2334}
2335
2336void allocate_new_segments(struct f2fs_sb_info *sbi)
2337{
6ae1be13
JK
2338 struct curseg_info *curseg;
2339 unsigned int old_segno;
351df4b2
JK
2340 int i;
2341
3d26fa6b
CY
2342 down_write(&SIT_I(sbi)->sentry_lock);
2343
6ae1be13
JK
2344 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
2345 curseg = CURSEG_I(sbi, i);
2346 old_segno = curseg->segno;
2347 SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true);
2348 locate_dirty_segment(sbi, old_segno);
2349 }
3d26fa6b
CY
2350
2351 up_write(&SIT_I(sbi)->sentry_lock);
351df4b2
JK
2352}
2353
2354static const struct segment_allocation default_salloc_ops = {
2355 .allocate_segment = allocate_segment_by_default,
2356};
2357
25290fa5
JK
2358bool exist_trim_candidates(struct f2fs_sb_info *sbi, struct cp_control *cpc)
2359{
2360 __u64 trim_start = cpc->trim_start;
2361 bool has_candidate = false;
2362
3d26fa6b 2363 down_write(&SIT_I(sbi)->sentry_lock);
25290fa5
JK
2364 for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++) {
2365 if (add_discard_addrs(sbi, cpc, true)) {
2366 has_candidate = true;
2367 break;
2368 }
2369 }
3d26fa6b 2370 up_write(&SIT_I(sbi)->sentry_lock);
25290fa5
JK
2371
2372 cpc->trim_start = trim_start;
2373 return has_candidate;
2374}
2375
4b2fecc8
JK
2376int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
2377{
f7ef9b83
JK
2378 __u64 start = F2FS_BYTES_TO_BLK(range->start);
2379 __u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1;
8412663d
CY
2380 unsigned int start_segno, end_segno, cur_segno;
2381 block_t start_block, end_block;
4b2fecc8 2382 struct cp_control cpc;
78997b56 2383 struct discard_policy dpolicy;
0ea80512 2384 unsigned long long trimmed = 0;
c34f42e2 2385 int err = 0;
4b2fecc8 2386
836b5a63 2387 if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize)
4b2fecc8
JK
2388 return -EINVAL;
2389
7cd8558b 2390 if (end <= MAIN_BLKADDR(sbi))
4b2fecc8
JK
2391 goto out;
2392
ed214a11
YH
2393 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
2394 f2fs_msg(sbi->sb, KERN_WARNING,
2395 "Found FS corruption, run fsck to fix.");
2396 goto out;
2397 }
2398
4b2fecc8 2399 /* start/end segment number in main_area */
7cd8558b
JK
2400 start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start);
2401 end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
2402 GET_SEGNO(sbi, end);
8412663d 2403
4b2fecc8 2404 cpc.reason = CP_DISCARD;
836b5a63 2405 cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen));
4b2fecc8
JK
2406
2407 /* do checkpoint to issue discard commands safely */
8412663d
CY
2408 for (cur_segno = start_segno; cur_segno <= end_segno;
2409 cur_segno = cpc.trim_end + 1) {
2410 cpc.trim_start = cur_segno;
a66cdd98
JK
2411
2412 if (sbi->discard_blks == 0)
2413 break;
2414 else if (sbi->discard_blks < BATCHED_TRIM_BLOCKS(sbi))
2415 cpc.trim_end = end_segno;
2416 else
2417 cpc.trim_end = min_t(unsigned int,
8412663d 2418 rounddown(cur_segno +
bba681cb
JK
2419 BATCHED_TRIM_SEGMENTS(sbi),
2420 sbi->segs_per_sec) - 1, end_segno);
2421
2422 mutex_lock(&sbi->gc_mutex);
c34f42e2 2423 err = write_checkpoint(sbi, &cpc);
bba681cb 2424 mutex_unlock(&sbi->gc_mutex);
e9328353
CY
2425 if (err)
2426 break;
74fa5f3d
CY
2427
2428 schedule();
bba681cb 2429 }
8412663d
CY
2430
2431 start_block = START_BLOCK(sbi, start_segno);
2432 end_block = START_BLOCK(sbi, min(cur_segno, end_segno) + 1);
2433
78997b56
CY
2434 init_discard_policy(&dpolicy, DPOLICY_FSTRIM, cpc.trim_minlen);
2435 __issue_discard_cmd_range(sbi, &dpolicy, start_block, end_block);
0ea80512
CY
2436 trimmed = __wait_discard_cmd_range(sbi, &dpolicy,
2437 start_block, end_block);
4b2fecc8 2438out:
0ea80512 2439 range->len = F2FS_BLK_TO_BYTES(trimmed);
c34f42e2 2440 return err;
4b2fecc8
JK
2441}
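/*
 * Caller sketch (illustrative, byte-based as in the FITRIM ioctl):
 *
 *	struct fstrim_range range = {
 *		.start = 0, .len = ULLONG_MAX, .minlen = 4096,
 *	};
 *	err = f2fs_trim_fs(sbi, &range);
 *
 * On return, range.len is rewritten to the number of bytes trimmed.
 */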
2442
351df4b2
JK
2443static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type)
2444{
2445 struct curseg_info *curseg = CURSEG_I(sbi, type);
2446 if (curseg->next_blkoff < sbi->blocks_per_seg)
2447 return true;
2448 return false;
2449}
2450
4f0a03d3
HL
2451int rw_hint_to_seg_type(enum rw_hint hint)
2452{
2453 switch (hint) {
2454 case WRITE_LIFE_SHORT:
2455 return CURSEG_HOT_DATA;
2456 case WRITE_LIFE_EXTREME:
2457 return CURSEG_COLD_DATA;
2458 default:
2459 return CURSEG_WARM_DATA;
2460 }
2461}
2462
0cdd3195
HL
 2463/* This returns write hints for each segment type. These hints will be
 2464 * passed down to the block layer. The mapping tables below depend on
2465 * the mount option 'whint_mode'.
2466 *
2467 * 1) whint_mode=off. F2FS only passes down WRITE_LIFE_NOT_SET.
2468 *
2469 * 2) whint_mode=user-based. F2FS tries to pass down hints given by users.
2470 *
2471 * User F2FS Block
2472 * ---- ---- -----
2473 * META WRITE_LIFE_NOT_SET
2474 * HOT_NODE "
2475 * WARM_NODE "
2476 * COLD_NODE "
2477 * ioctl(COLD) COLD_DATA WRITE_LIFE_EXTREME
2478 * extension list " "
2479 *
2480 * -- buffered io
2481 * WRITE_LIFE_EXTREME COLD_DATA WRITE_LIFE_EXTREME
2482 * WRITE_LIFE_SHORT HOT_DATA WRITE_LIFE_SHORT
2483 * WRITE_LIFE_NOT_SET WARM_DATA WRITE_LIFE_NOT_SET
2484 * WRITE_LIFE_NONE " "
2485 * WRITE_LIFE_MEDIUM " "
2486 * WRITE_LIFE_LONG " "
2487 *
2488 * -- direct io
2489 * WRITE_LIFE_EXTREME COLD_DATA WRITE_LIFE_EXTREME
2490 * WRITE_LIFE_SHORT HOT_DATA WRITE_LIFE_SHORT
2491 * WRITE_LIFE_NOT_SET WARM_DATA WRITE_LIFE_NOT_SET
2492 * WRITE_LIFE_NONE " WRITE_LIFE_NONE
2493 * WRITE_LIFE_MEDIUM " WRITE_LIFE_MEDIUM
2494 * WRITE_LIFE_LONG " WRITE_LIFE_LONG
2495 *
f2e703f9
HL
2496 * 3) whint_mode=fs-based. F2FS passes down hints with its policy.
2497 *
2498 * User F2FS Block
2499 * ---- ---- -----
2500 * META WRITE_LIFE_MEDIUM;
2501 * HOT_NODE WRITE_LIFE_NOT_SET
2502 * WARM_NODE "
2503 * COLD_NODE WRITE_LIFE_NONE
2504 * ioctl(COLD) COLD_DATA WRITE_LIFE_EXTREME
2505 * extension list " "
2506 *
2507 * -- buffered io
2508 * WRITE_LIFE_EXTREME COLD_DATA WRITE_LIFE_EXTREME
2509 * WRITE_LIFE_SHORT HOT_DATA WRITE_LIFE_SHORT
2510 * WRITE_LIFE_NOT_SET WARM_DATA WRITE_LIFE_LONG
2511 * WRITE_LIFE_NONE " "
2512 * WRITE_LIFE_MEDIUM " "
2513 * WRITE_LIFE_LONG " "
2514 *
2515 * -- direct io
2516 * WRITE_LIFE_EXTREME COLD_DATA WRITE_LIFE_EXTREME
2517 * WRITE_LIFE_SHORT HOT_DATA WRITE_LIFE_SHORT
2518 * WRITE_LIFE_NOT_SET WARM_DATA WRITE_LIFE_NOT_SET
2519 * WRITE_LIFE_NONE " WRITE_LIFE_NONE
2520 * WRITE_LIFE_MEDIUM " WRITE_LIFE_MEDIUM
2521 * WRITE_LIFE_LONG " WRITE_LIFE_LONG
0cdd3195
HL
2522 */
2523
2524enum rw_hint io_type_to_rw_hint(struct f2fs_sb_info *sbi,
2525 enum page_type type, enum temp_type temp)
2526{
2527 if (sbi->whint_mode == WHINT_MODE_USER) {
2528 if (type == DATA) {
f2e703f9 2529 if (temp == WARM)
0cdd3195 2530 return WRITE_LIFE_NOT_SET;
f2e703f9
HL
2531 else if (temp == HOT)
2532 return WRITE_LIFE_SHORT;
2533 else if (temp == COLD)
2534 return WRITE_LIFE_EXTREME;
0cdd3195
HL
2535 } else {
2536 return WRITE_LIFE_NOT_SET;
2537 }
f2e703f9
HL
2538 } else if (sbi->whint_mode == WHINT_MODE_FS) {
2539 if (type == DATA) {
2540 if (temp == WARM)
2541 return WRITE_LIFE_LONG;
2542 else if (temp == HOT)
2543 return WRITE_LIFE_SHORT;
2544 else if (temp == COLD)
2545 return WRITE_LIFE_EXTREME;
2546 } else if (type == NODE) {
2547 if (temp == WARM || temp == HOT)
2548 return WRITE_LIFE_NOT_SET;
2549 else if (temp == COLD)
2550 return WRITE_LIFE_NONE;
2551 } else if (type == META) {
2552 return WRITE_LIFE_MEDIUM;
2553 }
0cdd3195 2554 }
f2e703f9 2555 return WRITE_LIFE_NOT_SET;
0cdd3195
HL
2556}
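/*
 * Quick example of the tables above: buffered hot data maps to
 *
 *	io_type_to_rw_hint(sbi, DATA, HOT) == WRITE_LIFE_SHORT
 *
 * in both user-based and fs-based modes, while WARM data returns
 * WRITE_LIFE_NOT_SET or WRITE_LIFE_LONG depending on whint_mode.
 */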
2557
81377bd6 2558static int __get_segment_type_2(struct f2fs_io_info *fio)
351df4b2 2559{
81377bd6 2560 if (fio->type == DATA)
351df4b2
JK
2561 return CURSEG_HOT_DATA;
2562 else
2563 return CURSEG_HOT_NODE;
2564}
2565
81377bd6 2566static int __get_segment_type_4(struct f2fs_io_info *fio)
351df4b2 2567{
81377bd6
JK
2568 if (fio->type == DATA) {
2569 struct inode *inode = fio->page->mapping->host;
351df4b2
JK
2570
2571 if (S_ISDIR(inode->i_mode))
2572 return CURSEG_HOT_DATA;
2573 else
2574 return CURSEG_COLD_DATA;
2575 } else {
81377bd6 2576 if (IS_DNODE(fio->page) && is_cold_node(fio->page))
a344b9fd 2577 return CURSEG_WARM_NODE;
351df4b2
JK
2578 else
2579 return CURSEG_COLD_NODE;
2580 }
2581}
2582
81377bd6 2583static int __get_segment_type_6(struct f2fs_io_info *fio)
351df4b2 2584{
81377bd6
JK
2585 if (fio->type == DATA) {
2586 struct inode *inode = fio->page->mapping->host;
351df4b2 2587
81377bd6 2588 if (is_cold_data(fio->page) || file_is_cold(inode))
351df4b2 2589 return CURSEG_COLD_DATA;
b6a06cbb
CY
2590 if (file_is_hot(inode) ||
2591 is_inode_flag_set(inode, FI_HOT_DATA))
ef095d19 2592 return CURSEG_HOT_DATA;
4f0a03d3 2593 return rw_hint_to_seg_type(inode->i_write_hint);
351df4b2 2594 } else {
81377bd6
JK
2595 if (IS_DNODE(fio->page))
2596 return is_cold_node(fio->page) ? CURSEG_WARM_NODE :
351df4b2 2597 CURSEG_HOT_NODE;
ef095d19 2598 return CURSEG_COLD_NODE;
351df4b2
JK
2599 }
2600}
2601
81377bd6 2602static int __get_segment_type(struct f2fs_io_info *fio)
351df4b2 2603{
a912b54d
JK
2604 int type = 0;
2605
81377bd6 2606 switch (fio->sbi->active_logs) {
351df4b2 2607 case 2:
a912b54d
JK
2608 type = __get_segment_type_2(fio);
2609 break;
351df4b2 2610 case 4:
a912b54d
JK
2611 type = __get_segment_type_4(fio);
2612 break;
2613 case 6:
2614 type = __get_segment_type_6(fio);
2615 break;
2616 default:
2617 f2fs_bug_on(fio->sbi, true);
351df4b2 2618 }
81377bd6 2619
a912b54d
JK
2620 if (IS_HOT(type))
2621 fio->temp = HOT;
2622 else if (IS_WARM(type))
2623 fio->temp = WARM;
2624 else
2625 fio->temp = COLD;
2626 return type;
351df4b2
JK
2627}
2628
bfad7c2d
JK
2629void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
2630 block_t old_blkaddr, block_t *new_blkaddr,
fb830fc5
CY
2631 struct f2fs_summary *sum, int type,
2632 struct f2fs_io_info *fio, bool add_list)
351df4b2
JK
2633{
2634 struct sit_info *sit_i = SIT_I(sbi);
6ae1be13 2635 struct curseg_info *curseg = CURSEG_I(sbi, type);
351df4b2 2636
2b60311d
CY
2637 down_read(&SM_I(sbi)->curseg_lock);
2638
351df4b2 2639 mutex_lock(&curseg->curseg_mutex);
3d26fa6b 2640 down_write(&sit_i->sentry_lock);
351df4b2
JK
2641
2642 *new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
351df4b2 2643
4e6a8d9b
JK
2644 f2fs_wait_discard_bio(sbi, *new_blkaddr);
2645
351df4b2
JK
2646 /*
 2647 * __add_sum_entry must be called with the curseg_mutex held,
 2648 * because this function updates a summary entry in the
2649 * current summary block.
2650 */
e79efe3b 2651 __add_sum_entry(sbi, type, sum);
351df4b2 2652
351df4b2 2653 __refresh_next_blkoff(sbi, curseg);
dcdfff65
JK
2654
2655 stat_inc_block_count(sbi, curseg);
351df4b2 2656
65f1b80b
YS
2657 /*
2658 * SIT information should be updated before segment allocation,
2659 * since SSR needs latest valid block information.
2660 */
2661 update_sit_entry(sbi, *new_blkaddr, 1);
2662 if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
2663 update_sit_entry(sbi, old_blkaddr, -1);
2664
c6f82fe9
JK
2665 if (!__has_curseg_space(sbi, type))
2666 sit_i->s_ops->allocate_segment(sbi, type, false);
65f1b80b 2667
351df4b2 2668 /*
65f1b80b
YS
2669 * segment dirty status should be updated after segment allocation,
 2670 * so we only need to update the status once, after the previous
 2671 * segment has been closed.
351df4b2 2672 */
65f1b80b
YS
2673 locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
2674 locate_dirty_segment(sbi, GET_SEGNO(sbi, *new_blkaddr));
5e443818 2675
3d26fa6b 2676 up_write(&sit_i->sentry_lock);
351df4b2 2677
704956ec 2678 if (page && IS_NODESEG(type)) {
351df4b2
JK
2679 fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));
2680
704956ec
CY
2681 f2fs_inode_chksum_set(sbi, page);
2682 }
2683
fb830fc5
CY
2684 if (add_list) {
2685 struct f2fs_bio_info *io;
2686
2687 INIT_LIST_HEAD(&fio->list);
2688 fio->in_list = true;
2689 io = sbi->write_io[fio->type] + fio->temp;
2690 spin_lock(&io->io_lock);
2691 list_add_tail(&fio->list, &io->io_list);
2692 spin_unlock(&io->io_lock);
2693 }
2694
bfad7c2d 2695 mutex_unlock(&curseg->curseg_mutex);
2b60311d
CY
2696
2697 up_read(&SM_I(sbi)->curseg_lock);
bfad7c2d
JK
2698}
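/*
 * Lock ordering taken above: SM_I(sbi)->curseg_lock (read) ->
 * curseg->curseg_mutex -> sit_i->sentry_lock (write); the block
 * replacement path below takes curseg_lock for write instead.
 */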
2699
39d787be
CY
2700static void update_device_state(struct f2fs_io_info *fio)
2701{
2702 struct f2fs_sb_info *sbi = fio->sbi;
2703 unsigned int devidx;
2704
2705 if (!sbi->s_ndevs)
2706 return;
2707
2708 devidx = f2fs_target_device_index(sbi, fio->new_blkaddr);
2709
2710 /* update device state for fsync */
2711 set_dirty_device(sbi, fio->ino, devidx, FLUSH_INO);
1228b482
CY
2712
2713 /* update device state for checkpoint */
2714 if (!f2fs_test_bit(devidx, (char *)&sbi->dirty_device)) {
2715 spin_lock(&sbi->dev_lock);
2716 f2fs_set_bit(devidx, (char *)&sbi->dirty_device);
2717 spin_unlock(&sbi->dev_lock);
2718 }
39d787be
CY
2719}
2720
05ca3632 2721static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
bfad7c2d 2722{
81377bd6 2723 int type = __get_segment_type(fio);
0a595eba 2724 int err;
bfad7c2d 2725
0a595eba 2726reallocate:
7a9d7548 2727 allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
fb830fc5 2728 &fio->new_blkaddr, sum, type, fio, true);
bfad7c2d 2729
351df4b2 2730 /* writeout dirty page into bdev */
b9109b0e 2731 err = f2fs_submit_page_write(fio);
0a595eba
JK
2732 if (err == -EAGAIN) {
2733 fio->old_blkaddr = fio->new_blkaddr;
2734 goto reallocate;
39d787be
CY
2735 } else if (!err) {
2736 update_device_state(fio);
0a595eba 2737 }
351df4b2
JK
2738}
2739
b0af6d49
CY
2740void write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
2741 enum iostat_type io_type)
351df4b2 2742{
458e6197 2743 struct f2fs_io_info fio = {
05ca3632 2744 .sbi = sbi,
458e6197 2745 .type = META,
0cdd3195 2746 .temp = HOT,
04d328de 2747 .op = REQ_OP_WRITE,
70fd7614 2748 .op_flags = REQ_SYNC | REQ_META | REQ_PRIO,
7a9d7548
CY
2749 .old_blkaddr = page->index,
2750 .new_blkaddr = page->index,
05ca3632 2751 .page = page,
4375a336 2752 .encrypted_page = NULL,
fb830fc5 2753 .in_list = false,
458e6197
JK
2754 };
2755
2b947003 2756 if (unlikely(page->index >= MAIN_BLKADDR(sbi)))
04d328de 2757 fio.op_flags &= ~REQ_META;
2b947003 2758
351df4b2 2759 set_page_writeback(page);
b9109b0e 2760 f2fs_submit_page_write(&fio);
b0af6d49
CY
2761
2762 f2fs_update_iostat(sbi, io_type, F2FS_BLKSIZE);
351df4b2
JK
2763}
2764
05ca3632 2765void write_node_page(unsigned int nid, struct f2fs_io_info *fio)
351df4b2
JK
2766{
2767 struct f2fs_summary sum;
05ca3632 2768
351df4b2 2769 set_summary(&sum, nid, 0, 0);
05ca3632 2770 do_write_page(&sum, fio);
b0af6d49
CY
2771
2772 f2fs_update_iostat(fio->sbi, fio->io_type, F2FS_BLKSIZE);
351df4b2
JK
2773}
2774
05ca3632 2775void write_data_page(struct dnode_of_data *dn, struct f2fs_io_info *fio)
351df4b2 2776{
05ca3632 2777 struct f2fs_sb_info *sbi = fio->sbi;
351df4b2
JK
2778 struct f2fs_summary sum;
2779 struct node_info ni;
2780
9850cf4a 2781 f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR);
351df4b2
JK
2782 get_node_info(sbi, dn->nid, &ni);
2783 set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
05ca3632 2784 do_write_page(&sum, fio);
f28b3434 2785 f2fs_update_data_blkaddr(dn, fio->new_blkaddr);
b0af6d49
CY
2786
2787 f2fs_update_iostat(sbi, fio->io_type, F2FS_BLKSIZE);
351df4b2
JK
2788}
2789
d1b3e72d 2790int rewrite_data_page(struct f2fs_io_info *fio)
351df4b2 2791{
b0af6d49
CY
2792 int err;
2793
7a9d7548 2794 fio->new_blkaddr = fio->old_blkaddr;
0cdd3195
HL
2795 /* i/o temperature is needed for passing down write hints */
2796 __get_segment_type(fio);
05ca3632 2797 stat_inc_inplace_blocks(fio->sbi);
b0af6d49
CY
2798
2799 err = f2fs_submit_page_bio(fio);
39d787be
CY
2800 if (!err)
2801 update_device_state(fio);
b0af6d49
CY
2802
2803 f2fs_update_iostat(fio->sbi, fio->io_type, F2FS_BLKSIZE);
2804
2805 return err;
351df4b2
JK
2806}
2807
2b60311d
CY
2808static inline int __f2fs_get_curseg(struct f2fs_sb_info *sbi,
2809 unsigned int segno)
2810{
2811 int i;
2812
2813 for (i = CURSEG_HOT_DATA; i < NO_CHECK_TYPE; i++) {
2814 if (CURSEG_I(sbi, i)->segno == segno)
2815 break;
2816 }
2817 return i;
2818}
2819
4356e48e 2820void __f2fs_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
19f106bc 2821 block_t old_blkaddr, block_t new_blkaddr,
28bc106b 2822 bool recover_curseg, bool recover_newaddr)
351df4b2
JK
2823{
2824 struct sit_info *sit_i = SIT_I(sbi);
2825 struct curseg_info *curseg;
2826 unsigned int segno, old_cursegno;
2827 struct seg_entry *se;
2828 int type;
19f106bc 2829 unsigned short old_blkoff;
351df4b2
JK
2830
2831 segno = GET_SEGNO(sbi, new_blkaddr);
2832 se = get_seg_entry(sbi, segno);
2833 type = se->type;
2834
2b60311d
CY
2835 down_write(&SM_I(sbi)->curseg_lock);
2836
19f106bc
CY
2837 if (!recover_curseg) {
2838 /* for recovery flow */
2839 if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
2840 if (old_blkaddr == NULL_ADDR)
2841 type = CURSEG_COLD_DATA;
2842 else
2843 type = CURSEG_WARM_DATA;
2844 }
2845 } else {
2b60311d
CY
2846 if (IS_CURSEG(sbi, segno)) {
2847 /* se->type is volatile as SSR allocation */
2848 type = __f2fs_get_curseg(sbi, segno);
2849 f2fs_bug_on(sbi, type == NO_CHECK_TYPE);
2850 } else {
351df4b2 2851 type = CURSEG_WARM_DATA;
2b60311d 2852 }
351df4b2 2853 }
19f106bc 2854
2c190504 2855 f2fs_bug_on(sbi, !IS_DATASEG(type));
351df4b2
JK
2856 curseg = CURSEG_I(sbi, type);
2857
2858 mutex_lock(&curseg->curseg_mutex);
3d26fa6b 2859 down_write(&sit_i->sentry_lock);
351df4b2
JK
2860
2861 old_cursegno = curseg->segno;
19f106bc 2862 old_blkoff = curseg->next_blkoff;
351df4b2
JK
2863
2864 /* change the current segment */
2865 if (segno != curseg->segno) {
2866 curseg->next_segno = segno;
025d63a4 2867 change_curseg(sbi, type);
351df4b2
JK
2868 }
2869
491c0854 2870 curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr);
e79efe3b 2871 __add_sum_entry(sbi, type, sum);
351df4b2 2872
28bc106b 2873 if (!recover_curseg || recover_newaddr)
6e2c64ad
JK
2874 update_sit_entry(sbi, new_blkaddr, 1);
2875 if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
2876 update_sit_entry(sbi, old_blkaddr, -1);
2877
2878 locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
2879 locate_dirty_segment(sbi, GET_SEGNO(sbi, new_blkaddr));
2880
351df4b2 2881 locate_dirty_segment(sbi, old_cursegno);
351df4b2 2882
19f106bc
CY
2883 if (recover_curseg) {
2884 if (old_cursegno != curseg->segno) {
2885 curseg->next_segno = old_cursegno;
025d63a4 2886 change_curseg(sbi, type);
19f106bc
CY
2887 }
2888 curseg->next_blkoff = old_blkoff;
2889 }
2890
3d26fa6b 2891 up_write(&sit_i->sentry_lock);
351df4b2 2892 mutex_unlock(&curseg->curseg_mutex);
2b60311d 2893 up_write(&SM_I(sbi)->curseg_lock);
351df4b2
JK
2894}
2895
528e3459
CY
2896void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
2897 block_t old_addr, block_t new_addr,
28bc106b
CY
2898 unsigned char version, bool recover_curseg,
2899 bool recover_newaddr)
528e3459
CY
2900{
2901 struct f2fs_summary sum;
2902
2903 set_summary(&sum, dn->nid, dn->ofs_in_node, version);
2904
28bc106b
CY
2905 __f2fs_replace_block(sbi, &sum, old_addr, new_addr,
2906 recover_curseg, recover_newaddr);
528e3459 2907
f28b3434 2908 f2fs_update_data_blkaddr(dn, new_addr);
528e3459
CY
2909}
2910
93dfe2ac 2911void f2fs_wait_on_page_writeback(struct page *page,
fec1d657 2912 enum page_type type, bool ordered)
93dfe2ac 2913{
93dfe2ac 2914 if (PageWriteback(page)) {
4081363f
JK
2915 struct f2fs_sb_info *sbi = F2FS_P_SB(page);
2916
b9109b0e
JK
2917 f2fs_submit_merged_write_cond(sbi, page->mapping->host,
2918 0, page->index, type);
fec1d657
JK
2919 if (ordered)
2920 wait_on_page_writeback(page);
2921 else
2922 wait_for_stable_page(page);
93dfe2ac
JK
2923 }
2924}
2925
d4c759ee 2926void f2fs_wait_on_block_writeback(struct f2fs_sb_info *sbi, block_t blkaddr)
08b39fbd
CY
2927{
2928 struct page *cpage;
2929
5d4c0af4 2930 if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
08b39fbd
CY
2931 return;
2932
08b39fbd
CY
2933 cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
2934 if (cpage) {
fec1d657 2935 f2fs_wait_on_page_writeback(cpage, DATA, true);
08b39fbd
CY
2936 f2fs_put_page(cpage, 1);
2937 }
2938}
2939
c376fc0f 2940static void read_compacted_summaries(struct f2fs_sb_info *sbi)
351df4b2
JK
2941{
2942 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2943 struct curseg_info *seg_i;
2944 unsigned char *kaddr;
2945 struct page *page;
2946 block_t start;
2947 int i, j, offset;
2948
2949 start = start_sum_block(sbi);
2950
2951 page = get_meta_page(sbi, start++);
2952 kaddr = (unsigned char *)page_address(page);
2953
2954 /* Step 1: restore nat cache */
2955 seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
b7ad7512 2956 memcpy(seg_i->journal, kaddr, SUM_JOURNAL_SIZE);
351df4b2
JK
2957
2958 /* Step 2: restore sit cache */
2959 seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
b7ad7512 2960 memcpy(seg_i->journal, kaddr + SUM_JOURNAL_SIZE, SUM_JOURNAL_SIZE);
351df4b2
JK
2961 offset = 2 * SUM_JOURNAL_SIZE;
2962
2963 /* Step 3: restore summary entries */
2964 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
2965 unsigned short blk_off;
2966 unsigned int segno;
2967
2968 seg_i = CURSEG_I(sbi, i);
2969 segno = le32_to_cpu(ckpt->cur_data_segno[i]);
2970 blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
2971 seg_i->next_segno = segno;
2972 reset_curseg(sbi, i, 0);
2973 seg_i->alloc_type = ckpt->alloc_type[i];
2974 seg_i->next_blkoff = blk_off;
2975
2976 if (seg_i->alloc_type == SSR)
2977 blk_off = sbi->blocks_per_seg;
2978
2979 for (j = 0; j < blk_off; j++) {
2980 struct f2fs_summary *s;
2981 s = (struct f2fs_summary *)(kaddr + offset);
2982 seg_i->sum_blk->entries[j] = *s;
2983 offset += SUMMARY_SIZE;
09cbfeaf 2984 if (offset + SUMMARY_SIZE <= PAGE_SIZE -
351df4b2
JK
2985 SUM_FOOTER_SIZE)
2986 continue;
2987
2988 f2fs_put_page(page, 1);
2989 page = NULL;
2990
2991 page = get_meta_page(sbi, start++);
2992 kaddr = (unsigned char *)page_address(page);
2993 offset = 0;
2994 }
2995 }
2996 f2fs_put_page(page, 1);
351df4b2
JK
2997}
2998
2999static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
3000{
3001 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
3002 struct f2fs_summary_block *sum;
3003 struct curseg_info *curseg;
3004 struct page *new;
3005 unsigned short blk_off;
3006 unsigned int segno = 0;
3007 block_t blk_addr = 0;
3008
3009 /* get segment number and block addr */
3010 if (IS_DATASEG(type)) {
3011 segno = le32_to_cpu(ckpt->cur_data_segno[type]);
3012 blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type -
3013 CURSEG_HOT_DATA]);
119ee914 3014 if (__exist_node_summaries(sbi))
351df4b2
JK
3015 blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type);
3016 else
3017 blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
3018 } else {
3019 segno = le32_to_cpu(ckpt->cur_node_segno[type -
3020 CURSEG_HOT_NODE]);
3021 blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type -
3022 CURSEG_HOT_NODE]);
119ee914 3023 if (__exist_node_summaries(sbi))
351df4b2
JK
3024 blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
3025 type - CURSEG_HOT_NODE);
3026 else
3027 blk_addr = GET_SUM_BLOCK(sbi, segno);
3028 }
3029
3030 new = get_meta_page(sbi, blk_addr);
3031 sum = (struct f2fs_summary_block *)page_address(new);
3032
3033 if (IS_NODESEG(type)) {
119ee914 3034 if (__exist_node_summaries(sbi)) {
351df4b2
JK
3035 struct f2fs_summary *ns = &sum->entries[0];
3036 int i;
3037 for (i = 0; i < sbi->blocks_per_seg; i++, ns++) {
3038 ns->version = 0;
3039 ns->ofs_in_node = 0;
3040 }
3041 } else {
c376fc0f 3042 restore_node_summary(sbi, segno, sum);
351df4b2
JK
3043 }
3044 }
3045
3046 /* set uncompleted segment to curseg */
3047 curseg = CURSEG_I(sbi, type);
3048 mutex_lock(&curseg->curseg_mutex);
b7ad7512
CY
3049
3050 /* update journal info */
3051 down_write(&curseg->journal_rwsem);
3052 memcpy(curseg->journal, &sum->journal, SUM_JOURNAL_SIZE);
3053 up_write(&curseg->journal_rwsem);
3054
3055 memcpy(curseg->sum_blk->entries, sum->entries, SUM_ENTRY_SIZE);
3056 memcpy(&curseg->sum_blk->footer, &sum->footer, SUM_FOOTER_SIZE);
351df4b2
JK
3057 curseg->next_segno = segno;
3058 reset_curseg(sbi, type, 0);
3059 curseg->alloc_type = ckpt->alloc_type[type];
3060 curseg->next_blkoff = blk_off;
3061 mutex_unlock(&curseg->curseg_mutex);
3062 f2fs_put_page(new, 1);
3063 return 0;
3064}
3065
3066static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
3067{
21d3f8e1
JQ
3068 struct f2fs_journal *sit_j = CURSEG_I(sbi, CURSEG_COLD_DATA)->journal;
3069 struct f2fs_journal *nat_j = CURSEG_I(sbi, CURSEG_HOT_DATA)->journal;
351df4b2 3070 int type = CURSEG_HOT_DATA;
e4fc5fbf 3071 int err;
351df4b2 3072
aaec2b1d 3073 if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG)) {
3fa06d7b
CY
3074 int npages = npages_for_summary_flush(sbi, true);
3075
3076 if (npages >= 2)
3077 ra_meta_pages(sbi, start_sum_block(sbi), npages,
26879fb1 3078 META_CP, true);
3fa06d7b 3079
351df4b2 3080 /* restore for compacted data summary */
c376fc0f 3081 read_compacted_summaries(sbi);
351df4b2
JK
3082 type = CURSEG_HOT_NODE;
3083 }
3084
119ee914 3085 if (__exist_node_summaries(sbi))
3fa06d7b 3086 ra_meta_pages(sbi, sum_blk_addr(sbi, NR_CURSEG_TYPE, type),
26879fb1 3087 NR_CURSEG_TYPE - type, META_CP, true);
3fa06d7b 3088
e4fc5fbf
CY
3089 for (; type <= CURSEG_COLD_NODE; type++) {
3090 err = read_normal_summaries(sbi, type);
3091 if (err)
3092 return err;
3093 }
3094
21d3f8e1
JQ
3095 /* sanity check for summary blocks */
3096 if (nats_in_cursum(nat_j) > NAT_JOURNAL_ENTRIES ||
3097 sits_in_cursum(sit_j) > SIT_JOURNAL_ENTRIES)
3098 return -EINVAL;
3099
351df4b2
JK
3100 return 0;
3101}
3102
3103static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
3104{
3105 struct page *page;
3106 unsigned char *kaddr;
3107 struct f2fs_summary *summary;
3108 struct curseg_info *seg_i;
3109 int written_size = 0;
3110 int i, j;
3111
3112 page = grab_meta_page(sbi, blkaddr++);
3113 kaddr = (unsigned char *)page_address(page);
3114
3115 /* Step 1: write nat cache */
3116 seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
b7ad7512 3117 memcpy(kaddr, seg_i->journal, SUM_JOURNAL_SIZE);
351df4b2
JK
3118 written_size += SUM_JOURNAL_SIZE;
3119
3120 /* Step 2: write sit cache */
3121 seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
b7ad7512 3122 memcpy(kaddr + written_size, seg_i->journal, SUM_JOURNAL_SIZE);
351df4b2
JK
3123 written_size += SUM_JOURNAL_SIZE;
3124
351df4b2
JK
3125 /* Step 3: write summary entries */
3126 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
3127 unsigned short blkoff;
3128 seg_i = CURSEG_I(sbi, i);
3129 if (sbi->ckpt->alloc_type[i] == SSR)
3130 blkoff = sbi->blocks_per_seg;
3131 else
3132 blkoff = curseg_blkoff(sbi, i);
3133
3134 for (j = 0; j < blkoff; j++) {
3135 if (!page) {
3136 page = grab_meta_page(sbi, blkaddr++);
3137 kaddr = (unsigned char *)page_address(page);
3138 written_size = 0;
3139 }
3140 summary = (struct f2fs_summary *)(kaddr + written_size);
3141 *summary = seg_i->sum_blk->entries[j];
3142 written_size += SUMMARY_SIZE;
351df4b2 3143
09cbfeaf 3144 if (written_size + SUMMARY_SIZE <= PAGE_SIZE -
351df4b2
JK
3145 SUM_FOOTER_SIZE)
3146 continue;
3147
e8d61a74 3148 set_page_dirty(page);
351df4b2
JK
3149 f2fs_put_page(page, 1);
3150 page = NULL;
3151 }
3152 }
e8d61a74
CY
3153 if (page) {
3154 set_page_dirty(page);
351df4b2 3155 f2fs_put_page(page, 1);
e8d61a74 3156 }
351df4b2
JK
3157}
3158
3159static void write_normal_summaries(struct f2fs_sb_info *sbi,
3160 block_t blkaddr, int type)
3161{
3162 int i, end;
3163 if (IS_DATASEG(type))
3164 end = type + NR_CURSEG_DATA_TYPE;
3165 else
3166 end = type + NR_CURSEG_NODE_TYPE;
3167
b7ad7512
CY
3168 for (i = type; i < end; i++)
3169 write_current_sum_page(sbi, i, blkaddr + (i - type));
351df4b2
JK
3170}
3171
3172void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
3173{
aaec2b1d 3174 if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG))
351df4b2
JK
3175 write_compacted_summaries(sbi, start_blk);
3176 else
3177 write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
3178}
3179
3180void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
3181{
119ee914 3182 write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
351df4b2
JK
3183}
3184
dfc08a12 3185int lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
351df4b2
JK
3186 unsigned int val, int alloc)
3187{
3188 int i;
3189
3190 if (type == NAT_JOURNAL) {
dfc08a12
CY
3191 for (i = 0; i < nats_in_cursum(journal); i++) {
3192 if (le32_to_cpu(nid_in_journal(journal, i)) == val)
351df4b2
JK
3193 return i;
3194 }
dfc08a12
CY
3195 if (alloc && __has_cursum_space(journal, 1, NAT_JOURNAL))
3196 return update_nats_in_cursum(journal, 1);
351df4b2 3197 } else if (type == SIT_JOURNAL) {
dfc08a12
CY
3198 for (i = 0; i < sits_in_cursum(journal); i++)
3199 if (le32_to_cpu(segno_in_journal(journal, i)) == val)
351df4b2 3200 return i;
dfc08a12
CY
3201 if (alloc && __has_cursum_space(journal, 1, SIT_JOURNAL))
3202 return update_sits_in_cursum(journal, 1);
351df4b2
JK
3203 }
3204 return -1;
3205}
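/*
 * Usage sketch: flush_sit_entries() below resolves a journal slot with
 *
 *	offset = lookup_journal_in_cursum(journal, SIT_JOURNAL, segno, 1);
 *
 * allocating a new entry when segno is absent and cursum space remains;
 * a negative return means no slot was found and none could be allocated.
 */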
3206
3207static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
3208 unsigned int segno)
3209{
2cc22186 3210 return get_meta_page(sbi, current_sit_addr(sbi, segno));
351df4b2
JK
3211}
3212
3213static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
3214 unsigned int start)
3215{
3216 struct sit_info *sit_i = SIT_I(sbi);
068c3cd8 3217 struct page *page;
351df4b2 3218 pgoff_t src_off, dst_off;
351df4b2
JK
3219
3220 src_off = current_sit_addr(sbi, start);
3221 dst_off = next_sit_addr(sbi, src_off);
3222
068c3cd8
YH
3223 page = grab_meta_page(sbi, dst_off);
3224 seg_info_to_sit_page(sbi, page, start);
351df4b2 3225
068c3cd8 3226 set_page_dirty(page);
351df4b2
JK
3227 set_to_next_sit(sit_i, start);
3228
068c3cd8 3229 return page;
351df4b2
JK
3230}
3231
184a5cd2
CY
3232static struct sit_entry_set *grab_sit_entry_set(void)
3233{
3234 struct sit_entry_set *ses =
80c54505 3235 f2fs_kmem_cache_alloc(sit_entry_set_slab, GFP_NOFS);
184a5cd2
CY
3236
3237 ses->entry_cnt = 0;
3238 INIT_LIST_HEAD(&ses->set_list);
3239 return ses;
3240}
3241
3242static void release_sit_entry_set(struct sit_entry_set *ses)
3243{
3244 list_del(&ses->set_list);
3245 kmem_cache_free(sit_entry_set_slab, ses);
3246}
3247
3248static void adjust_sit_entry_set(struct sit_entry_set *ses,
3249 struct list_head *head)
3250{
3251 struct sit_entry_set *next = ses;
3252
3253 if (list_is_last(&ses->set_list, head))
3254 return;
3255
3256 list_for_each_entry_continue(next, head, set_list)
3257 if (ses->entry_cnt <= next->entry_cnt)
3258 break;
3259
3260 list_move_tail(&ses->set_list, &next->set_list);
3261}
3262
3263static void add_sit_entry(unsigned int segno, struct list_head *head)
3264{
3265 struct sit_entry_set *ses;
3266 unsigned int start_segno = START_SEGNO(segno);
3267
3268 list_for_each_entry(ses, head, set_list) {
3269 if (ses->start_segno == start_segno) {
3270 ses->entry_cnt++;
3271 adjust_sit_entry_set(ses, head);
3272 return;
3273 }
3274 }
3275
3276 ses = grab_sit_entry_set();
3277
3278 ses->start_segno = start_segno;
3279 ses->entry_cnt++;
3280 list_add(&ses->set_list, head);
3281}
3282
3283static void add_sits_in_set(struct f2fs_sb_info *sbi)
3284{
3285 struct f2fs_sm_info *sm_info = SM_I(sbi);
3286 struct list_head *set_list = &sm_info->sit_entry_set;
3287 unsigned long *bitmap = SIT_I(sbi)->dirty_sentries_bitmap;
184a5cd2
CY
3288 unsigned int segno;
3289
7cd8558b 3290 for_each_set_bit(segno, bitmap, MAIN_SEGS(sbi))
184a5cd2
CY
3291 add_sit_entry(segno, set_list);
3292}
3293
3294static void remove_sits_in_journal(struct f2fs_sb_info *sbi)
351df4b2
JK
3295{
3296 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
b7ad7512 3297 struct f2fs_journal *journal = curseg->journal;
351df4b2
JK
3298 int i;
3299
b7ad7512 3300 down_write(&curseg->journal_rwsem);
dfc08a12 3301 for (i = 0; i < sits_in_cursum(journal); i++) {
184a5cd2
CY
3302 unsigned int segno;
3303 bool dirtied;
3304
dfc08a12 3305 segno = le32_to_cpu(segno_in_journal(journal, i));
184a5cd2
CY
3306 dirtied = __mark_sit_entry_dirty(sbi, segno);
3307
3308 if (!dirtied)
3309 add_sit_entry(segno, &SM_I(sbi)->sit_entry_set);
351df4b2 3310 }
dfc08a12 3311 update_sits_in_cursum(journal, -i);
b7ad7512 3312 up_write(&curseg->journal_rwsem);
351df4b2
JK
3313}
3314
0a8165d7 3315/*
351df4b2
JK
3316 * CP calls this function, which flushes SIT entries including sit_journal,
3317 * and moves prefree segs to free segs.
3318 */
4b2fecc8 3319void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
351df4b2
JK
3320{
3321 struct sit_info *sit_i = SIT_I(sbi);
3322 unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
3323 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
b7ad7512 3324 struct f2fs_journal *journal = curseg->journal;
184a5cd2
CY
3325 struct sit_entry_set *ses, *tmp;
3326 struct list_head *head = &SM_I(sbi)->sit_entry_set;
184a5cd2 3327 bool to_journal = true;
4b2fecc8 3328 struct seg_entry *se;
351df4b2 3329
3d26fa6b 3330 down_write(&sit_i->sentry_lock);
351df4b2 3331
2b11a74b
WL
3332 if (!sit_i->dirty_sentries)
3333 goto out;
3334
351df4b2 3335 /*
184a5cd2
CY
 3336 * temporarily add and account the sit entries of the dirty bitmap
 3337 * in sit entry sets
351df4b2 3338 */
184a5cd2 3339 add_sits_in_set(sbi);
351df4b2 3340
184a5cd2
CY
3341 /*
 3342 * if there is not enough space in the journal to store dirty sit
 3343 * entries, remove all entries from the journal and add and account
 3344 * them in the sit entry sets.
3345 */
dfc08a12 3346 if (!__has_cursum_space(journal, sit_i->dirty_sentries, SIT_JOURNAL))
184a5cd2 3347 remove_sits_in_journal(sbi);
b2955550 3348
184a5cd2
CY
3349 /*
3350 * there are two steps to flush sit entries:
3351 * #1, flush sit entries to journal in current cold data summary block.
3352 * #2, flush sit entries to sit page.
3353 */
3354 list_for_each_entry_safe(ses, tmp, head, set_list) {
4a257ed6 3355 struct page *page = NULL;
184a5cd2
CY
3356 struct f2fs_sit_block *raw_sit = NULL;
3357 unsigned int start_segno = ses->start_segno;
3358 unsigned int end = min(start_segno + SIT_ENTRY_PER_BLOCK,
7cd8558b 3359 (unsigned long)MAIN_SEGS(sbi));
184a5cd2
CY
3360 unsigned int segno = start_segno;
3361
3362 if (to_journal &&
dfc08a12 3363 !__has_cursum_space(journal, ses->entry_cnt, SIT_JOURNAL))
184a5cd2
CY
3364 to_journal = false;
3365
b7ad7512
CY
3366 if (to_journal) {
3367 down_write(&curseg->journal_rwsem);
3368 } else {
184a5cd2
CY
3369 page = get_next_sit_page(sbi, start_segno);
3370 raw_sit = page_address(page);
351df4b2 3371 }
351df4b2 3372
184a5cd2
CY
3373 /* flush dirty sit entries in region of current sit set */
3374 for_each_set_bit_from(segno, bitmap, end) {
3375 int offset, sit_offset;
4b2fecc8
JK
3376
3377 se = get_seg_entry(sbi, segno);
184a5cd2
CY
3378
3379 /* add discard candidates */
c473f1a9 3380 if (!(cpc->reason & CP_DISCARD)) {
4b2fecc8 3381 cpc->trim_start = segno;
25290fa5 3382 add_discard_addrs(sbi, cpc, false);
4b2fecc8 3383 }
184a5cd2
CY
3384
3385 if (to_journal) {
dfc08a12 3386 offset = lookup_journal_in_cursum(journal,
184a5cd2
CY
3387 SIT_JOURNAL, segno, 1);
3388 f2fs_bug_on(sbi, offset < 0);
dfc08a12 3389 segno_in_journal(journal, offset) =
184a5cd2
CY
3390 cpu_to_le32(segno);
3391 seg_info_to_raw_sit(se,
dfc08a12 3392 &sit_in_journal(journal, offset));
184a5cd2
CY
3393 } else {
3394 sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
3395 seg_info_to_raw_sit(se,
3396 &raw_sit->entries[sit_offset]);
3397 }
351df4b2 3398
184a5cd2
CY
3399 __clear_bit(segno, bitmap);
3400 sit_i->dirty_sentries--;
3401 ses->entry_cnt--;
351df4b2
JK
3402 }
3403
b7ad7512
CY
3404 if (to_journal)
3405 up_write(&curseg->journal_rwsem);
3406 else
184a5cd2
CY
3407 f2fs_put_page(page, 1);
3408
3409 f2fs_bug_on(sbi, ses->entry_cnt);
3410 release_sit_entry_set(ses);
351df4b2 3411 }
184a5cd2
CY
3412
3413 f2fs_bug_on(sbi, !list_empty(head));
3414 f2fs_bug_on(sbi, sit_i->dirty_sentries);
184a5cd2 3415out:
c473f1a9 3416 if (cpc->reason & CP_DISCARD) {
650d3c4e
YH
3417 __u64 trim_start = cpc->trim_start;
3418
4b2fecc8 3419 for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++)
25290fa5 3420 add_discard_addrs(sbi, cpc, false);
650d3c4e
YH
3421
3422 cpc->trim_start = trim_start;
4b2fecc8 3423 }
3d26fa6b 3424 up_write(&sit_i->sentry_lock);
351df4b2 3425
351df4b2
JK
3426 set_prefree_as_free_segments(sbi);
3427}
3428
static int build_sit_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct sit_info *sit_i;
	unsigned int sit_segs, start;
	char *src_bitmap;
	unsigned int bitmap_size;

	/* allocate memory for SIT information */
	sit_i = f2fs_kzalloc(sbi, sizeof(struct sit_info), GFP_KERNEL);
	if (!sit_i)
		return -ENOMEM;

	SM_I(sbi)->sit_info = sit_i;

	sit_i->sentries = f2fs_kvzalloc(sbi, MAIN_SEGS(sbi) *
					sizeof(struct seg_entry), GFP_KERNEL);
	if (!sit_i->sentries)
		return -ENOMEM;

	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
	sit_i->dirty_sentries_bitmap = f2fs_kvzalloc(sbi, bitmap_size,
								GFP_KERNEL);
	if (!sit_i->dirty_sentries_bitmap)
		return -ENOMEM;

	for (start = 0; start < MAIN_SEGS(sbi); start++) {
		sit_i->sentries[start].cur_valid_map
			= f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
		sit_i->sentries[start].ckpt_valid_map
			= f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
		if (!sit_i->sentries[start].cur_valid_map ||
				!sit_i->sentries[start].ckpt_valid_map)
			return -ENOMEM;

#ifdef CONFIG_F2FS_CHECK_FS
		sit_i->sentries[start].cur_valid_map_mir
			= f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
		if (!sit_i->sentries[start].cur_valid_map_mir)
			return -ENOMEM;
#endif

		if (f2fs_discard_en(sbi)) {
			sit_i->sentries[start].discard_map
				= f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE,
								GFP_KERNEL);
			if (!sit_i->sentries[start].discard_map)
				return -ENOMEM;
		}
	}

	sit_i->tmp_map = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
	if (!sit_i->tmp_map)
		return -ENOMEM;

	if (sbi->segs_per_sec > 1) {
		sit_i->sec_entries = f2fs_kvzalloc(sbi, MAIN_SECS(sbi) *
					sizeof(struct sec_entry), GFP_KERNEL);
		if (!sit_i->sec_entries)
			return -ENOMEM;
	}

	/* get information related to SIT */
	sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;

	/* setup SIT bitmap from checkpoint pack */
	bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
	src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);

	sit_i->sit_bitmap = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
	if (!sit_i->sit_bitmap)
		return -ENOMEM;

#ifdef CONFIG_F2FS_CHECK_FS
	sit_i->sit_bitmap_mir = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
	if (!sit_i->sit_bitmap_mir)
		return -ENOMEM;
#endif

	/* init SIT information */
	sit_i->s_ops = &default_salloc_ops;

	sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
	sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
	sit_i->written_valid_blocks = 0;
	sit_i->bitmap_size = bitmap_size;
	sit_i->dirty_sentries = 0;
	sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
	sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
	sit_i->mounted_time = ktime_get_real_seconds();
	init_rwsem(&sit_i->sentry_lock);
	return 0;
}

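/*
 * Allocate the free segment/section bitmaps.  Everything starts out marked
 * as in-use (0xff); init_free_segmap() clears the bits once the SIT entries
 * have been loaded.
 */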
static int build_free_segmap(struct f2fs_sb_info *sbi)
{
	struct free_segmap_info *free_i;
	unsigned int bitmap_size, sec_bitmap_size;

	/* allocate memory for free segmap information */
	free_i = f2fs_kzalloc(sbi, sizeof(struct free_segmap_info), GFP_KERNEL);
	if (!free_i)
		return -ENOMEM;

	SM_I(sbi)->free_info = free_i;

	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
	free_i->free_segmap = f2fs_kvmalloc(sbi, bitmap_size, GFP_KERNEL);
	if (!free_i->free_segmap)
		return -ENOMEM;

	sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
	free_i->free_secmap = f2fs_kvmalloc(sbi, sec_bitmap_size, GFP_KERNEL);
	if (!free_i->free_secmap)
		return -ENOMEM;

	/* set all segments as dirty temporarily */
	memset(free_i->free_segmap, 0xff, bitmap_size);
	memset(free_i->free_secmap, 0xff, sec_bitmap_size);

	/* init free segmap information */
	free_i->start_segno = GET_SEGNO_FROM_SEG0(sbi, MAIN_BLKADDR(sbi));
	free_i->free_segments = 0;
	free_i->free_sections = 0;
	spin_lock_init(&free_i->segmap_lock);
	return 0;
}

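/*
 * Set up one curseg_info per active log (hot/warm/cold x data/node), each
 * with an in-memory summary block and journal, then restore the currently
 * open segments from the checkpoint summaries.
 */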
static int build_curseg(struct f2fs_sb_info *sbi)
{
	struct curseg_info *array;
	int i;

	array = f2fs_kzalloc(sbi, sizeof(*array) * NR_CURSEG_TYPE, GFP_KERNEL);
	if (!array)
		return -ENOMEM;

	SM_I(sbi)->curseg_array = array;

	for (i = 0; i < NR_CURSEG_TYPE; i++) {
		mutex_init(&array[i].curseg_mutex);
		array[i].sum_blk = f2fs_kzalloc(sbi, PAGE_SIZE, GFP_KERNEL);
		if (!array[i].sum_blk)
			return -ENOMEM;
		init_rwsem(&array[i].journal_rwsem);
		array[i].journal = f2fs_kzalloc(sbi,
				sizeof(struct f2fs_journal), GFP_KERNEL);
		if (!array[i].journal)
			return -ENOMEM;
		array[i].segno = NULL_SEGNO;
		array[i].next_blkoff = 0;
	}
	return restore_curseg_summaries(sbi);
}

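/*
 * Load SIT entries into the in-memory table: first the on-disk SIT blocks
 * (read ahead in batches), then the SIT journal kept in the cold-data
 * summary block, which carries the most recent updates.
 */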
static int build_sit_entries(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_journal *journal = curseg->journal;
	struct seg_entry *se;
	struct f2fs_sit_entry sit;
	int sit_blk_cnt = SIT_BLK_CNT(sbi);
	unsigned int i, start, end;
	unsigned int readed, start_blk = 0;
	int err = 0;

	do {
		readed = ra_meta_pages(sbi, start_blk, BIO_MAX_PAGES,
							META_SIT, true);

		start = start_blk * sit_i->sents_per_block;
		end = (start_blk + readed) * sit_i->sents_per_block;

		for (; start < end && start < MAIN_SEGS(sbi); start++) {
			struct f2fs_sit_block *sit_blk;
			struct page *page;

			se = &sit_i->sentries[start];
			page = get_current_sit_page(sbi, start);
			sit_blk = (struct f2fs_sit_block *)page_address(page);
			sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
			f2fs_put_page(page, 1);

			err = check_block_count(sbi, start, &sit);
			if (err)
				return err;
			seg_info_from_raw_sit(se, &sit);

			/* build discard map only one time */
			if (f2fs_discard_en(sbi)) {
				if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
					memset(se->discard_map, 0xff,
						SIT_VBLOCK_MAP_SIZE);
				} else {
					memcpy(se->discard_map,
						se->cur_valid_map,
						SIT_VBLOCK_MAP_SIZE);
					sbi->discard_blks +=
						sbi->blocks_per_seg -
						se->valid_blocks;
				}
			}

			if (sbi->segs_per_sec > 1)
				get_sec_entry(sbi, start)->valid_blocks +=
							se->valid_blocks;
		}
		start_blk += readed;
	} while (start_blk < sit_blk_cnt);
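
	/*
	 * Entries in the SIT journal were not written back to the SIT area
	 * at the last checkpoint, so they override what was just read from
	 * the on-disk SIT blocks.
	 */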
	down_read(&curseg->journal_rwsem);
	for (i = 0; i < sits_in_cursum(journal); i++) {
		unsigned int old_valid_blocks;

		start = le32_to_cpu(segno_in_journal(journal, i));
		se = &sit_i->sentries[start];
		sit = sit_in_journal(journal, i);

		old_valid_blocks = se->valid_blocks;

		err = check_block_count(sbi, start, &sit);
		if (err)
			break;
		seg_info_from_raw_sit(se, &sit);

		if (f2fs_discard_en(sbi)) {
			if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
				memset(se->discard_map, 0xff,
							SIT_VBLOCK_MAP_SIZE);
			} else {
				memcpy(se->discard_map, se->cur_valid_map,
							SIT_VBLOCK_MAP_SIZE);
				sbi->discard_blks += old_valid_blocks -
							se->valid_blocks;
			}
		}

		if (sbi->segs_per_sec > 1)
			get_sec_entry(sbi, start)->valid_blocks +=
					se->valid_blocks - old_valid_blocks;
	}
	up_read(&curseg->journal_rwsem);
	return err;
}

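/*
 * Mark segments with no valid blocks as free; count the rest into
 * written_valid_blocks.  The active logs are always kept in-use.
 */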
static void init_free_segmap(struct f2fs_sb_info *sbi)
{
	unsigned int start;
	int type;

	for (start = 0; start < MAIN_SEGS(sbi); start++) {
		struct seg_entry *sentry = get_seg_entry(sbi, start);
		if (!sentry->valid_blocks)
			__set_free(sbi, start);
		else
			SIT_I(sbi)->written_valid_blocks +=
						sentry->valid_blocks;
	}

	/* mark the current segments as in-use */
	for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) {
		struct curseg_info *curseg_t = CURSEG_I(sbi, type);
		__set_test_and_inuse(sbi, curseg_t->segno);
	}
}

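/*
 * A dirty segment is an in-use segment that is only partially valid:
 * fully valid and fully invalid segments are skipped here.
 */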
static void init_dirty_segmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int segno = 0, offset = 0;
	unsigned short valid_blocks;

	while (1) {
		/* find dirty segment based on free segmap */
		segno = find_next_inuse(free_i, MAIN_SEGS(sbi), offset);
		if (segno >= MAIN_SEGS(sbi))
			break;
		offset = segno + 1;
		valid_blocks = get_valid_blocks(sbi, segno, false);
		if (valid_blocks == sbi->blocks_per_seg || !valid_blocks)
			continue;
		if (valid_blocks > sbi->blocks_per_seg) {
			f2fs_bug_on(sbi, 1);
			continue;
		}
		mutex_lock(&dirty_i->seglist_lock);
		__locate_dirty_segment(sbi, segno, DIRTY);
		mutex_unlock(&dirty_i->seglist_lock);
	}
}

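/* bitmap of sections currently selected as foreground GC victims */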
static int init_victim_secmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));

	dirty_i->victim_secmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
	if (!dirty_i->victim_secmap)
		return -ENOMEM;
	return 0;
}

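/*
 * Allocate one bitmap per dirty type (including PRE for prefree segments),
 * scan for dirty segments, and set up the victim section map for GC.
 */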
static int build_dirty_segmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i;
	unsigned int bitmap_size, i;

	/* allocate memory for dirty segments list information */
	dirty_i = f2fs_kzalloc(sbi, sizeof(struct dirty_seglist_info),
								GFP_KERNEL);
	if (!dirty_i)
		return -ENOMEM;

	SM_I(sbi)->dirty_info = dirty_i;
	mutex_init(&dirty_i->seglist_lock);

	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));

	for (i = 0; i < NR_DIRTY_TYPE; i++) {
		dirty_i->dirty_segmap[i] = f2fs_kvzalloc(sbi, bitmap_size,
								GFP_KERNEL);
		if (!dirty_i->dirty_segmap[i])
			return -ENOMEM;
	}

	init_dirty_segmap(sbi);
	return init_victim_secmap(sbi);
}

/*
 * Update min, max modified time for cost-benefit GC algorithm
 */
static void init_min_max_mtime(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int segno;

	down_write(&sit_i->sentry_lock);

	sit_i->min_mtime = LLONG_MAX;

	for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
		unsigned int i;
		unsigned long long mtime = 0;

		for (i = 0; i < sbi->segs_per_sec; i++)
			mtime += get_seg_entry(sbi, segno + i)->mtime;

		mtime = div_u64(mtime, sbi->segs_per_sec);

		if (sit_i->min_mtime > mtime)
			sit_i->min_mtime = mtime;
	}
	sit_i->max_mtime = get_mtime(sbi);
	up_write(&sit_i->sentry_lock);
}

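/*
 * Entry point at mount time: read segment-manager geometry from the
 * superblock and checkpoint, start the flush/discard control threads,
 * and construct all in-memory segment management structures.
 */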
int build_segment_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_sm_info *sm_info;
	int err;

	sm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_sm_info), GFP_KERNEL);
	if (!sm_info)
		return -ENOMEM;

	/* init sm info */
	sbi->sm_info = sm_info;
	sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
	sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
	sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
	sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
	sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
	sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
	sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
	sm_info->rec_prefree_segments = sm_info->main_segments *
					DEF_RECLAIM_PREFREE_SEGMENTS / 100;
	if (sm_info->rec_prefree_segments > DEF_MAX_RECLAIM_PREFREE_SEGMENTS)
		sm_info->rec_prefree_segments = DEF_MAX_RECLAIM_PREFREE_SEGMENTS;

	if (!test_opt(sbi, LFS))
		sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC;
	sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
	sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
	sm_info->min_hot_blocks = DEF_MIN_HOT_BLOCKS;
	sm_info->min_ssr_sections = reserved_sections(sbi);

	sm_info->trim_sections = DEF_BATCHED_TRIM_SECTIONS;

	INIT_LIST_HEAD(&sm_info->sit_entry_set);

	init_rwsem(&sm_info->curseg_lock);

	if (!f2fs_readonly(sbi->sb)) {
		err = create_flush_cmd_control(sbi);
		if (err)
			return err;
	}

	err = create_discard_cmd_control(sbi);
	if (err)
		return err;

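	/*
	 * Build the in-memory tables in dependency order: allocate the SIT,
	 * free-segmap and curseg structures, populate them from the on-disk
	 * SIT and its journal, then derive free and dirty segment state.
	 */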
	err = build_sit_info(sbi);
	if (err)
		return err;
	err = build_free_segmap(sbi);
	if (err)
		return err;
	err = build_curseg(sbi);
	if (err)
		return err;

	/* reinit free segmap based on SIT */
	err = build_sit_entries(sbi);
	if (err)
		return err;

	init_free_segmap(sbi);
	err = build_dirty_segmap(sbi);
	if (err)
		return err;

	init_min_max_mtime(sbi);
	return 0;
}

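/*
 * Teardown helpers below release everything build_segment_manager() set up;
 * each destroy_* tolerates structures that were never allocated, so the
 * path is safe after a partial build.
 */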
static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
					enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	mutex_lock(&dirty_i->seglist_lock);
	kvfree(dirty_i->dirty_segmap[dirty_type]);
	dirty_i->nr_dirty[dirty_type] = 0;
	mutex_unlock(&dirty_i->seglist_lock);
}

3870
5ec4e49f 3871static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
351df4b2
JK
3872{
3873 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
39307a8e 3874 kvfree(dirty_i->victim_secmap);
351df4b2
JK
3875}
3876
static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	int i;

	if (!dirty_i)
		return;

	/* discard pre-free/dirty segments list */
	for (i = 0; i < NR_DIRTY_TYPE; i++)
		discard_dirty_segmap(sbi, i);

	destroy_victim_secmap(sbi);
	SM_I(sbi)->dirty_info = NULL;
	kfree(dirty_i);
}

static void destroy_curseg(struct f2fs_sb_info *sbi)
{
	struct curseg_info *array = SM_I(sbi)->curseg_array;
	int i;

	if (!array)
		return;
	SM_I(sbi)->curseg_array = NULL;
	for (i = 0; i < NR_CURSEG_TYPE; i++) {
		kfree(array[i].sum_blk);
		kfree(array[i].journal);
	}
	kfree(array);
}

static void destroy_free_segmap(struct f2fs_sb_info *sbi)
{
	struct free_segmap_info *free_i = SM_I(sbi)->free_info;
	if (!free_i)
		return;
	SM_I(sbi)->free_info = NULL;
	kvfree(free_i->free_segmap);
	kvfree(free_i->free_secmap);
	kfree(free_i);
}

static void destroy_sit_info(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int start;

	if (!sit_i)
		return;

	if (sit_i->sentries) {
		for (start = 0; start < MAIN_SEGS(sbi); start++) {
			kfree(sit_i->sentries[start].cur_valid_map);
#ifdef CONFIG_F2FS_CHECK_FS
			kfree(sit_i->sentries[start].cur_valid_map_mir);
#endif
			kfree(sit_i->sentries[start].ckpt_valid_map);
			kfree(sit_i->sentries[start].discard_map);
		}
	}
	kfree(sit_i->tmp_map);

	kvfree(sit_i->sentries);
	kvfree(sit_i->sec_entries);
	kvfree(sit_i->dirty_sentries_bitmap);

	SM_I(sbi)->sit_info = NULL;
	kfree(sit_i->sit_bitmap);
#ifdef CONFIG_F2FS_CHECK_FS
	kfree(sit_i->sit_bitmap_mir);
#endif
	kfree(sit_i);
}

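/*
 * Stop the flush and discard control threads first so that nothing issues
 * further commands while the tables they reference are being freed.
 */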
void destroy_segment_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_sm_info *sm_info = SM_I(sbi);

	if (!sm_info)
		return;
	destroy_flush_cmd_control(sbi, true);
	destroy_discard_cmd_control(sbi);
	destroy_dirty_segmap(sbi);
	destroy_curseg(sbi);
	destroy_free_segmap(sbi);
	destroy_sit_info(sbi);
	sbi->sm_info = NULL;
	kfree(sm_info);
}

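/*
 * Slab caches for the segment manager's small, frequently allocated
 * objects; on any failure the already-created caches are unwound.
 */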
int __init create_segment_manager_caches(void)
{
	discard_entry_slab = f2fs_kmem_cache_create("discard_entry",
			sizeof(struct discard_entry));
	if (!discard_entry_slab)
		goto fail;

	discard_cmd_slab = f2fs_kmem_cache_create("discard_cmd",
			sizeof(struct discard_cmd));
	if (!discard_cmd_slab)
		goto destroy_discard_entry;

	sit_entry_set_slab = f2fs_kmem_cache_create("sit_entry_set",
			sizeof(struct sit_entry_set));
	if (!sit_entry_set_slab)
		goto destroy_discard_cmd;

	inmem_entry_slab = f2fs_kmem_cache_create("inmem_page_entry",
			sizeof(struct inmem_pages));
	if (!inmem_entry_slab)
		goto destroy_sit_entry_set;
	return 0;

destroy_sit_entry_set:
	kmem_cache_destroy(sit_entry_set_slab);
destroy_discard_cmd:
	kmem_cache_destroy(discard_cmd_slab);
destroy_discard_entry:
	kmem_cache_destroy(discard_entry_slab);
fail:
	return -ENOMEM;
}

void destroy_segment_manager_caches(void)
{
	kmem_cache_destroy(sit_entry_set_slab);
	kmem_cache_destroy(discard_cmd_slab);
	kmem_cache_destroy(discard_entry_slab);
	kmem_cache_destroy(inmem_entry_slab);
}