/*
 * fs/f2fs/segment.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/prefetch.h>
#include <linux/kthread.h>
#include <linux/swap.h>
#include <linux/timer.h>
#include <linux/freezer.h>
#include <linux/sched/signal.h>

#include "f2fs.h"
#include "segment.h"
#include "node.h"
#include "gc.h"
#include "trace.h"
#include <trace/events/f2fs.h>
#define __reverse_ffz(x) __reverse_ffs(~(x))

static struct kmem_cache *discard_entry_slab;
static struct kmem_cache *discard_cmd_slab;
static struct kmem_cache *sit_entry_set_slab;
static struct kmem_cache *inmem_entry_slab;
static unsigned long __reverse_ulong(unsigned char *str)
{
	unsigned long tmp = 0;
	int shift = 24, idx = 0;

#if BITS_PER_LONG == 64
	shift = 56;
#endif
	while (shift >= 0) {
		tmp |= (unsigned long)str[idx++] << shift;
		shift -= BITS_PER_BYTE;
	}
	return tmp;
}
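
/*
 * Illustrative note (not in the original source): on a 64-bit build the
 * initial shift is 56, so __reverse_ulong() loads the bitmap bytes in
 * address order into the high-to-low byte lanes of an unsigned long:
 * str[0] lands in bits 63..56 and str[7] in bits 7..0. This is what lets
 * the MSB-first f2fs bit order be scanned with ordinary word arithmetic.
 */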
/*
 * __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h since
 * MSB and LSB are reversed in a byte by f2fs_set_bit.
 */
static inline unsigned long __reverse_ffs(unsigned long word)
{
	int num = 0;

#if BITS_PER_LONG == 64
	if ((word & 0xffffffff00000000UL) == 0)
		num += 32;
	else
		word >>= 32;
#endif
	if ((word & 0xffff0000) == 0)
		num += 16;
	else
		word >>= 16;

	if ((word & 0xff00) == 0)
		num += 8;
	else
		word >>= 8;

	if ((word & 0xf0) == 0)
		num += 4;
	else
		word >>= 4;

	if ((word & 0xc) == 0)
		num += 2;
	else
		word >>= 2;

	if ((word & 0x2) == 0)
		num += 1;
	return num;
}
/*
 * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because
 * f2fs_set_bit makes MSB and LSB reversed in a byte.
 * @size must be an integral multiple of BITS_PER_LONG.
 * Example:
 *                             MSB <--> LSB
 *   f2fs_set_bit(0, bitmap) => 1000 0000
 *   f2fs_set_bit(7, bitmap) => 0000 0001
 */
static unsigned long __find_rev_next_bit(const unsigned long *addr,
			unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + BIT_WORD(offset);
	unsigned long result = size;
	unsigned long tmp;

	if (offset >= size)
		return size;

	size -= (offset & ~(BITS_PER_LONG - 1));
	offset %= BITS_PER_LONG;

	while (1) {
		if (*p == 0)
			goto pass;

		tmp = __reverse_ulong((unsigned char *)p);

		tmp &= ~0UL >> offset;
		if (size < BITS_PER_LONG)
			tmp &= (~0UL << (BITS_PER_LONG - size));
		if (tmp)
			goto found;
pass:
		if (size <= BITS_PER_LONG)
			break;
		size -= BITS_PER_LONG;
		offset = 0;
		p++;
	}
	return result;
found:
	return result - size + __reverse_ffs(tmp);
}
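
/*
 * Worked example (illustrative only, derived from the table above): if the
 * only set bit is f2fs_set_bit(7, map), so byte 0 holds 0000 0001, then
 * __find_rev_next_bit(map, 512, 0) returns 7, whereas the regular
 * little-endian find_next_bit() would report bit 0.
 */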
static unsigned long __find_rev_next_zero_bit(const unsigned long *addr,
			unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + BIT_WORD(offset);
	unsigned long result = size;
	unsigned long tmp;

	if (offset >= size)
		return size;

	size -= (offset & ~(BITS_PER_LONG - 1));
	offset %= BITS_PER_LONG;

	while (1) {
		if (*p == ~0UL)
			goto pass;

		tmp = __reverse_ulong((unsigned char *)p);

		if (offset)
			tmp |= ~0UL << (BITS_PER_LONG - offset);
		if (size < BITS_PER_LONG)
			tmp |= ~0UL >> size;
		if (tmp != ~0UL)
			goto found;
pass:
		if (size <= BITS_PER_LONG)
			break;
		size -= BITS_PER_LONG;
		offset = 0;
		p++;
	}
	return result;
found:
	return result - size + __reverse_ffz(tmp);
}
bool need_SSR(struct f2fs_sb_info *sbi)
{
	int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
	int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
	int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);

	if (test_opt(sbi, LFS))
		return false;
	if (sbi->gc_thread && sbi->gc_thread->gc_urgent)
		return true;

	return free_sections(sbi) <= (node_secs + 2 * dent_secs + imeta_secs +
			SM_I(sbi)->min_ssr_sections + reserved_sections(sbi));
}
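
/*
 * Worked example (illustrative numbers only): with node_secs = 4,
 * dent_secs = 2, imeta_secs = 1, min_ssr_sections = 2 and
 * reserved_sections() = 6, the threshold above is 4 + 2*2 + 1 + 2 + 6 = 17,
 * so SSR kicks in once free_sections() drops to 17 or below.
 */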
void register_inmem_page(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct inmem_pages *new;

	f2fs_trace_pid(page);

	set_page_private(page, (unsigned long)ATOMIC_WRITTEN_PAGE);
	SetPagePrivate(page);

	new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS);

	/* add atomic page indices to the list */
	new->page = page;
	INIT_LIST_HEAD(&new->list);

	/* increase reference count with clean state */
	mutex_lock(&fi->inmem_lock);
	get_page(page);
	list_add_tail(&new->list, &fi->inmem_pages);
	spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
	if (list_empty(&fi->inmem_ilist))
		list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]);
	spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
	inc_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
	mutex_unlock(&fi->inmem_lock);

	trace_f2fs_register_inmem_page(page, INMEM);
}
static int __revoke_inmem_pages(struct inode *inode,
				struct list_head *head, bool drop, bool recover)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct inmem_pages *cur, *tmp;
	int err = 0;

	list_for_each_entry_safe(cur, tmp, head, list) {
		struct page *page = cur->page;

		if (drop)
			trace_f2fs_commit_inmem_page(page, INMEM_DROP);

		lock_page(page);

		if (recover) {
			struct dnode_of_data dn;
			struct node_info ni;

			trace_f2fs_commit_inmem_page(page, INMEM_REVOKE);
retry:
			set_new_dnode(&dn, inode, NULL, NULL, 0);
			err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
			if (err) {
				if (err == -ENOMEM) {
					congestion_wait(BLK_RW_ASYNC, HZ/50);
					cond_resched();
					goto retry;
				}
				err = -EAGAIN;
				goto next;
			}
			get_node_info(sbi, dn.nid, &ni);
			if (cur->old_addr == NEW_ADDR) {
				invalidate_blocks(sbi, dn.data_blkaddr);
				f2fs_update_data_blkaddr(&dn, NEW_ADDR);
			} else
				f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
					cur->old_addr, ni.version, true, true);
			f2fs_put_dnode(&dn);
		}
next:
		/* we don't need to invalidate this in the successful status */
		if (drop || recover)
			ClearPageUptodate(page);
		set_page_private(page, 0);
		ClearPagePrivate(page);
		f2fs_put_page(page, 1);

		list_del(&cur->list);
		kmem_cache_free(inmem_entry_slab, cur);
		dec_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
	}
	return err;
}
void drop_inmem_pages_all(struct f2fs_sb_info *sbi)
{
	struct list_head *head = &sbi->inode_list[ATOMIC_FILE];
	struct inode *inode;
	struct f2fs_inode_info *fi;
next:
	spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
	if (list_empty(head)) {
		spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
		return;
	}
	fi = list_first_entry(head, struct f2fs_inode_info, inmem_ilist);
	inode = igrab(&fi->vfs_inode);
	spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);

	if (inode) {
		drop_inmem_pages(inode);
		iput(inode);
	}
	congestion_wait(BLK_RW_ASYNC, HZ/50);
	cond_resched();
	goto next;
}
void drop_inmem_pages(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);

	mutex_lock(&fi->inmem_lock);
	__revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
	spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
	if (!list_empty(&fi->inmem_ilist))
		list_del_init(&fi->inmem_ilist);
	spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
	mutex_unlock(&fi->inmem_lock);

	clear_inode_flag(inode, FI_ATOMIC_FILE);
	clear_inode_flag(inode, FI_HOT_DATA);
	stat_dec_atomic_write(inode);
}
void drop_inmem_page(struct inode *inode, struct page *page)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct list_head *head = &fi->inmem_pages;
	struct inmem_pages *cur = NULL;

	f2fs_bug_on(sbi, !IS_ATOMIC_WRITTEN_PAGE(page));

	mutex_lock(&fi->inmem_lock);
	list_for_each_entry(cur, head, list) {
		if (cur->page == page)
			break;
	}

	f2fs_bug_on(sbi, !cur || cur->page != page);
	list_del(&cur->list);
	mutex_unlock(&fi->inmem_lock);

	dec_page_count(sbi, F2FS_INMEM_PAGES);
	kmem_cache_free(inmem_entry_slab, cur);

	ClearPageUptodate(page);
	set_page_private(page, 0);
	ClearPagePrivate(page);
	f2fs_put_page(page, 0);

	trace_f2fs_commit_inmem_page(page, INMEM_INVALIDATE);
}
static int __commit_inmem_pages(struct inode *inode,
					struct list_head *revoke_list)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct inmem_pages *cur, *tmp;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = inode->i_ino,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = REQ_SYNC | REQ_PRIO,
		.io_type = FS_DATA_IO,
	};
	pgoff_t last_idx = ULONG_MAX;
	int err = 0;

	list_for_each_entry_safe(cur, tmp, &fi->inmem_pages, list) {
		struct page *page = cur->page;

		lock_page(page);
		if (page->mapping == inode->i_mapping) {
			trace_f2fs_commit_inmem_page(page, INMEM);

			set_page_dirty(page);
			f2fs_wait_on_page_writeback(page, DATA, true);
			if (clear_page_dirty_for_io(page)) {
				inode_dec_dirty_pages(inode);
				remove_dirty_inode(inode);
			}
retry:
			fio.page = page;
			fio.old_blkaddr = NULL_ADDR;
			fio.encrypted_page = NULL;
			fio.need_lock = LOCK_DONE;
			err = do_write_data_page(&fio);
			if (err) {
				if (err == -ENOMEM) {
					congestion_wait(BLK_RW_ASYNC, HZ/50);
					cond_resched();
					goto retry;
				}
				unlock_page(page);
				break;
			}
			/* record old blkaddr for revoking */
			cur->old_addr = fio.old_blkaddr;
			last_idx = page->index;
		}
		unlock_page(page);
		list_move_tail(&cur->list, revoke_list);
	}

	if (last_idx != ULONG_MAX)
		f2fs_submit_merged_write_cond(sbi, inode, 0, last_idx, DATA);

	if (err)
		__revoke_inmem_pages(inode, revoke_list, false, false);

	return err;
}
int commit_inmem_pages(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct list_head revoke_list;
	int err;

	INIT_LIST_HEAD(&revoke_list);
	f2fs_balance_fs(sbi, true);
	f2fs_lock_op(sbi);

	set_inode_flag(inode, FI_ATOMIC_COMMIT);

	mutex_lock(&fi->inmem_lock);
	err = __commit_inmem_pages(inode, &revoke_list);
	if (err) {
		int ret;
		/*
		 * try to revoke all committed pages, but still we could fail
		 * due to no memory or other reasons; if that happens, EAGAIN
		 * is returned, meaning the transaction has already lost its
		 * integrity and the caller should use a journal to recover,
		 * or rewrite and commit the last transaction. For any other
		 * error code, revoking was done by the filesystem itself.
		 */
		ret = __revoke_inmem_pages(inode, &revoke_list, false, true);
		if (ret)
			err = ret;

		/* drop all uncommitted pages */
		__revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
	}
	spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
	if (!list_empty(&fi->inmem_ilist))
		list_del_init(&fi->inmem_ilist);
	spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
	mutex_unlock(&fi->inmem_lock);

	clear_inode_flag(inode, FI_ATOMIC_COMMIT);

	f2fs_unlock_op(sbi);
	return err;
}
/*
 * This function balances dirty node and dentry pages.
 * In addition, it controls garbage collection.
 */
void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
{
#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
		f2fs_show_injection_info(FAULT_CHECKPOINT);
		f2fs_stop_checkpoint(sbi, false);
	}
#endif

	/* f2fs_balance_fs_bg() may still be pending */
	if (need && excess_cached_nats(sbi))
		f2fs_balance_fs_bg(sbi);

	/*
	 * We should do GC, or end up with a checkpoint, if there are too many
	 * dirty dir/node pages without enough free segments.
	 */
	if (has_not_enough_free_secs(sbi, 0, 0)) {
		mutex_lock(&sbi->gc_mutex);
		f2fs_gc(sbi, false, false, NULL_SEGNO);
	}
}
void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
{
	/* try to shrink extent cache when there is not enough memory */
	if (!available_free_memory(sbi, EXTENT_CACHE))
		f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER);

	/* check the # of cached NAT entries */
	if (!available_free_memory(sbi, NAT_ENTRIES))
		try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);

	if (!available_free_memory(sbi, FREE_NIDS))
		try_to_free_nids(sbi, MAX_FREE_NIDS);
	else
		build_free_nids(sbi, false, false);

	if (!is_idle(sbi) && !excess_dirty_nats(sbi))
		return;

	/* checkpoint is the only way to shrink partial cached entries */
	if (!available_free_memory(sbi, NAT_ENTRIES) ||
			!available_free_memory(sbi, INO_ENTRIES) ||
			excess_prefree_segs(sbi) ||
			excess_dirty_nats(sbi) ||
			f2fs_time_over(sbi, CP_TIME)) {
		if (test_opt(sbi, DATA_FLUSH)) {
			struct blk_plug plug;

			blk_start_plug(&plug);
			sync_dirty_inodes(sbi, FILE_INODE);
			blk_finish_plug(&plug);
		}
		f2fs_sync_fs(sbi->sb, true);
		stat_inc_bg_cp_count(sbi->stat_info);
	}
}
static int __submit_flush_wait(struct f2fs_sb_info *sbi,
				struct block_device *bdev)
{
	struct bio *bio = f2fs_bio_alloc(sbi, 0, true);
	int ret;

	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
	bio_set_dev(bio, bdev);
	ret = submit_bio_wait(bio);
	bio_put(bio);

	trace_f2fs_issue_flush(bdev, test_opt(sbi, NOBARRIER),
				test_opt(sbi, FLUSH_MERGE), ret);
	return ret;
}
static int submit_flush_wait(struct f2fs_sb_info *sbi, nid_t ino)
{
	int ret = 0;
	int i;

	if (!sbi->s_ndevs)
		return __submit_flush_wait(sbi, sbi->sb->s_bdev);

	for (i = 0; i < sbi->s_ndevs; i++) {
		if (!is_dirty_device(sbi, ino, i, FLUSH_INO))
			continue;
		ret = __submit_flush_wait(sbi, FDEV(i).bdev);
		if (ret)
			break;
	}
	return ret;
}
static int issue_flush_thread(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
	wait_queue_head_t *q = &fcc->flush_wait_queue;
repeat:
	if (kthread_should_stop())
		return 0;

	sb_start_intwrite(sbi->sb);

	if (!llist_empty(&fcc->issue_list)) {
		struct flush_cmd *cmd, *next;
		int ret;

		fcc->dispatch_list = llist_del_all(&fcc->issue_list);
		fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);

		cmd = llist_entry(fcc->dispatch_list, struct flush_cmd, llnode);

		ret = submit_flush_wait(sbi, cmd->ino);
		atomic_inc(&fcc->issued_flush);

		llist_for_each_entry_safe(cmd, next,
					  fcc->dispatch_list, llnode) {
			cmd->ret = ret;
			complete(&cmd->wait);
		}
		fcc->dispatch_list = NULL;
	}

	sb_end_intwrite(sbi->sb);

	wait_event_interruptible(*q,
		kthread_should_stop() || !llist_empty(&fcc->issue_list));
	goto repeat;
}
int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
	struct flush_cmd cmd;
	int ret;

	if (test_opt(sbi, NOBARRIER))
		return 0;

	if (!test_opt(sbi, FLUSH_MERGE)) {
		ret = submit_flush_wait(sbi, ino);
		atomic_inc(&fcc->issued_flush);
		return ret;
	}

	if (atomic_inc_return(&fcc->issing_flush) == 1 || sbi->s_ndevs > 1) {
		ret = submit_flush_wait(sbi, ino);
		atomic_dec(&fcc->issing_flush);

		atomic_inc(&fcc->issued_flush);
		return ret;
	}

	cmd.ino = ino;
	init_completion(&cmd.wait);

	llist_add(&cmd.llnode, &fcc->issue_list);

	/* update issue_list before we wake up issue_flush thread */
	smp_mb();

	if (waitqueue_active(&fcc->flush_wait_queue))
		wake_up(&fcc->flush_wait_queue);

	if (fcc->f2fs_issue_flush) {
		wait_for_completion(&cmd.wait);
		atomic_dec(&fcc->issing_flush);
	} else {
		struct llist_node *list;

		list = llist_del_all(&fcc->issue_list);
		if (!list) {
			wait_for_completion(&cmd.wait);
			atomic_dec(&fcc->issing_flush);
		} else {
			struct flush_cmd *tmp, *next;

			ret = submit_flush_wait(sbi, ino);

			llist_for_each_entry_safe(tmp, next, list, llnode) {
				if (tmp == &cmd) {
					cmd.ret = ret;
					atomic_dec(&fcc->issing_flush);
					continue;
				}
				tmp->ret = ret;
				complete(&tmp->wait);
			}
		}
	}

	return cmd.ret;
}
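
/*
 * Design note (sketch of the flow above): under FLUSH_MERGE, the first
 * concurrent caller (or any multi-device flush) submits the preflush
 * directly; later callers queue a flush_cmd on fcc->issue_list and sleep
 * until the flusher thread, or a fallback submitter when the thread is
 * gone, completes one preflush on behalf of the whole batch.
 */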
int create_flush_cmd_control(struct f2fs_sb_info *sbi)
{
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	struct flush_cmd_control *fcc;
	int err = 0;

	if (SM_I(sbi)->fcc_info) {
		fcc = SM_I(sbi)->fcc_info;
		if (fcc->f2fs_issue_flush)
			return err;
		goto init_thread;
	}

	fcc = f2fs_kzalloc(sbi, sizeof(struct flush_cmd_control), GFP_KERNEL);
	if (!fcc)
		return -ENOMEM;
	atomic_set(&fcc->issued_flush, 0);
	atomic_set(&fcc->issing_flush, 0);
	init_waitqueue_head(&fcc->flush_wait_queue);
	init_llist_head(&fcc->issue_list);
	SM_I(sbi)->fcc_info = fcc;
	if (!test_opt(sbi, FLUSH_MERGE))
		return err;

init_thread:
	fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
				"f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(fcc->f2fs_issue_flush)) {
		err = PTR_ERR(fcc->f2fs_issue_flush);
		kfree(fcc);
		SM_I(sbi)->fcc_info = NULL;
		return err;
	}

	return err;
}
void destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free)
{
	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;

	if (fcc && fcc->f2fs_issue_flush) {
		struct task_struct *flush_thread = fcc->f2fs_issue_flush;

		fcc->f2fs_issue_flush = NULL;
		kthread_stop(flush_thread);
	}
	if (free) {
		kfree(fcc);
		SM_I(sbi)->fcc_info = NULL;
	}
}
int f2fs_flush_device_cache(struct f2fs_sb_info *sbi)
{
	int ret = 0, i;

	if (!sbi->s_ndevs)
		return 0;

	for (i = 1; i < sbi->s_ndevs; i++) {
		if (!f2fs_test_bit(i, (char *)&sbi->dirty_device))
			continue;
		ret = __submit_flush_wait(sbi, FDEV(i).bdev);
		if (ret)
			break;

		spin_lock(&sbi->dev_lock);
		f2fs_clear_bit(i, (char *)&sbi->dirty_device);
		spin_unlock(&sbi->dev_lock);
	}

	return ret;
}
static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	/* need not be added */
	if (IS_CURSEG(sbi, segno))
		return;

	if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]++;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		if (unlikely(t >= DIRTY)) {
			f2fs_bug_on(sbi, 1);
			return;
		}
		if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]++;
	}
}
static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]--;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]--;

		if (get_valid_blocks(sbi, segno, true) == 0)
			clear_bit(GET_SEC_FROM_SEG(sbi, segno),
						dirty_i->victim_secmap);
	}
}
/*
 * Errors such as -ENOMEM should not occur here.
 * Adding a dirty entry to the seglist is not a critical operation.
 * If a given segment is one of the current working segments, it won't be added.
 */
static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned short valid_blocks;

	if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
		return;

	mutex_lock(&dirty_i->seglist_lock);

	valid_blocks = get_valid_blocks(sbi, segno, false);

	if (valid_blocks == 0) {
		__locate_dirty_segment(sbi, segno, PRE);
		__remove_dirty_segment(sbi, segno, DIRTY);
	} else if (valid_blocks < sbi->blocks_per_seg) {
		__locate_dirty_segment(sbi, segno, DIRTY);
	} else {
		/* Recovery routine with SSR needs this */
		__remove_dirty_segment(sbi, segno, DIRTY);
	}

	mutex_unlock(&dirty_i->seglist_lock);
}
static struct discard_cmd *__create_discard_cmd(struct f2fs_sb_info *sbi,
		struct block_device *bdev, block_t lstart,
		block_t start, block_t len)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *pend_list;
	struct discard_cmd *dc;

	f2fs_bug_on(sbi, !len);

	pend_list = &dcc->pend_list[plist_idx(len)];

	dc = f2fs_kmem_cache_alloc(discard_cmd_slab, GFP_NOFS);
	INIT_LIST_HEAD(&dc->list);
	dc->bdev = bdev;
	dc->lstart = lstart;
	dc->start = start;
	dc->len = len;
	dc->ref = 0;
	dc->state = D_PREP;
	dc->error = 0;
	init_completion(&dc->wait);
	list_add_tail(&dc->list, pend_list);
	atomic_inc(&dcc->discard_cmd_cnt);
	dcc->undiscard_blks += len;

	return dc;
}
static struct discard_cmd *__attach_discard_cmd(struct f2fs_sb_info *sbi,
				struct block_device *bdev, block_t lstart,
				block_t start, block_t len,
				struct rb_node *parent, struct rb_node **p)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_cmd *dc;

	dc = __create_discard_cmd(sbi, bdev, lstart, start, len);

	rb_link_node(&dc->rb_node, parent, p);
	rb_insert_color(&dc->rb_node, &dcc->root);

	return dc;
}
static void __detach_discard_cmd(struct discard_cmd_control *dcc,
							struct discard_cmd *dc)
{
	if (dc->state == D_DONE)
		atomic_dec(&dcc->issing_discard);

	list_del(&dc->list);
	rb_erase(&dc->rb_node, &dcc->root);
	dcc->undiscard_blks -= dc->len;

	kmem_cache_free(discard_cmd_slab, dc);

	atomic_dec(&dcc->discard_cmd_cnt);
}
static void __remove_discard_cmd(struct f2fs_sb_info *sbi,
							struct discard_cmd *dc)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;

	trace_f2fs_remove_discard(dc->bdev, dc->start, dc->len);

	f2fs_bug_on(sbi, dc->ref);

	if (dc->error == -EOPNOTSUPP)
		dc->error = 0;

	if (dc->error)
		f2fs_msg(sbi->sb, KERN_INFO,
			"Issue discard(%u, %u, %u) failed, ret: %d",
			dc->lstart, dc->start, dc->len, dc->error);
	__detach_discard_cmd(dcc, dc);
}
static void f2fs_submit_discard_endio(struct bio *bio)
{
	struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private;

	dc->error = blk_status_to_errno(bio->bi_status);
	dc->state = D_DONE;
	complete_all(&dc->wait);
	bio_put(bio);
}
static void __check_sit_bitmap(struct f2fs_sb_info *sbi,
				block_t start, block_t end)
{
#ifdef CONFIG_F2FS_CHECK_FS
	struct seg_entry *sentry;
	unsigned int segno;
	block_t blk = start;
	unsigned long offset, size, max_blocks = sbi->blocks_per_seg;
	unsigned long *map;

	while (blk < end) {
		segno = GET_SEGNO(sbi, blk);
		sentry = get_seg_entry(sbi, segno);
		offset = GET_BLKOFF_FROM_SEG0(sbi, blk);

		if (end < START_BLOCK(sbi, segno + 1))
			size = GET_BLKOFF_FROM_SEG0(sbi, end);
		else
			size = max_blocks;
		map = (unsigned long *)(sentry->cur_valid_map);
		offset = __find_rev_next_bit(map, size, offset);
		f2fs_bug_on(sbi, offset != size);
		blk = START_BLOCK(sbi, segno + 1);
	}
#endif
}
/* this function is copied from blkdev_issue_discard from block/blk-lib.c */
static void __submit_discard_cmd(struct f2fs_sb_info *sbi,
						struct discard_policy *dpolicy,
						struct discard_cmd *dc)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ?
					&(dcc->fstrim_list) : &(dcc->wait_list);
	struct bio *bio = NULL;
	int flag = dpolicy->sync ? REQ_SYNC : 0;

	if (dc->state != D_PREP)
		return;

	trace_f2fs_issue_discard(dc->bdev, dc->start, dc->len);

	dc->error = __blkdev_issue_discard(dc->bdev,
				SECTOR_FROM_BLOCK(dc->start),
				SECTOR_FROM_BLOCK(dc->len),
				GFP_NOFS, 0, &bio);
	if (!dc->error) {
		/* should keep before submission to avoid D_DONE right away */
		dc->state = D_SUBMIT;
		atomic_inc(&dcc->issued_discard);
		atomic_inc(&dcc->issing_discard);
		if (bio) {
			bio->bi_private = dc;
			bio->bi_end_io = f2fs_submit_discard_endio;
			bio->bi_opf |= flag;
			submit_bio(bio);
			list_move_tail(&dc->list, wait_list);
			__check_sit_bitmap(sbi, dc->start, dc->start + dc->len);

			f2fs_update_iostat(sbi, FS_DISCARD, 1);
		}
	} else {
		__remove_discard_cmd(sbi, dc);
	}
}
static struct discard_cmd *__insert_discard_tree(struct f2fs_sb_info *sbi,
				struct block_device *bdev, block_t lstart,
				block_t start, block_t len,
				struct rb_node **insert_p,
				struct rb_node *insert_parent)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct discard_cmd *dc = NULL;

	if (insert_p && insert_parent) {
		parent = insert_parent;
		p = insert_p;
		goto do_insert;
	}

	p = __lookup_rb_tree_for_insert(sbi, &dcc->root, &parent, lstart);
do_insert:
	dc = __attach_discard_cmd(sbi, bdev, lstart, start, len, parent, p);

	return dc;
}
static void __relocate_discard_cmd(struct discard_cmd_control *dcc,
						struct discard_cmd *dc)
{
	list_move_tail(&dc->list, &dcc->pend_list[plist_idx(dc->len)]);
}
static void __punch_discard_cmd(struct f2fs_sb_info *sbi,
				struct discard_cmd *dc, block_t blkaddr)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_info di = dc->di;
	bool modified = false;

	if (dc->state == D_DONE || dc->len == 1) {
		__remove_discard_cmd(sbi, dc);
		return;
	}

	dcc->undiscard_blks -= di.len;

	if (blkaddr > di.lstart) {
		dc->len = blkaddr - dc->lstart;
		dcc->undiscard_blks += dc->len;
		__relocate_discard_cmd(dcc, dc);
		modified = true;
	}

	if (blkaddr < di.lstart + di.len - 1) {
		if (modified) {
			__insert_discard_tree(sbi, dc->bdev, blkaddr + 1,
					di.start + blkaddr + 1 - di.lstart,
					di.lstart + di.len - 1 - blkaddr,
					NULL, NULL);
		} else {
			dc->lstart = blkaddr + 1;
			dc->len = di.lstart + di.len - 1 - blkaddr;
			dc->start += blkaddr + 1 - di.lstart;
			dcc->undiscard_blks += dc->len;
			__relocate_discard_cmd(dcc, dc);
		}
	}
}
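
/*
 * Illustrative sketch (not in the original source): punching block B out of
 * a pending command covering [L, L+len) leaves up to two pieces. [L, B) is
 * kept in the trimmed, relocated command, and [B+1, L+len) is either
 * re-inserted as a new command (if the front was kept) or carried by the
 * shifted original command.
 */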
static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
				struct block_device *bdev, block_t lstart,
				block_t start, block_t len)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
	struct discard_cmd *dc;
	struct discard_info di = {0};
	struct rb_node **insert_p = NULL, *insert_parent = NULL;
	block_t end = lstart + len;

	mutex_lock(&dcc->cmd_lock);

	dc = (struct discard_cmd *)__lookup_rb_tree_ret(&dcc->root,
					NULL, lstart,
					(struct rb_entry **)&prev_dc,
					(struct rb_entry **)&next_dc,
					&insert_p, &insert_parent, true);
	if (dc)
		prev_dc = dc;

	if (!prev_dc) {
		di.lstart = lstart;
		di.len = next_dc ? next_dc->lstart - lstart : len;
		di.len = min(di.len, len);
		di.start = start;
	}

	while (1) {
		struct rb_node *node;
		bool merged = false;
		struct discard_cmd *tdc = NULL;

		if (prev_dc) {
			di.lstart = prev_dc->lstart + prev_dc->len;
			if (di.lstart < lstart)
				di.lstart = lstart;
			if (di.lstart >= end)
				break;

			if (!next_dc || next_dc->lstart > end)
				di.len = end - di.lstart;
			else
				di.len = next_dc->lstart - di.lstart;
			di.start = start + di.lstart - lstart;
		}

		if (!di.len)
			goto next;

		if (prev_dc && prev_dc->state == D_PREP &&
			prev_dc->bdev == bdev &&
			__is_discard_back_mergeable(&di, &prev_dc->di)) {
			prev_dc->di.len += di.len;
			dcc->undiscard_blks += di.len;
			__relocate_discard_cmd(dcc, prev_dc);
			di = prev_dc->di;
			tdc = prev_dc;
			merged = true;
		}

		if (next_dc && next_dc->state == D_PREP &&
			next_dc->bdev == bdev &&
			__is_discard_front_mergeable(&di, &next_dc->di)) {
			next_dc->di.lstart = di.lstart;
			next_dc->di.len += di.len;
			next_dc->di.start = di.start;
			dcc->undiscard_blks += di.len;
			__relocate_discard_cmd(dcc, next_dc);
			if (tdc)
				__remove_discard_cmd(sbi, tdc);
			merged = true;
		}

		if (!merged) {
			__insert_discard_tree(sbi, bdev, di.lstart, di.start,
							di.len, NULL, NULL);
		}
 next:
		prev_dc = next_dc;
		if (!prev_dc)
			break;

		node = rb_next(&prev_dc->rb_node);
		next_dc = rb_entry_safe(node, struct discard_cmd, rb_node);
	}

	mutex_unlock(&dcc->cmd_lock);
}
static int __queue_discard_cmd(struct f2fs_sb_info *sbi,
		struct block_device *bdev, block_t blkstart, block_t blklen)
{
	block_t lblkstart = blkstart;

	trace_f2fs_queue_discard(bdev, blkstart, blklen);

	if (sbi->s_ndevs) {
		int devi = f2fs_target_device_index(sbi, blkstart);

		blkstart -= FDEV(devi).start_blk;
	}
	__update_discard_tree_range(sbi, bdev, lblkstart, blkstart, blklen);
	return 0;
}
static void __issue_discard_cmd_range(struct f2fs_sb_info *sbi,
					struct discard_policy *dpolicy,
					unsigned int start, unsigned int end)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
	struct rb_node **insert_p = NULL, *insert_parent = NULL;
	struct discard_cmd *dc;
	struct blk_plug plug;
	int issued;

next:
	issued = 0;

	mutex_lock(&dcc->cmd_lock);
	f2fs_bug_on(sbi, !__check_rb_tree_consistence(sbi, &dcc->root));

	dc = (struct discard_cmd *)__lookup_rb_tree_ret(&dcc->root,
					NULL, start,
					(struct rb_entry **)&prev_dc,
					(struct rb_entry **)&next_dc,
					&insert_p, &insert_parent, true);
	if (!dc)
		dc = next_dc;

	blk_start_plug(&plug);

	while (dc && dc->lstart <= end) {
		struct rb_node *node;

		if (dc->len < dpolicy->granularity)
			goto skip;

		if (dc->state != D_PREP) {
			list_move_tail(&dc->list, &dcc->fstrim_list);
			goto skip;
		}

		__submit_discard_cmd(sbi, dpolicy, dc);

		if (++issued >= dpolicy->max_requests) {
			start = dc->lstart + dc->len;

			blk_finish_plug(&plug);
			mutex_unlock(&dcc->cmd_lock);

			schedule();

			goto next;
		}
skip:
		node = rb_next(&dc->rb_node);
		dc = rb_entry_safe(node, struct discard_cmd, rb_node);

		if (fatal_signal_pending(current))
			break;
	}

	blk_finish_plug(&plug);
	mutex_unlock(&dcc->cmd_lock);
}
static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
					struct discard_policy *dpolicy)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *pend_list;
	struct discard_cmd *dc, *tmp;
	struct blk_plug plug;
	int i, iter = 0, issued = 0;
	bool io_interrupted = false;

	for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
		if (i + 1 < dpolicy->granularity)
			break;
		pend_list = &dcc->pend_list[i];

		mutex_lock(&dcc->cmd_lock);
		if (list_empty(pend_list))
			goto next;
		f2fs_bug_on(sbi, !__check_rb_tree_consistence(sbi, &dcc->root));
		blk_start_plug(&plug);
		list_for_each_entry_safe(dc, tmp, pend_list, list) {
			f2fs_bug_on(sbi, dc->state != D_PREP);

			if (dpolicy->io_aware && i < dpolicy->io_aware_gran &&
								!is_idle(sbi)) {
				io_interrupted = true;
				goto skip;
			}

			__submit_discard_cmd(sbi, dpolicy, dc);
			issued++;
skip:
			if (++iter >= dpolicy->max_requests)
				break;
		}
		blk_finish_plug(&plug);
next:
		mutex_unlock(&dcc->cmd_lock);

		if (iter >= dpolicy->max_requests)
			break;
	}

	if (!issued && io_interrupted)
		issued = -1;

	return issued;
}
static bool __drop_discard_cmd(struct f2fs_sb_info *sbi)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *pend_list;
	struct discard_cmd *dc, *tmp;
	int i;
	bool dropped = false;

	mutex_lock(&dcc->cmd_lock);
	for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
		pend_list = &dcc->pend_list[i];
		list_for_each_entry_safe(dc, tmp, pend_list, list) {
			f2fs_bug_on(sbi, dc->state != D_PREP);
			__remove_discard_cmd(sbi, dc);
			dropped = true;
		}
	}
	mutex_unlock(&dcc->cmd_lock);

	return dropped;
}

void drop_discard_cmd(struct f2fs_sb_info *sbi)
{
	__drop_discard_cmd(sbi);
}
static unsigned int __wait_one_discard_bio(struct f2fs_sb_info *sbi,
							struct discard_cmd *dc)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	unsigned int len = 0;

	wait_for_completion_io(&dc->wait);
	mutex_lock(&dcc->cmd_lock);
	f2fs_bug_on(sbi, dc->state != D_DONE);
	dc->ref--;
	if (!dc->ref) {
		if (!dc->error)
			len = dc->len;
		__remove_discard_cmd(sbi, dc);
	}
	mutex_unlock(&dcc->cmd_lock);

	return len;
}
static unsigned int __wait_discard_cmd_range(struct f2fs_sb_info *sbi,
						struct discard_policy *dpolicy,
						block_t start, block_t end)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ?
					&(dcc->fstrim_list) : &(dcc->wait_list);
	struct discard_cmd *dc, *tmp;
	bool need_wait;
	unsigned int trimmed = 0;

next:
	need_wait = false;

	mutex_lock(&dcc->cmd_lock);
	list_for_each_entry_safe(dc, tmp, wait_list, list) {
		if (dc->lstart + dc->len <= start || end <= dc->lstart)
			continue;
		if (dc->len < dpolicy->granularity)
			continue;
		if (dc->state == D_DONE && !dc->ref) {
			wait_for_completion_io(&dc->wait);
			if (!dc->error)
				trimmed += dc->len;
			__remove_discard_cmd(sbi, dc);
		} else {
			dc->ref++;
			need_wait = true;
			break;
		}
	}
	mutex_unlock(&dcc->cmd_lock);

	if (need_wait) {
		trimmed += __wait_one_discard_bio(sbi, dc);
		goto next;
	}

	return trimmed;
}
static void __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
						struct discard_policy *dpolicy)
{
	__wait_discard_cmd_range(sbi, dpolicy, 0, UINT_MAX);
}
/* This should be covered by the global mutex, &sit_i->sentry_lock */
static void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_cmd *dc;
	bool need_wait = false;

	mutex_lock(&dcc->cmd_lock);
	dc = (struct discard_cmd *)__lookup_rb_tree(&dcc->root, NULL, blkaddr);
	if (dc) {
		if (dc->state == D_PREP) {
			__punch_discard_cmd(sbi, dc, blkaddr);
		} else {
			dc->ref++;
			need_wait = true;
		}
	}
	mutex_unlock(&dcc->cmd_lock);

	if (need_wait)
		__wait_one_discard_bio(sbi, dc);
}
void stop_discard_thread(struct f2fs_sb_info *sbi)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;

	if (dcc && dcc->f2fs_issue_discard) {
		struct task_struct *discard_thread = dcc->f2fs_issue_discard;

		dcc->f2fs_issue_discard = NULL;
		kthread_stop(discard_thread);
	}
}
/* This comes from f2fs_put_super */
bool f2fs_wait_discard_bios(struct f2fs_sb_info *sbi)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_policy dpolicy;
	bool dropped;

	init_discard_policy(&dpolicy, DPOLICY_UMOUNT, dcc->discard_granularity);
	__issue_discard_cmd(sbi, &dpolicy);
	dropped = __drop_discard_cmd(sbi);
	__wait_all_discard_cmd(sbi, &dpolicy);

	return dropped;
}
static int issue_discard_thread(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	wait_queue_head_t *q = &dcc->discard_wait_queue;
	struct discard_policy dpolicy;
	unsigned int wait_ms = DEF_MIN_DISCARD_ISSUE_TIME;
	int issued;

	set_freezable();

	do {
		init_discard_policy(&dpolicy, DPOLICY_BG,
					dcc->discard_granularity);

		wait_event_interruptible_timeout(*q,
				kthread_should_stop() || freezing(current) ||
				dcc->discard_wake,
				msecs_to_jiffies(wait_ms));
		if (try_to_freeze())
			continue;
		if (f2fs_readonly(sbi->sb))
			continue;
		if (kthread_should_stop())
			return 0;

		if (dcc->discard_wake)
			dcc->discard_wake = 0;

		if (sbi->gc_thread && sbi->gc_thread->gc_urgent)
			init_discard_policy(&dpolicy, DPOLICY_FORCE, 1);

		sb_start_intwrite(sbi->sb);

		issued = __issue_discard_cmd(sbi, &dpolicy);
		if (issued) {
			__wait_all_discard_cmd(sbi, &dpolicy);
			wait_ms = dpolicy.min_interval;
		} else {
			wait_ms = dpolicy.max_interval;
		}

		sb_end_intwrite(sbi->sb);

	} while (!kthread_should_stop());
	return 0;
}
#ifdef CONFIG_BLK_DEV_ZONED
static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
		struct block_device *bdev, block_t blkstart, block_t blklen)
{
	sector_t sector, nr_sects;
	block_t lblkstart = blkstart;
	int devi = 0;

	if (sbi->s_ndevs) {
		devi = f2fs_target_device_index(sbi, blkstart);
		blkstart -= FDEV(devi).start_blk;
	}

	/*
	 * We need to know the type of the zone: for conventional zones,
	 * use regular discard if the drive supports it. For sequential
	 * zones, reset the zone write pointer.
	 */
	switch (get_blkz_type(sbi, bdev, blkstart)) {

	case BLK_ZONE_TYPE_CONVENTIONAL:
		if (!blk_queue_discard(bdev_get_queue(bdev)))
			return 0;
		return __queue_discard_cmd(sbi, bdev, lblkstart, blklen);
	case BLK_ZONE_TYPE_SEQWRITE_REQ:
	case BLK_ZONE_TYPE_SEQWRITE_PREF:
		sector = SECTOR_FROM_BLOCK(blkstart);
		nr_sects = SECTOR_FROM_BLOCK(blklen);

		if (sector & (bdev_zone_sectors(bdev) - 1) ||
				nr_sects != bdev_zone_sectors(bdev)) {
			f2fs_msg(sbi->sb, KERN_INFO,
				"(%d) %s: Unaligned discard attempted (block %x + %x)",
				devi, sbi->s_ndevs ? FDEV(devi).path: "",
				blkstart, blklen);
			return -EIO;
		}
		trace_f2fs_issue_reset_zone(bdev, blkstart);
		return blkdev_reset_zones(bdev, sector,
					  nr_sects, GFP_NOFS);
	default:
		/* Unknown zone type: broken device? */
		return -EIO;
	}
}
#endif
static int __issue_discard_async(struct f2fs_sb_info *sbi,
		struct block_device *bdev, block_t blkstart, block_t blklen)
{
#ifdef CONFIG_BLK_DEV_ZONED
	if (f2fs_sb_has_blkzoned(sbi->sb) &&
				bdev_zoned_model(bdev) != BLK_ZONED_NONE)
		return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen);
#endif
	return __queue_discard_cmd(sbi, bdev, blkstart, blklen);
}
static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
				block_t blkstart, block_t blklen)
{
	sector_t start = blkstart, len = 0;
	struct block_device *bdev;
	struct seg_entry *se;
	unsigned int offset;
	block_t i;
	int err = 0;

	bdev = f2fs_target_device(sbi, blkstart, NULL);

	for (i = blkstart; i < blkstart + blklen; i++, len++) {
		if (i != start) {
			struct block_device *bdev2 =
				f2fs_target_device(sbi, i, NULL);

			if (bdev2 != bdev) {
				err = __issue_discard_async(sbi, bdev, start,
						len);
				if (err)
					return err;
				bdev = bdev2;
				start = i;
				len = 0;
			}
		}

		se = get_seg_entry(sbi, GET_SEGNO(sbi, i));
		offset = GET_BLKOFF_FROM_SEG0(sbi, i);

		if (!f2fs_test_and_set_bit(offset, se->discard_map))
			sbi->discard_blks--;
	}

	if (len)
		err = __issue_discard_async(sbi, bdev, start, len);
	return err;
}
static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc,
							bool check_only)
{
	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
	int max_blocks = sbi->blocks_per_seg;
	struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start);
	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
	unsigned long *discard_map = (unsigned long *)se->discard_map;
	unsigned long *dmap = SIT_I(sbi)->tmp_map;
	unsigned int start = 0, end = -1;
	bool force = (cpc->reason & CP_DISCARD);
	struct discard_entry *de = NULL;
	struct list_head *head = &SM_I(sbi)->dcc_info->entry_list;
	int i;

	if (se->valid_blocks == max_blocks || !f2fs_discard_en(sbi))
		return false;

	if (!force) {
		if (!test_opt(sbi, DISCARD) || !se->valid_blocks ||
			SM_I(sbi)->dcc_info->nr_discards >=
				SM_I(sbi)->dcc_info->max_discards)
			return false;
	}

	/* SIT_VBLOCK_MAP_SIZE should be a multiple of sizeof(unsigned long) */
	for (i = 0; i < entries; i++)
		dmap[i] = force ? ~ckpt_map[i] & ~discard_map[i] :
				(cur_map[i] ^ ckpt_map[i]) & ckpt_map[i];

	while (force || SM_I(sbi)->dcc_info->nr_discards <=
				SM_I(sbi)->dcc_info->max_discards) {
		start = __find_rev_next_bit(dmap, max_blocks, end + 1);
		if (start >= max_blocks)
			break;

		end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1);
		if (force && start && end != max_blocks
					&& (end - start) < cpc->trim_minlen)
			continue;

		if (check_only)
			return true;

		if (!de) {
			de = f2fs_kmem_cache_alloc(discard_entry_slab,
								GFP_F2FS_ZERO);
			de->start_blkaddr = START_BLOCK(sbi, cpc->trim_start);
			list_add_tail(&de->list, head);
		}

		for (i = start; i < end; i++)
			__set_bit_le(i, (void *)de->discard_map);

		SM_I(sbi)->dcc_info->nr_discards += end - start;
	}
	return false;
}
void release_discard_addrs(struct f2fs_sb_info *sbi)
{
	struct list_head *head = &(SM_I(sbi)->dcc_info->entry_list);
	struct discard_entry *entry, *this;

	/* drop caches */
	list_for_each_entry_safe(entry, this, head, list) {
		list_del(&entry->list);
		kmem_cache_free(discard_entry_slab, entry);
	}
}
/*
 * clear_prefree_segments() should be called after the checkpoint is done.
 */
static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int segno;

	mutex_lock(&dirty_i->seglist_lock);
	for_each_set_bit(segno, dirty_i->dirty_segmap[PRE], MAIN_SEGS(sbi))
		__set_test_and_free(sbi, segno);
	mutex_unlock(&dirty_i->seglist_lock);
}
void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *head = &dcc->entry_list;
	struct discard_entry *entry, *this;
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
	unsigned int start = 0, end = -1;
	unsigned int secno, start_segno;
	bool force = (cpc->reason & CP_DISCARD);

	mutex_lock(&dirty_i->seglist_lock);

	while (1) {
		int i;
		start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1);
		if (start >= MAIN_SEGS(sbi))
			break;
		end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi),
								start + 1);

		for (i = start; i < end; i++)
			clear_bit(i, prefree_map);

		dirty_i->nr_dirty[PRE] -= end - start;

		if (!test_opt(sbi, DISCARD))
			continue;

		if (force && start >= cpc->trim_start &&
					(end - 1) <= cpc->trim_end)
				continue;

		if (!test_opt(sbi, LFS) || sbi->segs_per_sec == 1) {
			f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
				(end - start) << sbi->log_blocks_per_seg);
			continue;
		}
next:
		secno = GET_SEC_FROM_SEG(sbi, start);
		start_segno = GET_SEG_FROM_SEC(sbi, secno);
		if (!IS_CURSEC(sbi, secno) &&
			!get_valid_blocks(sbi, start, true))
			f2fs_issue_discard(sbi, START_BLOCK(sbi, start_segno),
				sbi->segs_per_sec << sbi->log_blocks_per_seg);

		start = start_segno + sbi->segs_per_sec;
		if (start < end)
			goto next;
		else
			end = start - 1;
	}
	mutex_unlock(&dirty_i->seglist_lock);

	/* send small discards */
	list_for_each_entry_safe(entry, this, head, list) {
		unsigned int cur_pos = 0, next_pos, len, total_len = 0;
		bool is_valid = test_bit_le(0, entry->discard_map);

find_next:
		if (is_valid) {
			next_pos = find_next_zero_bit_le(entry->discard_map,
					sbi->blocks_per_seg, cur_pos);
			len = next_pos - cur_pos;

			if (f2fs_sb_has_blkzoned(sbi->sb) ||
			    (force && len < cpc->trim_minlen))
				goto skip;

			f2fs_issue_discard(sbi, entry->start_blkaddr + cur_pos,
									len);
			total_len += len;
		} else {
			next_pos = find_next_bit_le(entry->discard_map,
					sbi->blocks_per_seg, cur_pos);
		}
skip:
		cur_pos = next_pos;
		is_valid = !is_valid;

		if (cur_pos < sbi->blocks_per_seg)
			goto find_next;

		list_del(&entry->list);
		dcc->nr_discards -= total_len;
		kmem_cache_free(discard_entry_slab, entry);
	}

	wake_up_discard_thread(sbi, false);
}
void init_discard_policy(struct discard_policy *dpolicy,
				int discard_type, unsigned int granularity)
{
	/* common policy */
	dpolicy->type = discard_type;
	dpolicy->sync = true;
	dpolicy->granularity = granularity;

	dpolicy->max_requests = DEF_MAX_DISCARD_REQUEST;
	dpolicy->io_aware_gran = MAX_PLIST_NUM;

	if (discard_type == DPOLICY_BG) {
		dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME;
		dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME;
		dpolicy->io_aware = true;
	} else if (discard_type == DPOLICY_FORCE) {
		dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME;
		dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME;
		dpolicy->io_aware = false;
	} else if (discard_type == DPOLICY_FSTRIM) {
		dpolicy->io_aware = false;
	} else if (discard_type == DPOLICY_UMOUNT) {
		dpolicy->io_aware = false;
	}
}
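
/*
 * Usage in this file: the background discard thread starts with
 * DPOLICY_BG (io-aware) and switches to DPOLICY_FORCE with granularity 1
 * under urgent GC, fstrim builds a DPOLICY_FSTRIM policy from
 * cpc.trim_minlen, and umount drains pending commands with DPOLICY_UMOUNT.
 */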
static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
{
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	struct discard_cmd_control *dcc;
	int err = 0, i;

	if (SM_I(sbi)->dcc_info) {
		dcc = SM_I(sbi)->dcc_info;
		goto init_thread;
	}

	dcc = f2fs_kzalloc(sbi, sizeof(struct discard_cmd_control), GFP_KERNEL);
	if (!dcc)
		return -ENOMEM;

	dcc->discard_granularity = DEFAULT_DISCARD_GRANULARITY;
	INIT_LIST_HEAD(&dcc->entry_list);
	for (i = 0; i < MAX_PLIST_NUM; i++)
		INIT_LIST_HEAD(&dcc->pend_list[i]);
	INIT_LIST_HEAD(&dcc->wait_list);
	INIT_LIST_HEAD(&dcc->fstrim_list);
	mutex_init(&dcc->cmd_lock);
	atomic_set(&dcc->issued_discard, 0);
	atomic_set(&dcc->issing_discard, 0);
	atomic_set(&dcc->discard_cmd_cnt, 0);
	dcc->nr_discards = 0;
	dcc->max_discards = MAIN_SEGS(sbi) << sbi->log_blocks_per_seg;
	dcc->undiscard_blks = 0;
	dcc->root = RB_ROOT;

	init_waitqueue_head(&dcc->discard_wait_queue);
	SM_I(sbi)->dcc_info = dcc;
init_thread:
	dcc->f2fs_issue_discard = kthread_run(issue_discard_thread, sbi,
				"f2fs_discard-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(dcc->f2fs_issue_discard)) {
		err = PTR_ERR(dcc->f2fs_issue_discard);
		kfree(dcc);
		SM_I(sbi)->dcc_info = NULL;
		return err;
	}

	return err;
}
static void destroy_discard_cmd_control(struct f2fs_sb_info *sbi)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;

	if (!dcc)
		return;

	stop_discard_thread(sbi);

	kfree(dcc);
	SM_I(sbi)->dcc_info = NULL;
}
static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);

	if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap)) {
		sit_i->dirty_sentries++;
		return false;
	}

	return true;
}
static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
					unsigned int segno, int modified)
{
	struct seg_entry *se = get_seg_entry(sbi, segno);
	se->type = type;
	if (modified)
		__mark_sit_entry_dirty(sbi, segno);
}
static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
{
	struct seg_entry *se;
	unsigned int segno, offset;
	long int new_vblocks;
	bool exist;
#ifdef CONFIG_F2FS_CHECK_FS
	bool mir_exist;
#endif

	segno = GET_SEGNO(sbi, blkaddr);

	se = get_seg_entry(sbi, segno);
	new_vblocks = se->valid_blocks + del;
	offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);

	f2fs_bug_on(sbi, (new_vblocks >> (sizeof(unsigned short) << 3) ||
				(new_vblocks > sbi->blocks_per_seg)));

	se->valid_blocks = new_vblocks;
	se->mtime = get_mtime(sbi);
	SIT_I(sbi)->max_mtime = se->mtime;

	/* Update valid block bitmap */
	if (del > 0) {
		exist = f2fs_test_and_set_bit(offset, se->cur_valid_map);
#ifdef CONFIG_F2FS_CHECK_FS
		mir_exist = f2fs_test_and_set_bit(offset,
						se->cur_valid_map_mir);
		if (unlikely(exist != mir_exist)) {
			f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent error "
				"when setting bitmap, blk:%u, old bit:%d",
				blkaddr, exist);
			f2fs_bug_on(sbi, 1);
		}
#endif
		if (unlikely(exist)) {
			f2fs_msg(sbi->sb, KERN_ERR,
				"Bitmap was wrongly set, blk:%u", blkaddr);
			f2fs_bug_on(sbi, 1);
			se->valid_blocks--;
			del = 0;
		}

		if (f2fs_discard_en(sbi) &&
			!f2fs_test_and_set_bit(offset, se->discard_map))
			sbi->discard_blks--;

		/* don't overwrite by SSR to keep node chain */
		if (IS_NODESEG(se->type)) {
			if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map))
				se->ckpt_valid_blocks++;
		}
	} else {
		exist = f2fs_test_and_clear_bit(offset, se->cur_valid_map);
#ifdef CONFIG_F2FS_CHECK_FS
		mir_exist = f2fs_test_and_clear_bit(offset,
						se->cur_valid_map_mir);
		if (unlikely(exist != mir_exist)) {
			f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent error "
				"when clearing bitmap, blk:%u, old bit:%d",
				blkaddr, exist);
			f2fs_bug_on(sbi, 1);
		}
#endif
		if (unlikely(!exist)) {
			f2fs_msg(sbi->sb, KERN_ERR,
				"Bitmap was wrongly cleared, blk:%u", blkaddr);
			f2fs_bug_on(sbi, 1);
			se->valid_blocks++;
			del = 0;
		}

		if (f2fs_discard_en(sbi) &&
			f2fs_test_and_clear_bit(offset, se->discard_map))
			sbi->discard_blks++;
	}
	if (!f2fs_test_bit(offset, se->ckpt_valid_map))
		se->ckpt_valid_blocks += del;

	__mark_sit_entry_dirty(sbi, segno);

	/* update total number of valid blocks to be written in ckpt area */
	SIT_I(sbi)->written_valid_blocks += del;

	if (sbi->segs_per_sec > 1)
		get_sec_entry(sbi, segno)->valid_blocks += del;
}
void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
{
	unsigned int segno = GET_SEGNO(sbi, addr);
	struct sit_info *sit_i = SIT_I(sbi);

	f2fs_bug_on(sbi, addr == NULL_ADDR);
	if (addr == NEW_ADDR)
		return;

	/* add it into sit main buffer */
	down_write(&sit_i->sentry_lock);

	update_sit_entry(sbi, addr, -1);

	/* add it into dirty seglist */
	locate_dirty_segment(sbi, segno);

	up_write(&sit_i->sentry_lock);
}
bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int segno, offset;
	struct seg_entry *se;
	bool is_cp = false;

	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
		return true;

	down_read(&sit_i->sentry_lock);

	segno = GET_SEGNO(sbi, blkaddr);
	se = get_seg_entry(sbi, segno);
	offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);

	if (f2fs_test_bit(offset, se->ckpt_valid_map))
		is_cp = true;

	up_read(&sit_i->sentry_lock);

	return is_cp;
}
/*
 * This function must be called with curseg_mutex held.
 */
static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
					struct f2fs_summary *sum)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	void *addr = curseg->sum_blk;
	addr += curseg->next_blkoff * sizeof(struct f2fs_summary);
	memcpy(addr, sum, sizeof(struct f2fs_summary));
}
/*
 * Calculate the number of current summary pages for writing
 */
int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
{
	int valid_sum_count = 0;
	int i, sum_in_page;

	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		if (sbi->ckpt->alloc_type[i] == SSR)
			valid_sum_count += sbi->blocks_per_seg;
		else {
			if (for_ra)
				valid_sum_count += le16_to_cpu(
					F2FS_CKPT(sbi)->cur_data_blkoff[i]);
			else
				valid_sum_count += curseg_blkoff(sbi, i);
		}
	}

	sum_in_page = (PAGE_SIZE - 2 * SUM_JOURNAL_SIZE -
			SUM_FOOTER_SIZE) / SUMMARY_SIZE;
	if (valid_sum_count <= sum_in_page)
		return 1;
	else if ((valid_sum_count - sum_in_page) <=
		(PAGE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
		return 2;
	return 3;
}
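
/*
 * Worked arithmetic (a sketch, assuming the usual on-disk constants:
 * SUMMARY_SIZE = 7, SUM_FOOTER_SIZE = 5 and SUM_JOURNAL_SIZE = 253 with
 * 4KB pages): sum_in_page = (4096 - 2*253 - 5) / 7 = 512, so one page
 * covers up to 512 summary entries and the function returns 1, 2 or 3
 * pages as valid_sum_count grows past each capacity step.
 */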
/*
 * Caller should put this summary page
 */
struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
{
	return get_meta_page(sbi, GET_SUM_BLOCK(sbi, segno));
}
void update_meta_page(struct f2fs_sb_info *sbi, void *src, block_t blk_addr)
{
	struct page *page = grab_meta_page(sbi, blk_addr);

	memcpy(page_address(page), src, PAGE_SIZE);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
}
static void write_sum_page(struct f2fs_sb_info *sbi,
			struct f2fs_summary_block *sum_blk, block_t blk_addr)
{
	update_meta_page(sbi, (void *)sum_blk, blk_addr);
}
static void write_current_sum_page(struct f2fs_sb_info *sbi,
						int type, block_t blk_addr)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	struct page *page = grab_meta_page(sbi, blk_addr);
	struct f2fs_summary_block *src = curseg->sum_blk;
	struct f2fs_summary_block *dst;

	dst = (struct f2fs_summary_block *)page_address(page);

	mutex_lock(&curseg->curseg_mutex);

	down_read(&curseg->journal_rwsem);
	memcpy(&dst->journal, curseg->journal, SUM_JOURNAL_SIZE);
	up_read(&curseg->journal_rwsem);

	memcpy(dst->entries, src->entries, SUM_ENTRY_SIZE);
	memcpy(&dst->footer, &src->footer, SUM_FOOTER_SIZE);

	mutex_unlock(&curseg->curseg_mutex);

	set_page_dirty(page);
	f2fs_put_page(page, 1);
}
static int is_next_segment_free(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int segno = curseg->segno + 1;
	struct free_segmap_info *free_i = FREE_I(sbi);

	if (segno < MAIN_SEGS(sbi) && segno % sbi->segs_per_sec)
		return !test_bit(segno, free_i->free_segmap);
	return 0;
}
/*
 * Find a new segment from the free segments bitmap in the right order.
 * This function must succeed; otherwise it is a BUG.
 */
static void get_new_segment(struct f2fs_sb_info *sbi,
			unsigned int *newseg, bool new_sec, int dir)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int segno, secno, zoneno;
	unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone;
	unsigned int hint = GET_SEC_FROM_SEG(sbi, *newseg);
	unsigned int old_zoneno = GET_ZONE_FROM_SEG(sbi, *newseg);
	unsigned int left_start = hint;
	bool init = true;
	int go_left = 0;
	int i;

	spin_lock(&free_i->segmap_lock);

	if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
		segno = find_next_zero_bit(free_i->free_segmap,
			GET_SEG_FROM_SEC(sbi, hint + 1), *newseg + 1);
		if (segno < GET_SEG_FROM_SEC(sbi, hint + 1))
			goto got_it;
	}
find_other_zone:
	secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
	if (secno >= MAIN_SECS(sbi)) {
		if (dir == ALLOC_RIGHT) {
			secno = find_next_zero_bit(free_i->free_secmap,
							MAIN_SECS(sbi), 0);
			f2fs_bug_on(sbi, secno >= MAIN_SECS(sbi));
		} else {
			go_left = 1;
			left_start = hint - 1;
		}
	}
	if (go_left == 0)
		goto skip_left;

	while (test_bit(left_start, free_i->free_secmap)) {
		if (left_start > 0) {
			left_start--;
			continue;
		}
		left_start = find_next_zero_bit(free_i->free_secmap,
							MAIN_SECS(sbi), 0);
		f2fs_bug_on(sbi, left_start >= MAIN_SECS(sbi));
		break;
	}
	secno = left_start;
skip_left:
	segno = GET_SEG_FROM_SEC(sbi, secno);
	zoneno = GET_ZONE_FROM_SEC(sbi, secno);

	/* give up on finding another zone */
	if (!init)
		goto got_it;
	if (sbi->secs_per_zone == 1)
		goto got_it;
	if (zoneno == old_zoneno)
		goto got_it;
	if (dir == ALLOC_LEFT) {
		if (!go_left && zoneno + 1 >= total_zones)
			goto got_it;
		if (go_left && zoneno == 0)
			goto got_it;
	}
	for (i = 0; i < NR_CURSEG_TYPE; i++)
		if (CURSEG_I(sbi, i)->zone == zoneno)
			break;

	if (i < NR_CURSEG_TYPE) {
		/* zone is in use, try another */
		if (go_left)
			hint = zoneno * sbi->secs_per_zone - 1;
		else if (zoneno + 1 >= total_zones)
			hint = 0;
		else
			hint = (zoneno + 1) * sbi->secs_per_zone;
		init = false;
		goto find_other_zone;
	}
got_it:
	/* set it as dirty segment in free segmap */
	f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap));
	__set_inuse(sbi, segno);
	*newseg = segno;
	spin_unlock(&free_i->segmap_lock);
}
static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	struct summary_footer *sum_footer;

	curseg->segno = curseg->next_segno;
	curseg->zone = GET_ZONE_FROM_SEG(sbi, curseg->segno);
	curseg->next_blkoff = 0;
	curseg->next_segno = NULL_SEGNO;

	sum_footer = &(curseg->sum_blk->footer);
	memset(sum_footer, 0, sizeof(struct summary_footer));
	if (IS_DATASEG(type))
		SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
	if (IS_NODESEG(type))
		SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
	__set_sit_entry_type(sbi, type, curseg->segno, modified);
}
static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
{
	/* if segs_per_sec is larger than 1, we need to keep the original policy. */
	if (sbi->segs_per_sec != 1)
		return CURSEG_I(sbi, type)->segno;

	if (test_opt(sbi, NOHEAP) &&
		(type == CURSEG_HOT_DATA || IS_NODESEG(type)))
		return 0;

	if (SIT_I(sbi)->last_victim[ALLOC_NEXT])
		return SIT_I(sbi)->last_victim[ALLOC_NEXT];

	/* find segments from 0 to reuse freed segments */
	if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE)
		return 0;

	return CURSEG_I(sbi, type)->segno;
}
/*
 * Allocate a current working segment.
 * This function always allocates a free segment in LFS manner.
 */
static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int segno = curseg->segno;
	int dir = ALLOC_LEFT;

	write_sum_page(sbi, curseg->sum_blk,
				GET_SUM_BLOCK(sbi, segno));
	if (type == CURSEG_WARM_DATA || type == CURSEG_COLD_DATA)
		dir = ALLOC_RIGHT;

	if (test_opt(sbi, NOHEAP))
		dir = ALLOC_RIGHT;

	segno = __get_next_segno(sbi, type);
	get_new_segment(sbi, &segno, new_sec, dir);
	curseg->next_segno = segno;
	reset_curseg(sbi, type, 1);
	curseg->alloc_type = LFS;
}
static void __next_free_blkoff(struct f2fs_sb_info *sbi,
			struct curseg_info *seg, block_t start)
{
	struct seg_entry *se = get_seg_entry(sbi, seg->segno);
	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
	unsigned long *target_map = SIT_I(sbi)->tmp_map;
	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
	int i, pos;

	for (i = 0; i < entries; i++)
		target_map[i] = ckpt_map[i] | cur_map[i];

	pos = __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, start);

	seg->next_blkoff = pos;
}
/*
 * If a segment is written in LFS manner, the next block offset is simply
 * obtained by increasing the current block offset. If a segment is written
 * in SSR manner, the next block offset is found by __next_free_blkoff().
 */
static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
				struct curseg_info *seg)
{
	if (seg->alloc_type == SSR)
		__next_free_blkoff(sbi, seg, seg->next_blkoff + 1);
	else
		seg->next_blkoff++;
}
/*
 * This function always allocates a used segment (from the dirty seglist) in
 * SSR manner, so it must recover the existing summary information of the
 * segment's valid blocks.
 */
static void change_curseg(struct f2fs_sb_info *sbi, int type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int new_segno = curseg->next_segno;
	struct f2fs_summary_block *sum_node;
	struct page *sum_page;

	write_sum_page(sbi, curseg->sum_blk,
				GET_SUM_BLOCK(sbi, curseg->segno));
	__set_test_and_inuse(sbi, new_segno);

	mutex_lock(&dirty_i->seglist_lock);
	__remove_dirty_segment(sbi, new_segno, PRE);
	__remove_dirty_segment(sbi, new_segno, DIRTY);
	mutex_unlock(&dirty_i->seglist_lock);

	reset_curseg(sbi, type, 1);
	curseg->alloc_type = SSR;
	__next_free_blkoff(sbi, curseg, 0);

	sum_page = get_sum_page(sbi, new_segno);
	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
	memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
	f2fs_put_page(sum_page, 1);
}
static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;
	unsigned segno = NULL_SEGNO;
	int i, cnt;
	bool reversed = false;

	/* need_SSR() already forces to do this */
	if (v_ops->get_victim(sbi, &segno, BG_GC, type, SSR)) {
		curseg->next_segno = segno;
		return 1;
	}

	/* For node segments, let's do SSR more intensively */
	if (IS_NODESEG(type)) {
		if (type >= CURSEG_WARM_NODE) {
			reversed = true;
			i = CURSEG_COLD_NODE;
		} else {
			i = CURSEG_HOT_NODE;
		}
		cnt = NR_CURSEG_NODE_TYPE;
	} else {
		if (type >= CURSEG_WARM_DATA) {
			reversed = true;
			i = CURSEG_COLD_DATA;
		} else {
			i = CURSEG_HOT_DATA;
		}
		cnt = NR_CURSEG_DATA_TYPE;
	}

	for (; cnt-- > 0; reversed ? i-- : i++) {
		if (i == type)
			continue;
		if (v_ops->get_victim(sbi, &segno, BG_GC, i, SSR)) {
			curseg->next_segno = segno;
			return 1;
		}
	}
	return 0;
}
/*
 * flush out the current segment and replace it with a new segment.
 * This function must succeed; otherwise it is a BUG.
 */
static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
						int type, bool force)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);

	if (force)
		new_curseg(sbi, type, true);
	else if (!is_set_ckpt_flags(sbi, CP_CRC_RECOVERY_FLAG) &&
					type == CURSEG_WARM_NODE)
		new_curseg(sbi, type, false);
	else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type))
		new_curseg(sbi, type, false);
	else if (need_SSR(sbi) && get_ssr_segment(sbi, type))
		change_curseg(sbi, type);
	else
		new_curseg(sbi, type, false);

	stat_inc_seg_type(sbi, curseg);
}
void allocate_new_segments(struct f2fs_sb_info *sbi)
{
	struct curseg_info *curseg;
	unsigned int old_segno;
	int i;

	down_write(&SIT_I(sbi)->sentry_lock);

	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		curseg = CURSEG_I(sbi, i);
		old_segno = curseg->segno;
		SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true);
		locate_dirty_segment(sbi, old_segno);
	}

	up_write(&SIT_I(sbi)->sentry_lock);
}
static const struct segment_allocation default_salloc_ops = {
	.allocate_segment = allocate_segment_by_default,
};
bool exist_trim_candidates(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	__u64 trim_start = cpc->trim_start;
	bool has_candidate = false;

	down_write(&SIT_I(sbi)->sentry_lock);
	for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++) {
		if (add_discard_addrs(sbi, cpc, true)) {
			has_candidate = true;
			break;
		}
	}
	up_write(&SIT_I(sbi)->sentry_lock);

	cpc->trim_start = trim_start;
	return has_candidate;
}
2376 int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
2378 __u64 start = F2FS_BYTES_TO_BLK(range->start);
2379 __u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1;
2380 unsigned int start_segno, end_segno, cur_segno;
2381 block_t start_block, end_block;
2382 struct cp_control cpc;
2383 struct discard_policy dpolicy;
2384 unsigned long long trimmed = 0;
2387 if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize)
2390 if (end <= MAIN_BLKADDR(sbi))
2393 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
2394 f2fs_msg(sbi->sb, KERN_WARNING,
2395 "Found FS corruption, run fsck to fix.");
2399 /* start/end segment number in main_area */
2400 start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start);
2401 end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
2402 GET_SEGNO(sbi, end);
2404 cpc.reason = CP_DISCARD;
2405 cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen));
2407 /* do checkpoint to issue discard commands safely */
2408 for (cur_segno = start_segno; cur_segno <= end_segno;
2409 cur_segno = cpc.trim_end + 1) {
2410 cpc.trim_start = cur_segno;
2412 if (sbi->discard_blks == 0)
2414 else if (sbi->discard_blks < BATCHED_TRIM_BLOCKS(sbi))
2415 cpc.trim_end = end_segno;
2417 cpc.trim_end = min_t(unsigned int,
2418 rounddown(cur_segno +
2419 BATCHED_TRIM_SEGMENTS(sbi),
2420 sbi->segs_per_sec) - 1, end_segno);
2422 mutex_lock(&sbi->gc_mutex);
2423 err = write_checkpoint(sbi, &cpc);
2424 mutex_unlock(&sbi->gc_mutex);
2431 start_block = START_BLOCK(sbi, start_segno);
2432 end_block = START_BLOCK(sbi, min(cur_segno, end_segno) + 1);
2434 init_discard_policy(&dpolicy, DPOLICY_FSTRIM, cpc.trim_minlen);
2435 __issue_discard_cmd_range(sbi, &dpolicy, start_block, end_block);
2436 trimmed = __wait_discard_cmd_range(sbi, &dpolicy,
2437 start_block, end_block);
2439 range->len = F2FS_BLK_TO_BYTES(trimmed);
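
/*
 * A minimal userspace sketch (not part of this file) of how
 * f2fs_trim_fs() is normally reached: the FITRIM ioctl on any open
 * file of the mounted filesystem hands a struct fstrim_range down
 * through the VFS. Error handling is trimmed for brevity.
 */
#include <fcntl.h>
#include <limits.h>
#include <linux/fs.h>		/* FITRIM, struct fstrim_range */
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int trim_whole_fs(const char *mntpath)
{
	struct fstrim_range range = {
		.start = 0,
		.len = ULLONG_MAX,	/* trim as much as possible */
		.minlen = 0,		/* feeds cpc.trim_minlen above */
	};
	int fd = open(mntpath, O_RDONLY);

	if (fd < 0)
		return -1;
	if (ioctl(fd, FITRIM, &range) < 0) {
		close(fd);
		return -1;
	}
	/* on success, range.len is rewritten with the bytes trimmed */
	printf("trimmed %llu bytes\n", (unsigned long long)range.len);
	close(fd);
	return 0;
}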
2443 static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type)
2445 struct curseg_info *curseg = CURSEG_I(sbi, type);
2446 if (curseg->next_blkoff < sbi->blocks_per_seg)
2451 int rw_hint_to_seg_type(enum rw_hint hint)
2454 case WRITE_LIFE_SHORT:
2455 return CURSEG_HOT_DATA;
2456 case WRITE_LIFE_EXTREME:
2457 return CURSEG_COLD_DATA;
2459 return CURSEG_WARM_DATA;
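
/*
 * A minimal userspace sketch (not part of this file): a per-file
 * write hint set with fcntl(F_SET_RW_HINT) lands in
 * inode->i_write_hint, which rw_hint_to_seg_type() above consumes
 * (see __get_segment_type_6() below). The fallback defines cover
 * older uapi headers.
 */
#include <fcntl.h>
#include <stdint.h>

#ifndef F_SET_RW_HINT
#define F_SET_RW_HINT		(1024 + 12)	/* F_LINUX_SPECIFIC_BASE + 12 */
#endif
#ifndef RWH_WRITE_LIFE_EXTREME
#define RWH_WRITE_LIFE_EXTREME	5
#endif

int mark_fd_cold(int fd)
{
	uint64_t hint = RWH_WRITE_LIFE_EXTREME;	/* maps to CURSEG_COLD_DATA */

	return fcntl(fd, F_SET_RW_HINT, &hint);
}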
2463	/* This returns a write hint for each segment type. These hints are
2464	 * passed down to the block layer. The mapping tables depend on the
2465	 * mount option 'whint_mode'; a standalone model follows io_type_to_rw_hint().
2467 * 1) whint_mode=off. F2FS only passes down WRITE_LIFE_NOT_SET.
2469 * 2) whint_mode=user-based. F2FS tries to pass down hints given by users.
2473 * META WRITE_LIFE_NOT_SET
2477 * ioctl(COLD) COLD_DATA WRITE_LIFE_EXTREME
2478 * extension list " "
2481 * WRITE_LIFE_EXTREME COLD_DATA WRITE_LIFE_EXTREME
2482 * WRITE_LIFE_SHORT HOT_DATA WRITE_LIFE_SHORT
2483 * WRITE_LIFE_NOT_SET WARM_DATA WRITE_LIFE_NOT_SET
2484 * WRITE_LIFE_NONE " "
2485 * WRITE_LIFE_MEDIUM " "
2486 * WRITE_LIFE_LONG " "
2489 * WRITE_LIFE_EXTREME COLD_DATA WRITE_LIFE_EXTREME
2490 * WRITE_LIFE_SHORT HOT_DATA WRITE_LIFE_SHORT
2491 * WRITE_LIFE_NOT_SET WARM_DATA WRITE_LIFE_NOT_SET
2492 * WRITE_LIFE_NONE " WRITE_LIFE_NONE
2493 * WRITE_LIFE_MEDIUM " WRITE_LIFE_MEDIUM
2494 * WRITE_LIFE_LONG " WRITE_LIFE_LONG
2496 * 3) whint_mode=fs-based. F2FS passes down hints with its policy.
2500 * META WRITE_LIFE_MEDIUM;
2501 * HOT_NODE WRITE_LIFE_NOT_SET
2503 * COLD_NODE WRITE_LIFE_NONE
2504 * ioctl(COLD) COLD_DATA WRITE_LIFE_EXTREME
2505 * extension list " "
2508 * WRITE_LIFE_EXTREME COLD_DATA WRITE_LIFE_EXTREME
2509 * WRITE_LIFE_SHORT HOT_DATA WRITE_LIFE_SHORT
2510 * WRITE_LIFE_NOT_SET WARM_DATA WRITE_LIFE_LONG
2511 * WRITE_LIFE_NONE " "
2512 * WRITE_LIFE_MEDIUM " "
2513 * WRITE_LIFE_LONG " "
2516 * WRITE_LIFE_EXTREME COLD_DATA WRITE_LIFE_EXTREME
2517 * WRITE_LIFE_SHORT HOT_DATA WRITE_LIFE_SHORT
2518 * WRITE_LIFE_NOT_SET WARM_DATA WRITE_LIFE_NOT_SET
2519 * WRITE_LIFE_NONE " WRITE_LIFE_NONE
2520 * WRITE_LIFE_MEDIUM " WRITE_LIFE_MEDIUM
2521 * WRITE_LIFE_LONG " WRITE_LIFE_LONG
2524 enum rw_hint io_type_to_rw_hint(struct f2fs_sb_info *sbi,
2525 enum page_type type, enum temp_type temp)
2527 if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_USER) {
2530 return WRITE_LIFE_NOT_SET;
2531 else if (temp == HOT)
2532 return WRITE_LIFE_SHORT;
2533 else if (temp == COLD)
2534 return WRITE_LIFE_EXTREME;
2536 return WRITE_LIFE_NOT_SET;
2538 } else if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_FS) {
2541 return WRITE_LIFE_LONG;
2542 else if (temp == HOT)
2543 return WRITE_LIFE_SHORT;
2544 else if (temp == COLD)
2545 return WRITE_LIFE_EXTREME;
2546 } else if (type == NODE) {
2547 if (temp == WARM || temp == HOT)
2548 return WRITE_LIFE_NOT_SET;
2549 else if (temp == COLD)
2550 return WRITE_LIFE_NONE;
2551 } else if (type == META) {
2552 return WRITE_LIFE_MEDIUM;
2555 return WRITE_LIFE_NOT_SET;
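
/*
 * A standalone model of the whint_mode=fs table above, handy for
 * sanity-checking the mapping outside the kernel. The enum values
 * are stubs that mirror the kernel's names, not the kernel
 * definitions themselves.
 */
enum model_type { MODEL_DATA, MODEL_NODE, MODEL_META };
enum model_temp { MODEL_HOT, MODEL_WARM, MODEL_COLD };
enum model_hint {
	MODEL_NOT_SET, MODEL_NONE, MODEL_SHORT,
	MODEL_MEDIUM, MODEL_LONG, MODEL_EXTREME,
};

static enum model_hint model_fs_mode_hint(enum model_type type,
					  enum model_temp temp)
{
	if (type == MODEL_DATA)
		return temp == MODEL_HOT  ? MODEL_SHORT :
		       temp == MODEL_COLD ? MODEL_EXTREME : MODEL_LONG;
	if (type == MODEL_NODE)
		return temp == MODEL_COLD ? MODEL_NONE : MODEL_NOT_SET;
	return MODEL_MEDIUM;	/* META */
}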
2558 static int __get_segment_type_2(struct f2fs_io_info *fio)
2560 if (fio->type == DATA)
2561 return CURSEG_HOT_DATA;
2563 return CURSEG_HOT_NODE;
2566 static int __get_segment_type_4(struct f2fs_io_info *fio)
2568 if (fio->type == DATA) {
2569 struct inode *inode = fio->page->mapping->host;
2571 if (S_ISDIR(inode->i_mode))
2572 return CURSEG_HOT_DATA;
2574 return CURSEG_COLD_DATA;
2576 if (IS_DNODE(fio->page) && is_cold_node(fio->page))
2577 return CURSEG_WARM_NODE;
2579 return CURSEG_COLD_NODE;
2583 static int __get_segment_type_6(struct f2fs_io_info *fio)
2585 if (fio->type == DATA) {
2586 struct inode *inode = fio->page->mapping->host;
2588 if (is_cold_data(fio->page) || file_is_cold(inode))
2589 return CURSEG_COLD_DATA;
2590 if (file_is_hot(inode) ||
2591 is_inode_flag_set(inode, FI_HOT_DATA))
2592 return CURSEG_HOT_DATA;
2593 return rw_hint_to_seg_type(inode->i_write_hint);
2595 if (IS_DNODE(fio->page))
2596 return is_cold_node(fio->page) ? CURSEG_WARM_NODE :
2598 return CURSEG_COLD_NODE;
2602 static int __get_segment_type(struct f2fs_io_info *fio)
2606 switch (F2FS_OPTION(fio->sbi).active_logs) {
2608 type = __get_segment_type_2(fio);
2611 type = __get_segment_type_4(fio);
2614 type = __get_segment_type_6(fio);
2617 f2fs_bug_on(fio->sbi, true);
2622 else if (IS_WARM(type))
2629 void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
2630 block_t old_blkaddr, block_t *new_blkaddr,
2631 struct f2fs_summary *sum, int type,
2632 struct f2fs_io_info *fio, bool add_list)
2634 struct sit_info *sit_i = SIT_I(sbi);
2635 struct curseg_info *curseg = CURSEG_I(sbi, type);
2637 down_read(&SM_I(sbi)->curseg_lock);
2639 mutex_lock(&curseg->curseg_mutex);
2640 down_write(&sit_i->sentry_lock);
2642 *new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
2644 f2fs_wait_discard_bio(sbi, *new_blkaddr);
2647	 * __add_sum_entry must be called under the curseg_mutex
2648	 * because it updates a summary entry in the
2649	 * current summary block.
2651 __add_sum_entry(sbi, type, sum);
2653 __refresh_next_blkoff(sbi, curseg);
2655 stat_inc_block_count(sbi, curseg);
2658 * SIT information should be updated before segment allocation,
2659	 * since SSR needs the latest valid block information.
2661 update_sit_entry(sbi, *new_blkaddr, 1);
2662 if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
2663 update_sit_entry(sbi, old_blkaddr, -1);
2665 if (!__has_curseg_space(sbi, type))
2666 sit_i->s_ops->allocate_segment(sbi, type, false);
2669	 * segment dirty status should be updated after segment allocation,
2670	 * so we only need to update the status once, after the previous
2671	 * segment has been closed.
2673 locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
2674 locate_dirty_segment(sbi, GET_SEGNO(sbi, *new_blkaddr));
2676 up_write(&sit_i->sentry_lock);
2678 if (page && IS_NODESEG(type)) {
2679 fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));
2681 f2fs_inode_chksum_set(sbi, page);
2685 struct f2fs_bio_info *io;
2687 INIT_LIST_HEAD(&fio->list);
2688 fio->in_list = true;
2689 io = sbi->write_io[fio->type] + fio->temp;
2690 spin_lock(&io->io_lock);
2691 list_add_tail(&fio->list, &io->io_list);
2692 spin_unlock(&io->io_lock);
2695 mutex_unlock(&curseg->curseg_mutex);
2697 up_read(&SM_I(sbi)->curseg_lock);
2700 static void update_device_state(struct f2fs_io_info *fio)
2702 struct f2fs_sb_info *sbi = fio->sbi;
2703 unsigned int devidx;
2708 devidx = f2fs_target_device_index(sbi, fio->new_blkaddr);
2710 /* update device state for fsync */
2711 set_dirty_device(sbi, fio->ino, devidx, FLUSH_INO);
2713 /* update device state for checkpoint */
2714 if (!f2fs_test_bit(devidx, (char *)&sbi->dirty_device)) {
2715 spin_lock(&sbi->dev_lock);
2716 f2fs_set_bit(devidx, (char *)&sbi->dirty_device);
2717 spin_unlock(&sbi->dev_lock);
2721 static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
2723 int type = __get_segment_type(fio);
2727 allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
2728 &fio->new_blkaddr, sum, type, fio, true);
2730	/* write out the dirty page to the bdev */
2731 err = f2fs_submit_page_write(fio);
2732 if (err == -EAGAIN) {
2733 fio->old_blkaddr = fio->new_blkaddr;
2736 update_device_state(fio);
2740 void write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
2741 enum iostat_type io_type)
2743 struct f2fs_io_info fio = {
2748 .op_flags = REQ_SYNC | REQ_META | REQ_PRIO,
2749 .old_blkaddr = page->index,
2750 .new_blkaddr = page->index,
2752 .encrypted_page = NULL,
2756 if (unlikely(page->index >= MAIN_BLKADDR(sbi)))
2757 fio.op_flags &= ~REQ_META;
2759 set_page_writeback(page);
2760 f2fs_submit_page_write(&fio);
2762 f2fs_update_iostat(sbi, io_type, F2FS_BLKSIZE);
2765 void write_node_page(unsigned int nid, struct f2fs_io_info *fio)
2767 struct f2fs_summary sum;
2769 set_summary(&sum, nid, 0, 0);
2770 do_write_page(&sum, fio);
2772 f2fs_update_iostat(fio->sbi, fio->io_type, F2FS_BLKSIZE);
2775 void write_data_page(struct dnode_of_data *dn, struct f2fs_io_info *fio)
2777 struct f2fs_sb_info *sbi = fio->sbi;
2778 struct f2fs_summary sum;
2779 struct node_info ni;
2781 f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR);
2782 get_node_info(sbi, dn->nid, &ni);
2783 set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
2784 do_write_page(&sum, fio);
2785 f2fs_update_data_blkaddr(dn, fio->new_blkaddr);
2787 f2fs_update_iostat(sbi, fio->io_type, F2FS_BLKSIZE);
2790 int rewrite_data_page(struct f2fs_io_info *fio)
2793 struct f2fs_sb_info *sbi = fio->sbi;
2795 fio->new_blkaddr = fio->old_blkaddr;
2796 /* i/o temperature is needed for passing down write hints */
2797 __get_segment_type(fio);
2799 f2fs_bug_on(sbi, !IS_DATASEG(get_seg_entry(sbi,
2800 GET_SEGNO(sbi, fio->new_blkaddr))->type));
2802 stat_inc_inplace_blocks(fio->sbi);
2804 err = f2fs_submit_page_bio(fio);
2806 update_device_state(fio);
2808 f2fs_update_iostat(fio->sbi, fio->io_type, F2FS_BLKSIZE);
2813 static inline int __f2fs_get_curseg(struct f2fs_sb_info *sbi,
2818 for (i = CURSEG_HOT_DATA; i < NO_CHECK_TYPE; i++) {
2819 if (CURSEG_I(sbi, i)->segno == segno)
2825 void __f2fs_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
2826 block_t old_blkaddr, block_t new_blkaddr,
2827 bool recover_curseg, bool recover_newaddr)
2829 struct sit_info *sit_i = SIT_I(sbi);
2830 struct curseg_info *curseg;
2831 unsigned int segno, old_cursegno;
2832 struct seg_entry *se;
2834 unsigned short old_blkoff;
2836 segno = GET_SEGNO(sbi, new_blkaddr);
2837 se = get_seg_entry(sbi, segno);
2840 down_write(&SM_I(sbi)->curseg_lock);
2842 if (!recover_curseg) {
2843 /* for recovery flow */
2844 if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
2845 if (old_blkaddr == NULL_ADDR)
2846 type = CURSEG_COLD_DATA;
2848 type = CURSEG_WARM_DATA;
2851 if (IS_CURSEG(sbi, segno)) {
2852		/* se->type is volatile due to SSR allocation */
2853 type = __f2fs_get_curseg(sbi, segno);
2854 f2fs_bug_on(sbi, type == NO_CHECK_TYPE);
2856 type = CURSEG_WARM_DATA;
2860 f2fs_bug_on(sbi, !IS_DATASEG(type));
2861 curseg = CURSEG_I(sbi, type);
2863 mutex_lock(&curseg->curseg_mutex);
2864 down_write(&sit_i->sentry_lock);
2866 old_cursegno = curseg->segno;
2867 old_blkoff = curseg->next_blkoff;
2869 /* change the current segment */
2870 if (segno != curseg->segno) {
2871 curseg->next_segno = segno;
2872 change_curseg(sbi, type);
2875 curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr);
2876 __add_sum_entry(sbi, type, sum);
2878 if (!recover_curseg || recover_newaddr)
2879 update_sit_entry(sbi, new_blkaddr, 1);
2880 if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
2881 update_sit_entry(sbi, old_blkaddr, -1);
2883 locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
2884 locate_dirty_segment(sbi, GET_SEGNO(sbi, new_blkaddr));
2886 locate_dirty_segment(sbi, old_cursegno);
2888 if (recover_curseg) {
2889 if (old_cursegno != curseg->segno) {
2890 curseg->next_segno = old_cursegno;
2891 change_curseg(sbi, type);
2893 curseg->next_blkoff = old_blkoff;
2896 up_write(&sit_i->sentry_lock);
2897 mutex_unlock(&curseg->curseg_mutex);
2898 up_write(&SM_I(sbi)->curseg_lock);
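
/*
 * A standalone sketch of the lock nesting used above, expressed with
 * pthreads purely for illustration (the kernel uses rw_semaphore and
 * mutex): curseg_lock, then curseg_mutex, then sentry_lock, released
 * in reverse order.
 */
#include <pthread.h>

static pthread_rwlock_t model_curseg_lock = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t model_curseg_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_rwlock_t model_sentry_lock = PTHREAD_RWLOCK_INITIALIZER;

static void model_replace_block(void)
{
	pthread_rwlock_wrlock(&model_curseg_lock);
	pthread_mutex_lock(&model_curseg_mutex);
	pthread_rwlock_wrlock(&model_sentry_lock);
	/* ... move curseg, add the summary, update SIT entries ... */
	pthread_rwlock_unlock(&model_sentry_lock);
	pthread_mutex_unlock(&model_curseg_mutex);
	pthread_rwlock_unlock(&model_curseg_lock);
}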
2901 void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
2902 block_t old_addr, block_t new_addr,
2903 unsigned char version, bool recover_curseg,
2904 bool recover_newaddr)
2906 struct f2fs_summary sum;
2908 set_summary(&sum, dn->nid, dn->ofs_in_node, version);
2910 __f2fs_replace_block(sbi, &sum, old_addr, new_addr,
2911 recover_curseg, recover_newaddr);
2913 f2fs_update_data_blkaddr(dn, new_addr);
2916 void f2fs_wait_on_page_writeback(struct page *page,
2917 enum page_type type, bool ordered)
2919 if (PageWriteback(page)) {
2920 struct f2fs_sb_info *sbi = F2FS_P_SB(page);
2922 f2fs_submit_merged_write_cond(sbi, page->mapping->host,
2923 0, page->index, type);
2925 wait_on_page_writeback(page);
2927 wait_for_stable_page(page);
2931 void f2fs_wait_on_block_writeback(struct f2fs_sb_info *sbi, block_t blkaddr)
2935 if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
2938 cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
2940 f2fs_wait_on_page_writeback(cpage, DATA, true);
2941 f2fs_put_page(cpage, 1);
2945 static void read_compacted_summaries(struct f2fs_sb_info *sbi)
2947 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2948 struct curseg_info *seg_i;
2949 unsigned char *kaddr;
2954 start = start_sum_block(sbi);
2956 page = get_meta_page(sbi, start++);
2957 kaddr = (unsigned char *)page_address(page);
2959 /* Step 1: restore nat cache */
2960 seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
2961 memcpy(seg_i->journal, kaddr, SUM_JOURNAL_SIZE);
2963 /* Step 2: restore sit cache */
2964 seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
2965 memcpy(seg_i->journal, kaddr + SUM_JOURNAL_SIZE, SUM_JOURNAL_SIZE);
2966 offset = 2 * SUM_JOURNAL_SIZE;
2968 /* Step 3: restore summary entries */
2969 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
2970 unsigned short blk_off;
2973 seg_i = CURSEG_I(sbi, i);
2974 segno = le32_to_cpu(ckpt->cur_data_segno[i]);
2975 blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
2976 seg_i->next_segno = segno;
2977 reset_curseg(sbi, i, 0);
2978 seg_i->alloc_type = ckpt->alloc_type[i];
2979 seg_i->next_blkoff = blk_off;
2981 if (seg_i->alloc_type == SSR)
2982 blk_off = sbi->blocks_per_seg;
2984 for (j = 0; j < blk_off; j++) {
2985 struct f2fs_summary *s;
2986 s = (struct f2fs_summary *)(kaddr + offset);
2987 seg_i->sum_blk->entries[j] = *s;
2988 offset += SUMMARY_SIZE;
2989 if (offset + SUMMARY_SIZE <= PAGE_SIZE -
2993 f2fs_put_page(page, 1);
2996 page = get_meta_page(sbi, start++);
2997 kaddr = (unsigned char *)page_address(page);
3001 f2fs_put_page(page, 1);
3004 static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
3006 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
3007 struct f2fs_summary_block *sum;
3008 struct curseg_info *curseg;
3010 unsigned short blk_off;
3011 unsigned int segno = 0;
3012 block_t blk_addr = 0;
3014 /* get segment number and block addr */
3015 if (IS_DATASEG(type)) {
3016 segno = le32_to_cpu(ckpt->cur_data_segno[type]);
3017 blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type -
3019 if (__exist_node_summaries(sbi))
3020 blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type);
3022 blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
3024 segno = le32_to_cpu(ckpt->cur_node_segno[type -
3026 blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type -
3028 if (__exist_node_summaries(sbi))
3029 blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
3030 type - CURSEG_HOT_NODE);
3032 blk_addr = GET_SUM_BLOCK(sbi, segno);
3035 new = get_meta_page(sbi, blk_addr);
3036 sum = (struct f2fs_summary_block *)page_address(new);
3038 if (IS_NODESEG(type)) {
3039 if (__exist_node_summaries(sbi)) {
3040 struct f2fs_summary *ns = &sum->entries[0];
3042 for (i = 0; i < sbi->blocks_per_seg; i++, ns++) {
3044 ns->ofs_in_node = 0;
3047 restore_node_summary(sbi, segno, sum);
3051	/* set the uncompleted segment as curseg */
3052 curseg = CURSEG_I(sbi, type);
3053 mutex_lock(&curseg->curseg_mutex);
3055 /* update journal info */
3056 down_write(&curseg->journal_rwsem);
3057 memcpy(curseg->journal, &sum->journal, SUM_JOURNAL_SIZE);
3058 up_write(&curseg->journal_rwsem);
3060 memcpy(curseg->sum_blk->entries, sum->entries, SUM_ENTRY_SIZE);
3061 memcpy(&curseg->sum_blk->footer, &sum->footer, SUM_FOOTER_SIZE);
3062 curseg->next_segno = segno;
3063 reset_curseg(sbi, type, 0);
3064 curseg->alloc_type = ckpt->alloc_type[type];
3065 curseg->next_blkoff = blk_off;
3066 mutex_unlock(&curseg->curseg_mutex);
3067 f2fs_put_page(new, 1);
3071 static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
3073 struct f2fs_journal *sit_j = CURSEG_I(sbi, CURSEG_COLD_DATA)->journal;
3074 struct f2fs_journal *nat_j = CURSEG_I(sbi, CURSEG_HOT_DATA)->journal;
3075 int type = CURSEG_HOT_DATA;
3078 if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG)) {
3079 int npages = npages_for_summary_flush(sbi, true);
3082 ra_meta_pages(sbi, start_sum_block(sbi), npages,
3085		/* restore the compacted data summaries */
3086 read_compacted_summaries(sbi);
3087 type = CURSEG_HOT_NODE;
3090 if (__exist_node_summaries(sbi))
3091 ra_meta_pages(sbi, sum_blk_addr(sbi, NR_CURSEG_TYPE, type),
3092 NR_CURSEG_TYPE - type, META_CP, true);
3094 for (; type <= CURSEG_COLD_NODE; type++) {
3095 err = read_normal_summaries(sbi, type);
3100 /* sanity check for summary blocks */
3101 if (nats_in_cursum(nat_j) > NAT_JOURNAL_ENTRIES ||
3102 sits_in_cursum(sit_j) > SIT_JOURNAL_ENTRIES)
3108 static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
3111 unsigned char *kaddr;
3112 struct f2fs_summary *summary;
3113 struct curseg_info *seg_i;
3114 int written_size = 0;
3117 page = grab_meta_page(sbi, blkaddr++);
3118 kaddr = (unsigned char *)page_address(page);
3120 /* Step 1: write nat cache */
3121 seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
3122 memcpy(kaddr, seg_i->journal, SUM_JOURNAL_SIZE);
3123 written_size += SUM_JOURNAL_SIZE;
3125 /* Step 2: write sit cache */
3126 seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
3127 memcpy(kaddr + written_size, seg_i->journal, SUM_JOURNAL_SIZE);
3128 written_size += SUM_JOURNAL_SIZE;
3130 /* Step 3: write summary entries */
3131 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
3132 unsigned short blkoff;
3133 seg_i = CURSEG_I(sbi, i);
3134 if (sbi->ckpt->alloc_type[i] == SSR)
3135 blkoff = sbi->blocks_per_seg;
3137 blkoff = curseg_blkoff(sbi, i);
3139 for (j = 0; j < blkoff; j++) {
3141 page = grab_meta_page(sbi, blkaddr++);
3142 kaddr = (unsigned char *)page_address(page);
3145 summary = (struct f2fs_summary *)(kaddr + written_size);
3146 *summary = seg_i->sum_blk->entries[j];
3147 written_size += SUMMARY_SIZE;
3149 if (written_size + SUMMARY_SIZE <= PAGE_SIZE -
3153 set_page_dirty(page);
3154 f2fs_put_page(page, 1);
3159 set_page_dirty(page);
3160 f2fs_put_page(page, 1);
3164 static void write_normal_summaries(struct f2fs_sb_info *sbi,
3165 block_t blkaddr, int type)
3168 if (IS_DATASEG(type))
3169 end = type + NR_CURSEG_DATA_TYPE;
3171 end = type + NR_CURSEG_NODE_TYPE;
3173 for (i = type; i < end; i++)
3174 write_current_sum_page(sbi, i, blkaddr + (i - type));
3177 void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
3179 if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG))
3180 write_compacted_summaries(sbi, start_blk);
3182 write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
3185 void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
3187 write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
3190 int lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
3191 unsigned int val, int alloc)
3195 if (type == NAT_JOURNAL) {
3196 for (i = 0; i < nats_in_cursum(journal); i++) {
3197 if (le32_to_cpu(nid_in_journal(journal, i)) == val)
3200 if (alloc && __has_cursum_space(journal, 1, NAT_JOURNAL))
3201 return update_nats_in_cursum(journal, 1);
3202 } else if (type == SIT_JOURNAL) {
3203 for (i = 0; i < sits_in_cursum(journal); i++)
3204 if (le32_to_cpu(segno_in_journal(journal, i)) == val)
3206 if (alloc && __has_cursum_space(journal, 1, SIT_JOURNAL))
3207 return update_sits_in_cursum(journal, 1);
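
/*
 * A minimal standalone model of the lookup-or-allocate behavior of
 * lookup_journal_in_cursum() above: scan for an existing slot keyed
 * by val; if absent, and alloc is set and there is room, hand out a
 * fresh slot at the tail. The fixed capacity of 64 is illustrative;
 * the kernel checks __has_cursum_space() against the real limits.
 */
struct model_journal {
	unsigned int keys[64];
	int n_used;
};

static int model_lookup(struct model_journal *j, unsigned int val, int alloc)
{
	int i;

	for (i = 0; i < j->n_used; i++)
		if (j->keys[i] == val)
			return i;		/* found an existing slot */
	if (alloc && j->n_used < 64) {
		j->keys[j->n_used] = val;
		return j->n_used++;		/* allocated a new tail slot */
	}
	return -1;				/* not found, nothing allocated */
}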
3212 static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
3215 return get_meta_page(sbi, current_sit_addr(sbi, segno));
3218 static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
3221 struct sit_info *sit_i = SIT_I(sbi);
3223 pgoff_t src_off, dst_off;
3225 src_off = current_sit_addr(sbi, start);
3226 dst_off = next_sit_addr(sbi, src_off);
3228 page = grab_meta_page(sbi, dst_off);
3229 seg_info_to_sit_page(sbi, page, start);
3231 set_page_dirty(page);
3232 set_to_next_sit(sit_i, start);
3237 static struct sit_entry_set *grab_sit_entry_set(void)
3239 struct sit_entry_set *ses =
3240 f2fs_kmem_cache_alloc(sit_entry_set_slab, GFP_NOFS);
3243 INIT_LIST_HEAD(&ses->set_list);
3247 static void release_sit_entry_set(struct sit_entry_set *ses)
3249 list_del(&ses->set_list);
3250 kmem_cache_free(sit_entry_set_slab, ses);
3253 static void adjust_sit_entry_set(struct sit_entry_set *ses,
3254 struct list_head *head)
3256 struct sit_entry_set *next = ses;
3258 if (list_is_last(&ses->set_list, head))
3261 list_for_each_entry_continue(next, head, set_list)
3262 if (ses->entry_cnt <= next->entry_cnt)
3265 list_move_tail(&ses->set_list, &next->set_list);
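
/*
 * A standalone model of the invariant kept by adjust_sit_entry_set()
 * above: after a set's entry_cnt grows, the set is moved forward so
 * the list stays sorted by ascending entry_cnt. Modeled with a plain
 * array instead of a list_head; illustrative only.
 */
static void model_adjust(unsigned int cnt[], int n, int pos)
{
	unsigned int grown = cnt[pos];
	int i;

	for (i = pos; i + 1 < n && cnt[i + 1] < grown; i++)
		cnt[i] = cnt[i + 1];	/* shift smaller counts toward the head */
	cnt[i] = grown;			/* reinsert the grown set */
}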
3268 static void add_sit_entry(unsigned int segno, struct list_head *head)
3270 struct sit_entry_set *ses;
3271 unsigned int start_segno = START_SEGNO(segno);
3273 list_for_each_entry(ses, head, set_list) {
3274 if (ses->start_segno == start_segno) {
3276 adjust_sit_entry_set(ses, head);
3281 ses = grab_sit_entry_set();
3283 ses->start_segno = start_segno;
3285 list_add(&ses->set_list, head);
3288 static void add_sits_in_set(struct f2fs_sb_info *sbi)
3290 struct f2fs_sm_info *sm_info = SM_I(sbi);
3291 struct list_head *set_list = &sm_info->sit_entry_set;
3292 unsigned long *bitmap = SIT_I(sbi)->dirty_sentries_bitmap;
3295 for_each_set_bit(segno, bitmap, MAIN_SEGS(sbi))
3296 add_sit_entry(segno, set_list);
3299 static void remove_sits_in_journal(struct f2fs_sb_info *sbi)
3301 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
3302 struct f2fs_journal *journal = curseg->journal;
3305 down_write(&curseg->journal_rwsem);
3306 for (i = 0; i < sits_in_cursum(journal); i++) {
3310 segno = le32_to_cpu(segno_in_journal(journal, i));
3311 dirtied = __mark_sit_entry_dirty(sbi, segno);
3314 add_sit_entry(segno, &SM_I(sbi)->sit_entry_set);
3316 update_sits_in_cursum(journal, -i);
3317 up_write(&curseg->journal_rwsem);
3321	 * The checkpoint path calls this function, which flushes SIT entries
3322	 * including the sit journal, and moves prefree segments to free segments.
3324 void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
3326 struct sit_info *sit_i = SIT_I(sbi);
3327 unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
3328 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
3329 struct f2fs_journal *journal = curseg->journal;
3330 struct sit_entry_set *ses, *tmp;
3331 struct list_head *head = &SM_I(sbi)->sit_entry_set;
3332 bool to_journal = true;
3333 struct seg_entry *se;
3335 down_write(&sit_i->sentry_lock);
3337 if (!sit_i->dirty_sentries)
3341 * add and account sit entries of dirty bitmap in sit entry
3344 add_sits_in_set(sbi);
3347	 * if there is not enough space in the journal to store the dirty sit
3348	 * entries, remove all entries from the journal, then add and account
3349	 * them in the sit entry set.
3351 if (!__has_cursum_space(journal, sit_i->dirty_sentries, SIT_JOURNAL))
3352 remove_sits_in_journal(sbi);
3355	 * there are two ways to flush dirty sit entries:
3356	 * #1, flush them to the journal in the current cold data summary block.
3357	 * #2, flush them to the sit page.
3359 list_for_each_entry_safe(ses, tmp, head, set_list) {
3360 struct page *page = NULL;
3361 struct f2fs_sit_block *raw_sit = NULL;
3362 unsigned int start_segno = ses->start_segno;
3363 unsigned int end = min(start_segno + SIT_ENTRY_PER_BLOCK,
3364 (unsigned long)MAIN_SEGS(sbi));
3365 unsigned int segno = start_segno;
3368 !__has_cursum_space(journal, ses->entry_cnt, SIT_JOURNAL))
3372 down_write(&curseg->journal_rwsem);
3374 page = get_next_sit_page(sbi, start_segno);
3375 raw_sit = page_address(page);
3378 /* flush dirty sit entries in region of current sit set */
3379 for_each_set_bit_from(segno, bitmap, end) {
3380 int offset, sit_offset;
3382 se = get_seg_entry(sbi, segno);
3384 /* add discard candidates */
3385 if (!(cpc->reason & CP_DISCARD)) {
3386 cpc->trim_start = segno;
3387 add_discard_addrs(sbi, cpc, false);
3391 offset = lookup_journal_in_cursum(journal,
3392 SIT_JOURNAL, segno, 1);
3393 f2fs_bug_on(sbi, offset < 0);
3394 segno_in_journal(journal, offset) =
3396 seg_info_to_raw_sit(se,
3397 &sit_in_journal(journal, offset));
3399 sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
3400 seg_info_to_raw_sit(se,
3401 &raw_sit->entries[sit_offset]);
3404 __clear_bit(segno, bitmap);
3405 sit_i->dirty_sentries--;
3410 up_write(&curseg->journal_rwsem);
3412 f2fs_put_page(page, 1);
3414 f2fs_bug_on(sbi, ses->entry_cnt);
3415 release_sit_entry_set(ses);
3418 f2fs_bug_on(sbi, !list_empty(head));
3419 f2fs_bug_on(sbi, sit_i->dirty_sentries);
3421 if (cpc->reason & CP_DISCARD) {
3422 __u64 trim_start = cpc->trim_start;
3424 for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++)
3425 add_discard_addrs(sbi, cpc, false);
3427 cpc->trim_start = trim_start;
3429 up_write(&sit_i->sentry_lock);
3431 set_prefree_as_free_segments(sbi);
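
/*
 * A standalone model of the per-set flush decision above, assuming
 * journal_room is the number of free journal slots: a dirty set is
 * written through the in-summary journal only if the journal can
 * absorb the whole set; otherwise it goes to its on-disk SIT block.
 */
enum model_sink { MODEL_TO_JOURNAL, MODEL_TO_SIT_PAGE };

static enum model_sink model_sit_sink(unsigned int journal_room,
				      unsigned int set_entry_cnt)
{
	return set_entry_cnt <= journal_room ? MODEL_TO_JOURNAL
					     : MODEL_TO_SIT_PAGE;
}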
3434 static int build_sit_info(struct f2fs_sb_info *sbi)
3436 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
3437 struct sit_info *sit_i;
3438 unsigned int sit_segs, start;
3440 unsigned int bitmap_size;
3442 /* allocate memory for SIT information */
3443 sit_i = f2fs_kzalloc(sbi, sizeof(struct sit_info), GFP_KERNEL);
3447 SM_I(sbi)->sit_info = sit_i;
3449 sit_i->sentries = f2fs_kvzalloc(sbi, MAIN_SEGS(sbi) *
3450 sizeof(struct seg_entry), GFP_KERNEL);
3451 if (!sit_i->sentries)
3454 bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
3455 sit_i->dirty_sentries_bitmap = f2fs_kvzalloc(sbi, bitmap_size,
3457 if (!sit_i->dirty_sentries_bitmap)
3460 for (start = 0; start < MAIN_SEGS(sbi); start++) {
3461 sit_i->sentries[start].cur_valid_map
3462 = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
3463 sit_i->sentries[start].ckpt_valid_map
3464 = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
3465 if (!sit_i->sentries[start].cur_valid_map ||
3466 !sit_i->sentries[start].ckpt_valid_map)
3469 #ifdef CONFIG_F2FS_CHECK_FS
3470 sit_i->sentries[start].cur_valid_map_mir
3471 = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
3472 if (!sit_i->sentries[start].cur_valid_map_mir)
3476 if (f2fs_discard_en(sbi)) {
3477 sit_i->sentries[start].discard_map
3478 = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE,
3480 if (!sit_i->sentries[start].discard_map)
3485 sit_i->tmp_map = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
3486 if (!sit_i->tmp_map)
3489 if (sbi->segs_per_sec > 1) {
3490 sit_i->sec_entries = f2fs_kvzalloc(sbi, MAIN_SECS(sbi) *
3491 sizeof(struct sec_entry), GFP_KERNEL);
3492 if (!sit_i->sec_entries)
3496	/* get information related to SIT */
3497 sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;
3499	/* set up the SIT bitmap from the checkpoint pack */
3500 bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
3501 src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);
3503 sit_i->sit_bitmap = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
3504 if (!sit_i->sit_bitmap)
3507 #ifdef CONFIG_F2FS_CHECK_FS
3508 sit_i->sit_bitmap_mir = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
3509 if (!sit_i->sit_bitmap_mir)
3513 /* init SIT information */
3514 sit_i->s_ops = &default_salloc_ops;
3516 sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
3517 sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
3518 sit_i->written_valid_blocks = 0;
3519 sit_i->bitmap_size = bitmap_size;
3520 sit_i->dirty_sentries = 0;
3521 sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
3522 sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
3523 sit_i->mounted_time = ktime_get_real_seconds();
3524 init_rwsem(&sit_i->sentry_lock);
3528 static int build_free_segmap(struct f2fs_sb_info *sbi)
3530 struct free_segmap_info *free_i;
3531 unsigned int bitmap_size, sec_bitmap_size;
3533 /* allocate memory for free segmap information */
3534 free_i = f2fs_kzalloc(sbi, sizeof(struct free_segmap_info), GFP_KERNEL);
3538 SM_I(sbi)->free_info = free_i;
3540 bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
3541 free_i->free_segmap = f2fs_kvmalloc(sbi, bitmap_size, GFP_KERNEL);
3542 if (!free_i->free_segmap)
3545 sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
3546 free_i->free_secmap = f2fs_kvmalloc(sbi, sec_bitmap_size, GFP_KERNEL);
3547 if (!free_i->free_secmap)
3550 /* set all segments as dirty temporarily */
3551 memset(free_i->free_segmap, 0xff, bitmap_size);
3552 memset(free_i->free_secmap, 0xff, sec_bitmap_size);
3554 /* init free segmap information */
3555 free_i->start_segno = GET_SEGNO_FROM_SEG0(sbi, MAIN_BLKADDR(sbi));
3556 free_i->free_segments = 0;
3557 free_i->free_sections = 0;
3558 spin_lock_init(&free_i->segmap_lock);
3562 static int build_curseg(struct f2fs_sb_info *sbi)
3564 struct curseg_info *array;
3567 array = f2fs_kzalloc(sbi, sizeof(*array) * NR_CURSEG_TYPE, GFP_KERNEL);
3571 SM_I(sbi)->curseg_array = array;
3573 for (i = 0; i < NR_CURSEG_TYPE; i++) {
3574 mutex_init(&array[i].curseg_mutex);
3575 array[i].sum_blk = f2fs_kzalloc(sbi, PAGE_SIZE, GFP_KERNEL);
3576 if (!array[i].sum_blk)
3578 init_rwsem(&array[i].journal_rwsem);
3579 array[i].journal = f2fs_kzalloc(sbi,
3580 sizeof(struct f2fs_journal), GFP_KERNEL);
3581 if (!array[i].journal)
3583 array[i].segno = NULL_SEGNO;
3584 array[i].next_blkoff = 0;
3586 return restore_curseg_summaries(sbi);
3589 static int build_sit_entries(struct f2fs_sb_info *sbi)
3591 struct sit_info *sit_i = SIT_I(sbi);
3592 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
3593 struct f2fs_journal *journal = curseg->journal;
3594 struct seg_entry *se;
3595 struct f2fs_sit_entry sit;
3596 int sit_blk_cnt = SIT_BLK_CNT(sbi);
3597 unsigned int i, start, end;
3598 unsigned int readed, start_blk = 0;
3602 readed = ra_meta_pages(sbi, start_blk, BIO_MAX_PAGES,
3605 start = start_blk * sit_i->sents_per_block;
3606 end = (start_blk + readed) * sit_i->sents_per_block;
3608 for (; start < end && start < MAIN_SEGS(sbi); start++) {
3609 struct f2fs_sit_block *sit_blk;
3612 se = &sit_i->sentries[start];
3613 page = get_current_sit_page(sbi, start);
3614 sit_blk = (struct f2fs_sit_block *)page_address(page);
3615 sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
3616 f2fs_put_page(page, 1);
3618 err = check_block_count(sbi, start, &sit);
3621 seg_info_from_raw_sit(se, &sit);
3623			/* build the discard map only once */
3624 if (f2fs_discard_en(sbi)) {
3625 if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
3626 memset(se->discard_map, 0xff,
3627 SIT_VBLOCK_MAP_SIZE);
3629 memcpy(se->discard_map,
3631 SIT_VBLOCK_MAP_SIZE);
3632 sbi->discard_blks +=
3633 sbi->blocks_per_seg -
3638 if (sbi->segs_per_sec > 1)
3639 get_sec_entry(sbi, start)->valid_blocks +=
3642 start_blk += readed;
3643 } while (start_blk < sit_blk_cnt);
3645 down_read(&curseg->journal_rwsem);
3646 for (i = 0; i < sits_in_cursum(journal); i++) {
3647 unsigned int old_valid_blocks;
3649 start = le32_to_cpu(segno_in_journal(journal, i));
3650 se = &sit_i->sentries[start];
3651 sit = sit_in_journal(journal, i);
3653 old_valid_blocks = se->valid_blocks;
3655 err = check_block_count(sbi, start, &sit);
3658 seg_info_from_raw_sit(se, &sit);
3660 if (f2fs_discard_en(sbi)) {
3661 if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
3662 memset(se->discard_map, 0xff,
3663 SIT_VBLOCK_MAP_SIZE);
3665 memcpy(se->discard_map, se->cur_valid_map,
3666 SIT_VBLOCK_MAP_SIZE);
3667 sbi->discard_blks += old_valid_blocks -
3672 if (sbi->segs_per_sec > 1)
3673 get_sec_entry(sbi, start)->valid_blocks +=
3674 se->valid_blocks - old_valid_blocks;
3676 up_read(&curseg->journal_rwsem);
3680 static void init_free_segmap(struct f2fs_sb_info *sbi)
3685 for (start = 0; start < MAIN_SEGS(sbi); start++) {
3686 struct seg_entry *sentry = get_seg_entry(sbi, start);
3687 if (!sentry->valid_blocks)
3688 __set_free(sbi, start);
3690 SIT_I(sbi)->written_valid_blocks +=
3691 sentry->valid_blocks;
3694	/* mark the current segments as in use */
3695 for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) {
3696 struct curseg_info *curseg_t = CURSEG_I(sbi, type);
3697 __set_test_and_inuse(sbi, curseg_t->segno);
3701 static void init_dirty_segmap(struct f2fs_sb_info *sbi)
3703 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
3704 struct free_segmap_info *free_i = FREE_I(sbi);
3705 unsigned int segno = 0, offset = 0;
3706 unsigned short valid_blocks;
3709 /* find dirty segment based on free segmap */
3710 segno = find_next_inuse(free_i, MAIN_SEGS(sbi), offset);
3711 if (segno >= MAIN_SEGS(sbi))
3714 valid_blocks = get_valid_blocks(sbi, segno, false);
3715 if (valid_blocks == sbi->blocks_per_seg || !valid_blocks)
3717 if (valid_blocks > sbi->blocks_per_seg) {
3718 f2fs_bug_on(sbi, 1);
3721 mutex_lock(&dirty_i->seglist_lock);
3722 __locate_dirty_segment(sbi, segno, DIRTY);
3723 mutex_unlock(&dirty_i->seglist_lock);
3727 static int init_victim_secmap(struct f2fs_sb_info *sbi)
3729 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
3730 unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
3732 dirty_i->victim_secmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
3733 if (!dirty_i->victim_secmap)
3738 static int build_dirty_segmap(struct f2fs_sb_info *sbi)
3740 struct dirty_seglist_info *dirty_i;
3741 unsigned int bitmap_size, i;
3743 /* allocate memory for dirty segments list information */
3744 dirty_i = f2fs_kzalloc(sbi, sizeof(struct dirty_seglist_info),
3749 SM_I(sbi)->dirty_info = dirty_i;
3750 mutex_init(&dirty_i->seglist_lock);
3752 bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
3754 for (i = 0; i < NR_DIRTY_TYPE; i++) {
3755 dirty_i->dirty_segmap[i] = f2fs_kvzalloc(sbi, bitmap_size,
3757 if (!dirty_i->dirty_segmap[i])
3761 init_dirty_segmap(sbi);
3762 return init_victim_secmap(sbi);
3766 * Update the min and max modified times for the cost-benefit GC algorithm
3768 static void init_min_max_mtime(struct f2fs_sb_info *sbi)
3770 struct sit_info *sit_i = SIT_I(sbi);
3773 down_write(&sit_i->sentry_lock);
3775 sit_i->min_mtime = LLONG_MAX;
3777 for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
3779 unsigned long long mtime = 0;
3781 for (i = 0; i < sbi->segs_per_sec; i++)
3782 mtime += get_seg_entry(sbi, segno + i)->mtime;
3784 mtime = div_u64(mtime, sbi->segs_per_sec);
3786 if (sit_i->min_mtime > mtime)
3787 sit_i->min_mtime = mtime;
3789 sit_i->max_mtime = get_mtime(sbi);
3790 up_write(&sit_i->sentry_lock);
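
/*
 * A standalone restatement of the per-section averaging above: a
 * section's modified time is the mean of its segments' mtimes, and
 * min_mtime tracks the smallest such mean (the kernel divides with
 * div_u64()). Illustrative only.
 */
static unsigned long long model_section_mtime(const unsigned long long *seg_mtime,
					      unsigned int segs_per_sec)
{
	unsigned long long sum = 0;
	unsigned int i;

	for (i = 0; i < segs_per_sec; i++)
		sum += seg_mtime[i];
	return sum / segs_per_sec;
}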
3793 int build_segment_manager(struct f2fs_sb_info *sbi)
3795 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
3796 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
3797 struct f2fs_sm_info *sm_info;
3800 sm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_sm_info), GFP_KERNEL);
3805 sbi->sm_info = sm_info;
3806 sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
3807 sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
3808 sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
3809 sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
3810 sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
3811 sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
3812 sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
3813 sm_info->rec_prefree_segments = sm_info->main_segments *
3814 DEF_RECLAIM_PREFREE_SEGMENTS / 100;
3815 if (sm_info->rec_prefree_segments > DEF_MAX_RECLAIM_PREFREE_SEGMENTS)
3816 sm_info->rec_prefree_segments = DEF_MAX_RECLAIM_PREFREE_SEGMENTS;
3818 if (!test_opt(sbi, LFS))
3819 sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC;
3820 sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
3821 sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
3822 sm_info->min_hot_blocks = DEF_MIN_HOT_BLOCKS;
3823 sm_info->min_ssr_sections = reserved_sections(sbi);
3825 sm_info->trim_sections = DEF_BATCHED_TRIM_SECTIONS;
3827 INIT_LIST_HEAD(&sm_info->sit_entry_set);
3829 init_rwsem(&sm_info->curseg_lock);
3831 if (!f2fs_readonly(sbi->sb)) {
3832 err = create_flush_cmd_control(sbi);
3837 err = create_discard_cmd_control(sbi);
3841 err = build_sit_info(sbi);
3844 err = build_free_segmap(sbi);
3847 err = build_curseg(sbi);
3851 /* reinit free segmap based on SIT */
3852 err = build_sit_entries(sbi);
3856 init_free_segmap(sbi);
3857 err = build_dirty_segmap(sbi);
3861 init_min_max_mtime(sbi);
3865 static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
3866 enum dirty_type dirty_type)
3868 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
3870 mutex_lock(&dirty_i->seglist_lock);
3871 kvfree(dirty_i->dirty_segmap[dirty_type]);
3872 dirty_i->nr_dirty[dirty_type] = 0;
3873 mutex_unlock(&dirty_i->seglist_lock);
3876 static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
3878 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
3879 kvfree(dirty_i->victim_secmap);
3882 static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
3884 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
3890 /* discard pre-free/dirty segments list */
3891 for (i = 0; i < NR_DIRTY_TYPE; i++)
3892 discard_dirty_segmap(sbi, i);
3894 destroy_victim_secmap(sbi);
3895 SM_I(sbi)->dirty_info = NULL;
3899 static void destroy_curseg(struct f2fs_sb_info *sbi)
3901 struct curseg_info *array = SM_I(sbi)->curseg_array;
3906 SM_I(sbi)->curseg_array = NULL;
3907 for (i = 0; i < NR_CURSEG_TYPE; i++) {
3908 kfree(array[i].sum_blk);
3909 kfree(array[i].journal);
3914 static void destroy_free_segmap(struct f2fs_sb_info *sbi)
3916 struct free_segmap_info *free_i = SM_I(sbi)->free_info;
3919 SM_I(sbi)->free_info = NULL;
3920 kvfree(free_i->free_segmap);
3921 kvfree(free_i->free_secmap);
3925 static void destroy_sit_info(struct f2fs_sb_info *sbi)
3927 struct sit_info *sit_i = SIT_I(sbi);
3933 if (sit_i->sentries) {
3934 for (start = 0; start < MAIN_SEGS(sbi); start++) {
3935 kfree(sit_i->sentries[start].cur_valid_map);
3936 #ifdef CONFIG_F2FS_CHECK_FS
3937 kfree(sit_i->sentries[start].cur_valid_map_mir);
3939 kfree(sit_i->sentries[start].ckpt_valid_map);
3940 kfree(sit_i->sentries[start].discard_map);
3943 kfree(sit_i->tmp_map);
3945 kvfree(sit_i->sentries);
3946 kvfree(sit_i->sec_entries);
3947 kvfree(sit_i->dirty_sentries_bitmap);
3949 SM_I(sbi)->sit_info = NULL;
3950 kfree(sit_i->sit_bitmap);
3951 #ifdef CONFIG_F2FS_CHECK_FS
3952 kfree(sit_i->sit_bitmap_mir);
3957 void destroy_segment_manager(struct f2fs_sb_info *sbi)
3959 struct f2fs_sm_info *sm_info = SM_I(sbi);
3963 destroy_flush_cmd_control(sbi, true);
3964 destroy_discard_cmd_control(sbi);
3965 destroy_dirty_segmap(sbi);
3966 destroy_curseg(sbi);
3967 destroy_free_segmap(sbi);
3968 destroy_sit_info(sbi);
3969 sbi->sm_info = NULL;
3973 int __init create_segment_manager_caches(void)
3975 discard_entry_slab = f2fs_kmem_cache_create("discard_entry",
3976 sizeof(struct discard_entry));
3977 if (!discard_entry_slab)
3980 discard_cmd_slab = f2fs_kmem_cache_create("discard_cmd",
3981 sizeof(struct discard_cmd));
3982 if (!discard_cmd_slab)
3983 goto destroy_discard_entry;
3985 sit_entry_set_slab = f2fs_kmem_cache_create("sit_entry_set",
3986 sizeof(struct sit_entry_set));
3987 if (!sit_entry_set_slab)
3988 goto destroy_discard_cmd;
3990 inmem_entry_slab = f2fs_kmem_cache_create("inmem_page_entry",
3991 sizeof(struct inmem_pages));
3992 if (!inmem_entry_slab)
3993 goto destroy_sit_entry_set;
3996 destroy_sit_entry_set:
3997 kmem_cache_destroy(sit_entry_set_slab);
3998 destroy_discard_cmd:
3999 kmem_cache_destroy(discard_cmd_slab);
4000 destroy_discard_entry:
4001 kmem_cache_destroy(discard_entry_slab);
4006 void destroy_segment_manager_caches(void)
4008 kmem_cache_destroy(sit_entry_set_slab);
4009 kmem_cache_destroy(discard_cmd_slab);
4010 kmem_cache_destroy(discard_entry_slab);
4011 kmem_cache_destroy(inmem_entry_slab);