// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs extent cache support
 *
 * Copyright (c) 2015 Motorola Mobility
 * Copyright (c) 2015 Samsung Electronics
 * Authors: Jaegeuk Kim <jaegeuk@kernel.org>
 *          Chao Yu <chao2.yu@samsung.com>
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>

#include "f2fs.h"
#include "node.h"
#include <trace/events/f2fs.h>

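/*
 * The extent cache keeps one rb-tree per inode (struct extent_tree) whose
 * nodes (struct extent_node) each describe a contiguous mapping of file
 * offsets to block addresses.  All nodes are also linked on a global LRU
 * list (sbi->extent_list) so that they can be reclaimed under memory
 * pressure.
 */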
static struct rb_entry *__lookup_rb_tree_fast(struct rb_entry *cached_re,
							unsigned int ofs)
{
	if (cached_re) {
		if (cached_re->ofs <= ofs &&
				cached_re->ofs + cached_re->len > ofs) {
			return cached_re;
		}
	}
	return NULL;
}

static struct rb_entry *__lookup_rb_tree_slow(struct rb_root *root,
							unsigned int ofs)
{
	struct rb_node *node = root->rb_node;
	struct rb_entry *re;

	while (node) {
		re = rb_entry(node, struct rb_entry, rb_node);

		if (ofs < re->ofs)
			node = node->rb_left;
		else if (ofs >= re->ofs + re->len)
			node = node->rb_right;
		else
			return re;
	}
	return NULL;
}

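/* Try the cached entry first, then fall back to a full tree walk. */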
struct rb_entry *f2fs_lookup_rb_tree(struct rb_root *root,
				struct rb_entry *cached_re, unsigned int ofs)
{
	struct rb_entry *re;

	re = __lookup_rb_tree_fast(cached_re, ofs);
	if (!re)
		return __lookup_rb_tree_slow(root, ofs);

	return re;
}

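/*
 * Find the link where a non-overlapping entry starting at @ofs would be
 * inserted; *parent is set up for a later rb_link_node().
 */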
struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
				struct rb_root *root, struct rb_node **parent,
				unsigned int ofs)
{
	struct rb_node **p = &root->rb_node;
	struct rb_entry *re;

	while (*p) {
		*parent = *p;
		re = rb_entry(*parent, struct rb_entry, rb_node);

		if (ofs < re->ofs)
			p = &(*p)->rb_left;
		else if (ofs >= re->ofs + re->len)
			p = &(*p)->rb_right;
		else
			f2fs_bug_on(sbi, 1);
	}

	return p;
}

/*
 * Look up the rb entry covering position @ofs in the rb-tree.
 * If found, return the entry; otherwise return NULL.
 * @prev_entry: extent before @ofs
 * @next_entry: extent after @ofs
 * @insert_p/@insert_parent: insert point for a new extent at @ofs,
 * recorded to simplify the insertion afterwards.
 * The tree must stay unchanged between lookup and insertion.
 */
struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root *root,
				struct rb_entry *cached_re,
				unsigned int ofs,
				struct rb_entry **prev_entry,
				struct rb_entry **next_entry,
				struct rb_node ***insert_p,
				struct rb_node **insert_parent,
				bool force)
{
	struct rb_node **pnode = &root->rb_node;
	struct rb_node *parent = NULL, *tmp_node;
	struct rb_entry *re = cached_re;

	*insert_p = NULL;
	*insert_parent = NULL;
	*prev_entry = NULL;
	*next_entry = NULL;

	if (RB_EMPTY_ROOT(root))
		return NULL;

	if (re) {
		if (re->ofs <= ofs && re->ofs + re->len > ofs)
			goto lookup_neighbors;
	}

	while (*pnode) {
		parent = *pnode;
		re = rb_entry(*pnode, struct rb_entry, rb_node);

		if (ofs < re->ofs)
			pnode = &(*pnode)->rb_left;
		else if (ofs >= re->ofs + re->len)
			pnode = &(*pnode)->rb_right;
		else
			goto lookup_neighbors;
	}

	*insert_p = pnode;
	*insert_parent = parent;

	/* look up extent which locates in or nearby position of @ofs */
	re = rb_entry(parent, struct rb_entry, rb_node);
	tmp_node = parent;
	if (parent && ofs > re->ofs)
		tmp_node = rb_next(parent);
	*next_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);

	tmp_node = parent;
	if (parent && ofs < re->ofs)
		tmp_node = rb_prev(parent);
	*prev_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
	return NULL;

lookup_neighbors:
	if (ofs == re->ofs || force) {
		/* lookup prev node for merging backward later */
		tmp_node = rb_prev(&re->rb_node);
		*prev_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
	}
	if (ofs == re->ofs + re->len - 1 || force) {
		/* lookup next node for merging frontward later */
		tmp_node = rb_next(&re->rb_node);
		*next_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
	}
	return re;
}

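/*
 * Debug-only (CONFIG_F2FS_CHECK_FS) check that no two neighbouring
 * entries in the rb-tree overlap.
 */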
bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
						struct rb_root *root)
{
#ifdef CONFIG_F2FS_CHECK_FS
	struct rb_node *cur = rb_first(root), *next;
	struct rb_entry *cur_re, *next_re;

	if (!cur)
		return true;

	while (cur) {
		next = rb_next(cur);
		if (!next)
			return true;

		cur_re = rb_entry(cur, struct rb_entry, rb_node);
		next_re = rb_entry(next, struct rb_entry, rb_node);

		if (cur_re->ofs + cur_re->len > next_re->ofs) {
			f2fs_msg(sbi->sb, KERN_INFO, "inconsistent rbtree, "
				"cur(%u, %u) next(%u, %u)",
				cur_re->ofs, cur_re->len,
				next_re->ofs, next_re->len);
			return false;
		}

		cur = next;
	}
#endif
	return true;
}

static struct kmem_cache *extent_tree_slab;
static struct kmem_cache *extent_node_slab;

static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct rb_node *parent, struct rb_node **p)
{
	struct extent_node *en;

	en = kmem_cache_alloc(extent_node_slab, GFP_ATOMIC);
	if (!en)
		return NULL;

	en->ei = *ei;
	INIT_LIST_HEAD(&en->list);
	en->et = et;

	rb_link_node(&en->rb_node, parent, p);
	rb_insert_color(&en->rb_node, &et->root);
	atomic_inc(&et->node_cnt);
	atomic_inc(&sbi->total_ext_node);
	return en;
}

static void __detach_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_node *en)
{
	rb_erase(&en->rb_node, &et->root);
	atomic_dec(&et->node_cnt);
	atomic_dec(&sbi->total_ext_node);

	if (et->cached_en == en)
		et->cached_en = NULL;
	kmem_cache_free(extent_node_slab, en);
}

/*
 * Flow to release an extent_node:
 * 1. list_del_init
 * 2. __detach_extent_node
 * 3. kmem_cache_free.
 */
static void __release_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_node *en)
{
	spin_lock(&sbi->extent_lock);
	f2fs_bug_on(sbi, list_empty(&en->list));
	list_del_init(&en->list);
	spin_unlock(&sbi->extent_lock);

	__detach_extent_node(sbi, et, en);
}

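/*
 * Return the inode's extent_tree, allocating it on first use; a tree
 * parked on the zombie list by a previous eviction is revived here.
 */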
static struct extent_tree *__grab_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	nid_t ino = inode->i_ino;

	mutex_lock(&sbi->extent_tree_lock);
	et = radix_tree_lookup(&sbi->extent_tree_root, ino);
	if (!et) {
		et = f2fs_kmem_cache_alloc(extent_tree_slab, GFP_NOFS);
		f2fs_radix_tree_insert(&sbi->extent_tree_root, ino, et);
		memset(et, 0, sizeof(struct extent_tree));
		et->ino = ino;
		et->root = RB_ROOT;
		et->cached_en = NULL;
		rwlock_init(&et->lock);
		INIT_LIST_HEAD(&et->list);
		atomic_set(&et->node_cnt, 0);
		atomic_inc(&sbi->total_ext_tree);
	} else {
		atomic_dec(&sbi->total_zombie_tree);
		list_del_init(&et->list);
	}
	mutex_unlock(&sbi->extent_tree_lock);

	/* never died until evict_inode */
	F2FS_I(inode)->extent_tree = et;

	return et;
}

static struct extent_node *__init_extent_tree(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei)
{
	struct rb_node **p = &et->root.rb_node;
	struct extent_node *en;

	en = __attach_extent_node(sbi, et, ei, NULL, p);
	if (!en)
		return NULL;

	et->largest = en->ei;
	et->cached_en = en;
	return en;
}

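/* Release every extent node in the tree; returns the number freed. */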
static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
					struct extent_tree *et)
{
	struct rb_node *node, *next;
	struct extent_node *en;
	unsigned int count = atomic_read(&et->node_cnt);

	node = rb_first(&et->root);
	while (node) {
		next = rb_next(node);
		en = rb_entry(node, struct extent_node, rb_node);
		__release_extent_node(sbi, et, en);
		node = next;
	}

	return count - atomic_read(&et->node_cnt);
}

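/* Invalidate the cached largest extent if it overlaps [fofs, fofs + len). */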
static void __drop_largest_extent(struct extent_tree *et,
					pgoff_t fofs, unsigned int len)
{
	if (fofs < et->largest.fofs + et->largest.len &&
			fofs + len > et->largest.fofs) {
		et->largest.len = 0;
		et->largest_updated = true;
	}
}

/* return true, if inode page is changed */
static bool __f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	struct extent_node *en;
	struct extent_info ei;

	if (!f2fs_may_extent_tree(inode)) {
		/* drop largest extent */
		if (i_ext && i_ext->len) {
			i_ext->len = 0;
			return true;
		}
		return false;
	}

	et = __grab_extent_tree(inode);

	if (!i_ext || !i_ext->len)
		return false;

	get_extent_info(&ei, i_ext);

	write_lock(&et->lock);
	if (atomic_read(&et->node_cnt))
		goto out;

	en = __init_extent_tree(sbi, et, &ei);
	if (en) {
		spin_lock(&sbi->extent_lock);
		list_add_tail(&en->list, &sbi->extent_list);
		spin_unlock(&sbi->extent_lock);
	}
out:
	write_unlock(&et->lock);
	return false;
}

bool f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext)
{
	bool ret = __f2fs_init_extent_tree(inode, i_ext);

	if (!F2FS_I(inode)->extent_tree)
		set_inode_flag(inode, FI_NO_EXTENT);

	return ret;
}

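/*
 * Lookup order: the inode's largest extent, then the cached node, then
 * the rb-tree.  A hit refreshes the node's position in the global LRU.
 */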
static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
							struct extent_info *ei)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	struct extent_node *en;
	bool ret = false;

	f2fs_bug_on(sbi, !et);

	trace_f2fs_lookup_extent_tree_start(inode, pgofs);

	read_lock(&et->lock);

	if (et->largest.fofs <= pgofs &&
			et->largest.fofs + et->largest.len > pgofs) {
		*ei = et->largest;
		ret = true;
		stat_inc_largest_node_hit(sbi);
		goto out;
	}

	en = (struct extent_node *)f2fs_lookup_rb_tree(&et->root,
				(struct rb_entry *)et->cached_en, pgofs);
	if (!en)
		goto out;

	if (en == et->cached_en)
		stat_inc_cached_node_hit(sbi);
	else
		stat_inc_rbtree_node_hit(sbi);

	*ei = en->ei;
	spin_lock(&sbi->extent_lock);
	if (!list_empty(&en->list)) {
		list_move_tail(&en->list, &sbi->extent_list);
		et->cached_en = en;
	}
	spin_unlock(&sbi->extent_lock);
	ret = true;
out:
	stat_inc_total_hit(sbi);
	read_unlock(&et->lock);

	trace_f2fs_lookup_extent_tree_end(inode, pgofs, ei);
	return ret;
}

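/*
 * Try to merge @ei into its neighbours: back-merge into @prev_ex and/or
 * front-merge into @next_ex.  Returns the merged node, or NULL if no
 * merge was possible.
 */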
static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct extent_node *prev_ex,
				struct extent_node *next_ex)
{
	struct extent_node *en = NULL;

	if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei)) {
		prev_ex->ei.len += ei->len;
		ei = &prev_ex->ei;
		en = prev_ex;
	}

	if (next_ex && __is_front_mergeable(ei, &next_ex->ei)) {
		next_ex->ei.fofs = ei->fofs;
		next_ex->ei.blk = ei->blk;
		next_ex->ei.len += ei->len;
		if (en)
			__release_extent_node(sbi, et, prev_ex);

		en = next_ex;
	}

	if (!en)
		return NULL;

	__try_update_largest_extent(et, en);

	spin_lock(&sbi->extent_lock);
	if (!list_empty(&en->list)) {
		list_move_tail(&en->list, &sbi->extent_list);
		et->cached_en = en;
	}
	spin_unlock(&sbi->extent_lock);
	return en;
}

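/*
 * Insert a new extent node, reusing the insert hint from a previous
 * lookup (insert_p/insert_parent) when the tree has not changed since.
 */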
static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct rb_node **insert_p,
				struct rb_node *insert_parent)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct extent_node *en = NULL;

	if (insert_p && insert_parent) {
		parent = insert_parent;
		p = insert_p;
		goto do_insert;
	}

	p = f2fs_lookup_rb_tree_for_insert(sbi, &et->root, &parent, ei->fofs);
do_insert:
	en = __attach_extent_node(sbi, et, ei, parent, p);
	if (!en)
		return NULL;

	__try_update_largest_extent(et, en);

	/* update in global extent list */
	spin_lock(&sbi->extent_lock);
	list_add_tail(&en->list, &sbi->extent_list);
	et->cached_en = en;
	spin_unlock(&sbi->extent_lock);
	return en;
}

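/*
 * Update the mapping of [fofs, fofs + len): drop the largest extent if it
 * overlaps, split or remove the existing nodes in range, then insert or
 * merge the new extent when @blkaddr is valid.
 */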
static void f2fs_update_extent_tree_range(struct inode *inode,
				pgoff_t fofs, block_t blkaddr, unsigned int len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	struct extent_node *en = NULL, *en1 = NULL;
	struct extent_node *prev_en = NULL, *next_en = NULL;
	struct extent_info ei, dei, prev;
	struct rb_node **insert_p = NULL, *insert_parent = NULL;
	unsigned int end = fofs + len;
	unsigned int pos = (unsigned int)fofs;
	bool updated = false;

	if (!et)
		return;

	trace_f2fs_update_extent_tree_range(inode, fofs, blkaddr, len);

	write_lock(&et->lock);

	if (is_inode_flag_set(inode, FI_NO_EXTENT)) {
		write_unlock(&et->lock);
		return;
	}

	prev = et->largest;
	dei.len = 0;

	/*
	 * drop largest extent before lookup, in case it's already
	 * been shrunk from extent tree
	 */
	__drop_largest_extent(et, fofs, len);

	/* 1. lookup first extent node in range [fofs, fofs + len - 1] */
	en = (struct extent_node *)f2fs_lookup_rb_tree_ret(&et->root,
					(struct rb_entry *)et->cached_en, fofs,
					(struct rb_entry **)&prev_en,
					(struct rb_entry **)&next_en,
					&insert_p, &insert_parent, false);
	if (!en)
		en = next_en;

	/* 2. invalidate all extent nodes in range [fofs, fofs + len - 1] */
	while (en && en->ei.fofs < end) {
		unsigned int org_end;
		int parts = 0;	/* # of parts current extent split into */

		next_en = en1 = NULL;

		dei = en->ei;
		org_end = dei.fofs + dei.len;
		f2fs_bug_on(sbi, pos >= org_end);

		if (pos > dei.fofs && pos - dei.fofs >= F2FS_MIN_EXTENT_LEN) {
			en->ei.len = pos - en->ei.fofs;
			prev_en = en;
			parts = 1;
		}

		if (end < org_end && org_end - end >= F2FS_MIN_EXTENT_LEN) {
			if (parts) {
				set_extent_info(&ei, end,
						end - dei.fofs + dei.blk,
						org_end - end);
				en1 = __insert_extent_tree(sbi, et, &ei,
							NULL, NULL);
				next_en = en1;
			} else {
				en->ei.fofs = end;
				en->ei.blk += end - dei.fofs;
				en->ei.len -= end - dei.fofs;
				next_en = en;
			}
			parts++;
		}

		if (!next_en) {
			struct rb_node *node = rb_next(&en->rb_node);

			next_en = rb_entry_safe(node, struct extent_node,
						rb_node);
		}

		if (parts)
			__try_update_largest_extent(et, en);
		else
			__release_extent_node(sbi, et, en);

		/*
		 * if original extent is split into zero or two parts, extent
		 * tree has been altered by deletion or insertion, therefore
		 * invalidate the pointers into the tree.
		 */
		if (parts != 1) {
			insert_p = NULL;
			insert_parent = NULL;
		}
		en = next_en;
	}

	/* 3. update extent in extent cache */
	if (blkaddr) {

		set_extent_info(&ei, fofs, blkaddr, len);
		if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
			__insert_extent_tree(sbi, et, &ei,
						insert_p, insert_parent);

		/* give up extent_cache, if split and small updates happen */
		if (dei.len >= 1 &&
				prev.len < F2FS_MIN_EXTENT_LEN &&
				et->largest.len < F2FS_MIN_EXTENT_LEN) {
			et->largest.len = 0;
			et->largest_updated = true;
			set_inode_flag(inode, FI_NO_EXTENT);
		}
	}

	if (is_inode_flag_set(inode, FI_NO_EXTENT))
		__free_extent_tree(sbi, et);

	if (et->largest_updated) {
		et->largest_updated = false;
		updated = true;
	}

	write_unlock(&et->lock);

	if (updated)
		f2fs_mark_inode_dirty_sync(inode, true);
}

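/*
 * Shrinker entry point: first reclaim whole zombie trees, then trim
 * extent nodes from the global LRU until @nr_shrink is reached.
 */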
unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct extent_tree *et, *next;
	struct extent_node *en;
	unsigned int node_cnt = 0, tree_cnt = 0;
	int remained;

	if (!test_opt(sbi, EXTENT_CACHE))
		return 0;

	if (!atomic_read(&sbi->total_zombie_tree))
		goto free_node;

	if (!mutex_trylock(&sbi->extent_tree_lock))
		goto out;

	/* 1. remove unreferenced extent tree */
	list_for_each_entry_safe(et, next, &sbi->zombie_list, list) {
		if (atomic_read(&et->node_cnt)) {
			write_lock(&et->lock);
			node_cnt += __free_extent_tree(sbi, et);
			write_unlock(&et->lock);
		}
		f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
		list_del_init(&et->list);
		radix_tree_delete(&sbi->extent_tree_root, et->ino);
		kmem_cache_free(extent_tree_slab, et);
		atomic_dec(&sbi->total_ext_tree);
		atomic_dec(&sbi->total_zombie_tree);
		tree_cnt++;

		if (node_cnt + tree_cnt >= nr_shrink)
			goto unlock_out;
		cond_resched();
	}
	mutex_unlock(&sbi->extent_tree_lock);

free_node:
	/* 2. remove LRU extent entries */
	if (!mutex_trylock(&sbi->extent_tree_lock))
		goto out;

	remained = nr_shrink - (node_cnt + tree_cnt);

	spin_lock(&sbi->extent_lock);
	for (; remained > 0; remained--) {
		if (list_empty(&sbi->extent_list))
			break;
		en = list_first_entry(&sbi->extent_list,
					struct extent_node, list);
		et = en->et;
		if (!write_trylock(&et->lock)) {
			/* refresh this extent node's position in extent list */
			list_move_tail(&en->list, &sbi->extent_list);
			continue;
		}

		list_del_init(&en->list);
		spin_unlock(&sbi->extent_lock);

		__detach_extent_node(sbi, et, en);

		write_unlock(&et->lock);
		node_cnt++;
		spin_lock(&sbi->extent_lock);
	}
	spin_unlock(&sbi->extent_lock);

unlock_out:
	mutex_unlock(&sbi->extent_tree_lock);
out:
	trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt);

	return node_cnt + tree_cnt;
}

unsigned int f2fs_destroy_extent_node(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	unsigned int node_cnt = 0;

	if (!et || !atomic_read(&et->node_cnt))
		return 0;

	write_lock(&et->lock);
	node_cnt = __free_extent_tree(sbi, et);
	write_unlock(&et->lock);

	return node_cnt;
}

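/*
 * Disable and empty the extent cache for this inode: set FI_NO_EXTENT,
 * free all nodes and clear the largest extent.
 */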
void f2fs_drop_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	bool updated = false;

	if (!f2fs_may_extent_tree(inode))
		return;

	set_inode_flag(inode, FI_NO_EXTENT);

	write_lock(&et->lock);
	__free_extent_tree(sbi, et);
	if (et->largest.len) {
		et->largest.len = 0;
		updated = true;
	}
	write_unlock(&et->lock);
	if (updated)
		f2fs_mark_inode_dirty_sync(inode, true);
}

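/*
 * Called on eviction.  A tree that still has nodes and belongs to a
 * linked, healthy inode is parked on the zombie list for the shrinker;
 * otherwise the tree is torn down immediately.
 */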
void f2fs_destroy_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	unsigned int node_cnt = 0;

	if (!et)
		return;

	if (inode->i_nlink && !is_bad_inode(inode) &&
					atomic_read(&et->node_cnt)) {
		mutex_lock(&sbi->extent_tree_lock);
		list_add_tail(&et->list, &sbi->zombie_list);
		atomic_inc(&sbi->total_zombie_tree);
		mutex_unlock(&sbi->extent_tree_lock);
		return;
	}

	/* free all extent info belonging to this extent tree */
	node_cnt = f2fs_destroy_extent_node(inode);

	/* delete extent tree entry in radix tree */
	mutex_lock(&sbi->extent_tree_lock);
	f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
	radix_tree_delete(&sbi->extent_tree_root, inode->i_ino);
	kmem_cache_free(extent_tree_slab, et);
	atomic_dec(&sbi->total_ext_tree);
	mutex_unlock(&sbi->extent_tree_lock);

	F2FS_I(inode)->extent_tree = NULL;

	trace_f2fs_destroy_extent_tree(inode, node_cnt);
}

bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
					struct extent_info *ei)
{
	if (!f2fs_may_extent_tree(inode))
		return false;

	return f2fs_lookup_extent_tree(inode, pgofs, ei);
}

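/*
 * Update the extent cache for a single block; for a preallocated block
 * (NEW_ADDR) the range is only invalidated and no mapping is cached.
 */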
void f2fs_update_extent_cache(struct dnode_of_data *dn)
{
	pgoff_t fofs;
	block_t blkaddr;

	if (!f2fs_may_extent_tree(dn->inode))
		return;

	if (dn->data_blkaddr == NEW_ADDR)
		blkaddr = NULL_ADDR;
	else
		blkaddr = dn->data_blkaddr;

	fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
								dn->ofs_in_node;
	f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, 1);
}

void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
				pgoff_t fofs, block_t blkaddr, unsigned int len)
{
	if (!f2fs_may_extent_tree(dn->inode))
		return;

	f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, len);
}

void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi)
{
	INIT_RADIX_TREE(&sbi->extent_tree_root, GFP_NOIO);
	mutex_init(&sbi->extent_tree_lock);
	INIT_LIST_HEAD(&sbi->extent_list);
	spin_lock_init(&sbi->extent_lock);
	atomic_set(&sbi->total_ext_tree, 0);
	INIT_LIST_HEAD(&sbi->zombie_list);
	atomic_set(&sbi->total_zombie_tree, 0);
	atomic_set(&sbi->total_ext_node, 0);
}

int __init f2fs_create_extent_cache(void)
{
	extent_tree_slab = f2fs_kmem_cache_create("f2fs_extent_tree",
			sizeof(struct extent_tree));
	if (!extent_tree_slab)
		return -ENOMEM;
	extent_node_slab = f2fs_kmem_cache_create("f2fs_extent_node",
			sizeof(struct extent_node));
	if (!extent_node_slab) {
		kmem_cache_destroy(extent_tree_slab);
		return -ENOMEM;
	}
	return 0;
}

void f2fs_destroy_extent_cache(void)
{
	kmem_cache_destroy(extent_node_slab);
	kmem_cache_destroy(extent_tree_slab);
}