1 // SPDX-License-Identifier: GPL-2.0
3 * fs/ext4/extents_status.c
5 * Written by Yongqiang Yang <xiaoqiangnk@gmail.com>
7 * Allison Henderson <achender@linux.vnet.ibm.com>
8 * Hugh Dickins <hughd@google.com>
9 * Zheng Liu <wenqing.lz@taobao.com>
11 * Ext4 extents status tree core functions.
13 #include <linux/list_sort.h>
14 #include <linux/proc_fs.h>
15 #include <linux/seq_file.h>
18 #include <trace/events/ext4.h>
21 * According to previous discussions at the Ext4 Developer Workshop, we
22 * will introduce a new structure called io tree to track all extent
23 * status in order to solve some problems that we have met
24 * (e.g. Reservation space warning), and provide extent-level locking.
25 * The delay extent tree is the first step to achieve this goal. It was
26 * originally built by Yongqiang Yang. At that time it was called the
27 * delay extent tree, whose only goal was to track delayed extents in
28 * memory to simplify the implementation of fiemap and bigalloc, and to
29 * introduce lseek SEEK_DATA/SEEK_HOLE support. That is why it is still
30 * called the delay extent tree in the first commit. But to better
31 * describe what it does, it has been renamed to the extent status tree.
34 * Currently the first step has been done. All delayed extents are
35 * tracked in the tree. A delayed extent is added when a delayed
36 * allocation is issued, and removed when the extent is written out or
37 * invalidated. Therefore the implementations of fiemap and bigalloc
38 * are simplified, and SEEK_DATA/SEEK_HOLE are introduced.
40 * The following comment describes the implementation of the extent
41 * status tree and future work.
44 * In this step all extent statuses are tracked by the extent status
45 * tree. Thus, we can first try to look up a block mapping in this tree
46 * before finding it in the extent tree. Hence, the single-extent cache
47 * can be removed because the extent status tree can do a better job.
48 * Extents in the status tree are loaded on demand, so the tree may not
49 * contain all of the extents in a file. Meanwhile we define a shrinker
50 * to reclaim memory from the extent status tree because a fragmented
51 * extent tree will make the status tree cost too much memory.
52 * Written/unwritten/hole extents in the tree will be reclaimed by this
53 * shrinker when we are under high memory pressure. Delayed extents will
54 * not be reclaimed because fiemap, bigalloc, and seek_data/hole need them.
58 * Extent status tree implementation for ext4.
61 * ==========================================================================
62 * Extent status tree tracks all extent status.
64 * 1. Why do we need to implement an extent status tree?
66 * Without an extent status tree, ext4 identifies a delayed extent by
67 * looking up the page cache, which leads to complicated, buggy,
68 * and inefficient code.
70 * FIEMAP, SEEK_HOLE/DATA, bigalloc, and writeout all need to know if a
71 * block or a range of blocks belongs to a delayed extent.
73 * Let us have a look at how they work without the extent status tree.
75 * FIEMAP looks up page cache to identify delayed allocations from holes.
78 * SEEK_HOLE/DATA has the same problem as FIEMAP.
81 * bigalloc looks up the page cache to figure out if a block is
82 * already under delayed allocation or not, to determine whether
83 * a quota reservation is needed for the cluster.
86 * Writeout looks up the whole page cache to see if a buffer is
87 * mapped. If there are not very many delayed buffers, then it is
88 * time consuming.
90 * With the extent status tree implementation, FIEMAP, SEEK_HOLE/DATA,
91 * bigalloc and writeout can figure out if a block or a range of
92 * blocks is under delayed allocation (i.e. belongs to a delayed
93 * extent) or not by searching the extent status tree.
96 * ==========================================================================
97 * 2. Ext4 extent status tree implementation
100 * An extent is a range of blocks which are contiguous logically and
101 * physically. Unlike an extent in the extent tree, this extent is an
102 * in-memory struct; there is no corresponding on-disk data. There is
103 * no limit on the length of an extent, so an extent can contain as
104 * many blocks as are contiguous logically and physically.
106 * -- extent status tree
107 * Every inode has an extent status tree and all allocated blocks are
108 * added to the tree, each with its status. The extents in the
109 * tree are ordered by logical block number.
111 * -- operations on an extent status tree
112 * There are three important operations on an extent status tree: find
113 * next extent, adding an extent (a range of blocks), and removing an extent.
115 * -- races on an extent status tree
116 * Extent status tree is protected by inode->i_es_lock.
118 * -- memory consumption
119 * A fragmented extent tree will make the extent status tree cost too
120 * much memory. Hence, we will reclaim written/unwritten/hole extents
121 * from the tree under heavy memory pressure.
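 *
 * A rough usage sketch (hypothetical caller, using the public wrappers
 * defined later in this file):
 *
 *	struct extent_status es;
 *
 *	if (ext4_es_lookup_extent(inode, lblk, NULL, &es))
 *		return;		(status/pblk served straight from the tree)
 *	... on a miss, consult the on-disk extent tree, then cache ...
 *	ext4_es_insert_extent(inode, lblk, len, pblk, status);
 *
 * This mirrors how ext4_map_blocks()/ext4_da_map_blocks() use the tree.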
124 * ==========================================================================
125 * 3. Performance analysis
128 * 1. There is a cached extent for write access, so if writes are
129 * not very random, adding-space operations run in O(1) time.
132 * 2. Code is much simpler, more readable, more maintainable and
133 * smaller.
136 * ==========================================================================
139 * -- Refactor delayed space reservation
141 * -- Extent-level locking
144 static struct kmem_cache *ext4_es_cachep;
145 static struct kmem_cache *ext4_pending_cachep;
147 static int __es_insert_extent(struct inode *inode, struct extent_status *newes);
148 static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
149 ext4_lblk_t end, int *reserved);
150 static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan);
151 static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
152 struct ext4_inode_info *locked_ei);
153 static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
156 int __init ext4_init_es(void)
158 ext4_es_cachep = KMEM_CACHE(extent_status, SLAB_RECLAIM_ACCOUNT);
159 if (ext4_es_cachep == NULL)
164 void ext4_exit_es(void)
166 kmem_cache_destroy(ext4_es_cachep);
169 void ext4_es_init_tree(struct ext4_es_tree *tree)
171 tree->root = RB_ROOT;
172 tree->cache_es = NULL;
176 static void ext4_es_print_tree(struct inode *inode)
178 struct ext4_es_tree *tree;
179 struct rb_node *node;
181 printk(KERN_DEBUG "status extents for inode %lu:", inode->i_ino);
182 tree = &EXT4_I(inode)->i_es_tree;
183 node = rb_first(&tree->root);
185 struct extent_status *es;
186 es = rb_entry(node, struct extent_status, rb_node);
187 printk(KERN_DEBUG " [%u/%u) %llu %x",
188 es->es_lblk, es->es_len,
189 ext4_es_pblock(es), ext4_es_status(es));
190 node = rb_next(node);
192 printk(KERN_DEBUG "\n");
195 #define ext4_es_print_tree(inode)
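/* Last logical block covered by @es, inclusive: es_lblk + es_len - 1. */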
198 static inline ext4_lblk_t ext4_es_end(struct extent_status *es)
200 BUG_ON(es->es_lblk + es->es_len < es->es_lblk);
201 return es->es_lblk + es->es_len - 1;
205 * Search through the tree for a delayed extent with a given offset. If
206 * it can't be found, try to find the next extent.
208 static struct extent_status *__es_tree_search(struct rb_root *root,
211 struct rb_node *node = root->rb_node;
212 struct extent_status *es = NULL;
215 es = rb_entry(node, struct extent_status, rb_node);
216 if (lblk < es->es_lblk)
217 node = node->rb_left;
218 else if (lblk > ext4_es_end(es))
219 node = node->rb_right;
224 if (es && lblk < es->es_lblk)
227 if (es && lblk > ext4_es_end(es)) {
228 node = rb_next(&es->rb_node);
229 return node ? rb_entry(node, struct extent_status, rb_node) :
237 * ext4_es_find_extent_range - find extent with specified status within block
238 * range or next extent following block range in
239 * extents status tree
241 * @inode - file containing the range
242 * @matching_fn - pointer to function that matches extents with desired status
243 * @lblk - logical block defining start of range
244 * @end - logical block defining end of range
245 * @es - extent found, if any
247 * Find the first extent within the block range specified by @lblk and @end
248 * in the extents status tree that satisfies @matching_fn. If a match
249 * is found, it's returned in @es. If not, and a matching extent is found
250 * beyond the block range, it's returned in @es. If no match is found, an
251 * extent is returned in @es whose es_lblk, es_len, and es_pblk components
252 * are 0.
254 static void __es_find_extent_range(struct inode *inode,
255 int (*matching_fn)(struct extent_status *es),
256 ext4_lblk_t lblk, ext4_lblk_t end,
257 struct extent_status *es)
259 struct ext4_es_tree *tree = NULL;
260 struct extent_status *es1 = NULL;
261 struct rb_node *node;
266 tree = &EXT4_I(inode)->i_es_tree;
268 /* see if the extent has been cached */
269 es->es_lblk = es->es_len = es->es_pblk = 0;
270 es1 = READ_ONCE(tree->cache_es);
271 if (es1 && in_range(lblk, es1->es_lblk, es1->es_len)) {
272 es_debug("%u cached by [%u/%u) %llu %x\n",
273 lblk, es1->es_lblk, es1->es_len,
274 ext4_es_pblock(es1), ext4_es_status(es1));
278 es1 = __es_tree_search(&tree->root, lblk);
281 if (es1 && !matching_fn(es1)) {
282 while ((node = rb_next(&es1->rb_node)) != NULL) {
283 es1 = rb_entry(node, struct extent_status, rb_node);
284 if (es1->es_lblk > end) {
288 if (matching_fn(es1))
293 if (es1 && matching_fn(es1)) {
294 WRITE_ONCE(tree->cache_es, es1);
295 es->es_lblk = es1->es_lblk;
296 es->es_len = es1->es_len;
297 es->es_pblk = es1->es_pblk;
303 * Locking for __es_find_extent_range() for external use
305 void ext4_es_find_extent_range(struct inode *inode,
306 int (*matching_fn)(struct extent_status *es),
307 ext4_lblk_t lblk, ext4_lblk_t end,
308 struct extent_status *es)
310 if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
313 trace_ext4_es_find_extent_range_enter(inode, lblk);
315 read_lock(&EXT4_I(inode)->i_es_lock);
316 __es_find_extent_range(inode, matching_fn, lblk, end, es);
317 read_unlock(&EXT4_I(inode)->i_es_lock);
319 trace_ext4_es_find_extent_range_exit(inode, es);
323 * __es_scan_range - search block range for block with specified status
324 * in extents status tree
326 * @inode - file containing the range
327 * @matching_fn - pointer to function that matches extents with desired status
328 * @lblk - logical block defining start of range
329 * @end - logical block defining end of range
331 * Returns true if at least one block in the specified block range satisfies
332 * the criterion specified by @matching_fn, and false if not. If at least
333 * one extent has the specified status, then there is at least one block
334 * in the cluster with that status. Should only be called by code that has
335 * taken i_es_lock.
337 static bool __es_scan_range(struct inode *inode,
338 int (*matching_fn)(struct extent_status *es),
339 ext4_lblk_t start, ext4_lblk_t end)
341 struct extent_status es;
343 __es_find_extent_range(inode, matching_fn, start, end, &es);
345 return false; /* no matching extent in the tree */
346 else if (es.es_lblk <= start &&
347 start < es.es_lblk + es.es_len)
349 else if (start <= es.es_lblk && es.es_lblk <= end)
355 * Locking for __es_scan_range() for external use
357 bool ext4_es_scan_range(struct inode *inode,
358 int (*matching_fn)(struct extent_status *es),
359 ext4_lblk_t lblk, ext4_lblk_t end)
363 if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
366 read_lock(&EXT4_I(inode)->i_es_lock);
367 ret = __es_scan_range(inode, matching_fn, lblk, end);
368 read_unlock(&EXT4_I(inode)->i_es_lock);
374 * __es_scan_clu - search cluster for block with specified status in
375 * extents status tree
377 * @inode - file containing the cluster
378 * @matching_fn - pointer to function that matches extents with desired status
379 * @lblk - logical block in cluster to be searched
381 * Returns true if at least one extent in the cluster containing @lblk
382 * satisfies the criterion specified by @matching_fn, and false if not. If at
383 * least one extent has the specified status, then there is at least one block
384 * in the cluster with that status. Should only be called by code that has
385 * taken i_es_lock.
387 static bool __es_scan_clu(struct inode *inode,
388 int (*matching_fn)(struct extent_status *es),
391 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
392 ext4_lblk_t lblk_start, lblk_end;
394 lblk_start = EXT4_LBLK_CMASK(sbi, lblk);
395 lblk_end = lblk_start + sbi->s_cluster_ratio - 1;
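/*
 * e.g. with a cluster ratio of 16, lblk 35 yields the block range
 * [32, 47], i.e. the whole cluster containing lblk
 */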
397 return __es_scan_range(inode, matching_fn, lblk_start, lblk_end);
401 * Locking for __es_scan_clu() for external use
403 bool ext4_es_scan_clu(struct inode *inode,
404 int (*matching_fn)(struct extent_status *es),
409 if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
412 read_lock(&EXT4_I(inode)->i_es_lock);
413 ret = __es_scan_clu(inode, matching_fn, lblk);
414 read_unlock(&EXT4_I(inode)->i_es_lock);
419 static void ext4_es_list_add(struct inode *inode)
421 struct ext4_inode_info *ei = EXT4_I(inode);
422 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
424 if (!list_empty(&ei->i_es_list))
427 spin_lock(&sbi->s_es_lock);
428 if (list_empty(&ei->i_es_list)) {
429 list_add_tail(&ei->i_es_list, &sbi->s_es_list);
430 sbi->s_es_nr_inode++;
432 spin_unlock(&sbi->s_es_lock);
435 static void ext4_es_list_del(struct inode *inode)
437 struct ext4_inode_info *ei = EXT4_I(inode);
438 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
440 spin_lock(&sbi->s_es_lock);
441 if (!list_empty(&ei->i_es_list)) {
442 list_del_init(&ei->i_es_list);
443 sbi->s_es_nr_inode--;
444 WARN_ON_ONCE(sbi->s_es_nr_inode < 0);
446 spin_unlock(&sbi->s_es_lock);
449 static struct extent_status *
450 ext4_es_alloc_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len,
453 struct extent_status *es;
454 es = kmem_cache_alloc(ext4_es_cachep, GFP_ATOMIC);
462 * We don't count delayed extents because we never try to reclaim them
464 if (!ext4_es_is_delayed(es)) {
465 if (!EXT4_I(inode)->i_es_shk_nr++)
466 ext4_es_list_add(inode);
467 percpu_counter_inc(&EXT4_SB(inode->i_sb)->
468 s_es_stats.es_stats_shk_cnt);
471 EXT4_I(inode)->i_es_all_nr++;
472 percpu_counter_inc(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt);
477 static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
479 EXT4_I(inode)->i_es_all_nr--;
480 percpu_counter_dec(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt);
482 /* Decrease the shrink counter when this es is not delayed */
483 if (!ext4_es_is_delayed(es)) {
484 BUG_ON(EXT4_I(inode)->i_es_shk_nr == 0);
485 if (!--EXT4_I(inode)->i_es_shk_nr)
486 ext4_es_list_del(inode);
487 percpu_counter_dec(&EXT4_SB(inode->i_sb)->
488 s_es_stats.es_stats_shk_cnt);
491 kmem_cache_free(ext4_es_cachep, es);
495 * Check whether or not two extents can be merged
497 * - logical block number is contiguous
498 * - physical block number is contiguous
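 *
 * Illustrative example: written extents [0/8) at pblk 100 and [8/4) at
 * pblk 108 satisfy the checks below and merge into [0/12) at pblk 100;
 * if the second extent instead started at pblk 200, the physical
 * contiguity check would reject the merge.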
501 static int ext4_es_can_be_merged(struct extent_status *es1,
502 struct extent_status *es2)
504 if (ext4_es_type(es1) != ext4_es_type(es2))
507 if (((__u64) es1->es_len) + es2->es_len > EXT_MAX_BLOCKS) {
508 pr_warn("ES assertion failed when merging extents. "
509 "The sum of lengths of es1 (%d) and es2 (%d) "
510 "is bigger than allowed file size (%d)\n",
511 es1->es_len, es2->es_len, EXT_MAX_BLOCKS);
516 if (((__u64) es1->es_lblk) + es1->es_len != es2->es_lblk)
519 if ((ext4_es_is_written(es1) || ext4_es_is_unwritten(es1)) &&
520 (ext4_es_pblock(es1) + es1->es_len == ext4_es_pblock(es2)))
523 if (ext4_es_is_hole(es1))
526 /* a delayed extent can be merged only if it is not also unwritten */
527 if (ext4_es_is_delayed(es1) && !ext4_es_is_unwritten(es1))
533 static struct extent_status *
534 ext4_es_try_to_merge_left(struct inode *inode, struct extent_status *es)
536 struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
537 struct extent_status *es1;
538 struct rb_node *node;
540 node = rb_prev(&es->rb_node);
544 es1 = rb_entry(node, struct extent_status, rb_node);
545 if (ext4_es_can_be_merged(es1, es)) {
546 es1->es_len += es->es_len;
547 if (ext4_es_is_referenced(es))
548 ext4_es_set_referenced(es1);
549 rb_erase(&es->rb_node, &tree->root);
550 ext4_es_free_extent(inode, es);
557 static struct extent_status *
558 ext4_es_try_to_merge_right(struct inode *inode, struct extent_status *es)
560 struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
561 struct extent_status *es1;
562 struct rb_node *node;
564 node = rb_next(&es->rb_node);
568 es1 = rb_entry(node, struct extent_status, rb_node);
569 if (ext4_es_can_be_merged(es, es1)) {
570 es->es_len += es1->es_len;
571 if (ext4_es_is_referenced(es1))
572 ext4_es_set_referenced(es);
573 rb_erase(node, &tree->root);
574 ext4_es_free_extent(inode, es1);
580 #ifdef ES_AGGRESSIVE_TEST
581 #include "ext4_extents.h" /* Needed when ES_AGGRESSIVE_TEST is defined */
583 static void ext4_es_insert_extent_ext_check(struct inode *inode,
584 struct extent_status *es)
586 struct ext4_ext_path *path = NULL;
587 struct ext4_extent *ex;
588 ext4_lblk_t ee_block;
589 ext4_fsblk_t ee_start;
590 unsigned short ee_len;
591 int depth, ee_status, es_status;
593 path = ext4_find_extent(inode, es->es_lblk, NULL, EXT4_EX_NOCACHE);
597 depth = ext_depth(inode);
598 ex = path[depth].p_ext;
602 ee_block = le32_to_cpu(ex->ee_block);
603 ee_start = ext4_ext_pblock(ex);
604 ee_len = ext4_ext_get_actual_len(ex);
606 ee_status = ext4_ext_is_unwritten(ex) ? 1 : 0;
607 es_status = ext4_es_is_unwritten(es) ? 1 : 0;
610 * Make sure ex and es do not overlap when we try to insert
611 * a delayed/hole extent.
613 if (!ext4_es_is_written(es) && !ext4_es_is_unwritten(es)) {
614 if (in_range(es->es_lblk, ee_block, ee_len)) {
615 pr_warn("ES insert assertion failed for "
616 "inode: %lu we can find an extent "
617 "at block [%d/%d/%llu/%c], but we "
618 "want to add a delayed/hole extent "
620 inode->i_ino, ee_block, ee_len,
621 ee_start, ee_status ? 'u' : 'w',
622 es->es_lblk, es->es_len,
623 ext4_es_pblock(es), ext4_es_status(es));
629 * We don't check ee_block == es->es_lblk, etc. because es
630 * might be a part of a whole extent, and vice versa.
632 if (es->es_lblk < ee_block ||
633 ext4_es_pblock(es) != ee_start + es->es_lblk - ee_block) {
634 pr_warn("ES insert assertion failed for inode: %lu "
635 "ex_status [%d/%d/%llu/%c] != "
636 "es_status [%d/%d/%llu/%c]\n", inode->i_ino,
637 ee_block, ee_len, ee_start,
638 ee_status ? 'u' : 'w', es->es_lblk, es->es_len,
639 ext4_es_pblock(es), es_status ? 'u' : 'w');
643 if (ee_status ^ es_status) {
644 pr_warn("ES insert assertion failed for inode: %lu "
645 "ex_status [%d/%d/%llu/%c] != "
646 "es_status [%d/%d/%llu/%c]\n", inode->i_ino,
647 ee_block, ee_len, ee_start,
648 ee_status ? 'u' : 'w', es->es_lblk, es->es_len,
649 ext4_es_pblock(es), es_status ? 'u' : 'w');
653 * We can't find an extent on disk. So we need to make sure
654 * that we are not trying to add a written/unwritten extent.
656 if (!ext4_es_is_delayed(es) && !ext4_es_is_hole(es)) {
657 pr_warn("ES insert assertion failed for inode: %lu "
658 "can't find an extent at block %d but we want "
659 "to add a written/unwritten extent "
660 "[%d/%d/%llu/%x]\n", inode->i_ino,
661 es->es_lblk, es->es_lblk, es->es_len,
662 ext4_es_pblock(es), ext4_es_status(es));
666 ext4_free_ext_path(path);
669 static void ext4_es_insert_extent_ind_check(struct inode *inode,
670 struct extent_status *es)
672 struct ext4_map_blocks map;
676 * Here we call ext4_ind_map_blocks to look up a block mapping because
677 * the 'Indirect' structure is defined in indirect.c, so we can't
678 * access the direct/indirect tree from outside it. It would be too
679 * ugly to define this function in the indirect.c file.
682 map.m_lblk = es->es_lblk;
683 map.m_len = es->es_len;
685 retval = ext4_ind_map_blocks(NULL, inode, &map, 0);
687 if (ext4_es_is_delayed(es) || ext4_es_is_hole(es)) {
689 * We want to add a delayed/hole extent but this
690 * block has been allocated.
692 pr_warn("ES insert assertion failed for inode: %lu "
693 "We can find blocks but we want to add a "
694 "delayed/hole extent [%d/%d/%llu/%x]\n",
695 inode->i_ino, es->es_lblk, es->es_len,
696 ext4_es_pblock(es), ext4_es_status(es));
698 } else if (ext4_es_is_written(es)) {
699 if (retval != es->es_len) {
700 pr_warn("ES insert assertion failed for "
701 "inode: %lu retval %d != es_len %d\n",
702 inode->i_ino, retval, es->es_len);
705 if (map.m_pblk != ext4_es_pblock(es)) {
706 pr_warn("ES insert assertion failed for "
707 "inode: %lu m_pblk %llu != "
709 inode->i_ino, map.m_pblk,
715 * We don't need to check unwritten extents because
716 * indirect-based files don't have them.
720 } else if (retval == 0) {
721 if (ext4_es_is_written(es)) {
722 pr_warn("ES insert assertion failed for inode: %lu "
723 "We can't find the block but we want to add "
724 "a written extent [%d/%d/%llu/%x]\n",
725 inode->i_ino, es->es_lblk, es->es_len,
726 ext4_es_pblock(es), ext4_es_status(es));
732 static inline void ext4_es_insert_extent_check(struct inode *inode,
733 struct extent_status *es)
736 * We don't need to worry about the race condition because
737 * the caller holds i_data_sem.
739 BUG_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem));
740 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
741 ext4_es_insert_extent_ext_check(inode, es);
743 ext4_es_insert_extent_ind_check(inode, es);
746 static inline void ext4_es_insert_extent_check(struct inode *inode,
747 struct extent_status *es)
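/*
 * Insert @newes into the status tree, extending or merging an adjacent
 * extent when possible rather than allocating a new node. The caller
 * must hold i_es_lock for writing.
 */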
752 static int __es_insert_extent(struct inode *inode, struct extent_status *newes)
754 struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
755 struct rb_node **p = &tree->root.rb_node;
756 struct rb_node *parent = NULL;
757 struct extent_status *es;
761 es = rb_entry(parent, struct extent_status, rb_node);
763 if (newes->es_lblk < es->es_lblk) {
764 if (ext4_es_can_be_merged(newes, es)) {
766 * Here we can modify es_lblk directly
767 * because the ranges don't overlap.
769 es->es_lblk = newes->es_lblk;
770 es->es_len += newes->es_len;
771 if (ext4_es_is_written(es) ||
772 ext4_es_is_unwritten(es))
773 ext4_es_store_pblock(es,
775 es = ext4_es_try_to_merge_left(inode, es);
779 } else if (newes->es_lblk > ext4_es_end(es)) {
780 if (ext4_es_can_be_merged(es, newes)) {
781 es->es_len += newes->es_len;
782 es = ext4_es_try_to_merge_right(inode, es);
792 es = ext4_es_alloc_extent(inode, newes->es_lblk, newes->es_len,
796 rb_link_node(&es->rb_node, parent, p);
797 rb_insert_color(&es->rb_node, &tree->root);
805 * ext4_es_insert_extent() adds information to an inode's extent
808 * Return 0 on success, error code on failure.
810 int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
811 ext4_lblk_t len, ext4_fsblk_t pblk,
814 struct extent_status newes;
815 ext4_lblk_t end = lblk + len - 1;
817 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
819 if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
822 es_debug("add [%u/%u) %llu %x to extent status tree of inode %lu\n",
823 lblk, len, pblk, status, inode->i_ino);
830 if ((status & EXTENT_STATUS_DELAYED) &&
831 (status & EXTENT_STATUS_WRITTEN)) {
832 ext4_warning(inode->i_sb, "Inserting extent [%u/%u] as "
833 "delayed and written, which can potentially "
834 "cause data loss.", lblk, len);
838 newes.es_lblk = lblk;
840 ext4_es_store_pblock_status(&newes, pblk, status);
841 trace_ext4_es_insert_extent(inode, &newes);
843 ext4_es_insert_extent_check(inode, &newes);
845 write_lock(&EXT4_I(inode)->i_es_lock);
846 err = __es_remove_extent(inode, lblk, end, NULL);
850 err = __es_insert_extent(inode, &newes);
851 if (err == -ENOMEM && __es_shrink(EXT4_SB(inode->i_sb),
854 if (err == -ENOMEM && !ext4_es_is_delayed(&newes))
857 if (sbi->s_cluster_ratio > 1 && test_opt(inode->i_sb, DELALLOC) &&
858 (status & EXTENT_STATUS_WRITTEN ||
859 status & EXTENT_STATUS_UNWRITTEN))
860 __revise_pending(inode, lblk, len);
863 write_unlock(&EXT4_I(inode)->i_es_lock);
865 ext4_es_print_tree(inode);
871 * ext4_es_cache_extent() inserts information into the extent status
872 * tree if and only if there isn't information about the range in
875 void ext4_es_cache_extent(struct inode *inode, ext4_lblk_t lblk,
876 ext4_lblk_t len, ext4_fsblk_t pblk,
879 struct extent_status *es;
880 struct extent_status newes;
881 ext4_lblk_t end = lblk + len - 1;
883 if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
886 newes.es_lblk = lblk;
888 ext4_es_store_pblock_status(&newes, pblk, status);
889 trace_ext4_es_cache_extent(inode, &newes);
896 write_lock(&EXT4_I(inode)->i_es_lock);
898 es = __es_tree_search(&EXT4_I(inode)->i_es_tree.root, lblk);
899 if (!es || es->es_lblk > end)
900 __es_insert_extent(inode, &newes);
901 write_unlock(&EXT4_I(inode)->i_es_lock);
905 * ext4_es_lookup_extent() looks up an extent in the extent status tree.
907 * ext4_es_lookup_extent is called by ext4_map_blocks/ext4_da_map_blocks.
909 * Return: 1 if found, 0 if not.
911 int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
912 ext4_lblk_t *next_lblk,
913 struct extent_status *es)
915 struct ext4_es_tree *tree;
916 struct ext4_es_stats *stats;
917 struct extent_status *es1 = NULL;
918 struct rb_node *node;
921 if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
924 trace_ext4_es_lookup_extent_enter(inode, lblk);
925 es_debug("lookup extent in block %u\n", lblk);
927 tree = &EXT4_I(inode)->i_es_tree;
928 read_lock(&EXT4_I(inode)->i_es_lock);
930 /* first, try to find the extent in the cache */
931 es->es_lblk = es->es_len = es->es_pblk = 0;
932 es1 = READ_ONCE(tree->cache_es);
933 if (es1 && in_range(lblk, es1->es_lblk, es1->es_len)) {
934 es_debug("%u cached by [%u/%u)\n",
935 lblk, es1->es_lblk, es1->es_len);
940 node = tree->root.rb_node;
942 es1 = rb_entry(node, struct extent_status, rb_node);
943 if (lblk < es1->es_lblk)
944 node = node->rb_left;
945 else if (lblk > ext4_es_end(es1))
946 node = node->rb_right;
954 stats = &EXT4_SB(inode->i_sb)->s_es_stats;
957 es->es_lblk = es1->es_lblk;
958 es->es_len = es1->es_len;
959 es->es_pblk = es1->es_pblk;
960 if (!ext4_es_is_referenced(es1))
961 ext4_es_set_referenced(es1);
962 percpu_counter_inc(&stats->es_stats_cache_hits);
964 node = rb_next(&es1->rb_node);
966 es1 = rb_entry(node, struct extent_status,
968 *next_lblk = es1->es_lblk;
973 percpu_counter_inc(&stats->es_stats_cache_misses);
976 read_unlock(&EXT4_I(inode)->i_es_lock);
978 trace_ext4_es_lookup_extent_exit(inode, es, found);
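/*
 * Bookkeeping carried through __es_remove_extent() while counting the
 * cluster reservations to be released (fields of struct rsvd_count).
 */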
984 bool first_do_lblk_found;
985 ext4_lblk_t first_do_lblk;
986 ext4_lblk_t last_do_lblk;
987 struct extent_status *left_es;
993 * init_rsvd - initialize reserved count data before removing block range
994 * in file from extent status tree
996 * @inode - file containing range
997 * @lblk - first block in range
998 * @es - pointer to first extent in range
999 * @rc - pointer to reserved count data
1001 * Assumes es is not NULL
1003 static void init_rsvd(struct inode *inode, ext4_lblk_t lblk,
1004 struct extent_status *es, struct rsvd_count *rc)
1006 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1007 struct rb_node *node;
1012 * for bigalloc, note the first delonly block in the range has not
1013 * been found, record the extent containing the block to the left of
1014 * the region to be removed, if any, and note that there's no partial
1015 * cluster to track
1017 if (sbi->s_cluster_ratio > 1) {
1018 rc->first_do_lblk_found = false;
1019 if (lblk > es->es_lblk) {
1022 node = rb_prev(&es->rb_node);
1023 rc->left_es = node ? rb_entry(node,
1024 struct extent_status,
1027 rc->partial = false;
1032 * count_rsvd - count the clusters containing delayed and not unwritten
1033 * (delonly) blocks in a range within an extent and add to
1034 * the running tally in rsvd_count
1036 * @inode - file containing extent
1037 * @lblk - first block in range
1038 * @len - length of range in blocks
1039 * @es - pointer to extent containing clusters to be counted
1040 * @rc - pointer to reserved count data
1042 * Tracks partial clusters found at the beginning and end of extents so
1043 * they aren't overcounted when they span adjacent extents
1045 static void count_rsvd(struct inode *inode, ext4_lblk_t lblk, long len,
1046 struct extent_status *es, struct rsvd_count *rc)
1048 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1049 ext4_lblk_t i, end, nclu;
1051 if (!ext4_es_is_delonly(es))
1056 if (sbi->s_cluster_ratio == 1) {
1057 rc->ndelonly += (int) len;
1063 i = (lblk < es->es_lblk) ? es->es_lblk : lblk;
1064 end = lblk + (ext4_lblk_t) len - 1;
1065 end = (end > ext4_es_end(es)) ? ext4_es_end(es) : end;
1067 /* record the first block of the first delonly extent seen */
1068 if (!rc->first_do_lblk_found) {
1069 rc->first_do_lblk = i;
1070 rc->first_do_lblk_found = true;
1073 /* update the last lblk in the region seen so far */
1074 rc->last_do_lblk = end;
1077 * if we're tracking a partial cluster and the current extent
1078 * doesn't start with it, count it and stop tracking
1080 if (rc->partial && (rc->lclu != EXT4_B2C(sbi, i))) {
1082 rc->partial = false;
1086 * if the first cluster doesn't start on a cluster boundary but
1087 * ends on one, count it
1089 if (EXT4_LBLK_COFF(sbi, i) != 0) {
1090 if (end >= EXT4_LBLK_CFILL(sbi, i)) {
1092 rc->partial = false;
1093 i = EXT4_LBLK_CFILL(sbi, i) + 1;
1098 * if the current cluster starts on a cluster boundary, count the
1099 * number of whole delonly clusters in the extent
1101 if ((i + sbi->s_cluster_ratio - 1) <= end) {
1102 nclu = (end - i + 1) >> sbi->s_cluster_bits;
1103 rc->ndelonly += nclu;
1104 i += nclu << sbi->s_cluster_bits;
1108 * start tracking a partial cluster if there's a partial at the end
1109 * of the current extent and we're not already tracking one
1111 if (!rc->partial && i <= end) {
1113 rc->lclu = EXT4_B2C(sbi, i);
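/*
 * Worked example (illustrative, assuming a cluster ratio of 4): for a
 * delonly extent covering blocks 2-9, the partially covered first
 * cluster (blocks 0-3) is filled to its boundary by the extent and is
 * counted, blocks 4-7 form one whole cluster, and blocks 8-9 start a
 * partial cluster tracked via rc->lclu so a following extent in the
 * same cluster isn't double counted.
 */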
1118 * __pr_tree_search - search for a pending cluster reservation
1120 * @root - root of pending reservation tree
1121 * @lclu - logical cluster to search for
1123 * Returns the pending reservation for the cluster identified by @lclu
1124 * if found. If not, returns a reservation for the next cluster if any,
1125 * and if not, returns NULL.
1127 static struct pending_reservation *__pr_tree_search(struct rb_root *root,
1130 struct rb_node *node = root->rb_node;
1131 struct pending_reservation *pr = NULL;
1134 pr = rb_entry(node, struct pending_reservation, rb_node);
1135 if (lclu < pr->lclu)
1136 node = node->rb_left;
1137 else if (lclu > pr->lclu)
1138 node = node->rb_right;
1142 if (pr && lclu < pr->lclu)
1144 if (pr && lclu > pr->lclu) {
1145 node = rb_next(&pr->rb_node);
1146 return node ? rb_entry(node, struct pending_reservation,
1153 * get_rsvd - calculates and returns the number of cluster reservations to be
1154 * released when removing a block range from the extent status tree
1155 * and releases any pending reservations within the range
1157 * @inode - file containing block range
1158 * @end - last block in range
1159 * @right_es - pointer to extent containing next block beyond end or NULL
1160 * @rc - pointer to reserved count data
1162 * The number of reservations to be released is equal to the number of
1163 * clusters containing delayed and not unwritten (delonly) blocks within
1164 * the range, minus the number of clusters still containing delonly blocks
1165 * at the ends of the range, and minus the number of pending reservations
1166 * within the range
1168 static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
1169 struct extent_status *right_es,
1170 struct rsvd_count *rc)
1172 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1173 struct pending_reservation *pr;
1174 struct ext4_pending_tree *tree = &EXT4_I(inode)->i_pending_tree;
1175 struct rb_node *node;
1176 ext4_lblk_t first_lclu, last_lclu;
1177 bool left_delonly, right_delonly, count_pending;
1178 struct extent_status *es;
1180 if (sbi->s_cluster_ratio > 1) {
1181 /* count any remaining partial cluster */
1185 if (rc->ndelonly == 0)
1188 first_lclu = EXT4_B2C(sbi, rc->first_do_lblk);
1189 last_lclu = EXT4_B2C(sbi, rc->last_do_lblk);
1192 * decrease the delonly count by the number of clusters at the
1193 * ends of the range that still contain delonly blocks -
1194 * these clusters still need to be reserved
1196 left_delonly = right_delonly = false;
1199 while (es && ext4_es_end(es) >=
1200 EXT4_LBLK_CMASK(sbi, rc->first_do_lblk)) {
1201 if (ext4_es_is_delonly(es)) {
1203 left_delonly = true;
1206 node = rb_prev(&es->rb_node);
1209 es = rb_entry(node, struct extent_status, rb_node);
1211 if (right_es && (!left_delonly || first_lclu != last_lclu)) {
1212 if (end < ext4_es_end(right_es)) {
1215 node = rb_next(&right_es->rb_node);
1216 es = node ? rb_entry(node, struct extent_status,
1219 while (es && es->es_lblk <=
1220 EXT4_LBLK_CFILL(sbi, rc->last_do_lblk)) {
1221 if (ext4_es_is_delonly(es)) {
1223 right_delonly = true;
1226 node = rb_next(&es->rb_node);
1229 es = rb_entry(node, struct extent_status,
1235 * Determine the block range that should be searched for
1236 * pending reservations, if any. Clusters on the ends of the
1237 * original removed range containing delonly blocks are
1238 * excluded. They've already been accounted for and it's not
1239 * possible to determine if an associated pending reservation
1240 * should be released with the information available in the
1241 * extents status tree.
1243 if (first_lclu == last_lclu) {
1244 if (left_delonly | right_delonly)
1245 count_pending = false;
1247 count_pending = true;
1253 if (first_lclu <= last_lclu)
1254 count_pending = true;
1256 count_pending = false;
1260 * a pending reservation found between first_lclu and last_lclu
1261 * represents an allocated cluster that contained at least one
1262 * delonly block, so the delonly total must be reduced by one
1263 * for each pending reservation found and released
1265 if (count_pending) {
1266 pr = __pr_tree_search(&tree->root, first_lclu);
1267 while (pr && pr->lclu <= last_lclu) {
1269 node = rb_next(&pr->rb_node);
1270 rb_erase(&pr->rb_node, &tree->root);
1271 kmem_cache_free(ext4_pending_cachep, pr);
1274 pr = rb_entry(node, struct pending_reservation,
1279 return rc->ndelonly;
1284 * __es_remove_extent - removes block range from extent status tree
1286 * @inode - file containing range
1287 * @lblk - first block in range
1288 * @end - last block in range
1289 * @reserved - number of cluster reservations released
1291 * If @reserved is not NULL and delayed allocation is enabled, counts
1292 * block/cluster reservations freed by removing range and if bigalloc
1293 * enabled cancels pending reservations as needed. Returns 0 on success,
1294 * error code on failure.
1296 static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
1297 ext4_lblk_t end, int *reserved)
1299 struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
1300 struct rb_node *node;
1301 struct extent_status *es;
1302 struct extent_status orig_es;
1303 ext4_lblk_t len1, len2;
1306 bool count_reserved = true;
1307 struct rsvd_count rc;
1309 if (reserved == NULL || !test_opt(inode->i_sb, DELALLOC))
1310 count_reserved = false;
1314 es = __es_tree_search(&tree->root, lblk);
1317 if (es->es_lblk > end)
1320 /* Simply invalidate cache_es. */
1321 tree->cache_es = NULL;
1323 init_rsvd(inode, lblk, es, &rc);
1325 orig_es.es_lblk = es->es_lblk;
1326 orig_es.es_len = es->es_len;
1327 orig_es.es_pblk = es->es_pblk;
1329 len1 = lblk > es->es_lblk ? lblk - es->es_lblk : 0;
1330 len2 = ext4_es_end(es) > end ? ext4_es_end(es) - end : 0;
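/*
 * len1/len2 are the number of blocks of this extent lying before/after
 * the removed range; a nonzero value means the extent must be trimmed
 * on that side, or split if both are nonzero.
 */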
1335 struct extent_status newes;
1337 newes.es_lblk = end + 1;
1338 newes.es_len = len2;
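/*
 * sentinel pblk; overwritten below unless the extent is delayed or a
 * hole, where the physical block is unused
 */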
1339 block = 0x7FDEADBEEFULL;
1340 if (ext4_es_is_written(&orig_es) ||
1341 ext4_es_is_unwritten(&orig_es))
1342 block = ext4_es_pblock(&orig_es) +
1343 orig_es.es_len - len2;
1344 ext4_es_store_pblock_status(&newes, block,
1345 ext4_es_status(&orig_es));
1346 err = __es_insert_extent(inode, &newes);
1348 es->es_lblk = orig_es.es_lblk;
1349 es->es_len = orig_es.es_len;
1350 if ((err == -ENOMEM) &&
1351 __es_shrink(EXT4_SB(inode->i_sb),
1352 128, EXT4_I(inode)))
1357 es->es_lblk = end + 1;
1359 if (ext4_es_is_written(es) ||
1360 ext4_es_is_unwritten(es)) {
1361 block = orig_es.es_pblk + orig_es.es_len - len2;
1362 ext4_es_store_pblock(es, block);
1366 count_rsvd(inode, lblk, orig_es.es_len - len1 - len2,
1368 goto out_get_reserved;
1373 count_rsvd(inode, lblk, orig_es.es_len - len1,
1375 node = rb_next(&es->rb_node);
1377 es = rb_entry(node, struct extent_status, rb_node);
1382 while (es && ext4_es_end(es) <= end) {
1384 count_rsvd(inode, es->es_lblk, es->es_len, es, &rc);
1385 node = rb_next(&es->rb_node);
1386 rb_erase(&es->rb_node, &tree->root);
1387 ext4_es_free_extent(inode, es);
1392 es = rb_entry(node, struct extent_status, rb_node);
1395 if (es && es->es_lblk < end + 1) {
1396 ext4_lblk_t orig_len = es->es_len;
1398 len1 = ext4_es_end(es) - end;
1400 count_rsvd(inode, es->es_lblk, orig_len - len1,
1402 es->es_lblk = end + 1;
1404 if (ext4_es_is_written(es) || ext4_es_is_unwritten(es)) {
1405 block = es->es_pblk + orig_len - len1;
1406 ext4_es_store_pblock(es, block);
1412 *reserved = get_rsvd(inode, end, es, &rc);
1418 * ext4_es_remove_extent - removes block range from extent status tree
1420 * @inode - file containing range
1421 * @lblk - first block in range
1422 * @len - number of blocks to remove
1424 * Reduces block/cluster reservation count and for bigalloc cancels pending
1425 * reservations as needed. Returns 0 on success, error code on failure.
1427 int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
1434 if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
1437 trace_ext4_es_remove_extent(inode, lblk, len);
1438 es_debug("remove [%u/%u) from extent status tree of inode %lu\n",
1439 lblk, len, inode->i_ino);
1444 end = lblk + len - 1;
1448 * ext4_clear_inode() depends on us taking i_es_lock unconditionally
1449 * so that we are sure __es_shrink() is done with the inode before it
1450 * is reclaimed.
1452 write_lock(&EXT4_I(inode)->i_es_lock);
1453 err = __es_remove_extent(inode, lblk, end, &reserved);
1454 write_unlock(&EXT4_I(inode)->i_es_lock);
1455 ext4_es_print_tree(inode);
1456 ext4_da_release_space(inode, reserved);
1460 static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
1461 struct ext4_inode_info *locked_ei)
1463 struct ext4_inode_info *ei;
1464 struct ext4_es_stats *es_stats;
1469 int retried = 0, nr_skipped = 0;
1471 es_stats = &sbi->s_es_stats;
1472 start_time = ktime_get();
1475 spin_lock(&sbi->s_es_lock);
1476 nr_to_walk = sbi->s_es_nr_inode;
1477 while (nr_to_walk-- > 0) {
1478 if (list_empty(&sbi->s_es_list)) {
1479 spin_unlock(&sbi->s_es_lock);
1482 ei = list_first_entry(&sbi->s_es_list, struct ext4_inode_info,
1484 /* Move the inode to the tail */
1485 list_move_tail(&ei->i_es_list, &sbi->s_es_list);
1488 * Normally we try hard to avoid shrinking precached inodes,
1489 * but we will as a last resort.
1491 if (!retried && ext4_test_inode_state(&ei->vfs_inode,
1492 EXT4_STATE_EXT_PRECACHED)) {
1497 if (ei == locked_ei || !write_trylock(&ei->i_es_lock)) {
1502 * Now we hold i_es_lock which protects us from inode reclaim
1503 * freeing the inode under us.
1505 spin_unlock(&sbi->s_es_lock);
1507 nr_shrunk += es_reclaim_extents(ei, &nr_to_scan);
1508 write_unlock(&ei->i_es_lock);
1510 if (nr_to_scan <= 0)
1512 spin_lock(&sbi->s_es_lock);
1514 spin_unlock(&sbi->s_es_lock);
1517 * If we skipped any inodes, and we weren't able to make any
1518 * forward progress, try again to scan precached inodes.
1520 if ((nr_shrunk == 0) && nr_skipped && !retried) {
1525 if (locked_ei && nr_shrunk == 0)
1526 nr_shrunk = es_reclaim_extents(locked_ei, &nr_to_scan);
1529 scan_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));
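/*
 * Fold this pass into exponentially weighted moving averages,
 * (new + old * 3) / 4, of scan time and of extents shrunk.
 */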
1530 if (likely(es_stats->es_stats_scan_time))
1531 es_stats->es_stats_scan_time = (scan_time +
1532 es_stats->es_stats_scan_time*3) / 4;
1534 es_stats->es_stats_scan_time = scan_time;
1535 if (scan_time > es_stats->es_stats_max_scan_time)
1536 es_stats->es_stats_max_scan_time = scan_time;
1537 if (likely(es_stats->es_stats_shrunk))
1538 es_stats->es_stats_shrunk = (nr_shrunk +
1539 es_stats->es_stats_shrunk*3) / 4;
1541 es_stats->es_stats_shrunk = nr_shrunk;
1543 trace_ext4_es_shrink(sbi->s_sb, nr_shrunk, scan_time,
1544 nr_skipped, retried);
1548 static unsigned long ext4_es_count(struct shrinker *shrink,
1549 struct shrink_control *sc)
1552 struct ext4_sb_info *sbi;
1554 sbi = container_of(shrink, struct ext4_sb_info, s_es_shrinker);
1555 nr = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt);
1556 trace_ext4_es_shrink_count(sbi->s_sb, sc->nr_to_scan, nr);
1560 static unsigned long ext4_es_scan(struct shrinker *shrink,
1561 struct shrink_control *sc)
1563 struct ext4_sb_info *sbi = container_of(shrink,
1564 struct ext4_sb_info, s_es_shrinker);
1565 int nr_to_scan = sc->nr_to_scan;
1568 ret = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt);
1569 trace_ext4_es_shrink_scan_enter(sbi->s_sb, nr_to_scan, ret);
1571 nr_shrunk = __es_shrink(sbi, nr_to_scan, NULL);
1573 ret = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt);
1574 trace_ext4_es_shrink_scan_exit(sbi->s_sb, nr_shrunk, ret);
1578 int ext4_seq_es_shrinker_info_show(struct seq_file *seq, void *v)
1580 struct ext4_sb_info *sbi = EXT4_SB((struct super_block *) seq->private);
1581 struct ext4_es_stats *es_stats = &sbi->s_es_stats;
1582 struct ext4_inode_info *ei, *max = NULL;
1583 unsigned int inode_cnt = 0;
1585 if (v != SEQ_START_TOKEN)
1588 /* here we just find an inode that has the max nr. of objects */
1589 spin_lock(&sbi->s_es_lock);
1590 list_for_each_entry(ei, &sbi->s_es_list, i_es_list) {
1592 if (max && max->i_es_all_nr < ei->i_es_all_nr)
1597 spin_unlock(&sbi->s_es_lock);
1599 seq_printf(seq, "stats:\n %lld objects\n %lld reclaimable objects\n",
1600 percpu_counter_sum_positive(&es_stats->es_stats_all_cnt),
1601 percpu_counter_sum_positive(&es_stats->es_stats_shk_cnt));
1602 seq_printf(seq, " %lld/%lld cache hits/misses\n",
1603 percpu_counter_sum_positive(&es_stats->es_stats_cache_hits),
1604 percpu_counter_sum_positive(&es_stats->es_stats_cache_misses));
1606 seq_printf(seq, " %d inodes on list\n", inode_cnt);
1608 seq_printf(seq, "average:\n %llu us scan time\n",
1609 div_u64(es_stats->es_stats_scan_time, 1000));
1610 seq_printf(seq, " %lu shrunk objects\n", es_stats->es_stats_shrunk);
1613 "maximum:\n %lu inode (%u objects, %u reclaimable)\n"
1614 " %llu us max scan time\n",
1615 max->vfs_inode.i_ino, max->i_es_all_nr, max->i_es_shk_nr,
1616 div_u64(es_stats->es_stats_max_scan_time, 1000));
1621 int ext4_es_register_shrinker(struct ext4_sb_info *sbi)
1625 /* Make sure we have enough bits for physical block number */
1626 BUILD_BUG_ON(ES_SHIFT < 48);
1627 INIT_LIST_HEAD(&sbi->s_es_list);
1628 sbi->s_es_nr_inode = 0;
1629 spin_lock_init(&sbi->s_es_lock);
1630 sbi->s_es_stats.es_stats_shrunk = 0;
1631 err = percpu_counter_init(&sbi->s_es_stats.es_stats_cache_hits, 0,
1635 err = percpu_counter_init(&sbi->s_es_stats.es_stats_cache_misses, 0,
1639 sbi->s_es_stats.es_stats_scan_time = 0;
1640 sbi->s_es_stats.es_stats_max_scan_time = 0;
1641 err = percpu_counter_init(&sbi->s_es_stats.es_stats_all_cnt, 0, GFP_KERNEL);
1644 err = percpu_counter_init(&sbi->s_es_stats.es_stats_shk_cnt, 0, GFP_KERNEL);
1648 sbi->s_es_shrinker.scan_objects = ext4_es_scan;
1649 sbi->s_es_shrinker.count_objects = ext4_es_count;
1650 sbi->s_es_shrinker.seeks = DEFAULT_SEEKS;
1651 err = register_shrinker(&sbi->s_es_shrinker, "ext4-es:%s",
1658 percpu_counter_destroy(&sbi->s_es_stats.es_stats_shk_cnt);
1660 percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt);
1662 percpu_counter_destroy(&sbi->s_es_stats.es_stats_cache_misses);
1664 percpu_counter_destroy(&sbi->s_es_stats.es_stats_cache_hits);
1668 void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi)
1670 percpu_counter_destroy(&sbi->s_es_stats.es_stats_cache_hits);
1671 percpu_counter_destroy(&sbi->s_es_stats.es_stats_cache_misses);
1672 percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt);
1673 percpu_counter_destroy(&sbi->s_es_stats.es_stats_shk_cnt);
1674 unregister_shrinker(&sbi->s_es_shrinker);
1678 * Shrink extents in given inode from ei->i_es_shrink_lblk till end. Scan at
1679 * most *nr_to_scan extents, update *nr_to_scan accordingly.
1681 * Return 0 if we hit end of tree / interval, 1 if we exhausted nr_to_scan.
1682 * Increment *nr_shrunk by the number of reclaimed extents. Also update
1683 * ei->i_es_shrink_lblk to where we should continue scanning.
1685 static int es_do_reclaim_extents(struct ext4_inode_info *ei, ext4_lblk_t end,
1686 int *nr_to_scan, int *nr_shrunk)
1688 struct inode *inode = &ei->vfs_inode;
1689 struct ext4_es_tree *tree = &ei->i_es_tree;
1690 struct extent_status *es;
1691 struct rb_node *node;
1693 es = __es_tree_search(&tree->root, ei->i_es_shrink_lblk);
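/*
 * Walk forward from the saved scan position. Extents referenced since
 * the last pass get a second chance: the referenced bit is cleared and
 * they are skipped, becoming reclaimable on the next pass.
 */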
1697 while (*nr_to_scan > 0) {
1698 if (es->es_lblk > end) {
1699 ei->i_es_shrink_lblk = end + 1;
1704 node = rb_next(&es->rb_node);
1706 * We can't reclaim delayed extents from the status tree because
1707 * fiemap, bigalloc, and seek_data/hole need to use them.
1709 if (ext4_es_is_delayed(es))
1711 if (ext4_es_is_referenced(es)) {
1712 ext4_es_clear_referenced(es);
1716 rb_erase(&es->rb_node, &tree->root);
1717 ext4_es_free_extent(inode, es);
1722 es = rb_entry(node, struct extent_status, rb_node);
1724 ei->i_es_shrink_lblk = es->es_lblk;
1727 ei->i_es_shrink_lblk = 0;
1731 static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan)
1733 struct inode *inode = &ei->vfs_inode;
1735 ext4_lblk_t start = ei->i_es_shrink_lblk;
1736 static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
1737 DEFAULT_RATELIMIT_BURST);
1739 if (ei->i_es_shk_nr == 0)
1742 if (ext4_test_inode_state(inode, EXT4_STATE_EXT_PRECACHED) &&
1744 ext4_warning(inode->i_sb, "forced shrink of precached extents");
1746 if (!es_do_reclaim_extents(ei, EXT_MAX_BLOCKS, nr_to_scan, &nr_shrunk) &&
1748 es_do_reclaim_extents(ei, start - 1, nr_to_scan, &nr_shrunk);
1750 ei->i_es_tree.cache_es = NULL;
1755 * Called to support EXT4_IOC_CLEAR_ES_CACHE. We can only remove
1756 * discretionary entries from the extent status cache. (Some entries
1757 * must be present for proper operations.)
1759 void ext4_clear_inode_es(struct inode *inode)
1761 struct ext4_inode_info *ei = EXT4_I(inode);
1762 struct extent_status *es;
1763 struct ext4_es_tree *tree;
1764 struct rb_node *node;
1766 write_lock(&ei->i_es_lock);
1767 tree = &EXT4_I(inode)->i_es_tree;
1768 tree->cache_es = NULL;
1769 node = rb_first(&tree->root);
1771 es = rb_entry(node, struct extent_status, rb_node);
1772 node = rb_next(node);
1773 if (!ext4_es_is_delayed(es)) {
1774 rb_erase(&es->rb_node, &tree->root);
1775 ext4_es_free_extent(inode, es);
1778 ext4_clear_inode_state(inode, EXT4_STATE_EXT_PRECACHED);
1779 write_unlock(&ei->i_es_lock);
1783 static void ext4_print_pending_tree(struct inode *inode)
1785 struct ext4_pending_tree *tree;
1786 struct rb_node *node;
1787 struct pending_reservation *pr;
1789 printk(KERN_DEBUG "pending reservations for inode %lu:", inode->i_ino);
1790 tree = &EXT4_I(inode)->i_pending_tree;
1791 node = rb_first(&tree->root);
1793 pr = rb_entry(node, struct pending_reservation, rb_node);
1794 printk(KERN_DEBUG " %u", pr->lclu);
1795 node = rb_next(node);
1797 printk(KERN_DEBUG "\n");
1800 #define ext4_print_pending_tree(inode)
1803 int __init ext4_init_pending(void)
1805 ext4_pending_cachep = KMEM_CACHE(pending_reservation, SLAB_RECLAIM_ACCOUNT);
1806 if (ext4_pending_cachep == NULL)
1811 void ext4_exit_pending(void)
1813 kmem_cache_destroy(ext4_pending_cachep);
1816 void ext4_init_pending_tree(struct ext4_pending_tree *tree)
1818 tree->root = RB_ROOT;
1822 * __get_pending - retrieve a pointer to a pending reservation
1824 * @inode - file containing the pending cluster reservation
1825 * @lclu - logical cluster of interest
1827 * Returns a pointer to a pending reservation if it's a member of
1828 * the set, and NULL if not. Must be called holding i_es_lock.
1830 static struct pending_reservation *__get_pending(struct inode *inode,
1833 struct ext4_pending_tree *tree;
1834 struct rb_node *node;
1835 struct pending_reservation *pr = NULL;
1837 tree = &EXT4_I(inode)->i_pending_tree;
1838 node = (&tree->root)->rb_node;
1841 pr = rb_entry(node, struct pending_reservation, rb_node);
1842 if (lclu < pr->lclu)
1843 node = node->rb_left;
1844 else if (lclu > pr->lclu)
1845 node = node->rb_right;
1846 else if (lclu == pr->lclu)
1853 * __insert_pending - adds a pending cluster reservation to the set of
1854 * pending reservations
1856 * @inode - file containing the cluster
1857 * @lblk - logical block in the cluster to be added
1859 * Returns 0 on successful insertion and -ENOMEM on failure. If the
1860 * pending reservation is already in the set, returns successfully.
1862 static int __insert_pending(struct inode *inode, ext4_lblk_t lblk)
1864 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1865 struct ext4_pending_tree *tree = &EXT4_I(inode)->i_pending_tree;
1866 struct rb_node **p = &tree->root.rb_node;
1867 struct rb_node *parent = NULL;
1868 struct pending_reservation *pr;
1872 lclu = EXT4_B2C(sbi, lblk);
1873 /* search to find parent for insertion */
1876 pr = rb_entry(parent, struct pending_reservation, rb_node);
1878 if (lclu < pr->lclu) {
1880 } else if (lclu > pr->lclu) {
1881 p = &(*p)->rb_right;
1883 /* pending reservation already inserted */
1888 pr = kmem_cache_alloc(ext4_pending_cachep, GFP_ATOMIC);
1895 rb_link_node(&pr->rb_node, parent, p);
1896 rb_insert_color(&pr->rb_node, &tree->root);
1903 * __remove_pending - removes a pending cluster reservation from the set
1904 * of pending reservations
1906 * @inode - file containing the cluster
1907 * @lblk - logical block in the pending cluster reservation to be removed
1909 * Succeeds even if the pending reservation is not a member of the set.
1911 static void __remove_pending(struct inode *inode, ext4_lblk_t lblk)
1913 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1914 struct pending_reservation *pr;
1915 struct ext4_pending_tree *tree;
1917 pr = __get_pending(inode, EXT4_B2C(sbi, lblk));
1919 tree = &EXT4_I(inode)->i_pending_tree;
1920 rb_erase(&pr->rb_node, &tree->root);
1921 kmem_cache_free(ext4_pending_cachep, pr);
1926 * ext4_remove_pending - removes a pending cluster reservation from the set
1927 * of pending reservations
1929 * @inode - file containing the cluster
1930 * @lblk - logical block in the pending cluster reservation to be removed
1932 * Locking for external use of __remove_pending.
1934 void ext4_remove_pending(struct inode *inode, ext4_lblk_t lblk)
1936 struct ext4_inode_info *ei = EXT4_I(inode);
1938 write_lock(&ei->i_es_lock);
1939 __remove_pending(inode, lblk);
1940 write_unlock(&ei->i_es_lock);
1944 * ext4_is_pending - determine whether a cluster has a pending reservation
1947 * @inode - file containing the cluster
1948 * @lblk - logical block in the cluster
1950 * Returns true if there's a pending reservation for the cluster in the
1951 * set of pending reservations, and false if not.
1953 bool ext4_is_pending(struct inode *inode, ext4_lblk_t lblk)
1955 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1956 struct ext4_inode_info *ei = EXT4_I(inode);
1959 read_lock(&ei->i_es_lock);
1960 ret = (bool)(__get_pending(inode, EXT4_B2C(sbi, lblk)) != NULL);
1961 read_unlock(&ei->i_es_lock);
1967 * ext4_es_insert_delayed_block - adds a delayed block to the extents status
1968 * tree, adding a pending reservation where
1969 * needed
1971 * @inode - file containing the newly added block
1972 * @lblk - logical block to be added
1973 * @allocated - indicates whether a physical cluster has been allocated for
1974 * the logical cluster that contains the block
1976 * Returns 0 on success, negative error code on failure.
1978 int ext4_es_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk,
1981 struct extent_status newes;
1984 if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
1987 es_debug("add [%u/1) delayed to extent status tree of inode %lu\n",
1988 lblk, inode->i_ino);
1990 newes.es_lblk = lblk;
1992 ext4_es_store_pblock_status(&newes, ~0, EXTENT_STATUS_DELAYED);
1993 trace_ext4_es_insert_delayed_block(inode, &newes, allocated);
1995 ext4_es_insert_extent_check(inode, &newes);
1997 write_lock(&EXT4_I(inode)->i_es_lock);
1999 err = __es_remove_extent(inode, lblk, lblk, NULL);
2003 err = __es_insert_extent(inode, &newes);
2004 if (err == -ENOMEM && __es_shrink(EXT4_SB(inode->i_sb),
2005 128, EXT4_I(inode)))
2011 __insert_pending(inode, lblk);
2014 write_unlock(&EXT4_I(inode)->i_es_lock);
2016 ext4_es_print_tree(inode);
2017 ext4_print_pending_tree(inode);
2023 * __es_delayed_clu - count number of clusters containing blocks that
2024 * are delayed and not unwritten (delonly)
2026 * @inode - file containing block range
2027 * @start - logical block defining start of range
2028 * @end - logical block defining end of range
2030 * Returns the number of clusters containing only delayed (not delayed
2031 * and unwritten) blocks in the range specified by @start and @end. Any
2032 * cluster or part of a cluster within the range and containing a delayed
2033 * and not unwritten block within the range is counted as a whole cluster.
2035 static unsigned int __es_delayed_clu(struct inode *inode, ext4_lblk_t start,
2038 struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
2039 struct extent_status *es;
2040 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2041 struct rb_node *node;
2042 ext4_lblk_t first_lclu, last_lclu;
2043 unsigned long long last_counted_lclu;
2046 /* guaranteed to be unequal to any ext4_lblk_t value */
2047 last_counted_lclu = ~0ULL;
2049 es = __es_tree_search(&tree->root, start);
2051 while (es && (es->es_lblk <= end)) {
2052 if (ext4_es_is_delonly(es)) {
2053 if (es->es_lblk <= start)
2054 first_lclu = EXT4_B2C(sbi, start);
2056 first_lclu = EXT4_B2C(sbi, es->es_lblk);
2058 if (ext4_es_end(es) >= end)
2059 last_lclu = EXT4_B2C(sbi, end);
2061 last_lclu = EXT4_B2C(sbi, ext4_es_end(es));
2063 if (first_lclu == last_counted_lclu)
2064 n += last_lclu - first_lclu;
2066 n += last_lclu - first_lclu + 1;
2067 last_counted_lclu = last_lclu;
2069 node = rb_next(&es->rb_node);
2072 es = rb_entry(node, struct extent_status, rb_node);
2079 * ext4_es_delayed_clu - count number of clusters containing blocks that
2080 * are delayed and not unwritten (delonly)
2082 * @inode - file containing block range
2083 * @lblk - logical block defining start of range
2084 * @len - number of blocks in range
2086 * Locking for external use of __es_delayed_clu().
2088 unsigned int ext4_es_delayed_clu(struct inode *inode, ext4_lblk_t lblk,
2091 struct ext4_inode_info *ei = EXT4_I(inode);
2098 end = lblk + len - 1;
2099 WARN_ON(end < lblk);
2101 read_lock(&ei->i_es_lock);
2103 n = __es_delayed_clu(inode, lblk, end);
2105 read_unlock(&ei->i_es_lock);
2111 * __revise_pending - makes, cancels, or leaves unchanged pending cluster
2112 * reservations for a specified block range depending
2113 * upon the presence or absence of delayed blocks
2114 * outside the range within clusters at the ends of the
2117 * @inode - file containing the range
2118 * @lblk - logical block defining the start of range
2119 * @len - length of range in blocks
2121 * Used after a newly allocated extent is added to the extents status tree.
2122 * Requires that the extents in the range have either written or unwritten
2123 * status. Must be called while holding i_es_lock.
2125 static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
2128 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2129 ext4_lblk_t end = lblk + len - 1;
2130 ext4_lblk_t first, last;
2131 bool f_del = false, l_del = false;
2137 * Two cases - block range within single cluster and block range
2138 * spanning two or more clusters. Note that a cluster belonging
2139 * to a range starting and/or ending on a cluster boundary is treated
2140 * as if it does not contain a delayed extent. The new range may
2141 * have allocated space for previously delayed blocks out to the
2142 * cluster boundary, requiring that any pre-existing pending
2143 * reservation be canceled. Because this code only looks at blocks
2144 * outside the range, it should revise pending reservations
2145 * correctly even if the extent represented by the range can't be
2146 * inserted in the extents status tree due to ENOSPC.
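 *
 * Illustrative case (assuming a cluster ratio of 4): allocating blocks
 * 5-6 leaves blocks 4 and 7 of the containing cluster outside the
 * range; if either still holds a delayed-only block, the cluster keeps
 * (or gains) a pending reservation, otherwise any pending reservation
 * for it is removed.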
2149 if (EXT4_B2C(sbi, lblk) == EXT4_B2C(sbi, end)) {
2150 first = EXT4_LBLK_CMASK(sbi, lblk);
2152 f_del = __es_scan_range(inode, &ext4_es_is_delonly,
2155 __insert_pending(inode, first);
2157 last = EXT4_LBLK_CMASK(sbi, end) +
2158 sbi->s_cluster_ratio - 1;
2160 l_del = __es_scan_range(inode,
2161 &ext4_es_is_delonly,
2164 __insert_pending(inode, last);
2166 __remove_pending(inode, last);
2169 first = EXT4_LBLK_CMASK(sbi, lblk);
2171 f_del = __es_scan_range(inode, &ext4_es_is_delonly,
2174 __insert_pending(inode, first);
2176 __remove_pending(inode, first);
2178 last = EXT4_LBLK_CMASK(sbi, end) + sbi->s_cluster_ratio - 1;
2180 l_del = __es_scan_range(inode, &ext4_es_is_delonly,
2183 __insert_pending(inode, last);
2185 __remove_pending(inode, last);