1 // SPDX-License-Identifier: GPL-2.0
4 #include "tree-mod-log.h"
14 struct tree_mod_elem {
18 enum btrfs_mod_log_op op;
21 * This is used for BTRFS_MOD_LOG_KEY_* and BTRFS_MOD_LOG_MOVE_KEYS
26 /* This is used for BTRFS_MOD_LOG_KEY_* and BTRFS_MOD_LOG_ROOT_REPLACE. */
29 /* Those are used for op == BTRFS_MOD_LOG_KEY_{REPLACE,REMOVE}. */
30 struct btrfs_disk_key key;
33 /* This is used for op == BTRFS_MOD_LOG_MOVE_KEYS. */
39 /* This is used for op == BTRFS_MOD_LOG_ROOT_REPLACE. */
40 struct tree_mod_root old_root;
44 * Pull a new tree mod seq number for our operation.
46 static inline u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)
48 return atomic64_inc_return(&fs_info->tree_mod_seq);
52 * This adds a new blocker to the tree mod log's blocker list if the @elem
53 * passed does not already have a sequence number set. So when a caller expects
54 * to record tree modifications, it should make sure to set elem->seq to zero
55 * before calling btrfs_get_tree_mod_seq.
56 * Returns a fresh, unused tree log modification sequence number, even if no new
59 u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
60 struct btrfs_seq_list *elem)
62 write_lock(&fs_info->tree_mod_log_lock);
64 elem->seq = btrfs_inc_tree_mod_seq(fs_info);
65 list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
66 set_bit(BTRFS_FS_TREE_MOD_LOG_USERS, &fs_info->flags);
68 write_unlock(&fs_info->tree_mod_log_lock);
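/*
 * Minimal usage sketch (illustrative, not part of this file): users such as
 * the backref walking code pair this with btrfs_put_tree_mod_seq():
 *
 *        struct btrfs_seq_list elem = BTRFS_SEQ_LIST_INIT(elem);
 *
 *        btrfs_get_tree_mod_seq(fs_info, &elem);
 *        ... resolve backrefs / look up old tree versions using elem.seq ...
 *        btrfs_put_tree_mod_seq(fs_info, &elem);
 *
 * While elem is queued, the log elements needed to rewind back to elem.seq
 * are kept around.
 */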
73 void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
74 struct btrfs_seq_list *elem)
76 struct rb_root *tm_root;
79 struct tree_mod_elem *tm;
80 u64 min_seq = BTRFS_SEQ_LAST;
81 u64 seq_putting = elem->seq;
86 write_lock(&fs_info->tree_mod_log_lock);
87 list_del(&elem->list);
90 if (list_empty(&fs_info->tree_mod_seq_list)) {
91 clear_bit(BTRFS_FS_TREE_MOD_LOG_USERS, &fs_info->flags);
93 struct btrfs_seq_list *first;
95 first = list_first_entry(&fs_info->tree_mod_seq_list,
96 struct btrfs_seq_list, list);
97 if (seq_putting > first->seq) {
99 * A blocker with a lower sequence number exists; we cannot
100 * remove anything from the log.
102 write_unlock(&fs_info->tree_mod_log_lock);
105 min_seq = first->seq;
109 * Anything that's lower than the lowest existing (read: blocked)
110 * sequence number can be removed from the tree.
112 tm_root = &fs_info->tree_mod_log;
113 for (node = rb_first(tm_root); node; node = next) {
114 next = rb_next(node);
115 tm = rb_entry(node, struct tree_mod_elem, node);
116 if (tm->seq >= min_seq)
118 rb_erase(node, tm_root);
121 write_unlock(&fs_info->tree_mod_log_lock);
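/*
 * Worked example (illustrative): with blockers queued at seqs 7 and 12,
 * dropping the seq 12 one takes the early return above (seq 7 is still the
 * lowest, so nothing new can be freed), while dropping the seq 7 one makes
 * 12 the new minimum and frees every logged element with seq < 12.
 * Dropping the last blocker clears BTRFS_FS_TREE_MOD_LOG_USERS and empties
 * the whole log.
 */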
125 * Key order of the log:
126 * node/leaf start address -> sequence
128 * The 'start address' is the logical address of the *new* root node for root
129 * replace operations, or the logical address of the affected block for all
132 static noinline int tree_mod_log_insert(struct btrfs_fs_info *fs_info,
133 struct tree_mod_elem *tm)
135 struct rb_root *tm_root;
136 struct rb_node **new;
137 struct rb_node *parent = NULL;
138 struct tree_mod_elem *cur;
140 lockdep_assert_held_write(&fs_info->tree_mod_log_lock);
142 tm->seq = btrfs_inc_tree_mod_seq(fs_info);
144 tm_root = &fs_info->tree_mod_log;
145 new = &tm_root->rb_node;
147 cur = rb_entry(*new, struct tree_mod_elem, node);
149 if (cur->logical < tm->logical)
150 new = &((*new)->rb_left);
151 else if (cur->logical > tm->logical)
152 new = &((*new)->rb_right);
153 else if (cur->seq < tm->seq)
154 new = &((*new)->rb_left);
155 else if (cur->seq > tm->seq)
156 new = &((*new)->rb_right);
161 rb_link_node(&tm->node, parent, new);
162 rb_insert_color(&tm->node, tm_root);
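/*
 * Note, for illustration: the comparisons above send larger keys to the
 * left child, so the tree is effectively kept in descending (logical, seq)
 * order and rb_next() walks from newer elements towards older ones for the
 * same block. tree_mod_log_rewind() below relies on that to replay
 * operations from the most recent one backwards.
 */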
167 * Determines if logging can be omitted. Returns true if it can. Otherwise, it
168 * returns false with the tree_mod_log_lock acquired. The caller must hold
169 * this until all tree mod log insertions are recorded in the rb tree and then
170 * write unlock fs_info::tree_mod_log_lock.
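/*
 * The expected calling pattern, as used by the insertion helpers below
 * (sketch for illustration only):
 *
 *        if (tree_mod_dont_log(fs_info, eb))
 *                return 0;
 *        ret = tree_mod_log_insert(fs_info, tm);
 *        write_unlock(&fs_info->tree_mod_log_lock);
 */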
172 static inline bool tree_mod_dont_log(struct btrfs_fs_info *fs_info,
173 struct extent_buffer *eb)
175 if (!test_bit(BTRFS_FS_TREE_MOD_LOG_USERS, &fs_info->flags))
177 if (eb && btrfs_header_level(eb) == 0)
180 write_lock(&fs_info->tree_mod_log_lock);
181 if (list_empty(&(fs_info)->tree_mod_seq_list)) {
182 write_unlock(&fs_info->tree_mod_log_lock);
189 /* Similar to tree_mod_dont_log, but doesn't acquire any locks. */
190 static inline bool tree_mod_need_log(const struct btrfs_fs_info *fs_info,
191 struct extent_buffer *eb)
193 if (!test_bit(BTRFS_FS_TREE_MOD_LOG_USERS, &fs_info->flags))
195 if (eb && btrfs_header_level(eb) == 0)
201 static struct tree_mod_elem *alloc_tree_mod_elem(struct extent_buffer *eb,
203 enum btrfs_mod_log_op op)
205 struct tree_mod_elem *tm;
207 tm = kzalloc(sizeof(*tm), GFP_NOFS);
211 tm->logical = eb->start;
212 if (op != BTRFS_MOD_LOG_KEY_ADD) {
213 btrfs_node_key(eb, &tm->key, slot);
214 tm->blockptr = btrfs_node_blockptr(eb, slot);
218 tm->generation = btrfs_node_ptr_generation(eb, slot);
219 RB_CLEAR_NODE(&tm->node);
224 int btrfs_tree_mod_log_insert_key(struct extent_buffer *eb, int slot,
225 enum btrfs_mod_log_op op)
227 struct tree_mod_elem *tm;
230 if (!tree_mod_need_log(eb->fs_info, eb))
233 tm = alloc_tree_mod_elem(eb, slot, op);
237 if (tree_mod_dont_log(eb->fs_info, eb)) {
242 ret = tree_mod_log_insert(eb->fs_info, tm);
243 write_unlock(&eb->fs_info->tree_mod_log_lock);
250 int btrfs_tree_mod_log_insert_move(struct extent_buffer *eb,
251 int dst_slot, int src_slot,
254 struct tree_mod_elem *tm = NULL;
255 struct tree_mod_elem **tm_list = NULL;
260 if (!tree_mod_need_log(eb->fs_info, eb))
263 tm_list = kcalloc(nr_items, sizeof(struct tree_mod_elem *), GFP_NOFS);
267 tm = kzalloc(sizeof(*tm), GFP_NOFS);
273 tm->logical = eb->start;
275 tm->move.dst_slot = dst_slot;
276 tm->move.nr_items = nr_items;
277 tm->op = BTRFS_MOD_LOG_MOVE_KEYS;
279 for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
280 tm_list[i] = alloc_tree_mod_elem(eb, i + dst_slot,
281 BTRFS_MOD_LOG_KEY_REMOVE_WHILE_MOVING);
288 if (tree_mod_dont_log(eb->fs_info, eb))
293 * When we overwrite something during the move, we log these removals.
294 * This can only happen when we move towards the beginning of the
295 * buffer, i.e. dst_slot < src_slot.
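/*
 * Example (illustrative): moving nr_items = 3 from src_slot = 5 to
 * dst_slot = 2 overwrites the keys in slots 2-4, so the loop below logs a
 * BTRFS_MOD_LOG_KEY_REMOVE_WHILE_MOVING element for each of those slots
 * (the condition i + dst_slot < src_slot bounds it to the overwritten
 * range).
 */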
297 for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
298 ret = tree_mod_log_insert(eb->fs_info, tm_list[i]);
303 ret = tree_mod_log_insert(eb->fs_info, tm);
306 write_unlock(&eb->fs_info->tree_mod_log_lock);
312 for (i = 0; i < nr_items; i++) {
313 if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
314 rb_erase(&tm_list[i]->node, &eb->fs_info->tree_mod_log);
318 write_unlock(&eb->fs_info->tree_mod_log_lock);
325 static inline int tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
326 struct tree_mod_elem **tm_list,
332 for (i = nritems - 1; i >= 0; i--) {
333 ret = tree_mod_log_insert(fs_info, tm_list[i]);
335 for (j = nritems - 1; j > i; j--)
336 rb_erase(&tm_list[j]->node,
337 &fs_info->tree_mod_log);
345 int btrfs_tree_mod_log_insert_root(struct extent_buffer *old_root,
346 struct extent_buffer *new_root,
349 struct btrfs_fs_info *fs_info = old_root->fs_info;
350 struct tree_mod_elem *tm = NULL;
351 struct tree_mod_elem **tm_list = NULL;
356 if (!tree_mod_need_log(fs_info, NULL))
359 if (log_removal && btrfs_header_level(old_root) > 0) {
360 nritems = btrfs_header_nritems(old_root);
361 tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *),
367 for (i = 0; i < nritems; i++) {
368 tm_list[i] = alloc_tree_mod_elem(old_root, i,
369 BTRFS_MOD_LOG_KEY_REMOVE_WHILE_FREEING);
377 tm = kzalloc(sizeof(*tm), GFP_NOFS);
383 tm->logical = new_root->start;
384 tm->old_root.logical = old_root->start;
385 tm->old_root.level = btrfs_header_level(old_root);
386 tm->generation = btrfs_header_generation(old_root);
387 tm->op = BTRFS_MOD_LOG_ROOT_REPLACE;
389 if (tree_mod_dont_log(fs_info, NULL))
393 ret = tree_mod_log_free_eb(fs_info, tm_list, nritems);
395 ret = tree_mod_log_insert(fs_info, tm);
397 write_unlock(&fs_info->tree_mod_log_lock);
406 for (i = 0; i < nritems; i++)
415 static struct tree_mod_elem *__tree_mod_log_search(struct btrfs_fs_info *fs_info,
416 u64 start, u64 min_seq,
419 struct rb_root *tm_root;
420 struct rb_node *node;
421 struct tree_mod_elem *cur = NULL;
422 struct tree_mod_elem *found = NULL;
424 read_lock(&fs_info->tree_mod_log_lock);
425 tm_root = &fs_info->tree_mod_log;
426 node = tm_root->rb_node;
428 cur = rb_entry(node, struct tree_mod_elem, node);
429 if (cur->logical < start) {
430 node = node->rb_left;
431 } else if (cur->logical > start) {
432 node = node->rb_right;
433 } else if (cur->seq < min_seq) {
434 node = node->rb_left;
435 } else if (!smallest) {
436 /* We want the node with the highest seq */
438 BUG_ON(found->seq > cur->seq);
440 node = node->rb_left;
441 } else if (cur->seq > min_seq) {
442 /* We want the node with the smallest seq */
444 BUG_ON(found->seq < cur->seq);
446 node = node->rb_right;
452 read_unlock(&fs_info->tree_mod_log_lock);
458 * This returns the element from the log with the smallest time sequence
459 * value (the oldest log item). Any element with a time
460 * sequence lower than min_seq will be ignored.
462 static struct tree_mod_elem *tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info,
463 u64 start, u64 min_seq)
465 return __tree_mod_log_search(fs_info, start, min_seq, true);
469 * This returns the element from the log with the largest time sequence
470 * value (the most recent log item). Any element with
471 * a time sequence lower than min_seq will be ignored.
473 static struct tree_mod_elem *tree_mod_log_search(struct btrfs_fs_info *fs_info,
474 u64 start, u64 min_seq)
476 return __tree_mod_log_search(fs_info, start, min_seq, false);
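/*
 * Example for both helpers (illustrative): if a block has logged elements
 * at seqs 4, 9 and 15 and min_seq is 7, tree_mod_log_search_oldest()
 * returns the seq 9 element, tree_mod_log_search() returns the seq 15
 * element, and the seq 4 element is ignored by both.
 */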
479 int btrfs_tree_mod_log_eb_copy(struct extent_buffer *dst,
480 struct extent_buffer *src,
481 unsigned long dst_offset,
482 unsigned long src_offset,
485 struct btrfs_fs_info *fs_info = dst->fs_info;
487 struct tree_mod_elem **tm_list = NULL;
488 struct tree_mod_elem **tm_list_add, **tm_list_rem;
492 if (!tree_mod_need_log(fs_info, NULL))
495 if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
498 tm_list = kcalloc(nr_items * 2, sizeof(struct tree_mod_elem *),
503 tm_list_add = tm_list;
504 tm_list_rem = tm_list + nr_items;
505 for (i = 0; i < nr_items; i++) {
506 tm_list_rem[i] = alloc_tree_mod_elem(src, i + src_offset,
507 BTRFS_MOD_LOG_KEY_REMOVE);
508 if (!tm_list_rem[i]) {
513 tm_list_add[i] = alloc_tree_mod_elem(dst, i + dst_offset,
514 BTRFS_MOD_LOG_KEY_ADD);
515 if (!tm_list_add[i]) {
521 if (tree_mod_dont_log(fs_info, NULL))
525 for (i = 0; i < nr_items; i++) {
526 ret = tree_mod_log_insert(fs_info, tm_list_rem[i]);
529 ret = tree_mod_log_insert(fs_info, tm_list_add[i]);
534 write_unlock(&fs_info->tree_mod_log_lock);
540 for (i = 0; i < nr_items * 2; i++) {
541 if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
542 rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
546 write_unlock(&fs_info->tree_mod_log_lock);
552 int btrfs_tree_mod_log_free_eb(struct extent_buffer *eb)
554 struct tree_mod_elem **tm_list = NULL;
559 if (!tree_mod_need_log(eb->fs_info, eb))
562 nritems = btrfs_header_nritems(eb);
563 tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *), GFP_NOFS);
567 for (i = 0; i < nritems; i++) {
568 tm_list[i] = alloc_tree_mod_elem(eb, i,
569 BTRFS_MOD_LOG_KEY_REMOVE_WHILE_FREEING);
576 if (tree_mod_dont_log(eb->fs_info, eb))
579 ret = tree_mod_log_free_eb(eb->fs_info, tm_list, nritems);
580 write_unlock(&eb->fs_info->tree_mod_log_lock);
588 for (i = 0; i < nritems; i++)
596 * Returns the logical address of the oldest predecessor of the given root.
597 * Entries older than time_seq are ignored.
599 static struct tree_mod_elem *tree_mod_log_oldest_root(struct extent_buffer *eb_root,
602 struct tree_mod_elem *tm;
603 struct tree_mod_elem *found = NULL;
604 u64 root_logical = eb_root->start;
611 * The very last operation that's logged for a root is the replacement
612 * operation (if it is replaced at all). This has the logical address
613 * of the *new* root, making it the very first operation that's logged
617 tm = tree_mod_log_search_oldest(eb_root->fs_info, root_logical,
622 * If there are no tree operations for the oldest root, we simply
623 * return it. This should only happen if that (old) root is at
630 * If there's an operation that's not a root replacement, we
631 * found the oldest version of our root. Normally, we'll find a
632 * BTRFS_MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
634 if (tm->op != BTRFS_MOD_LOG_ROOT_REPLACE)
638 root_logical = tm->old_root.logical;
642 /* If there's no old root to return, return what we found instead */
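/*
 * To make the chain walk above concrete (illustrative): if root A was
 * replaced by B and later B by C, the ROOT_REPLACE element for the A -> B
 * swap is keyed at B's logical address and the one for B -> C at C's.
 * Starting from C, the loop follows old_root.logical to B and then to A,
 * and stops as soon as it finds either no logged element or one that is
 * not a ROOT_REPLACE.
 */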
651 * tm is a pointer to the first operation to rewind within eb. Then, all
652 * previous operations will be rewound (until we reach something older than
655 static void tree_mod_log_rewind(struct btrfs_fs_info *fs_info,
656 struct extent_buffer *eb,
658 struct tree_mod_elem *first_tm)
661 struct rb_node *next;
662 struct tree_mod_elem *tm = first_tm;
665 unsigned long p_size = sizeof(struct btrfs_key_ptr);
667 n = btrfs_header_nritems(eb);
668 read_lock(&fs_info->tree_mod_log_lock);
669 while (tm && tm->seq >= time_seq) {
671 * All the operations are recorded with the operator used for
672 * the modification. As we're going backwards, we do the
673 * opposite of each operation here.
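/*
 * Concretely (a summary of the cases below, for illustration): the
 * KEY_REMOVE variants are undone by restoring the saved key, blockptr and
 * generation and growing the item count, KEY_REPLACE restores the old key
 * in place, KEY_ADD is undone by shrinking the item count again, MOVE_KEYS
 * is undone by moving the items back from dst_slot, and a ROOT_REPLACE
 * recorded for a non-root block is simply skipped.
 */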
676 case BTRFS_MOD_LOG_KEY_REMOVE_WHILE_FREEING:
677 BUG_ON(tm->slot < n);
679 case BTRFS_MOD_LOG_KEY_REMOVE_WHILE_MOVING:
680 case BTRFS_MOD_LOG_KEY_REMOVE:
681 btrfs_set_node_key(eb, &tm->key, tm->slot);
682 btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
683 btrfs_set_node_ptr_generation(eb, tm->slot,
687 case BTRFS_MOD_LOG_KEY_REPLACE:
688 BUG_ON(tm->slot >= n);
689 btrfs_set_node_key(eb, &tm->key, tm->slot);
690 btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
691 btrfs_set_node_ptr_generation(eb, tm->slot,
694 case BTRFS_MOD_LOG_KEY_ADD:
695 /* If a move operation is needed it's in the log */
698 case BTRFS_MOD_LOG_MOVE_KEYS:
699 o_dst = btrfs_node_key_ptr_offset(tm->slot);
700 o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
701 memmove_extent_buffer(eb, o_dst, o_src,
702 tm->move.nr_items * p_size);
704 case BTRFS_MOD_LOG_ROOT_REPLACE:
706 * This operation is special. For roots, this must be
707 * handled explicitly before rewinding.
708 * For non-roots, this operation may exist if the node
709 * was a root: root A -> child B; then A becomes empty and
710 * B is promoted to the new root. In the mod log, we'll
711 * have a root-replace operation for B, a tree block
712 * that is not a root. We simply ignore that operation.
716 next = rb_next(&tm->node);
719 tm = rb_entry(next, struct tree_mod_elem, node);
720 if (tm->logical != first_tm->logical)
723 read_unlock(&fs_info->tree_mod_log_lock);
724 btrfs_set_header_nritems(eb, n);
728 * Called with eb read locked. If the buffer cannot be rewound, the same buffer
729 * is returned. If rewind operations happen, a fresh buffer is returned. The
730 * returned buffer is always read-locked. If the returned buffer is not the
731 * input buffer, the lock on the input buffer is released and the input buffer
732 * is freed (its refcount is decremented).
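/*
 * Hypothetical caller sketch (illustrative; the time-travel search in
 * ctree.c uses it roughly like this):
 *
 *        eb = btrfs_tree_mod_log_rewind(fs_info, path, eb, time_seq);
 *        if (!eb)
 *                return -ENOMEM;
 *
 * On return eb is read locked; if a rewound copy was handed back, the lock
 * and reference on the original buffer are already gone.
 */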
734 struct extent_buffer *btrfs_tree_mod_log_rewind(struct btrfs_fs_info *fs_info,
735 struct btrfs_path *path,
736 struct extent_buffer *eb,
739 struct extent_buffer *eb_rewin;
740 struct tree_mod_elem *tm;
745 if (btrfs_header_level(eb) == 0)
748 tm = tree_mod_log_search(fs_info, eb->start, time_seq);
752 if (tm->op == BTRFS_MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
753 BUG_ON(tm->slot != 0);
754 eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start);
756 btrfs_tree_read_unlock(eb);
757 free_extent_buffer(eb);
760 btrfs_set_header_bytenr(eb_rewin, eb->start);
761 btrfs_set_header_backref_rev(eb_rewin,
762 btrfs_header_backref_rev(eb));
763 btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
764 btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
766 eb_rewin = btrfs_clone_extent_buffer(eb);
768 btrfs_tree_read_unlock(eb);
769 free_extent_buffer(eb);
774 btrfs_tree_read_unlock(eb);
775 free_extent_buffer(eb);
777 btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb_rewin),
778 eb_rewin, btrfs_header_level(eb_rewin));
779 btrfs_tree_read_lock(eb_rewin);
780 tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
781 WARN_ON(btrfs_header_nritems(eb_rewin) >
782 BTRFS_NODEPTRS_PER_BLOCK(fs_info));
788 * Rewind the state of @root's root node to the given @time_seq value.
789 * If there are no changes, the current root->node is returned. If anything
790 * changed in between, there's a fresh buffer allocated on which the rewind
791 * operations are done. In any case, the returned buffer is read locked.
792 * Returns NULL on error (with no locks held).
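/*
 * Illustrative caller sketch (hypothetical, assuming the usual extent
 * buffer locking conventions): the returned buffer is read locked and
 * holds a reference, so the caller drops both when done:
 *
 *        eb = btrfs_get_old_root(root, time_seq);
 *        if (!eb)
 *                return -EIO;
 *        ... walk the rewound root ...
 *        btrfs_tree_read_unlock(eb);
 *        free_extent_buffer(eb);
 */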
794 struct extent_buffer *btrfs_get_old_root(struct btrfs_root *root, u64 time_seq)
796 struct btrfs_fs_info *fs_info = root->fs_info;
797 struct tree_mod_elem *tm;
798 struct extent_buffer *eb = NULL;
799 struct extent_buffer *eb_root;
800 u64 eb_root_owner = 0;
801 struct extent_buffer *old;
802 struct tree_mod_root *old_root = NULL;
803 u64 old_generation = 0;
807 eb_root = btrfs_read_lock_root_node(root);
808 tm = tree_mod_log_oldest_root(eb_root, time_seq);
812 if (tm->op == BTRFS_MOD_LOG_ROOT_REPLACE) {
813 old_root = &tm->old_root;
814 old_generation = tm->generation;
815 logical = old_root->logical;
816 level = old_root->level;
818 logical = eb_root->start;
819 level = btrfs_header_level(eb_root);
822 tm = tree_mod_log_search(fs_info, logical, time_seq);
823 if (old_root && tm && tm->op != BTRFS_MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
824 btrfs_tree_read_unlock(eb_root);
825 free_extent_buffer(eb_root);
826 old = read_tree_block(fs_info, logical, root->root_key.objectid,
828 if (WARN_ON(IS_ERR(old) || !extent_buffer_uptodate(old))) {
830 free_extent_buffer(old);
832 "failed to read tree block %llu from get_old_root",
835 struct tree_mod_elem *tm2;
837 btrfs_tree_read_lock(old);
838 eb = btrfs_clone_extent_buffer(old);
840 * After the lookup for the most recent tree mod operation
841 * above and before we locked and cloned the extent buffer
842 * 'old', a new tree mod log operation may have been added.
843 * So look up a more recent one to make sure the number
844 * of mod log operations we replay is consistent with the
845 * number of items we have in the cloned extent buffer,
846 * otherwise we can hit a BUG_ON when rewinding the extent
849 tm2 = tree_mod_log_search(fs_info, logical, time_seq);
850 btrfs_tree_read_unlock(old);
851 free_extent_buffer(old);
853 ASSERT(tm2 == tm || tm2->seq > tm->seq);
854 if (!tm2 || tm2->seq < tm->seq) {
855 free_extent_buffer(eb);
860 } else if (old_root) {
861 eb_root_owner = btrfs_header_owner(eb_root);
862 btrfs_tree_read_unlock(eb_root);
863 free_extent_buffer(eb_root);
864 eb = alloc_dummy_extent_buffer(fs_info, logical);
866 eb = btrfs_clone_extent_buffer(eb_root);
867 btrfs_tree_read_unlock(eb_root);
868 free_extent_buffer(eb_root);
874 btrfs_set_header_bytenr(eb, eb->start);
875 btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
876 btrfs_set_header_owner(eb, eb_root_owner);
877 btrfs_set_header_level(eb, old_root->level);
878 btrfs_set_header_generation(eb, old_generation);
880 btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb), eb,
881 btrfs_header_level(eb));
882 btrfs_tree_read_lock(eb);
884 tree_mod_log_rewind(fs_info, eb, time_seq, tm);
886 WARN_ON(btrfs_header_level(eb) != 0);
887 WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(fs_info));
892 int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq)
894 struct tree_mod_elem *tm;
896 struct extent_buffer *eb_root = btrfs_root_node(root);
898 tm = tree_mod_log_oldest_root(eb_root, time_seq);
899 if (tm && tm->op == BTRFS_MOD_LOG_ROOT_REPLACE)
900 level = tm->old_root.level;
902 level = btrfs_header_level(eb_root);
904 free_extent_buffer(eb_root);
910 * Return the lowest sequence number in the tree modification log.
912 * Return the sequence number of the oldest tree modification log user, which
913 * corresponds to the lowest sequence number of all existing users. If there are
914 * no users it returns 0.
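/*
 * Example (illustrative): with users queued at seqs 7 and 12 this returns
 * 7; once the last user is gone it returns 0.
 */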
916 u64 btrfs_tree_mod_log_lowest_seq(struct btrfs_fs_info *fs_info)
920 read_lock(&fs_info->tree_mod_log_lock);
921 if (!list_empty(&fs_info->tree_mod_seq_list)) {
922 struct btrfs_seq_list *elem;
924 elem = list_first_entry(&fs_info->tree_mod_seq_list,
925 struct btrfs_seq_list, list);
928 read_unlock(&fs_info->tree_mod_log_lock);