// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "messages.h"
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"
#include "qgroup.h"
#include "space-info.h"
#include "tree-mod-log.h"

struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;

/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */

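/*
 * A rough sketch of the lifecycle, as a usage example (the real producers
 * and consumers live mostly in extent-tree.c):
 *
 *	// While a transaction modifies trees, queue ref updates instead of
 *	// editing the extent allocation tree inline:
 *	btrfs_add_delayed_tree_ref(trans, &ref, extent_op);
 *	btrfs_add_delayed_data_ref(trans, &ref, reserved);
 *
 *	// At commit time (or when throttling kicks in), heads are picked via
 *	// btrfs_select_ref_head(), their refs merged with
 *	// btrfs_merge_delayed_refs() and then applied to the extent tree.
 */
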
bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	bool ret = false;
	u64 reserved;

	spin_lock(&global_rsv->lock);
	reserved = global_rsv->reserved;
	spin_unlock(&global_rsv->lock);

	/*
	 * Since the global reserve is just kind of magic we don't really want
	 * to rely on it to save our bacon, so if our size is more than the
	 * delayed_refs_rsv and the global rsv then it's time to think about
	 * bailing.
	 */
	spin_lock(&delayed_refs_rsv->lock);
	reserved += delayed_refs_rsv->reserved;
	if (delayed_refs_rsv->size >= reserved)
		ret = true;
	spin_unlock(&delayed_refs_rsv->lock);
	return ret;
}

int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans)
{
	u64 num_entries =
		atomic_read(&trans->transaction->delayed_refs.num_entries);
	u64 avg_runtime;
	u64 val;

	smp_mb();
	avg_runtime = trans->fs_info->avg_delayed_ref_runtime;
	val = num_entries * avg_runtime;
	if (val >= NSEC_PER_SEC)
		return 1;
	if (val >= NSEC_PER_SEC / 2)
		return 2;

	return btrfs_check_space_for_delayed_refs(trans->fs_info);
}

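/*
 * The return value is a rough urgency level: 1 once the estimated time to
 * run the queued refs reaches a second, 2 at half a second, otherwise
 * whatever btrfs_check_space_for_delayed_refs() says.  A sketch of how a
 * caller might react (illustrative only, callers apply their own policy):
 *
 *	if (btrfs_should_throttle_delayed_refs(trans))
 *		btrfs_run_delayed_refs(trans, 0);
 */
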
/*
 * Release a ref head's reservation.
 *
 * @fs_info:  the filesystem
 * @nr:       number of items to drop
 *
 * Drops the delayed ref head's count from the delayed refs rsv and frees any
 * excess reservation we had.
 */
void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
	u64 num_bytes = btrfs_calc_insert_metadata_size(fs_info, nr);
	u64 released = 0;

	/*
	 * We have to check the mount option here because we could be enabling
	 * the free space tree for the first time and don't have the compat_ro
	 * option set yet.
	 *
	 * We need extra reservations if we have the free space tree because
	 * we'll have to modify that tree as well.
	 */
	if (btrfs_test_opt(fs_info, FREE_SPACE_TREE))
		num_bytes *= 2;

	released = btrfs_block_rsv_release(fs_info, block_rsv, num_bytes, NULL);
	if (released)
		trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
					      0, released, 0);
}

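/*
 * Worked example (assuming the usual definition of
 * btrfs_calc_insert_metadata_size(), nodesize * BTRFS_MAX_LEVEL * 2 * nr):
 * with a 16K nodesize, releasing nr=1 computes 16K * 8 * 2 = 256K, doubled
 * to 512K when the free space tree is enabled.
 */
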
/*
 * Adjust the size of the delayed refs rsv.
 *
 * This is to be called anytime we may have adjusted trans->delayed_ref_updates,
 * it'll calculate the additional size and add it to the delayed_refs_rsv.
 */
void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
	u64 num_bytes;

	if (!trans->delayed_ref_updates)
		return;

	num_bytes = btrfs_calc_insert_metadata_size(fs_info,
						    trans->delayed_ref_updates);
	/*
	 * We have to check the mount option here because we could be enabling
	 * the free space tree for the first time and don't have the compat_ro
	 * option set yet.
	 *
	 * We need extra reservations if we have the free space tree because
	 * we'll have to modify that tree as well.
	 */
	if (btrfs_test_opt(fs_info, FREE_SPACE_TREE))
		num_bytes *= 2;

	spin_lock(&delayed_rsv->lock);
	delayed_rsv->size += num_bytes;
	delayed_rsv->full = false;
	spin_unlock(&delayed_rsv->lock);
	trans->delayed_ref_updates = 0;
}

/*
 * Transfer bytes to our delayed refs rsv.
 *
 * @fs_info:   the filesystem
 * @src:       source block rsv to transfer from
 * @num_bytes: number of bytes to transfer
 *
 * This transfers up to the num_bytes amount from the src rsv to the
 * delayed_refs_rsv.  Any extra bytes are returned to the space info.
 */
void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
				       struct btrfs_block_rsv *src,
				       u64 num_bytes)
{
	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
	u64 to_free = 0;

	spin_lock(&src->lock);
	src->reserved -= num_bytes;
	src->size -= num_bytes;
	spin_unlock(&src->lock);

	spin_lock(&delayed_refs_rsv->lock);
	if (delayed_refs_rsv->size > delayed_refs_rsv->reserved) {
		u64 delta = delayed_refs_rsv->size -
			delayed_refs_rsv->reserved;

		if (num_bytes > delta) {
			to_free = num_bytes - delta;
			num_bytes = delta;
		}
	} else {
		to_free = num_bytes;
		num_bytes = 0;
	}

	if (num_bytes)
		delayed_refs_rsv->reserved += num_bytes;
	if (delayed_refs_rsv->reserved >= delayed_refs_rsv->size)
		delayed_refs_rsv->full = true;
	spin_unlock(&delayed_refs_rsv->lock);

	if (num_bytes)
		trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
					      0, num_bytes, 1);
	if (to_free)
		btrfs_space_info_free_bytes_may_use(fs_info,
				delayed_refs_rsv->space_info, to_free);
}

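/*
 * Example of the accounting above (illustrative numbers): if the delayed
 * refs rsv has size 1M and reserved 768K, migrating 512K from @src moves
 * 256K into the rsv (marking it full) and returns the remaining 256K to
 * the space info.
 */
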
/*
 * Refill based on our delayed refs usage.
 *
 * @fs_info: the filesystem
 * @flush:   control how we can flush for this reservation.
 *
 * This will refill the delayed block_rsv up to one item's worth of space and
 * will return -ENOSPC if we can't make the reservation.
 */
int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
				  enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
	u64 limit = btrfs_calc_insert_metadata_size(fs_info, 1);
	u64 num_bytes = 0;
	int ret = -ENOSPC;

	spin_lock(&block_rsv->lock);
	if (block_rsv->reserved < block_rsv->size) {
		num_bytes = block_rsv->size - block_rsv->reserved;
		num_bytes = min(num_bytes, limit);
	}
	spin_unlock(&block_rsv->lock);

	if (!num_bytes)
		return 0;

	ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv, num_bytes, flush);
	if (ret)
		return ret;
	btrfs_block_rsv_add_bytes(block_rsv, num_bytes, 0);
	trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
				      0, num_bytes, 1);
	return 0;
}

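/*
 * Sketch of a call (illustrative; the flush mode is caller policy):
 *
 *	ret = btrfs_delayed_refs_rsv_refill(fs_info, BTRFS_RESERVE_FLUSH_LIMIT);
 *	if (ret == -ENOSPC)
 *		// back off, e.g. force a transaction commit
 */
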
/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref1,
			  struct btrfs_delayed_tree_ref *ref2)
{
	if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref1,
			  struct btrfs_delayed_data_ref *ref2)
{
	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
		if (ref1->objectid < ref2->objectid)
			return -1;
		if (ref1->objectid > ref2->objectid)
			return 1;
		if (ref1->offset < ref2->offset)
			return -1;
		if (ref1->offset > ref2->offset)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

static int comp_refs(struct btrfs_delayed_ref_node *ref1,
		     struct btrfs_delayed_ref_node *ref2,
		     bool check_seq)
{
	int ret = 0;

	if (ref1->type < ref2->type)
		return -1;
	if (ref1->type > ref2->type)
		return 1;
	if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    ref1->type == BTRFS_SHARED_BLOCK_REF_KEY)
		ret = comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref1),
				     btrfs_delayed_node_to_tree_ref(ref2));
	else
		ret = comp_data_refs(btrfs_delayed_node_to_data_ref(ref1),
				     btrfs_delayed_node_to_data_ref(ref2));
	if (ret)
		return ret;
	if (check_seq) {
		if (ref1->seq < ref2->seq)
			return -1;
		if (ref1->seq > ref2->seq)
			return 1;
	}
	return 0;
}

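/*
 * In other words, refs inside a head are keyed by (type, per-type fields,
 * seq).  For example (illustrative values), a BTRFS_TREE_BLOCK_REF_KEY ref
 * for root 5 sorts before one for root 257, and two otherwise equal refs
 * are ordered by their tree mod log sequence number when check_seq is true.
 */
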
/* insert a new ref to head ref rbtree */
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root_cached *root,
						   struct rb_node *node)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_head *entry;
	struct btrfs_delayed_ref_head *ins;
	u64 bytenr;
	bool leftmost = true;

	ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
	bytenr = ins->bytenr;
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
				 href_node);

		if (bytenr < entry->bytenr) {
			p = &(*p)->rb_left;
		} else if (bytenr > entry->bytenr) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			return entry;
		}
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color_cached(node, root, leftmost);
	return NULL;
}

static struct btrfs_delayed_ref_node* tree_insert(struct rb_root_cached *root,
		struct btrfs_delayed_ref_node *ins)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *node = &ins->ref_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_node *entry;
	bool leftmost = true;

	while (*p) {
		int comp;

		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
				 ref_node);
		comp = comp_refs(ins, entry, true);
		if (comp < 0) {
			p = &(*p)->rb_left;
		} else if (comp > 0) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			return entry;
		}
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color_cached(node, root, leftmost);
	return NULL;
}

static struct btrfs_delayed_ref_head *find_first_ref_head(
		struct btrfs_delayed_ref_root *dr)
{
	struct rb_node *n;
	struct btrfs_delayed_ref_head *entry;

	n = rb_first_cached(&dr->href_root);
	if (!n)
		return NULL;

	entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

	return entry;
}

/*
 * Find a head entry based on bytenr. This returns the delayed ref head if it
 * was able to find one, or NULL if nothing was in that spot.  If return_bigger
 * is given, the next bigger entry is returned if no exact match is found.
 */
static struct btrfs_delayed_ref_head *find_ref_head(
		struct btrfs_delayed_ref_root *dr, u64 bytenr,
		bool return_bigger)
{
	struct rb_root *root = &dr->href_root.rb_root;
	struct rb_node *n;
	struct btrfs_delayed_ref_head *entry;

	n = root->rb_node;
	entry = NULL;
	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

		if (bytenr < entry->bytenr)
			n = n->rb_left;
		else if (bytenr > entry->bytenr)
			n = n->rb_right;
		else
			return entry;
	}
	if (entry && return_bigger) {
		if (bytenr > entry->bytenr) {
			n = rb_next(&entry->href_node);
			if (!n)
				return NULL;
			entry = rb_entry(n, struct btrfs_delayed_ref_head,
					 href_node);
		}
		return entry;
	}
	return NULL;
}

int btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
			   struct btrfs_delayed_ref_head *head)
{
	lockdep_assert_held(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;

	refcount_inc(&head->refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (RB_EMPTY_NODE(&head->href_node)) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref_head(head);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref_head(head);
	return 0;
}

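/*
 * Callers are expected to retry on -EAGAIN, since the head may have been
 * run and freed while we slept on the mutex.  Sketch of the usual pattern
 * (see btrfs_obtain_ref_head() in extent-tree.c):
 *
 *	spin_lock(&delayed_refs->lock);
 *	ret = btrfs_delayed_ref_lock(delayed_refs, head);
 *	if (ret == -EAGAIN)
 *		// head went away, look it up again
 */
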
static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
				    struct btrfs_delayed_ref_root *delayed_refs,
				    struct btrfs_delayed_ref_head *head,
				    struct btrfs_delayed_ref_node *ref)
{
	lockdep_assert_held(&head->lock);
	rb_erase_cached(&ref->ref_node, &head->ref_tree);
	RB_CLEAR_NODE(&ref->ref_node);
	if (!list_empty(&ref->add_list))
		list_del(&ref->add_list);
	ref->in_tree = 0;
	btrfs_put_delayed_ref(ref);
	atomic_dec(&delayed_refs->num_entries);
}

static bool merge_ref(struct btrfs_trans_handle *trans,
		      struct btrfs_delayed_ref_root *delayed_refs,
		      struct btrfs_delayed_ref_head *head,
		      struct btrfs_delayed_ref_node *ref,
		      u64 seq)
{
	struct btrfs_delayed_ref_node *next;
	struct rb_node *node = rb_next(&ref->ref_node);
	bool done = false;

	while (!done && node) {
		int mod;

		next = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
		node = rb_next(node);
		if (seq && next->seq >= seq)
			break;
		if (comp_refs(ref, next, false))
			break;

		if (ref->action == next->action) {
			mod = next->ref_mod;
		} else {
			/* Keep the action whose ref_mod is larger. */
			if (ref->ref_mod < next->ref_mod) {
				swap(ref, next);
				done = true;
			}
			mod = -next->ref_mod;
		}

		drop_delayed_ref(trans, delayed_refs, head, next);
		ref->ref_mod += mod;
		if (ref->ref_mod == 0) {
			drop_delayed_ref(trans, delayed_refs, head, ref);
			done = true;
		} else {
			/*
			 * Can't have multiples of the same ref on a tree block.
			 */
			WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
				ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
		}
	}

	return done;
}

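/*
 * Worked example (illustrative): an ADD ref with ref_mod 2 followed by an
 * otherwise identical DROP ref with ref_mod 1 merges to a single ADD ref
 * with ref_mod 1; had the mods cancelled to zero, both nodes would have
 * been dropped from the head entirely.
 */
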
void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_ref_node *ref;
	struct rb_node *node;
	u64 seq = 0;

	lockdep_assert_held(&head->lock);

	if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
		return;

	/* We don't have too many refs to merge for data. */
	if (head->is_data)
		return;

	seq = btrfs_tree_mod_log_lowest_seq(fs_info);
again:
	for (node = rb_first_cached(&head->ref_tree); node;
	     node = rb_next(node)) {
		ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
		if (seq && ref->seq >= seq)
			continue;
		if (merge_ref(trans, delayed_refs, head, ref, seq))
			goto again;
	}
}

int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq)
{
	int ret = 0;
	u64 min_seq = btrfs_tree_mod_log_lowest_seq(fs_info);

	if (min_seq != 0 && seq >= min_seq) {
		btrfs_debug(fs_info,
			    "holding back delayed_ref %llu, lowest is %llu",
			    seq, min_seq);
		ret = 1;
	}

	return ret;
}

struct btrfs_delayed_ref_head *btrfs_select_ref_head(
		struct btrfs_delayed_ref_root *delayed_refs)
{
	struct btrfs_delayed_ref_head *head;

again:
	head = find_ref_head(delayed_refs, delayed_refs->run_delayed_start,
			     true);
	if (!head && delayed_refs->run_delayed_start != 0) {
		delayed_refs->run_delayed_start = 0;
		head = find_first_ref_head(delayed_refs);
	}
	if (!head)
		return NULL;

	while (head->processing) {
		struct rb_node *node;

		node = rb_next(&head->href_node);
		if (!node) {
			if (delayed_refs->run_delayed_start == 0)
				return NULL;
			delayed_refs->run_delayed_start = 0;
			goto again;
		}
		head = rb_entry(node, struct btrfs_delayed_ref_head,
				href_node);
	}

	head->processing = 1;
	WARN_ON(delayed_refs->num_heads_ready == 0);
	delayed_refs->num_heads_ready--;
	delayed_refs->run_delayed_start = head->bytenr +
		head->num_bytes;
	return head;
}

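/*
 * Sketch of the consumer loop built on top of this (see
 * __btrfs_run_delayed_refs() in extent-tree.c for the real thing):
 *
 *	head = btrfs_select_ref_head(delayed_refs);
 *	if (!head)
 *		break;
 *	ret = btrfs_delayed_ref_lock(delayed_refs, head);
 *	// ... run and unlock the head, retry on -EAGAIN
 */
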
void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
			   struct btrfs_delayed_ref_head *head)
{
	lockdep_assert_held(&delayed_refs->lock);
	lockdep_assert_held(&head->lock);

	rb_erase_cached(&head->href_node, &delayed_refs->href_root);
	RB_CLEAR_NODE(&head->href_node);
	atomic_dec(&delayed_refs->num_entries);
	delayed_refs->num_heads--;
	if (head->processing == 0)
		delayed_refs->num_heads_ready--;
}

/*
 * Helper to insert the ref_node into the rbtree of its head ref, or merge it
 * with an existing node for the same ref.
 *
 * Return 0 for insert.
 * Return >0 for merge.
 */
static int insert_delayed_ref(struct btrfs_trans_handle *trans,
			      struct btrfs_delayed_ref_root *root,
			      struct btrfs_delayed_ref_head *href,
			      struct btrfs_delayed_ref_node *ref)
{
	struct btrfs_delayed_ref_node *exist;
	int mod;
	int ret = 0;

	spin_lock(&href->lock);
	exist = tree_insert(&href->ref_tree, ref);
	if (!exist)
		goto inserted;

	/* Now we are sure we can merge */
	ret = 1;
	if (exist->action == ref->action) {
		mod = ref->ref_mod;
	} else {
		/* Need to change action */
		if (exist->ref_mod < ref->ref_mod) {
			exist->action = ref->action;
			mod = -exist->ref_mod;
			exist->ref_mod = ref->ref_mod;
			if (ref->action == BTRFS_ADD_DELAYED_REF)
				list_add_tail(&exist->add_list,
					      &href->ref_add_list);
			else if (ref->action == BTRFS_DROP_DELAYED_REF) {
				ASSERT(!list_empty(&exist->add_list));
				list_del(&exist->add_list);
			} else
				ASSERT(0);
		} else
			mod = -ref->ref_mod;
	}
	exist->ref_mod += mod;

	/* remove existing tail if its ref_mod is zero */
	if (exist->ref_mod == 0)
		drop_delayed_ref(trans, root, href, exist);
	spin_unlock(&href->lock);
	return ret;
inserted:
	if (ref->action == BTRFS_ADD_DELAYED_REF)
		list_add_tail(&ref->add_list, &href->ref_add_list);
	atomic_inc(&root->num_entries);
	spin_unlock(&href->lock);
	return ret;
}

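/*
 * Worked example (illustrative): queueing a BTRFS_ADD_DELAYED_REF and then
 * a BTRFS_DROP_DELAYED_REF for the same extent, root and seq lands here
 * with a matching node already in the tree; the second call returns 1 and
 * the mods cancel, so the node is dropped from the head's tree.
 */
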
/*
 * helper function to update the accounting in the head ref
 * existing and update must have the same bytenr
 */
static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_delayed_ref_head *existing,
			 struct btrfs_delayed_ref_head *update)
{
	struct btrfs_delayed_ref_root *delayed_refs =
		&trans->transaction->delayed_refs;
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int old_ref_mod;

	BUG_ON(existing->is_data != update->is_data);

	spin_lock(&existing->lock);
	if (update->must_insert_reserved) {
		/* if the extent was freed and then
		 * reallocated before the delayed ref
		 * entries were processed, we can end up
		 * with an existing head ref without
		 * the must_insert_reserved flag set.
		 * Set it again here
		 */
		existing->must_insert_reserved = update->must_insert_reserved;

		/*
		 * update the num_bytes so we make sure the accounting
		 * is done correctly
		 */
		existing->num_bytes = update->num_bytes;
	}

	if (update->extent_op) {
		if (!existing->extent_op) {
			existing->extent_op = update->extent_op;
		} else {
			if (update->extent_op->update_key) {
				memcpy(&existing->extent_op->key,
				       &update->extent_op->key,
				       sizeof(update->extent_op->key));
				existing->extent_op->update_key = true;
			}
			if (update->extent_op->update_flags) {
				existing->extent_op->flags_to_set |=
					update->extent_op->flags_to_set;
				existing->extent_op->update_flags = true;
			}
			btrfs_free_delayed_extent_op(update->extent_op);
		}
	}
	/*
	 * update the reference mod on the head to reflect this new operation,
	 * only need the lock for this case because we could be processing it
	 * currently, for refs we just added we know we're a-ok.
	 */
	old_ref_mod = existing->total_ref_mod;
	existing->ref_mod += update->ref_mod;
	existing->total_ref_mod += update->ref_mod;

	/*
	 * If we are going from a positive ref mod to a negative or vice
	 * versa we need to make sure to adjust pending_csums accordingly.
	 */
	if (existing->is_data) {
		u64 csum_leaves =
			btrfs_csum_bytes_to_leaves(fs_info,
						   existing->num_bytes);

		if (existing->total_ref_mod >= 0 && old_ref_mod < 0) {
			delayed_refs->pending_csums -= existing->num_bytes;
			btrfs_delayed_refs_rsv_release(fs_info, csum_leaves);
		}
		if (existing->total_ref_mod < 0 && old_ref_mod >= 0) {
			delayed_refs->pending_csums += existing->num_bytes;
			trans->delayed_ref_updates += csum_leaves;
		}
	}

	spin_unlock(&existing->lock);
}

static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
				  struct btrfs_qgroup_extent_record *qrecord,
				  u64 bytenr, u64 num_bytes, u64 ref_root,
				  u64 reserved, int action, bool is_data,
				  bool is_system)
{
	int count_mod = 1;
	int must_insert_reserved = 0;

	/* If reserved is provided, it must be a data extent. */
	BUG_ON(!is_data && reserved);

	/*
	 * The head node stores the sum of all the mods, so dropping a ref
	 * should drop the sum in the head node by one.
	 */
	if (action == BTRFS_UPDATE_DELAYED_HEAD)
		count_mod = 0;
	else if (action == BTRFS_DROP_DELAYED_REF)
		count_mod = -1;

	/*
	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update the reserved
	 * accounting when the extent is finally added, or if a later
	 * modification deletes the delayed ref without ever inserting the
	 * extent into the extent allocation tree.  ref->must_insert_reserved
	 * is the flag used to record that accounting mods are required.
	 *
	 * Once we record must_insert_reserved, switch the action to
	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT)
		must_insert_reserved = 1;
	else
		must_insert_reserved = 0;

	refcount_set(&head_ref->refs, 1);
	head_ref->bytenr = bytenr;
	head_ref->num_bytes = num_bytes;
	head_ref->ref_mod = count_mod;
	head_ref->must_insert_reserved = must_insert_reserved;
	head_ref->is_data = is_data;
	head_ref->is_system = is_system;
	head_ref->ref_tree = RB_ROOT_CACHED;
	INIT_LIST_HEAD(&head_ref->ref_add_list);
	RB_CLEAR_NODE(&head_ref->href_node);
	head_ref->processing = 0;
	head_ref->total_ref_mod = count_mod;
	spin_lock_init(&head_ref->lock);
	mutex_init(&head_ref->mutex);

	if (qrecord) {
		if (ref_root && reserved) {
			qrecord->data_rsv = reserved;
			qrecord->data_rsv_refroot = ref_root;
		}
		qrecord->bytenr = bytenr;
		qrecord->num_bytes = num_bytes;
		qrecord->old_roots = NULL;
	}
}

/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_qgroup_extent_record *qrecord,
		     int action, int *qrecord_inserted_ret)
{
	struct btrfs_delayed_ref_head *existing;
	struct btrfs_delayed_ref_root *delayed_refs;
	int qrecord_inserted = 0;

	delayed_refs = &trans->transaction->delayed_refs;

	/* Record qgroup extent info if provided */
	if (qrecord) {
		if (btrfs_qgroup_trace_extent_nolock(trans->fs_info,
					delayed_refs, qrecord))
			kfree(qrecord);
		else
			qrecord_inserted = 1;
	}

	trace_add_delayed_ref_head(trans->fs_info, head_ref, action);

	existing = htree_insert(&delayed_refs->href_root,
				&head_ref->href_node);
	if (existing) {
		update_existing_head_ref(trans, existing, head_ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
		head_ref = existing;
	} else {
		if (head_ref->is_data && head_ref->ref_mod < 0) {
			delayed_refs->pending_csums += head_ref->num_bytes;
			trans->delayed_ref_updates +=
				btrfs_csum_bytes_to_leaves(trans->fs_info,
							   head_ref->num_bytes);
		}
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
		atomic_inc(&delayed_refs->num_entries);
		trans->delayed_ref_updates++;
	}
	if (qrecord_inserted_ret)
		*qrecord_inserted_ret = qrecord_inserted;

	return head_ref;
}

/*
 * init_delayed_ref_common - Initialize the structure which represents a
 *			     modification to an extent.
 *
 * @fs_info:    Internal to the mounted filesystem mount structure.
 *
 * @ref:	The structure which is going to be initialized.
 *
 * @bytenr:	The logical address of the extent for which a modification is
 *		going to be recorded.
 *
 * @num_bytes:  Size of the extent whose modification is being recorded.
 *
 * @ref_root:	The id of the root where this modification has originated, this
 *		can be either one of the well-known metadata trees or the
 *		subvolume id which references this extent.
 *
 * @action:	Can be one of BTRFS_ADD_DELAYED_REF/BTRFS_DROP_DELAYED_REF or
 *		BTRFS_ADD_DELAYED_EXTENT
 *
 * @ref_type:	Holds the type of the extent which is being recorded, can be
 *		one of BTRFS_SHARED_BLOCK_REF_KEY/BTRFS_TREE_BLOCK_REF_KEY
 *		when recording a metadata extent or BTRFS_SHARED_DATA_REF_KEY/
 *		BTRFS_EXTENT_DATA_REF_KEY when recording a data extent
 */
static void init_delayed_ref_common(struct btrfs_fs_info *fs_info,
				    struct btrfs_delayed_ref_node *ref,
				    u64 bytenr, u64 num_bytes, u64 ref_root,
				    int action, u8 ref_type)
{
	u64 seq = 0;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	if (is_fstree(ref_root))
		seq = atomic64_read(&fs_info->tree_mod_seq);

	refcount_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;
	ref->seq = seq;
	ref->type = ref_type;
	RB_CLEAR_NODE(&ref->ref_node);
	INIT_LIST_HEAD(&ref->add_list);
}

/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_ref *generic_ref,
			       struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;
	int qrecord_inserted;
	bool is_system;
	int action = generic_ref->action;
	int level = generic_ref->tree_ref.level;
	int ret;
	u64 bytenr = generic_ref->bytenr;
	u64 num_bytes = generic_ref->len;
	u64 parent = generic_ref->parent;
	u8 ref_type;

	is_system = (generic_ref->tree_ref.owning_root == BTRFS_CHUNK_TREE_OBJECTID);

	ASSERT(generic_ref->type == BTRFS_REF_METADATA && generic_ref->action);
	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
		return -ENOMEM;
	}

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
	    !generic_ref->skip_qgroup) {
		record = kzalloc(sizeof(*record), GFP_NOFS);
		if (!record) {
			kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
			kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
			return -ENOMEM;
		}
	}

	if (parent)
		ref_type = BTRFS_SHARED_BLOCK_REF_KEY;
	else
		ref_type = BTRFS_TREE_BLOCK_REF_KEY;

	init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
				generic_ref->tree_ref.owning_root, action,
				ref_type);
	ref->root = generic_ref->tree_ref.owning_root;
	ref->parent = parent;
	ref->level = level;

	init_delayed_ref_head(head_ref, record, bytenr, num_bytes,
			      generic_ref->tree_ref.owning_root, 0, action,
			      false, is_system);
	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(trans, head_ref, record,
					action, &qrecord_inserted);

	ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
	spin_unlock(&delayed_refs->lock);

	/*
	 * Need to update the delayed_refs_rsv with any changes we may have
	 * made.
	 */
	btrfs_update_delayed_refs_rsv(trans);

	trace_add_delayed_tree_ref(fs_info, &ref->node, ref,
				   action == BTRFS_ADD_DELAYED_EXTENT ?
				   BTRFS_ADD_DELAYED_REF : action);
	if (ret > 0)
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);

	if (qrecord_inserted)
		btrfs_qgroup_trace_extent_post(trans, record);

	return 0;
}

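/*
 * Typical call pattern, sketched (the init helpers live in delayed-ref.h
 * and their signatures vary between kernel versions):
 *
 *	struct btrfs_ref generic_ref = { 0 };
 *
 *	btrfs_init_generic_ref(&generic_ref, BTRFS_ADD_DELAYED_EXTENT,
 *			       ins.objectid, ins.offset, parent);
 *	btrfs_init_tree_ref(&generic_ref, level, root_objectid, ...);
 *	ret = btrfs_add_delayed_tree_ref(trans, &generic_ref, extent_op);
 */
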
/*
 * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_ref *generic_ref,
			       u64 reserved)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;
	int qrecord_inserted;
	int action = generic_ref->action;
	int ret;
	u64 bytenr = generic_ref->bytenr;
	u64 num_bytes = generic_ref->len;
	u64 parent = generic_ref->parent;
	u64 ref_root = generic_ref->data_ref.owning_root;
	u64 owner = generic_ref->data_ref.ino;
	u64 offset = generic_ref->data_ref.offset;
	u8 ref_type;

	ASSERT(generic_ref->type == BTRFS_REF_DATA && action);
	ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	if (parent)
		ref_type = BTRFS_SHARED_DATA_REF_KEY;
	else
		ref_type = BTRFS_EXTENT_DATA_REF_KEY;
	init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
				ref_root, action, ref_type);
	ref->root = ref_root;
	ref->parent = parent;
	ref->objectid = owner;
	ref->offset = offset;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
		return -ENOMEM;
	}

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
	    !generic_ref->skip_qgroup) {
		record = kzalloc(sizeof(*record), GFP_NOFS);
		if (!record) {
			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
			kmem_cache_free(btrfs_delayed_ref_head_cachep,
					head_ref);
			return -ENOMEM;
		}
	}

	init_delayed_ref_head(head_ref, record, bytenr, num_bytes, ref_root,
			      reserved, action, true, false);
	head_ref->extent_op = NULL;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(trans, head_ref, record,
					action, &qrecord_inserted);

	ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
	spin_unlock(&delayed_refs->lock);

	/*
	 * Need to update the delayed_refs_rsv with any changes we may have
	 * made.
	 */
	btrfs_update_delayed_refs_rsv(trans);

	trace_add_delayed_data_ref(trans->fs_info, &ref->node, ref,
				   action == BTRFS_ADD_DELAYED_EXTENT ?
				   BTRFS_ADD_DELAYED_REF : action);
	if (ret > 0)
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);

	if (qrecord_inserted)
		return btrfs_qgroup_trace_extent_post(trans, record);
	return 0;
}

int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		return -ENOMEM;

	init_delayed_ref_head(head_ref, NULL, bytenr, num_bytes, 0, 0,
			      BTRFS_UPDATE_DELAYED_HEAD, false, false);
	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	add_delayed_ref_head(trans, head_ref, NULL, BTRFS_UPDATE_DELAYED_HEAD,
			     NULL);

	spin_unlock(&delayed_refs->lock);

	/*
	 * Need to update the delayed_refs_rsv with any changes we may have
	 * made.
	 */
	btrfs_update_delayed_refs_rsv(trans);
	return 0;
}

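/*
 * Sketch of a caller (btrfs_set_disk_extent_flags() in extent-tree.c does
 * roughly this):
 *
 *	extent_op = btrfs_alloc_delayed_extent_op();
 *	if (!extent_op)
 *		return -ENOMEM;
 *	extent_op->flags_to_set = flags;
 *	extent_op->update_flags = true;
 *	ret = btrfs_add_delayed_extent_op(trans, eb->start, eb->len, extent_op);
 */
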
/*
 * This does a simple search for the head node for a given extent.  Returns the
 * head node if found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, u64 bytenr)
{
	lockdep_assert_held(&delayed_refs->lock);

	return find_ref_head(delayed_refs, bytenr, false);
}

void __cold btrfs_delayed_ref_exit(void)
{
	kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
	kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
	kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
	kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}

int __init btrfs_delayed_ref_init(void)
{
	btrfs_delayed_ref_head_cachep = kmem_cache_create(
				"btrfs_delayed_ref_head",
				sizeof(struct btrfs_delayed_ref_head), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_ref_head_cachep)
		goto fail;

	btrfs_delayed_tree_ref_cachep = kmem_cache_create(
				"btrfs_delayed_tree_ref",
				sizeof(struct btrfs_delayed_tree_ref), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_tree_ref_cachep)
		goto fail;

	btrfs_delayed_data_ref_cachep = kmem_cache_create(
				"btrfs_delayed_data_ref",
				sizeof(struct btrfs_delayed_data_ref), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_data_ref_cachep)
		goto fail;

	btrfs_delayed_extent_op_cachep = kmem_cache_create(
				"btrfs_delayed_extent_op",
				sizeof(struct btrfs_delayed_extent_op), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_extent_op_cachep)
		goto fail;

	return 0;
fail:
	btrfs_delayed_ref_exit();
	return -ENOMEM;
}