// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "messages.h"
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"
#include "qgroup.h"
#include "space-info.h"
#include "tree-mod-log.h"
#include "fs.h"

struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;
/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */

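/*
 * Rough lifecycle sketch, as implemented in this file (the consumer side,
 * btrfs_run_delayed_refs(), lives in extent-tree.c):
 *
 *   btrfs_add_delayed_tree_ref() / btrfs_add_delayed_data_ref()
 *     -> add_delayed_ref_head()  find or create the per-bytenr head
 *     -> insert_delayed_ref()    insert into the head's rbtree, or merge
 *                                with an existing node
 *   ...
 *   btrfs_select_ref_head()      pick the next head to process
 *   btrfs_delete_ref_head()      drop the head once its refs have been run
 */
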
bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info)
{
        struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
        struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
        bool ret = false;
        u64 reserved;

        spin_lock(&global_rsv->lock);
        reserved = global_rsv->reserved;
        spin_unlock(&global_rsv->lock);

        /*
         * Since the global reserve is just kind of magic we don't really want
         * to rely on it to save our bacon, so if our size is more than the
         * delayed_refs_rsv and the global rsv then it's time to think about
         * bailing.
         */
        spin_lock(&delayed_refs_rsv->lock);
        reserved += delayed_refs_rsv->reserved;
        if (delayed_refs_rsv->size >= reserved)
                ret = true;
        spin_unlock(&delayed_refs_rsv->lock);
        return ret;
}

int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans)
{
        u64 num_entries =
                atomic_read(&trans->transaction->delayed_refs.num_entries);
        u64 avg_runtime;
        u64 val;

        smp_mb();
        avg_runtime = trans->fs_info->avg_delayed_ref_runtime;
        val = num_entries * avg_runtime;
        if (val >= NSEC_PER_SEC)
                return 1;
        if (val >= NSEC_PER_SEC / 2)
                return 2;

        return btrfs_check_space_for_delayed_refs(trans->fs_info);
}
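
/*
 * Worked example for the thresholds above (illustrative numbers only): with
 * avg_delayed_ref_runtime at 500,000ns, 2,500 queued entries give val =
 * 1,250,000,000 >= NSEC_PER_SEC, so we return 1; 1,200 entries give val =
 * 600,000,000, which only clears NSEC_PER_SEC / 2, so we return 2.
 */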

/*
 * Release a ref head's reservation.
 *
 * @fs_info:  the filesystem
 * @nr:       number of items to drop
 *
 * Drops the delayed ref head's count from the delayed refs rsv and frees any
 * excess reservation we had.
 */
void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr)
{
        struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
        u64 num_bytes = btrfs_calc_insert_metadata_size(fs_info, nr);
        u64 released = 0;

        /*
         * We have to check the mount option here because we could be enabling
         * the free space tree for the first time and don't have the compat_ro
         * option set yet.
         *
         * We need extra reservations if we have the free space tree because
         * we'll have to modify that tree as well.
         */
        if (btrfs_test_opt(fs_info, FREE_SPACE_TREE))
                num_bytes *= 2;

        released = btrfs_block_rsv_release(fs_info, block_rsv, num_bytes, NULL);
        if (released)
                trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
                                              0, released, 0);
}

/*
 * Adjust the size of the delayed refs rsv.
 *
 * This is to be called anytime we may have adjusted trans->delayed_ref_updates;
 * it will calculate the additional size and add it to the delayed_refs_rsv.
 */
void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
        u64 num_bytes;

        if (!trans->delayed_ref_updates)
                return;

        num_bytes = btrfs_calc_insert_metadata_size(fs_info,
                                                    trans->delayed_ref_updates);
        /*
         * We have to check the mount option here because we could be enabling
         * the free space tree for the first time and don't have the compat_ro
         * option set yet.
         *
         * We need extra reservations if we have the free space tree because
         * we'll have to modify that tree as well.
         */
        if (btrfs_test_opt(fs_info, FREE_SPACE_TREE))
                num_bytes *= 2;

        spin_lock(&delayed_rsv->lock);
        delayed_rsv->size += num_bytes;
        delayed_rsv->full = false;
        spin_unlock(&delayed_rsv->lock);
        trans->delayed_ref_updates = 0;
}
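
/*
 * Usage note (sketch, based on the callers in this file): every path that
 * bumps trans->delayed_ref_updates, e.g. add_delayed_ref_head() below, is
 * expected to follow up with btrfs_update_delayed_refs_rsv() so the rsv size
 * grows to match, and btrfs_delayed_refs_rsv_release() hands the space back
 * once the corresponding head has been processed.
 */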

/*
 * Transfer bytes to our delayed refs rsv.
 *
 * @fs_info:   the filesystem
 * @src:       source block rsv to transfer from
 * @num_bytes: number of bytes to transfer
 *
 * This transfers up to the num_bytes amount from the src rsv to the
 * delayed_refs_rsv.  Any extra bytes are returned to the space info.
 */
void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
                                       struct btrfs_block_rsv *src,
                                       u64 num_bytes)
{
        struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
        u64 to_free = 0;

        spin_lock(&src->lock);
        src->reserved -= num_bytes;
        src->size -= num_bytes;
        spin_unlock(&src->lock);

        spin_lock(&delayed_refs_rsv->lock);
        if (delayed_refs_rsv->size > delayed_refs_rsv->reserved) {
                u64 delta = delayed_refs_rsv->size -
                        delayed_refs_rsv->reserved;
                if (num_bytes > delta) {
                        to_free = num_bytes - delta;
                        num_bytes = delta;
                }
        } else {
                to_free = num_bytes;
                num_bytes = 0;
        }

        if (num_bytes)
                delayed_refs_rsv->reserved += num_bytes;
        if (delayed_refs_rsv->reserved >= delayed_refs_rsv->size)
                delayed_refs_rsv->full = true;
        spin_unlock(&delayed_refs_rsv->lock);

        if (num_bytes)
                trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
                                              0, num_bytes, 1);
        if (to_free)
                btrfs_space_info_free_bytes_may_use(fs_info,
                                delayed_refs_rsv->space_info, to_free);
}
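
/*
 * Worked example for the transfer above (illustrative numbers only): with
 * delayed_refs_rsv->size == 100 and ->reserved == 80, delta is 20, so
 * migrating num_bytes == 50 moves 20 bytes into the rsv (which then reads
 * as full) and returns the remaining 30 to the space info via to_free.
 */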

/*
 * Refill based on our delayed refs usage.
 *
 * @fs_info: the filesystem
 * @flush:   control how we can flush for this reservation.
 *
 * This will refill the delayed block_rsv with up to one item's worth of space
 * and will return -ENOSPC if we can't make the reservation.
 */
int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
                                  enum btrfs_reserve_flush_enum flush)
{
        struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
        u64 limit = btrfs_calc_insert_metadata_size(fs_info, 1);
        u64 num_bytes = 0;
        int ret = -ENOSPC;

        spin_lock(&block_rsv->lock);
        if (block_rsv->reserved < block_rsv->size) {
                num_bytes = block_rsv->size - block_rsv->reserved;
                num_bytes = min(num_bytes, limit);
        }
        spin_unlock(&block_rsv->lock);

        if (!num_bytes)
                return 0;

        ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv, num_bytes, flush);
        if (ret)
                return ret;
        btrfs_block_rsv_add_bytes(block_rsv, num_bytes, 0);
        trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
                                      0, num_bytes, 1);
        return 0;
}
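
/*
 * Hedged usage sketch: callers pick a flush mode from
 * enum btrfs_reserve_flush_enum depending on how hard they may block;
 * BTRFS_RESERVE_FLUSH_LIMIT below is assumed purely for illustration:
 *
 *	ret = btrfs_delayed_refs_rsv_refill(fs_info, BTRFS_RESERVE_FLUSH_LIMIT);
 *	if (ret)	// -ENOSPC (or another error) if we can't top up
 *		return ret;
 */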

/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref1,
                          struct btrfs_delayed_tree_ref *ref2)
{
        if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) {
                if (ref1->root < ref2->root)
                        return -1;
                if (ref1->root > ref2->root)
                        return 1;
        } else {
                if (ref1->parent < ref2->parent)
                        return -1;
                if (ref1->parent > ref2->parent)
                        return 1;
        }
        return 0;
}

/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref1,
                          struct btrfs_delayed_data_ref *ref2)
{
        if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
                if (ref1->root < ref2->root)
                        return -1;
                if (ref1->root > ref2->root)
                        return 1;
                if (ref1->objectid < ref2->objectid)
                        return -1;
                if (ref1->objectid > ref2->objectid)
                        return 1;
                if (ref1->offset < ref2->offset)
                        return -1;
                if (ref1->offset > ref2->offset)
                        return 1;
        } else {
                if (ref1->parent < ref2->parent)
                        return -1;
                if (ref1->parent > ref2->parent)
                        return 1;
        }
        return 0;
}

static int comp_refs(struct btrfs_delayed_ref_node *ref1,
                     struct btrfs_delayed_ref_node *ref2,
                     bool check_seq)
{
        int ret = 0;

        if (ref1->type < ref2->type)
                return -1;
        if (ref1->type > ref2->type)
                return 1;
        if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
            ref1->type == BTRFS_SHARED_BLOCK_REF_KEY)
                ret = comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref1),
                                     btrfs_delayed_node_to_tree_ref(ref2));
        else
                ret = comp_data_refs(btrfs_delayed_node_to_data_ref(ref1),
                                     btrfs_delayed_node_to_data_ref(ref2));
        if (ret)
                return ret;
        if (check_seq) {
                if (ref1->seq < ref2->seq)
                        return -1;
                if (ref1->seq > ref2->seq)
                        return 1;
        }
        return 0;
}
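
/*
 * Together these comparators define the total order used by the per-head
 * rbtree: sort by type first, then by root (keyed refs) or parent (shared
 * refs), plus (objectid, offset) for keyed data refs, and finally by seq
 * when check_seq is set.  For example, two BTRFS_TREE_BLOCK_REF_KEY refs
 * for roots 5 and 257 compare by root, so the root 5 ref sorts first.
 */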

/* insert a new ref to head ref rbtree */
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root_cached *root,
                                                   struct rb_node *node)
{
        struct rb_node **p = &root->rb_root.rb_node;
        struct rb_node *parent_node = NULL;
        struct btrfs_delayed_ref_head *entry;
        struct btrfs_delayed_ref_head *ins;
        u64 bytenr;
        bool leftmost = true;

        ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
        bytenr = ins->bytenr;
        while (*p) {
                parent_node = *p;
                entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
                                 href_node);

                if (bytenr < entry->bytenr) {
                        p = &(*p)->rb_left;
                } else if (bytenr > entry->bytenr) {
                        p = &(*p)->rb_right;
                        leftmost = false;
                } else {
                        return entry;
                }
        }

        rb_link_node(node, parent_node, p);
        rb_insert_color_cached(node, root, leftmost);
        return NULL;
}

static struct btrfs_delayed_ref_node *tree_insert(struct rb_root_cached *root,
                struct btrfs_delayed_ref_node *ins)
{
        struct rb_node **p = &root->rb_root.rb_node;
        struct rb_node *node = &ins->ref_node;
        struct rb_node *parent_node = NULL;
        struct btrfs_delayed_ref_node *entry;
        bool leftmost = true;

        while (*p) {
                int comp;

                parent_node = *p;
                entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
                                 ref_node);
                comp = comp_refs(ins, entry, true);
                if (comp < 0) {
                        p = &(*p)->rb_left;
                } else if (comp > 0) {
                        p = &(*p)->rb_right;
                        leftmost = false;
                } else {
                        return entry;
                }
        }

        rb_link_node(node, parent_node, p);
        rb_insert_color_cached(node, root, leftmost);
        return NULL;
}

static struct btrfs_delayed_ref_head *find_first_ref_head(
                struct btrfs_delayed_ref_root *dr)
{
        struct rb_node *n;
        struct btrfs_delayed_ref_head *entry;

        n = rb_first_cached(&dr->href_root);
        if (!n)
                return NULL;

        entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

        return entry;
}

/*
 * Find a head entry based on bytenr. This returns the delayed ref head if it
 * was able to find one, or NULL if nothing was in that spot.  If return_bigger
 * is given, the next bigger entry is returned if no exact match is found.
 */
static struct btrfs_delayed_ref_head *find_ref_head(
                struct btrfs_delayed_ref_root *dr, u64 bytenr,
                bool return_bigger)
{
        struct rb_root *root = &dr->href_root.rb_root;
        struct rb_node *n;
        struct btrfs_delayed_ref_head *entry;

        n = root->rb_node;
        entry = NULL;
        while (n) {
                entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

                if (bytenr < entry->bytenr)
                        n = n->rb_left;
                else if (bytenr > entry->bytenr)
                        n = n->rb_right;
                else
                        return entry;
        }
        if (entry && return_bigger) {
                if (bytenr > entry->bytenr) {
                        n = rb_next(&entry->href_node);
                        if (!n)
                                return NULL;
                        entry = rb_entry(n, struct btrfs_delayed_ref_head,
                                         href_node);
                }
                return entry;
        }
        return NULL;
}
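
/*
 * Example of the return_bigger behaviour: with heads at bytenrs {4096,
 * 16384}, find_ref_head(dr, 8192, true) finds no exact match and returns
 * the 16384 head (the next bigger entry), while the same lookup with
 * return_bigger == false returns NULL.
 */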

int btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
                           struct btrfs_delayed_ref_head *head)
{
        lockdep_assert_held(&delayed_refs->lock);
        if (mutex_trylock(&head->mutex))
                return 0;

        refcount_inc(&head->refs);
        spin_unlock(&delayed_refs->lock);

        mutex_lock(&head->mutex);
        spin_lock(&delayed_refs->lock);
        if (RB_EMPTY_NODE(&head->href_node)) {
                mutex_unlock(&head->mutex);
                btrfs_put_delayed_ref_head(head);
                return -EAGAIN;
        }
        btrfs_put_delayed_ref_head(head);
        return 0;
}
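
/*
 * Caller-side sketch: -EAGAIN means the head was removed from the rbtree
 * while we slept on its mutex, so callers are expected to look the head up
 * again and retry, e.g.:
 *
 *	ret = btrfs_delayed_ref_lock(delayed_refs, head);
 *	if (ret == -EAGAIN)
 *		goto again;	// head went away, find it again
 */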

static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
                                    struct btrfs_delayed_ref_root *delayed_refs,
                                    struct btrfs_delayed_ref_head *head,
                                    struct btrfs_delayed_ref_node *ref)
{
        lockdep_assert_held(&head->lock);
        rb_erase_cached(&ref->ref_node, &head->ref_tree);
        RB_CLEAR_NODE(&ref->ref_node);
        if (!list_empty(&ref->add_list))
                list_del(&ref->add_list);
        ref->in_tree = 0;
        btrfs_put_delayed_ref(ref);
        atomic_dec(&delayed_refs->num_entries);
}

static bool merge_ref(struct btrfs_trans_handle *trans,
                      struct btrfs_delayed_ref_root *delayed_refs,
                      struct btrfs_delayed_ref_head *head,
                      struct btrfs_delayed_ref_node *ref,
                      u64 seq)
{
        struct btrfs_delayed_ref_node *next;
        struct rb_node *node = rb_next(&ref->ref_node);
        bool done = false;

        while (!done && node) {
                int mod;

                next = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
                node = rb_next(node);
                if (seq && next->seq >= seq)
                        break;
                if (comp_refs(ref, next, false))
                        break;

                if (ref->action == next->action) {
                        mod = next->ref_mod;
                } else {
                        if (ref->ref_mod < next->ref_mod) {
                                swap(ref, next);
                                done = true;
                        }
                        mod = -next->ref_mod;
                }

                drop_delayed_ref(trans, delayed_refs, head, next);
                ref->ref_mod += mod;
                if (ref->ref_mod == 0) {
                        drop_delayed_ref(trans, delayed_refs, head, ref);
                        done = true;
                } else {
                        /*
                         * Can't have multiples of the same ref on a tree block.
                         */
                        WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
                                ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
                }
        }

        return done;
}
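
/*
 * Worked example of the merge arithmetic above: an ADD ref with ref_mod 2
 * followed by a mergeable DROP ref with ref_mod 2 yields mod = -2; the DROP
 * node is dropped, ref->ref_mod becomes 0, and the ADD node is dropped too,
 * so the pair cancels out entirely.
 */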

void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
                              struct btrfs_delayed_ref_root *delayed_refs,
                              struct btrfs_delayed_ref_head *head)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_delayed_ref_node *ref;
        struct rb_node *node;
        u64 seq = 0;

        lockdep_assert_held(&head->lock);

        if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
                return;

        /* We don't have too many refs to merge for data. */
        if (head->is_data)
                return;

        seq = btrfs_tree_mod_log_lowest_seq(fs_info);
again:
        for (node = rb_first_cached(&head->ref_tree); node;
             node = rb_next(node)) {
                ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
                if (seq && ref->seq >= seq)
                        continue;
                if (merge_ref(trans, delayed_refs, head, ref, seq))
                        goto again;
        }
}

int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq)
{
        int ret = 0;
        u64 min_seq = btrfs_tree_mod_log_lowest_seq(fs_info);

        if (min_seq != 0 && seq >= min_seq) {
                btrfs_debug(fs_info,
                            "holding back delayed_ref %llu, lowest is %llu",
                            seq, min_seq);
                ret = 1;
        }

        return ret;
}

struct btrfs_delayed_ref_head *btrfs_select_ref_head(
                struct btrfs_delayed_ref_root *delayed_refs)
{
        struct btrfs_delayed_ref_head *head;

again:
        head = find_ref_head(delayed_refs, delayed_refs->run_delayed_start,
                             true);
        if (!head && delayed_refs->run_delayed_start != 0) {
                delayed_refs->run_delayed_start = 0;
                head = find_first_ref_head(delayed_refs);
        }
        if (!head)
                return NULL;

        while (head->processing) {
                struct rb_node *node;

                node = rb_next(&head->href_node);
                if (!node) {
                        if (delayed_refs->run_delayed_start == 0)
                                return NULL;
                        delayed_refs->run_delayed_start = 0;
                        goto again;
                }
                head = rb_entry(node, struct btrfs_delayed_ref_head,
                                href_node);
        }

        head->processing = 1;
        WARN_ON(delayed_refs->num_heads_ready == 0);
        delayed_refs->num_heads_ready--;
        delayed_refs->run_delayed_start = head->bytenr +
                head->num_bytes;
        return head;
}
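
/*
 * Selection sketch: run_delayed_start acts as a round-robin cursor over the
 * href rbtree.  E.g. after handing out the head at bytenr 16384 with
 * num_bytes 4096, run_delayed_start becomes 20480; the next call starts the
 * search there and wraps back to the first head once it runs off the end.
 */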

void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
                           struct btrfs_delayed_ref_head *head)
{
        lockdep_assert_held(&delayed_refs->lock);
        lockdep_assert_held(&head->lock);

        rb_erase_cached(&head->href_node, &delayed_refs->href_root);
        RB_CLEAR_NODE(&head->href_node);
        atomic_dec(&delayed_refs->num_entries);
        delayed_refs->num_heads--;
        if (head->processing == 0)
                delayed_refs->num_heads_ready--;
}

/*
 * Helper to insert the ref_node into the head ref's rbtree, or merge it with
 * an existing node already there.
 *
 * Return 0 for insert.
 * Return >0 for merge.
 */
static int insert_delayed_ref(struct btrfs_trans_handle *trans,
                              struct btrfs_delayed_ref_root *root,
                              struct btrfs_delayed_ref_head *href,
                              struct btrfs_delayed_ref_node *ref)
{
        struct btrfs_delayed_ref_node *exist;
        int mod;
        int ret = 0;

        spin_lock(&href->lock);
        exist = tree_insert(&href->ref_tree, ref);
        if (!exist)
                goto inserted;

        /* Now we are sure we can merge */
        ret = 1;
        if (exist->action == ref->action) {
                mod = ref->ref_mod;
        } else {
                /* Need to change action */
                if (exist->ref_mod < ref->ref_mod) {
                        exist->action = ref->action;
                        mod = -exist->ref_mod;
                        exist->ref_mod = ref->ref_mod;
                        if (ref->action == BTRFS_ADD_DELAYED_REF)
                                list_add_tail(&exist->add_list,
                                              &href->ref_add_list);
                        else if (ref->action == BTRFS_DROP_DELAYED_REF) {
                                ASSERT(!list_empty(&exist->add_list));
                                list_del(&exist->add_list);
                        } else {
                                ASSERT(0);
                        }
                } else
                        mod = -ref->ref_mod;
        }
        exist->ref_mod += mod;

        /* remove the existing node if its ref_mod dropped to zero */
        if (exist->ref_mod == 0)
                drop_delayed_ref(trans, root, href, exist);
        spin_unlock(&href->lock);
        return ret;
inserted:
        if (ref->action == BTRFS_ADD_DELAYED_REF)
                list_add_tail(&ref->add_list, &href->ref_add_list);
        atomic_inc(&root->num_entries);
        spin_unlock(&href->lock);
        return ret;
}
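
/*
 * Merge example for the action flip above: if the tree already holds a DROP
 * node with ref_mod 1 and we insert an ADD node with ref_mod 2, the existing
 * node turns into an ADD, lands on ref_add_list, and ends up with ref_mod
 * 2 - 1 = 1; had the net ref_mod come out to zero, the node would have been
 * dropped instead.
 */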

/*
 * helper function to update the accounting in the head ref
 * existing and update must have the same bytenr
 */
static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
                         struct btrfs_delayed_ref_head *existing,
                         struct btrfs_delayed_ref_head *update)
{
        struct btrfs_delayed_ref_root *delayed_refs =
                &trans->transaction->delayed_refs;
        struct btrfs_fs_info *fs_info = trans->fs_info;
        int old_ref_mod;

        BUG_ON(existing->is_data != update->is_data);

        spin_lock(&existing->lock);
        if (update->must_insert_reserved) {
                /*
                 * If the extent was freed and then reallocated before the
                 * delayed ref entries were processed, we can end up with an
                 * existing head ref without the must_insert_reserved flag
                 * set.  Set it again here.
                 */
                existing->must_insert_reserved = update->must_insert_reserved;

                /*
                 * update the num_bytes so we make sure the accounting
                 * is done correctly
                 */
                existing->num_bytes = update->num_bytes;
        }

        if (update->extent_op) {
                if (!existing->extent_op) {
                        existing->extent_op = update->extent_op;
                } else {
                        if (update->extent_op->update_key) {
                                memcpy(&existing->extent_op->key,
                                       &update->extent_op->key,
                                       sizeof(update->extent_op->key));
                                existing->extent_op->update_key = true;
                        }
                        if (update->extent_op->update_flags) {
                                existing->extent_op->flags_to_set |=
                                        update->extent_op->flags_to_set;
                                existing->extent_op->update_flags = true;
                        }
                        btrfs_free_delayed_extent_op(update->extent_op);
                }
        }
        /*
         * update the reference mod on the head to reflect this new operation.
         * We only need the lock for this case because we could be processing
         * it currently; for refs we just added we know we're a-ok.
         */
        old_ref_mod = existing->total_ref_mod;
        existing->ref_mod += update->ref_mod;
        existing->total_ref_mod += update->ref_mod;

        /*
         * If we are going from a positive ref mod to a negative or vice
         * versa we need to make sure to adjust pending_csums accordingly.
         */
        if (existing->is_data) {
                u64 csum_leaves =
                        btrfs_csum_bytes_to_leaves(fs_info,
                                                   existing->num_bytes);

                if (existing->total_ref_mod >= 0 && old_ref_mod < 0) {
                        delayed_refs->pending_csums -= existing->num_bytes;
                        btrfs_delayed_refs_rsv_release(fs_info, csum_leaves);
                }
                if (existing->total_ref_mod < 0 && old_ref_mod >= 0) {
                        delayed_refs->pending_csums += existing->num_bytes;
                        trans->delayed_ref_updates += csum_leaves;
                }
        }

        spin_unlock(&existing->lock);
}

static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
                                  struct btrfs_qgroup_extent_record *qrecord,
                                  u64 bytenr, u64 num_bytes, u64 ref_root,
                                  u64 reserved, int action, bool is_data,
                                  bool is_system)
{
        int count_mod = 1;
        int must_insert_reserved = 0;

        /* If reserved is provided, it must be a data extent. */
        BUG_ON(!is_data && reserved);

        /*
         * The head node stores the sum of all the mods, so dropping a ref
         * should drop the sum in the head node by one.
         */
        if (action == BTRFS_UPDATE_DELAYED_HEAD)
                count_mod = 0;
        else if (action == BTRFS_DROP_DELAYED_REF)
                count_mod = -1;

        /*
         * BTRFS_ADD_DELAYED_EXTENT means that we need to update the reserved
         * accounting when the extent is finally added, or if a later
         * modification deletes the delayed ref without ever inserting the
         * extent into the extent allocation tree.  ref->must_insert_reserved
         * is the flag used to record that accounting mods are required.
         *
         * Once we record must_insert_reserved, switch the action to
         * BTRFS_ADD_DELAYED_REF because other special casing is not required.
         */
        if (action == BTRFS_ADD_DELAYED_EXTENT)
                must_insert_reserved = 1;
        else
                must_insert_reserved = 0;

        refcount_set(&head_ref->refs, 1);
        head_ref->bytenr = bytenr;
        head_ref->num_bytes = num_bytes;
        head_ref->ref_mod = count_mod;
        head_ref->must_insert_reserved = must_insert_reserved;
        head_ref->is_data = is_data;
        head_ref->is_system = is_system;
        head_ref->ref_tree = RB_ROOT_CACHED;
        INIT_LIST_HEAD(&head_ref->ref_add_list);
        RB_CLEAR_NODE(&head_ref->href_node);
        head_ref->processing = 0;
        head_ref->total_ref_mod = count_mod;
        spin_lock_init(&head_ref->lock);
        mutex_init(&head_ref->mutex);

        if (qrecord) {
                if (ref_root && reserved) {
                        qrecord->data_rsv = reserved;
                        qrecord->data_rsv_refroot = ref_root;
                }
                qrecord->bytenr = bytenr;
                qrecord->num_bytes = num_bytes;
                qrecord->old_roots = NULL;
        }
}
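
/*
 * Quick reference for count_mod above: BTRFS_ADD_DELAYED_REF and
 * BTRFS_ADD_DELAYED_EXTENT contribute +1 to the head's ref_mod,
 * BTRFS_DROP_DELAYED_REF contributes -1 and BTRFS_UPDATE_DELAYED_HEAD
 * contributes 0; only BTRFS_ADD_DELAYED_EXTENT sets must_insert_reserved.
 */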

/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_trans_handle *trans,
                     struct btrfs_delayed_ref_head *head_ref,
                     struct btrfs_qgroup_extent_record *qrecord,
                     int action, int *qrecord_inserted_ret)
{
        struct btrfs_delayed_ref_head *existing;
        struct btrfs_delayed_ref_root *delayed_refs;
        int qrecord_inserted = 0;

        delayed_refs = &trans->transaction->delayed_refs;

        /* Record qgroup extent info if provided */
        if (qrecord) {
                if (btrfs_qgroup_trace_extent_nolock(trans->fs_info,
                                        delayed_refs, qrecord))
                        kfree(qrecord);
                else
                        qrecord_inserted = 1;
        }

        trace_add_delayed_ref_head(trans->fs_info, head_ref, action);

        existing = htree_insert(&delayed_refs->href_root,
                                &head_ref->href_node);
        if (existing) {
                update_existing_head_ref(trans, existing, head_ref);
                /*
                 * we've updated the existing ref, free the newly
                 * allocated ref
                 */
                kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
                head_ref = existing;
        } else {
                if (head_ref->is_data && head_ref->ref_mod < 0) {
                        delayed_refs->pending_csums += head_ref->num_bytes;
                        trans->delayed_ref_updates +=
                                btrfs_csum_bytes_to_leaves(trans->fs_info,
                                                           head_ref->num_bytes);
                }
                delayed_refs->num_heads++;
                delayed_refs->num_heads_ready++;
                atomic_inc(&delayed_refs->num_entries);
                trans->delayed_ref_updates++;
        }
        if (qrecord_inserted_ret)
                *qrecord_inserted_ret = qrecord_inserted;

        return head_ref;
}

/*
 * init_delayed_ref_common - Initialize the structure which represents a
 *                           modification to an extent.
 *
 * @fs_info:    Internal to the mounted filesystem mount structure.
 *
 * @ref:        The structure which is going to be initialized.
 *
 * @bytenr:     The logical address of the extent for which a modification is
 *              going to be recorded.
 *
 * @num_bytes:  Size of the extent whose modification is being recorded.
 *
 * @ref_root:   The id of the root where this modification has originated;
 *              this can be either one of the well-known metadata trees or the
 *              subvolume id which references this extent.
 *
 * @action:     Can be one of BTRFS_ADD_DELAYED_REF/BTRFS_DROP_DELAYED_REF or
 *              BTRFS_ADD_DELAYED_EXTENT
 *
 * @ref_type:   Holds the type of the extent which is being recorded, can be
 *              one of BTRFS_SHARED_BLOCK_REF_KEY/BTRFS_TREE_BLOCK_REF_KEY
 *              when recording a metadata extent or BTRFS_SHARED_DATA_REF_KEY/
 *              BTRFS_EXTENT_DATA_REF_KEY when recording a data extent
 */
static void init_delayed_ref_common(struct btrfs_fs_info *fs_info,
                                    struct btrfs_delayed_ref_node *ref,
                                    u64 bytenr, u64 num_bytes, u64 ref_root,
                                    int action, u8 ref_type)
{
        u64 seq = 0;

        if (action == BTRFS_ADD_DELAYED_EXTENT)
                action = BTRFS_ADD_DELAYED_REF;

        if (is_fstree(ref_root))
                seq = atomic64_read(&fs_info->tree_mod_seq);

        refcount_set(&ref->refs, 1);
        ref->bytenr = bytenr;
        ref->num_bytes = num_bytes;
        ref->ref_mod = 1;
        ref->action = action;
        ref->is_head = 0;
        ref->in_tree = 1;
        ref->seq = seq;
        ref->type = ref_type;
        RB_CLEAR_NODE(&ref->ref_node);
        INIT_LIST_HEAD(&ref->add_list);
}

/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
                               struct btrfs_ref *generic_ref,
                               struct btrfs_delayed_extent_op *extent_op)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_delayed_tree_ref *ref;
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_qgroup_extent_record *record = NULL;
        int qrecord_inserted;
        bool is_system;
        int action = generic_ref->action;
        int level = generic_ref->tree_ref.level;
        int ret;
        u64 bytenr = generic_ref->bytenr;
        u64 num_bytes = generic_ref->len;
        u64 parent = generic_ref->parent;
        u8 ref_type;

        is_system = (generic_ref->tree_ref.owning_root == BTRFS_CHUNK_TREE_OBJECTID);

        ASSERT(generic_ref->type == BTRFS_REF_METADATA && generic_ref->action);
        ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
        if (!ref)
                return -ENOMEM;

        head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
        if (!head_ref) {
                kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
                return -ENOMEM;
        }

        if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
            !generic_ref->skip_qgroup) {
                record = kzalloc(sizeof(*record), GFP_NOFS);
                if (!record) {
                        kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
                        kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
                        return -ENOMEM;
                }
        }

        if (parent)
                ref_type = BTRFS_SHARED_BLOCK_REF_KEY;
        else
                ref_type = BTRFS_TREE_BLOCK_REF_KEY;

        init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
                                generic_ref->tree_ref.owning_root, action,
                                ref_type);
        ref->root = generic_ref->tree_ref.owning_root;
        ref->parent = parent;
        ref->level = level;

        init_delayed_ref_head(head_ref, record, bytenr, num_bytes,
                              generic_ref->tree_ref.owning_root, 0, action,
                              false, is_system);
        head_ref->extent_op = extent_op;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        /*
         * insert both the head node and the new ref without dropping
         * the spin lock
         */
        head_ref = add_delayed_ref_head(trans, head_ref, record,
                                        action, &qrecord_inserted);

        ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
        spin_unlock(&delayed_refs->lock);

        /*
         * Need to update the delayed_refs_rsv with any changes we may have
         * made.
         */
        btrfs_update_delayed_refs_rsv(trans);

        trace_add_delayed_tree_ref(fs_info, &ref->node, ref,
                                   action == BTRFS_ADD_DELAYED_EXTENT ?
                                   BTRFS_ADD_DELAYED_REF : action);
        if (ret > 0)
                kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);

        if (qrecord_inserted)
                btrfs_qgroup_trace_extent_post(trans, record);

        return 0;
}
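
/*
 * Hedged caller sketch (field names taken from the accesses above; real
 * callers in extent-tree.c typically go through the btrfs_init_generic_ref()
 * and btrfs_init_tree_ref() helpers instead of open-coding this):
 *
 *	struct btrfs_ref ref = {
 *		.type = BTRFS_REF_METADATA,
 *		.action = BTRFS_ADD_DELAYED_REF,
 *		.bytenr = bytenr,	// logical start of the tree block
 *		.len = fs_info->nodesize,
 *	};
 *
 *	ref.tree_ref.level = level;
 *	ref.tree_ref.owning_root = root_id;
 *	ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
 */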

/*
 * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
                               struct btrfs_ref *generic_ref,
                               u64 reserved)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_delayed_data_ref *ref;
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_qgroup_extent_record *record = NULL;
        int qrecord_inserted;
        int action = generic_ref->action;
        int ret;
        u64 bytenr = generic_ref->bytenr;
        u64 num_bytes = generic_ref->len;
        u64 parent = generic_ref->parent;
        u64 ref_root = generic_ref->data_ref.owning_root;
        u64 owner = generic_ref->data_ref.ino;
        u64 offset = generic_ref->data_ref.offset;
        u8 ref_type;

        ASSERT(generic_ref->type == BTRFS_REF_DATA && action);
        ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
        if (!ref)
                return -ENOMEM;

        if (parent)
                ref_type = BTRFS_SHARED_DATA_REF_KEY;
        else
                ref_type = BTRFS_EXTENT_DATA_REF_KEY;
        init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
                                ref_root, action, ref_type);
        ref->root = ref_root;
        ref->parent = parent;
        ref->objectid = owner;
        ref->offset = offset;

        head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
        if (!head_ref) {
                kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
                return -ENOMEM;
        }

        if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
            !generic_ref->skip_qgroup) {
                record = kzalloc(sizeof(*record), GFP_NOFS);
                if (!record) {
                        kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
                        kmem_cache_free(btrfs_delayed_ref_head_cachep,
                                        head_ref);
                        return -ENOMEM;
                }
        }

        init_delayed_ref_head(head_ref, record, bytenr, num_bytes, ref_root,
                              reserved, action, true, false);
        head_ref->extent_op = NULL;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        /*
         * insert both the head node and the new ref without dropping
         * the spin lock
         */
        head_ref = add_delayed_ref_head(trans, head_ref, record,
                                        action, &qrecord_inserted);

        ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
        spin_unlock(&delayed_refs->lock);

        /*
         * Need to update the delayed_refs_rsv with any changes we may have
         * made.
         */
        btrfs_update_delayed_refs_rsv(trans);

        trace_add_delayed_data_ref(trans->fs_info, &ref->node, ref,
                                   action == BTRFS_ADD_DELAYED_EXTENT ?
                                   BTRFS_ADD_DELAYED_REF : action);
        if (ret > 0)
                kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);

        if (qrecord_inserted)
                return btrfs_qgroup_trace_extent_post(trans, record);
        return 0;
}

int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
                                u64 bytenr, u64 num_bytes,
                                struct btrfs_delayed_extent_op *extent_op)
{
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;

        head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
        if (!head_ref)
                return -ENOMEM;

        init_delayed_ref_head(head_ref, NULL, bytenr, num_bytes, 0, 0,
                              BTRFS_UPDATE_DELAYED_HEAD, false, false);
        head_ref->extent_op = extent_op;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        add_delayed_ref_head(trans, head_ref, NULL, BTRFS_UPDATE_DELAYED_HEAD,
                             NULL);

        spin_unlock(&delayed_refs->lock);

        /*
         * Need to update the delayed_refs_rsv with any changes we may have
         * made.
         */
        btrfs_update_delayed_refs_rsv(trans);
        return 0;
}

/*
 * This does a simple search for the head node for a given extent.  Returns the
 * head node if found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, u64 bytenr)
{
        lockdep_assert_held(&delayed_refs->lock);

        return find_ref_head(delayed_refs, bytenr, false);
}

void __cold btrfs_delayed_ref_exit(void)
{
        kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
        kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
        kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
        kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}

int __init btrfs_delayed_ref_init(void)
{
        btrfs_delayed_ref_head_cachep = kmem_cache_create(
                                "btrfs_delayed_ref_head",
                                sizeof(struct btrfs_delayed_ref_head), 0,
                                SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_ref_head_cachep)
                goto fail;

        btrfs_delayed_tree_ref_cachep = kmem_cache_create(
                                "btrfs_delayed_tree_ref",
                                sizeof(struct btrfs_delayed_tree_ref), 0,
                                SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_tree_ref_cachep)
                goto fail;

        btrfs_delayed_data_ref_cachep = kmem_cache_create(
                                "btrfs_delayed_data_ref",
                                sizeof(struct btrfs_delayed_data_ref), 0,
                                SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_data_ref_cachep)
                goto fail;

        btrfs_delayed_extent_op_cachep = kmem_cache_create(
                                "btrfs_delayed_extent_op",
                                sizeof(struct btrfs_delayed_extent_op), 0,
                                SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_extent_op_cachep)
                goto fail;

        return 0;
fail:
        btrfs_delayed_ref_exit();
        return -ENOMEM;
}