btrfs: qgroup: Record possible quota-related extent for qgroup.
fs/btrfs/delayed-ref.c
/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"
#include "qgroup.h"

struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;

/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */

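/*
 * In rough outline (the consumers live in extent-tree.c): writers call
 * the btrfs_add_delayed_*() helpers below to queue head and ref nodes
 * under the transaction's delayed_refs root, and the buffered updates
 * are only flushed into the extent allocation tree when the delayed
 * refs are run, typically by transaction commit.
 */
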
/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
			  struct btrfs_delayed_tree_ref *ref1, int type)
{
	if (type == BTRFS_TREE_BLOCK_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

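/*
 * A return of 0 from comp_tree_refs() or comp_data_refs() means the two
 * refs describe the same logical reference (same root for keyed refs,
 * same parent for shared refs).  That is the condition under which
 * add_delayed_ref_tail_merge() below may coalesce them into one node by
 * adjusting ref_mod instead of queueing a duplicate.
 */
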
/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
			  struct btrfs_delayed_data_ref *ref1)
{
	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
		if (ref1->objectid < ref2->objectid)
			return -1;
		if (ref1->objectid > ref2->objectid)
			return 1;
		if (ref1->offset < ref2->offset)
			return -1;
		if (ref1->offset > ref2->offset)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/* insert a new ref to head ref rbtree */
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
						   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_head *entry;
	struct btrfs_delayed_ref_head *ins;
	u64 bytenr;

	ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
	bytenr = ins->node.bytenr;
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
				 href_node);

		if (bytenr < entry->node.bytenr)
			p = &(*p)->rb_left;
		else if (bytenr > entry->node.bytenr)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}

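/*
 * htree_insert() is the usual rb-tree insert idiom with one twist: on a
 * bytenr collision it returns the existing head instead of inserting, so
 * add_delayed_ref_head() can merge the update into the head that is
 * already queued and free its freshly allocated one.
 */
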
/*
 * find a head entry based on bytenr. This returns the delayed ref
 * head if it was able to find one, or NULL if nothing was in that spot.
 * If return_bigger is given, the next bigger entry is returned if no exact
 * match is found.
 */
static struct btrfs_delayed_ref_head *
find_ref_head(struct rb_root *root, u64 bytenr,
	      int return_bigger)
{
	struct rb_node *n;
	struct btrfs_delayed_ref_head *entry;

	n = root->rb_node;
	entry = NULL;
	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

		if (bytenr < entry->node.bytenr)
			n = n->rb_left;
		else if (bytenr > entry->node.bytenr)
			n = n->rb_right;
		else
			return entry;
	}
	if (entry && return_bigger) {
		if (bytenr > entry->node.bytenr) {
			n = rb_next(&entry->href_node);
			if (!n)
				n = rb_first(root);
			entry = rb_entry(n, struct btrfs_delayed_ref_head,
					 href_node);
			return entry;
		}
		return entry;
	}
	return NULL;
}

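/*
 * Note the deliberate wrap-around in the return_bigger case above: when
 * bytenr lies past the last queued head, rb_first() restarts the search
 * from the lowest bytenr.  btrfs_select_ref_head() relies on this to
 * walk the heads round-robin across successive calls.
 */
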
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	assert_spin_locked(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;

	atomic_inc(&head->node.refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (!head->node.in_tree) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref(&head->node);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref(&head->node);
	return 0;
}

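/*
 * btrfs_delayed_ref_lock() has to respect lock ordering: the head mutex
 * must not be acquired while holding delayed_refs->lock.  On contention
 * it therefore pins the head with a refcount, drops the spinlock, sleeps
 * on the mutex, then retakes the spinlock and re-checks in_tree.
 * -EAGAIN tells the caller the head was processed meanwhile and must be
 * looked up again.
 */
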
static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
				    struct btrfs_delayed_ref_root *delayed_refs,
				    struct btrfs_delayed_ref_head *head,
				    struct btrfs_delayed_ref_node *ref)
{
	if (btrfs_delayed_ref_is_head(ref)) {
		head = btrfs_delayed_node_to_head(ref);
		rb_erase(&head->href_node, &delayed_refs->href_root);
	} else {
		assert_spin_locked(&head->lock);
		list_del(&ref->list);
	}
	ref->in_tree = 0;
	btrfs_put_delayed_ref(ref);
	atomic_dec(&delayed_refs->num_entries);
	if (trans->delayed_ref_updates)
		trans->delayed_ref_updates--;
}

int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_root *delayed_refs,
			    u64 seq)
{
	struct seq_list *elem;
	int ret = 0;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		if (seq >= elem->seq) {
			pr_debug("holding back delayed_ref %#x.%x, lowest is %#x.%x (%p)\n",
				 (u32)(seq >> 32), (u32)seq,
				 (u32)(elem->seq >> 32), (u32)elem->seq,
				 delayed_refs);
			ret = 1;
		}
	}

	spin_unlock(&fs_info->tree_mod_seq_lock);
	return ret;
}

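/*
 * The sequence check above is what keeps delayed ref processing from
 * racing with backref walks (e.g. for qgroup accounting): any delayed
 * ref whose seq is at or above the oldest live entry in
 * tree_mod_seq_list is held back until that blocker goes away.
 */
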
struct btrfs_delayed_ref_head *
btrfs_select_ref_head(struct btrfs_trans_handle *trans)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_head *head;
	u64 start;
	bool loop = false;

	delayed_refs = &trans->transaction->delayed_refs;

again:
	start = delayed_refs->run_delayed_start;
	head = find_ref_head(&delayed_refs->href_root, start, 1);
	if (!head && !loop) {
		delayed_refs->run_delayed_start = 0;
		start = 0;
		loop = true;
		head = find_ref_head(&delayed_refs->href_root, start, 1);
		if (!head)
			return NULL;
	} else if (!head && loop) {
		return NULL;
	}

	while (head->processing) {
		struct rb_node *node;

		node = rb_next(&head->href_node);
		if (!node) {
			if (loop)
				return NULL;
			delayed_refs->run_delayed_start = 0;
			start = 0;
			loop = true;
			goto again;
		}
		head = rb_entry(node, struct btrfs_delayed_ref_head,
				href_node);
	}

	head->processing = 1;
	WARN_ON(delayed_refs->num_heads_ready == 0);
	delayed_refs->num_heads_ready--;
	delayed_refs->run_delayed_start = head->node.bytenr +
		head->node.num_bytes;
	return head;
}

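/*
 * btrfs_select_ref_head() is effectively a cursor over the href rbtree:
 * run_delayed_start remembers where the previous caller stopped, the
 * wrap-around in find_ref_head() restarts the scan at the lowest bytenr,
 * and the 'loop' flag bounds us to a single full pass once every head is
 * either processed or busy.
 */
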
/*
 * Helper to insert the ref_node to the tail or merge with tail.
 *
 * Return 0 for insert.
 * Return >0 for merge.
 */
static int
add_delayed_ref_tail_merge(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_root *root,
			   struct btrfs_delayed_ref_head *href,
			   struct btrfs_delayed_ref_node *ref)
{
	struct btrfs_delayed_ref_node *exist;
	int mod;
	int ret = 0;

	spin_lock(&href->lock);
	/* Check whether we can merge the tail node with ref */
	if (list_empty(&href->ref_list))
		goto add_tail;
	exist = list_entry(href->ref_list.prev, struct btrfs_delayed_ref_node,
			   list);
	/* No need to compare bytenr nor is_head */
	if (exist->type != ref->type || exist->no_quota != ref->no_quota ||
	    exist->seq != ref->seq)
		goto add_tail;

	if ((exist->type == BTRFS_TREE_BLOCK_REF_KEY ||
	     exist->type == BTRFS_SHARED_BLOCK_REF_KEY) &&
	    comp_tree_refs(btrfs_delayed_node_to_tree_ref(exist),
			   btrfs_delayed_node_to_tree_ref(ref),
			   ref->type))
		goto add_tail;
	if ((exist->type == BTRFS_EXTENT_DATA_REF_KEY ||
	     exist->type == BTRFS_SHARED_DATA_REF_KEY) &&
	    comp_data_refs(btrfs_delayed_node_to_data_ref(exist),
			   btrfs_delayed_node_to_data_ref(ref)))
		goto add_tail;

	/* Now we are sure we can merge */
	ret = 1;
	if (exist->action == ref->action) {
		mod = ref->ref_mod;
	} else {
		/* Need to change action */
		if (exist->ref_mod < ref->ref_mod) {
			exist->action = ref->action;
			mod = -exist->ref_mod;
			exist->ref_mod = ref->ref_mod;
		} else
			mod = -ref->ref_mod;
	}
	exist->ref_mod += mod;

	/* remove existing tail if its ref_mod is zero */
	if (exist->ref_mod == 0)
		drop_delayed_ref(trans, root, href, exist);
	spin_unlock(&href->lock);
	return ret;

add_tail:
	list_add_tail(&ref->list, &href->ref_list);
	atomic_inc(&root->num_entries);
	trans->delayed_ref_updates++;
	spin_unlock(&href->lock);
	return ret;
}

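/*
 * A worked example of the merge arithmetic above: if the tail is an ADD
 * with ref_mod 1 and the new ref is a DROP with ref_mod 2, the net
 * effect is a single drop, so the tail becomes a DROP with ref_mod 1
 * (mod = -1 is applied after copying over the larger ref_mod).  If both
 * have ref_mod 1 instead, they cancel to 0 and drop_delayed_ref()
 * removes the tail entirely.
 */
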
/*
 * helper function to update the accounting in the head ref
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
			 struct btrfs_delayed_ref_node *existing,
			 struct btrfs_delayed_ref_node *update)
{
	struct btrfs_delayed_ref_head *existing_ref;
	struct btrfs_delayed_ref_head *ref;
	int old_ref_mod;

	existing_ref = btrfs_delayed_node_to_head(existing);
	ref = btrfs_delayed_node_to_head(update);
	BUG_ON(existing_ref->is_data != ref->is_data);

	spin_lock(&existing_ref->lock);
	if (ref->must_insert_reserved) {
		/* if the extent was freed and then
		 * reallocated before the delayed ref
		 * entries were processed, we can end up
		 * with an existing head ref without
		 * the must_insert_reserved flag set.
		 * Set it again here
		 */
		existing_ref->must_insert_reserved = ref->must_insert_reserved;

		/*
		 * update the num_bytes so we make sure the accounting
		 * is done correctly
		 */
		existing->num_bytes = update->num_bytes;

	}

	if (ref->extent_op) {
		if (!existing_ref->extent_op) {
			existing_ref->extent_op = ref->extent_op;
		} else {
			if (ref->extent_op->update_key) {
				memcpy(&existing_ref->extent_op->key,
				       &ref->extent_op->key,
				       sizeof(ref->extent_op->key));
				existing_ref->extent_op->update_key = 1;
			}
			if (ref->extent_op->update_flags) {
				existing_ref->extent_op->flags_to_set |=
					ref->extent_op->flags_to_set;
				existing_ref->extent_op->update_flags = 1;
			}
			btrfs_free_delayed_extent_op(ref->extent_op);
		}
	}
	/*
	 * update the reference mod on the head to reflect this new operation.
	 * we only need the lock here because the head could be processed
	 * concurrently; for refs we just added we know we're fine.
	 */
	old_ref_mod = existing_ref->total_ref_mod;
	existing->ref_mod += update->ref_mod;
	existing_ref->total_ref_mod += update->ref_mod;

	/*
	 * If we are going from a positive ref mod to a negative or vice
	 * versa we need to make sure to adjust pending_csums accordingly.
	 */
	if (existing_ref->is_data) {
		if (existing_ref->total_ref_mod >= 0 && old_ref_mod < 0)
			delayed_refs->pending_csums -= existing->num_bytes;
		if (existing_ref->total_ref_mod < 0 && old_ref_mod >= 0)
			delayed_refs->pending_csums += existing->num_bytes;
	}
	spin_unlock(&existing_ref->lock);
}

/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_node *ref,
		     struct btrfs_qgroup_extent_record *qrecord,
		     u64 bytenr, u64 num_bytes, int action, int is_data)
{
	struct btrfs_delayed_ref_head *existing;
	struct btrfs_delayed_ref_head *head_ref = NULL;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *qexisting;
	int count_mod = 1;
	int must_insert_reserved = 0;

	/*
	 * the head node stores the sum of all the mods, so dropping a ref
	 * should drop the sum in the head node by one.
	 */
	if (action == BTRFS_UPDATE_DELAYED_HEAD)
		count_mod = 0;
	else if (action == BTRFS_DROP_DELAYED_REF)
		count_mod = -1;

	/*
	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update
	 * the reserved accounting when the extent is finally added, or
	 * if a later modification deletes the delayed ref without ever
	 * inserting the extent into the extent allocation tree.
	 * ref->must_insert_reserved is the flag used to record
	 * that accounting mods are required.
	 *
	 * Once we record must_insert_reserved, switch the action to
	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT)
		must_insert_reserved = 1;
	else
		must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = count_mod;
	ref->type = 0;
	ref->action = 0;
	ref->is_head = 1;
	ref->in_tree = 1;
	ref->seq = 0;

	head_ref = btrfs_delayed_node_to_head(ref);
	head_ref->must_insert_reserved = must_insert_reserved;
	head_ref->is_data = is_data;
	INIT_LIST_HEAD(&head_ref->ref_list);
	head_ref->processing = 0;
	head_ref->total_ref_mod = count_mod;

	/* Record qgroup extent info if provided */
	if (qrecord) {
		qrecord->bytenr = bytenr;
		qrecord->num_bytes = num_bytes;
		qrecord->old_roots = NULL;

		qexisting = btrfs_qgroup_insert_dirty_extent(delayed_refs,
							     qrecord);
		if (qexisting)
			kfree(qrecord);
	}

	spin_lock_init(&head_ref->lock);
	mutex_init(&head_ref->mutex);

	trace_add_delayed_ref_head(ref, head_ref, action);

	existing = htree_insert(&delayed_refs->href_root,
				&head_ref->href_node);
	if (existing) {
		update_existing_head_ref(delayed_refs, &existing->node, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
		head_ref = existing;
	} else {
		if (is_data && count_mod < 0)
			delayed_refs->pending_csums += num_bytes;
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
		atomic_inc(&delayed_refs->num_entries);
		trans->delayed_ref_updates++;
	}
	return head_ref;
}

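/*
 * Ownership note for the qgroup record handled above: when
 * btrfs_qgroup_insert_dirty_extent() reports an existing record for this
 * bytenr, the caller's allocation is freed on the spot; otherwise the
 * record now belongs to the delayed ref root and is consumed later when
 * qgroup accounting runs against the dirty extents.
 */
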
/*
 * helper to insert a delayed tree ref into the rbtree.
 */
static noinline void
add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, u64 parent, u64 ref_root, int level,
		     int action, int no_quota)
{
	struct btrfs_delayed_tree_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;
	int ret;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	if (is_fstree(ref_root))
		seq = atomic64_read(&fs_info->tree_mod_seq);
	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;
	ref->no_quota = no_quota;
	ref->seq = seq;

	full_ref = btrfs_delayed_node_to_tree_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
	else
		ref->type = BTRFS_TREE_BLOCK_REF_KEY;
	full_ref->level = level;

	trace_add_delayed_tree_ref(ref, full_ref, action);

	ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);

	/*
	 * XXX: memory should be freed at the same level it was allocated,
	 * but this pattern is already used elsewhere, so follow it for
	 * now.  Needs cleanup.
	 */
	if (ret > 0)
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
}

/*
 * helper to insert a delayed data ref into the rbtree.
 */
static noinline void
add_delayed_data_ref(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, u64 parent, u64 ref_root, u64 owner,
		     u64 offset, int action, int no_quota)
{
	struct btrfs_delayed_data_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;
	int ret;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	if (is_fstree(ref_root))
		seq = atomic64_read(&fs_info->tree_mod_seq);

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;
	ref->no_quota = no_quota;
	ref->seq = seq;

	full_ref = btrfs_delayed_node_to_data_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_DATA_REF_KEY;
	else
		ref->type = BTRFS_EXTENT_DATA_REF_KEY;

	full_ref->objectid = owner;
	full_ref->offset = offset;

	trace_add_delayed_data_ref(ref, full_ref, action);

	ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);

	if (ret > 0)
		kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
}

/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root, int level, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int no_quota)
{
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;

	if (!is_fstree(ref_root) || !fs_info->quota_enabled)
		no_quota = 0;

	BUG_ON(extent_op && extent_op->is_data);
	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
		return -ENOMEM;
	}

	if (fs_info->quota_enabled && is_fstree(ref_root)) {
		record = kmalloc(sizeof(*record), GFP_NOFS);
		if (!record) {
			kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
			/*
			 * free the head ref itself here, not 'ref' again:
			 * freeing 'ref' twice (as the code originally did)
			 * is a double free and leaks head_ref.
			 */
			kmem_cache_free(btrfs_delayed_ref_head_cachep,
					head_ref);
			return -ENOMEM;
		}
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
					bytenr, num_bytes, action, 0);

	add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
			     num_bytes, parent, ref_root, level, action,
			     no_quota);
	spin_unlock(&delayed_refs->lock);

	return 0;
}

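/*
 * Example call (hypothetical values, for illustration only): a caller
 * adding a reference from subvolume 'ref_root' to the tree block at
 * 'bytenr' would queue it roughly like
 *
 *	ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
 *					 num_bytes, 0, ref_root, level,
 *					 BTRFS_ADD_DELAYED_REF, NULL, 0);
 *
 * Passing parent == 0 selects BTRFS_TREE_BLOCK_REF_KEY; a nonzero parent
 * queues a BTRFS_SHARED_BLOCK_REF_KEY instead, mirroring the comparator
 * cases at the top of this file.
 */
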
/*
 * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
			       u64 owner, u64 offset, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int no_quota)
{
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;

	if (!is_fstree(ref_root) || !fs_info->quota_enabled)
		no_quota = 0;

	BUG_ON(extent_op && !extent_op->is_data);
	ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
		return -ENOMEM;
	}

	if (fs_info->quota_enabled && is_fstree(ref_root)) {
		record = kmalloc(sizeof(*record), GFP_NOFS);
		if (!record) {
			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
			kmem_cache_free(btrfs_delayed_ref_head_cachep,
					head_ref);
			return -ENOMEM;
		}
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
					bytenr, num_bytes, action, 1);

	add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
			     num_bytes, parent, ref_root, owner, offset,
			     action, no_quota);
	spin_unlock(&delayed_refs->lock);

	return 0;
}

int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
				struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		return -ENOMEM;

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	add_delayed_ref_head(fs_info, trans, &head_ref->node, NULL, bytenr,
			     num_bytes, BTRFS_UPDATE_DELAYED_HEAD,
			     extent_op->is_data);

	spin_unlock(&delayed_refs->lock);
	return 0;
}

/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if one was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	return find_ref_head(&delayed_refs->href_root, bytenr, 0);
}

void btrfs_delayed_ref_exit(void)
{
	if (btrfs_delayed_ref_head_cachep)
		kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
	if (btrfs_delayed_tree_ref_cachep)
		kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
	if (btrfs_delayed_data_ref_cachep)
		kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
	if (btrfs_delayed_extent_op_cachep)
		kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}

int btrfs_delayed_ref_init(void)
{
	btrfs_delayed_ref_head_cachep = kmem_cache_create(
				"btrfs_delayed_ref_head",
				sizeof(struct btrfs_delayed_ref_head), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_ref_head_cachep)
		goto fail;

	btrfs_delayed_tree_ref_cachep = kmem_cache_create(
				"btrfs_delayed_tree_ref",
				sizeof(struct btrfs_delayed_tree_ref), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_tree_ref_cachep)
		goto fail;

	btrfs_delayed_data_ref_cachep = kmem_cache_create(
				"btrfs_delayed_data_ref",
				sizeof(struct btrfs_delayed_data_ref), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_data_ref_cachep)
		goto fail;

	btrfs_delayed_extent_op_cachep = kmem_cache_create(
				"btrfs_delayed_extent_op",
				sizeof(struct btrfs_delayed_extent_op), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_extent_op_cachep)
		goto fail;

	return 0;
fail:
	btrfs_delayed_ref_exit();
	return -ENOMEM;
}
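
/*
 * btrfs_delayed_ref_init() is expected to run once at module load,
 * paired with btrfs_delayed_ref_exit() at unload.  On partial failure
 * the exit path doubles as cleanup; the NULL checks there make it safe
 * to call with only some of the caches created.
 */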