fs/btrfs/transaction.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include <linux/uuid.h>
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"
#include "inode-map.h"
#include "volumes.h"
#include "dev-replace.h"
#include "qgroup.h"
#include "block-group.h"

#define BTRFS_ROOT_TRANS_TAG 0

static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
        [TRANS_STATE_RUNNING]           = 0U,
        [TRANS_STATE_BLOCKED]           = __TRANS_START,
        [TRANS_STATE_COMMIT_START]      = (__TRANS_START | __TRANS_ATTACH),
        [TRANS_STATE_COMMIT_DOING]      = (__TRANS_START |
                                           __TRANS_ATTACH |
                                           __TRANS_JOIN |
                                           __TRANS_JOIN_NOSTART),
        [TRANS_STATE_UNBLOCKED]         = (__TRANS_START |
                                           __TRANS_ATTACH |
                                           __TRANS_JOIN |
                                           __TRANS_JOIN_NOLOCK |
                                           __TRANS_JOIN_NOSTART),
        [TRANS_STATE_COMPLETED]         = (__TRANS_START |
                                           __TRANS_ATTACH |
                                           __TRANS_JOIN |
                                           __TRANS_JOIN_NOLOCK |
                                           __TRANS_JOIN_NOSTART),
};
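
/*
 * In other words, each entry above lists the handle types that may no longer
 * enter a transaction once it has reached that state; join_transaction()
 * checks "btrfs_blocked_trans_types[cur_trans->state] & type".  For example,
 * once a commit reaches TRANS_STATE_COMMIT_DOING, TRANS_START, TRANS_ATTACH,
 * TRANS_JOIN and TRANS_JOIN_NOSTART are all refused, while TRANS_JOIN_NOLOCK
 * may still enter.
 */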

void btrfs_put_transaction(struct btrfs_transaction *transaction)
{
        WARN_ON(refcount_read(&transaction->use_count) == 0);
        if (refcount_dec_and_test(&transaction->use_count)) {
                BUG_ON(!list_empty(&transaction->list));
                WARN_ON(!RB_EMPTY_ROOT(
                                &transaction->delayed_refs.href_root.rb_root));
                if (transaction->delayed_refs.pending_csums)
                        btrfs_err(transaction->fs_info,
                                  "pending csums is %llu",
                                  transaction->delayed_refs.pending_csums);
                /*
                 * If any block groups are found in ->deleted_bgs then it's
                 * because the transaction was aborted and a commit did not
                 * happen (things failed before writing the new superblock
                 * and calling btrfs_finish_extent_commit()), so we can not
                 * discard the physical locations of the block groups.
                 */
                while (!list_empty(&transaction->deleted_bgs)) {
                        struct btrfs_block_group_cache *cache;

                        cache = list_first_entry(&transaction->deleted_bgs,
                                                 struct btrfs_block_group_cache,
                                                 bg_list);
                        list_del_init(&cache->bg_list);
                        btrfs_put_block_group_trimming(cache);
                        btrfs_put_block_group(cache);
                }
                WARN_ON(!list_empty(&transaction->dev_update_list));
                kfree(transaction);
        }
}

static noinline void switch_commit_roots(struct btrfs_transaction *trans)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_root *root, *tmp;

        down_write(&fs_info->commit_root_sem);
        list_for_each_entry_safe(root, tmp, &trans->switch_commits,
                                 dirty_list) {
                list_del_init(&root->dirty_list);
                free_extent_buffer(root->commit_root);
                root->commit_root = btrfs_root_node(root);
                if (is_fstree(root->root_key.objectid))
                        btrfs_unpin_free_ino(root);
                extent_io_tree_release(&root->dirty_log_pages);
                btrfs_qgroup_clean_swapped_blocks(root);
        }

        /* We can free old roots now. */
        spin_lock(&trans->dropped_roots_lock);
        while (!list_empty(&trans->dropped_roots)) {
                root = list_first_entry(&trans->dropped_roots,
                                        struct btrfs_root, root_list);
                list_del_init(&root->root_list);
                spin_unlock(&trans->dropped_roots_lock);
                btrfs_drop_and_free_fs_root(fs_info, root);
                spin_lock(&trans->dropped_roots_lock);
        }
        spin_unlock(&trans->dropped_roots_lock);
        up_write(&fs_info->commit_root_sem);
}

static inline void extwriter_counter_inc(struct btrfs_transaction *trans,
                                         unsigned int type)
{
        if (type & TRANS_EXTWRITERS)
                atomic_inc(&trans->num_extwriters);
}

static inline void extwriter_counter_dec(struct btrfs_transaction *trans,
                                         unsigned int type)
{
        if (type & TRANS_EXTWRITERS)
                atomic_dec(&trans->num_extwriters);
}

static inline void extwriter_counter_init(struct btrfs_transaction *trans,
                                          unsigned int type)
{
        atomic_set(&trans->num_extwriters, ((type & TRANS_EXTWRITERS) ? 1 : 0));
}

static inline int extwriter_counter_read(struct btrfs_transaction *trans)
{
        return atomic_read(&trans->num_extwriters);
}
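
/*
 * Note: TRANS_EXTWRITERS (__TRANS_START | __TRANS_ATTACH in this era's
 * transaction.h) covers the handle types that come from outside the commit
 * machinery itself; the commit path waits for this counter to drain to zero
 * before moving to TRANS_STATE_COMMIT_DOING.
 */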

/*
 * To be called after all the new block groups attached to the transaction
 * handle have been created (btrfs_create_pending_block_groups()).
 */
void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;

        if (!trans->chunk_bytes_reserved)
                return;

        WARN_ON_ONCE(!list_empty(&trans->new_bgs));

        btrfs_block_rsv_release(fs_info, &fs_info->chunk_block_rsv,
                                trans->chunk_bytes_reserved);
        trans->chunk_bytes_reserved = 0;
}

/*
 * either allocate a new transaction or hop into the existing one
 */
static noinline int join_transaction(struct btrfs_fs_info *fs_info,
                                     unsigned int type)
{
        struct btrfs_transaction *cur_trans;

        spin_lock(&fs_info->trans_lock);
loop:
        /* The file system has been taken offline. No new transactions. */
        if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
                spin_unlock(&fs_info->trans_lock);
                return -EROFS;
        }

        cur_trans = fs_info->running_transaction;
        if (cur_trans) {
                if (cur_trans->aborted) {
                        spin_unlock(&fs_info->trans_lock);
                        return cur_trans->aborted;
                }
                if (btrfs_blocked_trans_types[cur_trans->state] & type) {
                        spin_unlock(&fs_info->trans_lock);
                        return -EBUSY;
                }
                refcount_inc(&cur_trans->use_count);
                atomic_inc(&cur_trans->num_writers);
                extwriter_counter_inc(cur_trans, type);
                spin_unlock(&fs_info->trans_lock);
                return 0;
        }
        spin_unlock(&fs_info->trans_lock);

        /*
         * If we are ATTACH, we just want to catch the current transaction,
         * and commit it. If there is no transaction, just return ENOENT.
         */
        if (type == TRANS_ATTACH)
                return -ENOENT;

        /*
         * JOIN_NOLOCK only happens during the transaction commit, so
         * it is impossible that ->running_transaction is NULL
         */
        BUG_ON(type == TRANS_JOIN_NOLOCK);

        cur_trans = kmalloc(sizeof(*cur_trans), GFP_NOFS);
        if (!cur_trans)
                return -ENOMEM;

        spin_lock(&fs_info->trans_lock);
        if (fs_info->running_transaction) {
                /*
                 * someone started a transaction after we unlocked.  Make sure
                 * to redo the checks above
                 */
                kfree(cur_trans);
                goto loop;
        } else if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
                spin_unlock(&fs_info->trans_lock);
                kfree(cur_trans);
                return -EROFS;
        }

        cur_trans->fs_info = fs_info;
        atomic_set(&cur_trans->num_writers, 1);
        extwriter_counter_init(cur_trans, type);
        init_waitqueue_head(&cur_trans->writer_wait);
        init_waitqueue_head(&cur_trans->commit_wait);
        cur_trans->state = TRANS_STATE_RUNNING;
        /*
         * One for this trans handle, one so it will live on until we
         * commit the transaction.
         */
        refcount_set(&cur_trans->use_count, 2);
        cur_trans->flags = 0;
        cur_trans->start_time = ktime_get_seconds();

        memset(&cur_trans->delayed_refs, 0, sizeof(cur_trans->delayed_refs));

        cur_trans->delayed_refs.href_root = RB_ROOT_CACHED;
        cur_trans->delayed_refs.dirty_extent_root = RB_ROOT;
        atomic_set(&cur_trans->delayed_refs.num_entries, 0);

        /*
         * although the tree mod log is per file system and not per transaction,
         * the log must never go across transaction boundaries.
         */
        smp_mb();
        if (!list_empty(&fs_info->tree_mod_seq_list))
                WARN(1, KERN_ERR "BTRFS: tree_mod_seq_list not empty when creating a fresh transaction\n");
        if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log))
                WARN(1, KERN_ERR "BTRFS: tree_mod_log rb tree not empty when creating a fresh transaction\n");
        atomic64_set(&fs_info->tree_mod_seq, 0);

        spin_lock_init(&cur_trans->delayed_refs.lock);

        INIT_LIST_HEAD(&cur_trans->pending_snapshots);
        INIT_LIST_HEAD(&cur_trans->dev_update_list);
        INIT_LIST_HEAD(&cur_trans->switch_commits);
        INIT_LIST_HEAD(&cur_trans->dirty_bgs);
        INIT_LIST_HEAD(&cur_trans->io_bgs);
        INIT_LIST_HEAD(&cur_trans->dropped_roots);
        mutex_init(&cur_trans->cache_write_mutex);
        spin_lock_init(&cur_trans->dirty_bgs_lock);
        INIT_LIST_HEAD(&cur_trans->deleted_bgs);
        spin_lock_init(&cur_trans->dropped_roots_lock);
        list_add_tail(&cur_trans->list, &fs_info->trans_list);
        extent_io_tree_init(fs_info, &cur_trans->dirty_pages,
                        IO_TREE_TRANS_DIRTY_PAGES, fs_info->btree_inode);
        fs_info->generation++;
        cur_trans->transid = fs_info->generation;
        fs_info->running_transaction = cur_trans;
        cur_trans->aborted = 0;
        spin_unlock(&fs_info->trans_lock);

        return 0;
}

/*
 * this does all the record keeping required to make sure that a reference
 * counted root is properly recorded in a given transaction.  This is required
 * to make sure the old root from before we joined the transaction is deleted
 * when the transaction commits
 */
static int record_root_in_trans(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root,
                               int force)
{
        struct btrfs_fs_info *fs_info = root->fs_info;

        if ((test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
            root->last_trans < trans->transid) || force) {
                WARN_ON(root == fs_info->extent_root);
                WARN_ON(!force && root->commit_root != root->node);

                /*
                 * See below for IN_TRANS_SETUP usage rules; we have the
                 * reloc mutex held now, so there is only one writer in
                 * this function.
                 */
                set_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);

                /* make sure readers find IN_TRANS_SETUP before
                 * they find our root->last_trans update
                 */
                smp_wmb();

                spin_lock(&fs_info->fs_roots_radix_lock);
                if (root->last_trans == trans->transid && !force) {
                        spin_unlock(&fs_info->fs_roots_radix_lock);
                        return 0;
                }
                radix_tree_tag_set(&fs_info->fs_roots_radix,
                                   (unsigned long)root->root_key.objectid,
                                   BTRFS_ROOT_TRANS_TAG);
                spin_unlock(&fs_info->fs_roots_radix_lock);
                root->last_trans = trans->transid;

                /* this is pretty tricky.  We don't want to
                 * take the relocation lock in btrfs_record_root_in_trans
                 * unless we're really doing the first setup for this root in
                 * this transaction.
                 *
                 * Normally we'd use root->last_trans as a flag to decide
                 * if we want to take the expensive mutex.
                 *
                 * But, we have to set root->last_trans before we
                 * init the relocation root, otherwise, we trip over warnings
                 * in ctree.c.  The solution used here is to flag ourselves
                 * with root IN_TRANS_SETUP.  When this is 1, we're still
                 * fixing up the reloc trees and everyone must wait.
                 *
                 * When this is zero, they can trust root->last_trans and fly
                 * through btrfs_record_root_in_trans without having to take the
                 * lock.  smp_wmb() makes sure that all the writes above are
                 * done before we pop in the zero below
                 */
                btrfs_init_reloc_root(trans, root);
                smp_mb__before_atomic();
                clear_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);
        }
        return 0;
}
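
/*
 * The smp_wmb() in record_root_in_trans() pairs with the smp_rmb() in
 * btrfs_record_root_in_trans() below: a reader that sees the updated
 * root->last_trans is also guaranteed to see IN_TRANS_SETUP set while the
 * reloc root is still being set up, so it cannot skip taking reloc_mutex
 * too early.
 */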

void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
                            struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_transaction *cur_trans = trans->transaction;

        /* Add ourselves to the transaction dropped list */
        spin_lock(&cur_trans->dropped_roots_lock);
        list_add_tail(&root->root_list, &cur_trans->dropped_roots);
        spin_unlock(&cur_trans->dropped_roots_lock);

        /* Make sure we don't try to update the root at commit time */
        spin_lock(&fs_info->fs_roots_radix_lock);
        radix_tree_tag_clear(&fs_info->fs_roots_radix,
                             (unsigned long)root->root_key.objectid,
                             BTRFS_ROOT_TRANS_TAG);
        spin_unlock(&fs_info->fs_roots_radix_lock);
}

int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;

        if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
                return 0;

        /*
         * see record_root_in_trans for comments about IN_TRANS_SETUP usage
         * and barriers
         */
        smp_rmb();
        if (root->last_trans == trans->transid &&
            !test_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state))
                return 0;

        mutex_lock(&fs_info->reloc_mutex);
        record_root_in_trans(trans, root, 0);
        mutex_unlock(&fs_info->reloc_mutex);

        return 0;
}

static inline int is_transaction_blocked(struct btrfs_transaction *trans)
{
        return (trans->state >= TRANS_STATE_BLOCKED &&
                trans->state < TRANS_STATE_UNBLOCKED &&
                !trans->aborted);
}

/*
 * Wait for a commit against the current transaction to become unblocked.
 * When this is done, it is safe to start a new transaction, but the current
 * transaction might not be fully on disk.
 */
static void wait_current_trans(struct btrfs_fs_info *fs_info)
{
        struct btrfs_transaction *cur_trans;

        spin_lock(&fs_info->trans_lock);
        cur_trans = fs_info->running_transaction;
        if (cur_trans && is_transaction_blocked(cur_trans)) {
                refcount_inc(&cur_trans->use_count);
                spin_unlock(&fs_info->trans_lock);

                wait_event(fs_info->transaction_wait,
                           cur_trans->state >= TRANS_STATE_UNBLOCKED ||
                           cur_trans->aborted);
                btrfs_put_transaction(cur_trans);
        } else {
                spin_unlock(&fs_info->trans_lock);
        }
}

static int may_wait_transaction(struct btrfs_fs_info *fs_info, int type)
{
        if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
                return 0;

        if (type == TRANS_START)
                return 1;

        return 0;
}

static inline bool need_reserve_reloc_root(struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;

        if (!fs_info->reloc_ctl ||
            !test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
            root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
            root->reloc_root)
                return false;

        return true;
}

static struct btrfs_trans_handle *
start_transaction(struct btrfs_root *root, unsigned int num_items,
                  unsigned int type, enum btrfs_reserve_flush_enum flush,
                  bool enforce_qgroups)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
        struct btrfs_trans_handle *h;
        struct btrfs_transaction *cur_trans;
        u64 num_bytes = 0;
        u64 qgroup_reserved = 0;
        bool reloc_reserved = false;
        int ret;

        /* Send isn't supposed to start transactions. */
        ASSERT(current->journal_info != BTRFS_SEND_TRANS_STUB);

        if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
                return ERR_PTR(-EROFS);

        if (current->journal_info) {
                WARN_ON(type & TRANS_EXTWRITERS);
                h = current->journal_info;
                refcount_inc(&h->use_count);
                WARN_ON(refcount_read(&h->use_count) > 2);
                h->orig_rsv = h->block_rsv;
                h->block_rsv = NULL;
                goto got_it;
        }

        /*
         * Do the reservation before we join the transaction so we can do all
         * the appropriate flushing if need be.
         */
        if (num_items && root != fs_info->chunk_root) {
                struct btrfs_block_rsv *rsv = &fs_info->trans_block_rsv;
                u64 delayed_refs_bytes = 0;

                qgroup_reserved = num_items * fs_info->nodesize;
                ret = btrfs_qgroup_reserve_meta_pertrans(root, qgroup_reserved,
                                enforce_qgroups);
                if (ret)
                        return ERR_PTR(ret);

                /*
                 * We want to reserve all the bytes we may need all at once, so
                 * we only do 1 enospc flushing cycle per transaction start.  We
                 * accomplish this by simply assuming we'll do 2 x num_items
                 * worth of delayed refs updates in this trans handle, and
                 * refill that amount for whatever is missing in the reserve.
                 */
                num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_items);
                if (delayed_refs_rsv->full == 0) {
                        delayed_refs_bytes = num_bytes;
                        num_bytes <<= 1;
                }

                /*
                 * Do the reservation for the relocation root creation
                 */
                if (need_reserve_reloc_root(root)) {
                        num_bytes += fs_info->nodesize;
                        reloc_reserved = true;
                }

                ret = btrfs_block_rsv_add(root, rsv, num_bytes, flush);
                if (ret)
                        goto reserve_fail;
                if (delayed_refs_bytes) {
                        btrfs_migrate_to_delayed_refs_rsv(fs_info, rsv,
                                                          delayed_refs_bytes);
                        num_bytes -= delayed_refs_bytes;
                }
        } else if (num_items == 0 && flush == BTRFS_RESERVE_FLUSH_ALL &&
                   !delayed_refs_rsv->full) {
                /*
                 * Some people call with btrfs_start_transaction(root, 0)
                 * because they can be throttled, but have some other mechanism
                 * for reserving space.  We still want these guys to refill the
                 * delayed block_rsv so just add one item's worth of reservation
                 * here.
                 */
                ret = btrfs_delayed_refs_rsv_refill(fs_info, flush);
                if (ret)
                        goto reserve_fail;
        }
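
        /*
         * Rough worked example (assuming the v5.4-era helper definitions):
         * with a 16K nodesize, btrfs_calc_insert_metadata_size() above comes
         * to nodesize * 2 * BTRFS_MAX_LEVEL = 256K per item; when the delayed
         * refs rsv also needs refilling, that amount is doubled and the
         * second half is migrated into delayed_refs_rsv once the reservation
         * succeeds.
         */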
again:
        h = kmem_cache_zalloc(btrfs_trans_handle_cachep, GFP_NOFS);
        if (!h) {
                ret = -ENOMEM;
                goto alloc_fail;
        }

        /*
         * If we are JOIN_NOLOCK we're already committing a transaction and
         * waiting on this guy, so we don't need to do the sb_start_intwrite
         * because we're already holding a ref.  We need this because we could
         * have raced in and did an fsync() on a file which can kick a commit
         * and then we deadlock with somebody doing a freeze.
         *
         * If we are ATTACH, it means we just want to catch the current
         * transaction and commit it, so we needn't do sb_start_intwrite().
         */
        if (type & __TRANS_FREEZABLE)
                sb_start_intwrite(fs_info->sb);

        if (may_wait_transaction(fs_info, type))
                wait_current_trans(fs_info);

        do {
                ret = join_transaction(fs_info, type);
                if (ret == -EBUSY) {
                        wait_current_trans(fs_info);
                        if (unlikely(type == TRANS_ATTACH ||
                                     type == TRANS_JOIN_NOSTART))
                                ret = -ENOENT;
                }
        } while (ret == -EBUSY);

        if (ret < 0)
                goto join_fail;

        cur_trans = fs_info->running_transaction;

        h->transid = cur_trans->transid;
        h->transaction = cur_trans;
        h->root = root;
        refcount_set(&h->use_count, 1);
        h->fs_info = root->fs_info;

        h->type = type;
        h->can_flush_pending_bgs = true;
        INIT_LIST_HEAD(&h->new_bgs);

        smp_mb();
        if (cur_trans->state >= TRANS_STATE_BLOCKED &&
            may_wait_transaction(fs_info, type)) {
                current->journal_info = h;
                btrfs_commit_transaction(h);
                goto again;
        }

        if (num_bytes) {
                trace_btrfs_space_reservation(fs_info, "transaction",
                                              h->transid, num_bytes, 1);
                h->block_rsv = &fs_info->trans_block_rsv;
                h->bytes_reserved = num_bytes;
                h->reloc_reserved = reloc_reserved;
        }

got_it:
        btrfs_record_root_in_trans(h, root);

        if (!current->journal_info)
                current->journal_info = h;
        return h;

join_fail:
        if (type & __TRANS_FREEZABLE)
                sb_end_intwrite(fs_info->sb);
        kmem_cache_free(btrfs_trans_handle_cachep, h);
alloc_fail:
        if (num_bytes)
                btrfs_block_rsv_release(fs_info, &fs_info->trans_block_rsv,
                                        num_bytes);
reserve_fail:
        btrfs_qgroup_free_meta_pertrans(root, qgroup_reserved);
        return ERR_PTR(ret);
}

struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
                                                   unsigned int num_items)
{
        return start_transaction(root, num_items, TRANS_START,
                                 BTRFS_RESERVE_FLUSH_ALL, true);
}
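
/*
 * Typical caller pattern (a sketch, with error handling trimmed), where
 * num_items reserves space for that many tree item modifications:
 *
 *      trans = btrfs_start_transaction(root, 1);
 *      if (IS_ERR(trans))
 *              return PTR_ERR(trans);
 *      ...modify trees...
 *      return btrfs_end_transaction(trans);
 */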

struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
                                        struct btrfs_root *root,
                                        unsigned int num_items,
                                        int min_factor)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_trans_handle *trans;
        u64 num_bytes;
        int ret;

        /*
         * We have two callers: unlink and block group removal.  The
         * former should succeed even if we will temporarily exceed
         * quota and the latter operates on the extent root so
         * qgroup enforcement is ignored anyway.
         */
        trans = start_transaction(root, num_items, TRANS_START,
                                  BTRFS_RESERVE_FLUSH_ALL, false);
        if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
                return trans;

        trans = btrfs_start_transaction(root, 0);
        if (IS_ERR(trans))
                return trans;

        num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_items);
        ret = btrfs_cond_migrate_bytes(fs_info, &fs_info->trans_block_rsv,
                                       num_bytes, min_factor);
        if (ret) {
                btrfs_end_transaction(trans);
                return ERR_PTR(ret);
        }

        trans->block_rsv = &fs_info->trans_block_rsv;
        trans->bytes_reserved = num_bytes;
        trace_btrfs_space_reservation(fs_info, "transaction",
                                      trans->transid, num_bytes, 1);

        return trans;
}

struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
{
        return start_transaction(root, 0, TRANS_JOIN, BTRFS_RESERVE_NO_FLUSH,
                                 true);
}

struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
{
        return start_transaction(root, 0, TRANS_JOIN_NOLOCK,
                                 BTRFS_RESERVE_NO_FLUSH, true);
}

/*
 * Similar to regular join but it never starts a transaction when none is
 * running or after waiting for the current one to finish.
 */
struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root)
{
        return start_transaction(root, 0, TRANS_JOIN_NOSTART,
                                 BTRFS_RESERVE_NO_FLUSH, true);
}

/*
 * btrfs_attach_transaction() - catch the running transaction
 *
 * It is used when we want to commit the current transaction, but
 * don't want to start a new one.
 *
 * Note: If this function returns -ENOENT, it just means there is no
 * running transaction. But it is possible that an inactive transaction
 * is still in memory, not fully on disk. If you hope there is no
 * inactive transaction in the fs when -ENOENT is returned, you should
 * invoke
 *     btrfs_attach_transaction_barrier()
 */
struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
{
        return start_transaction(root, 0, TRANS_ATTACH,
                                 BTRFS_RESERVE_NO_FLUSH, true);
}

/*
 * btrfs_attach_transaction_barrier() - catch the running transaction
 *
 * It is similar to the above function, the difference being that this one
 * will wait for all the inactive transactions until they fully
 * complete.
 */
struct btrfs_trans_handle *
btrfs_attach_transaction_barrier(struct btrfs_root *root)
{
        struct btrfs_trans_handle *trans;

        trans = start_transaction(root, 0, TRANS_ATTACH,
                                  BTRFS_RESERVE_NO_FLUSH, true);
        if (trans == ERR_PTR(-ENOENT))
                btrfs_wait_for_commit(root->fs_info, 0);

        return trans;
}
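
/*
 * Sketch of a sync-style caller (modeled on what btrfs_sync_fs() does;
 * details trimmed):
 *
 *      trans = btrfs_attach_transaction_barrier(root);
 *      if (IS_ERR(trans)) {
 *              if (PTR_ERR(trans) == -ENOENT)
 *                      return 0;
 *              return PTR_ERR(trans);
 *      }
 *      return btrfs_commit_transaction(trans);
 */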

/* wait for a transaction commit to be fully complete */
static noinline void wait_for_commit(struct btrfs_transaction *commit)
{
        wait_event(commit->commit_wait, commit->state == TRANS_STATE_COMPLETED);
}

int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid)
{
        struct btrfs_transaction *cur_trans = NULL, *t;
        int ret = 0;

        if (transid) {
                if (transid <= fs_info->last_trans_committed)
                        goto out;

                /* find specified transaction */
                spin_lock(&fs_info->trans_lock);
                list_for_each_entry(t, &fs_info->trans_list, list) {
                        if (t->transid == transid) {
                                cur_trans = t;
                                refcount_inc(&cur_trans->use_count);
                                ret = 0;
                                break;
                        }
                        if (t->transid > transid) {
                                ret = 0;
                                break;
                        }
                }
                spin_unlock(&fs_info->trans_lock);

                /*
                 * The specified transaction doesn't exist, or we
                 * raced with btrfs_commit_transaction
                 */
                if (!cur_trans) {
                        if (transid > fs_info->last_trans_committed)
                                ret = -EINVAL;
                        goto out;
                }
        } else {
                /* find newest transaction that is committing | committed */
                spin_lock(&fs_info->trans_lock);
                list_for_each_entry_reverse(t, &fs_info->trans_list,
                                            list) {
                        if (t->state >= TRANS_STATE_COMMIT_START) {
                                if (t->state == TRANS_STATE_COMPLETED)
                                        break;
                                cur_trans = t;
                                refcount_inc(&cur_trans->use_count);
                                break;
                        }
                }
                spin_unlock(&fs_info->trans_lock);
                if (!cur_trans)
                        goto out;  /* nothing committing|committed */
        }

        wait_for_commit(cur_trans);
        btrfs_put_transaction(cur_trans);
out:
        return ret;
}

void btrfs_throttle(struct btrfs_fs_info *fs_info)
{
        wait_current_trans(fs_info);
}

static int should_end_transaction(struct btrfs_trans_handle *trans)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;

        if (btrfs_check_space_for_delayed_refs(fs_info))
                return 1;

        return !!btrfs_block_rsv_check(&fs_info->global_block_rsv, 5);
}

int btrfs_should_end_transaction(struct btrfs_trans_handle *trans)
{
        struct btrfs_transaction *cur_trans = trans->transaction;

        smp_mb();
        if (cur_trans->state >= TRANS_STATE_BLOCKED ||
            cur_trans->delayed_refs.flushing)
                return 1;

        return should_end_transaction(trans);
}
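
/*
 * Long-running operations typically poll btrfs_should_end_transaction()
 * and split their work across transactions, along these lines (a sketch
 * with placeholder names):
 *
 *      while (more_work) {
 *              do_a_chunk_of_work(trans);
 *              if (btrfs_should_end_transaction(trans)) {
 *                      btrfs_end_transaction(trans);
 *                      trans = btrfs_start_transaction(root, num_items);
 *                      ...
 *              }
 *      }
 */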

static void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;

        if (!trans->block_rsv) {
                ASSERT(!trans->bytes_reserved);
                return;
        }

        if (!trans->bytes_reserved)
                return;

        ASSERT(trans->block_rsv == &fs_info->trans_block_rsv);
        trace_btrfs_space_reservation(fs_info, "transaction",
                                      trans->transid, trans->bytes_reserved, 0);
        btrfs_block_rsv_release(fs_info, trans->block_rsv,
                                trans->bytes_reserved);
        trans->bytes_reserved = 0;
}

static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
                                   int throttle)
{
        struct btrfs_fs_info *info = trans->fs_info;
        struct btrfs_transaction *cur_trans = trans->transaction;
        int lock = (trans->type != TRANS_JOIN_NOLOCK);
        int err = 0;

        if (refcount_read(&trans->use_count) > 1) {
                refcount_dec(&trans->use_count);
                trans->block_rsv = trans->orig_rsv;
                return 0;
        }

        btrfs_trans_release_metadata(trans);
        trans->block_rsv = NULL;

        btrfs_create_pending_block_groups(trans);

        btrfs_trans_release_chunk_metadata(trans);

        if (lock && READ_ONCE(cur_trans->state) == TRANS_STATE_BLOCKED) {
                if (throttle)
                        return btrfs_commit_transaction(trans);
                else
                        wake_up_process(info->transaction_kthread);
        }

        if (trans->type & __TRANS_FREEZABLE)
                sb_end_intwrite(info->sb);

        WARN_ON(cur_trans != info->running_transaction);
        WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
        atomic_dec(&cur_trans->num_writers);
        extwriter_counter_dec(cur_trans, trans->type);

        cond_wake_up(&cur_trans->writer_wait);
        btrfs_put_transaction(cur_trans);

        if (current->journal_info == trans)
                current->journal_info = NULL;

        if (throttle)
                btrfs_run_delayed_iputs(info);

        if (trans->aborted ||
            test_bit(BTRFS_FS_STATE_ERROR, &info->fs_state)) {
                wake_up_process(info->transaction_kthread);
                err = -EIO;
        }

        kmem_cache_free(btrfs_trans_handle_cachep, trans);
        return err;
}

int btrfs_end_transaction(struct btrfs_trans_handle *trans)
{
        return __btrfs_end_transaction(trans, 0);
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans)
{
        return __btrfs_end_transaction(trans, 1);
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are sent to disk but does not wait on them
 */
int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
                               struct extent_io_tree *dirty_pages, int mark)
{
        int err = 0;
        int werr = 0;
        struct address_space *mapping = fs_info->btree_inode->i_mapping;
        struct extent_state *cached_state = NULL;
        u64 start = 0;
        u64 end;

        atomic_inc(&BTRFS_I(fs_info->btree_inode)->sync_writers);
        while (!find_first_extent_bit(dirty_pages, start, &start, &end,
                                      mark, &cached_state)) {
                bool wait_writeback = false;

                err = convert_extent_bit(dirty_pages, start, end,
                                         EXTENT_NEED_WAIT,
                                         mark, &cached_state);
                /*
                 * convert_extent_bit can return -ENOMEM, which is most of the
                 * time a temporary error. So when it happens, ignore the error
                 * and wait for writeback of this range to finish - because we
                 * failed to set the bit EXTENT_NEED_WAIT for the range, a call
                 * to __btrfs_wait_marked_extents() would not know that
                 * writeback for this range started and therefore wouldn't
                 * wait for it to finish - we don't want to commit a
                 * superblock that points to btree nodes/leafs for which
                 * writeback hasn't finished yet (and without errors).
                 * We cleanup any entries left in the io tree when committing
                 * the transaction (through extent_io_tree_release()).
                 */
                if (err == -ENOMEM) {
                        err = 0;
                        wait_writeback = true;
                }
                if (!err)
                        err = filemap_fdatawrite_range(mapping, start, end);
                if (err)
                        werr = err;
                else if (wait_writeback)
                        werr = filemap_fdatawait_range(mapping, start, end);
                free_extent_state(cached_state);
                cached_state = NULL;
                cond_resched();
                start = end + 1;
        }
        atomic_dec(&BTRFS_I(fs_info->btree_inode)->sync_writers);
        return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit.  We wait
 * on all the pages and clear them from the dirty pages state tree
 */
static int __btrfs_wait_marked_extents(struct btrfs_fs_info *fs_info,
                                       struct extent_io_tree *dirty_pages)
{
        int err = 0;
        int werr = 0;
        struct address_space *mapping = fs_info->btree_inode->i_mapping;
        struct extent_state *cached_state = NULL;
        u64 start = 0;
        u64 end;

        while (!find_first_extent_bit(dirty_pages, start, &start, &end,
                                      EXTENT_NEED_WAIT, &cached_state)) {
                /*
                 * Ignore -ENOMEM errors returned by clear_extent_bit().
                 * When committing the transaction, we'll remove any entries
                 * left in the io tree. For a log commit, we don't remove them
                 * after committing the log because the tree can be accessed
                 * concurrently - we do it only at transaction commit time when
                 * it's safe to do it (through extent_io_tree_release()).
                 */
                err = clear_extent_bit(dirty_pages, start, end,
                                       EXTENT_NEED_WAIT, 0, 0, &cached_state);
                if (err == -ENOMEM)
                        err = 0;
                if (!err)
                        err = filemap_fdatawait_range(mapping, start, end);
                if (err)
                        werr = err;
                free_extent_state(cached_state);
                cached_state = NULL;
                cond_resched();
                start = end + 1;
        }
        if (err)
                werr = err;
        return werr;
}

int btrfs_wait_extents(struct btrfs_fs_info *fs_info,
                       struct extent_io_tree *dirty_pages)
{
        bool errors = false;
        int err;

        err = __btrfs_wait_marked_extents(fs_info, dirty_pages);
        if (test_and_clear_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags))
                errors = true;

        if (errors && !err)
                err = -EIO;
        return err;
}

int btrfs_wait_tree_log_extents(struct btrfs_root *log_root, int mark)
{
        struct btrfs_fs_info *fs_info = log_root->fs_info;
        struct extent_io_tree *dirty_pages = &log_root->dirty_log_pages;
        bool errors = false;
        int err;

        ASSERT(log_root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);

        err = __btrfs_wait_marked_extents(fs_info, dirty_pages);
        if ((mark & EXTENT_DIRTY) &&
            test_and_clear_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags))
                errors = true;

        if ((mark & EXTENT_NEW) &&
            test_and_clear_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags))
                errors = true;

        if (errors && !err)
                err = -EIO;
        return err;
}

/*
 * When btree blocks are allocated the corresponding extents are marked dirty.
 * This function ensures such extents are persisted on disk for transaction or
 * log commit.
 *
 * @trans: transaction whose dirty pages we'd like to write
 */
static int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans)
{
        int ret;
        int ret2;
        struct extent_io_tree *dirty_pages = &trans->transaction->dirty_pages;
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct blk_plug plug;

        blk_start_plug(&plug);
        ret = btrfs_write_marked_extents(fs_info, dirty_pages, EXTENT_DIRTY);
        blk_finish_plug(&plug);
        ret2 = btrfs_wait_extents(fs_info, dirty_pages);

        extent_io_tree_release(&trans->transaction->dirty_pages);

        if (ret)
                return ret;
        else if (ret2)
                return ret2;
        else
                return 0;
}

/*
 * this is used to update the root pointer in the tree of tree roots.
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops and repeats and makes sure the cowonly root didn't
 * change while the root pointer was being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root)
{
        int ret;
        u64 old_root_bytenr;
        u64 old_root_used;
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_root *tree_root = fs_info->tree_root;

        old_root_used = btrfs_root_used(&root->root_item);

        while (1) {
                old_root_bytenr = btrfs_root_bytenr(&root->root_item);
                if (old_root_bytenr == root->node->start &&
                    old_root_used == btrfs_root_used(&root->root_item))
                        break;

                btrfs_set_root_node(&root->root_item, root->node);
                ret = btrfs_update_root(trans, tree_root,
                                        &root->root_key,
                                        &root->root_item);
                if (ret)
                        return ret;

                old_root_used = btrfs_root_used(&root->root_item);
        }

        return 0;
}

/*
 * update all the cowonly tree roots on disk
 *
 * The error handling in this function may not be obvious. Any of the
 * failures will cause the file system to go offline. We still need
 * to clean up the delayed refs.
 */
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct list_head *dirty_bgs = &trans->transaction->dirty_bgs;
        struct list_head *io_bgs = &trans->transaction->io_bgs;
        struct list_head *next;
        struct extent_buffer *eb;
        int ret;

        eb = btrfs_lock_root_node(fs_info->tree_root);
        ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
                              0, &eb);
        btrfs_tree_unlock(eb);
        free_extent_buffer(eb);

        if (ret)
                return ret;

        ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
        if (ret)
                return ret;

        ret = btrfs_run_dev_stats(trans);
        if (ret)
                return ret;
        ret = btrfs_run_dev_replace(trans);
        if (ret)
                return ret;
        ret = btrfs_run_qgroups(trans);
        if (ret)
                return ret;

        ret = btrfs_setup_space_cache(trans);
        if (ret)
                return ret;

        /* run_qgroups might have added some more refs */
        ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
        if (ret)
                return ret;
again:
        while (!list_empty(&fs_info->dirty_cowonly_roots)) {
                struct btrfs_root *root;
                next = fs_info->dirty_cowonly_roots.next;
                list_del_init(next);
                root = list_entry(next, struct btrfs_root, dirty_list);
                clear_bit(BTRFS_ROOT_DIRTY, &root->state);

                if (root != fs_info->extent_root)
                        list_add_tail(&root->dirty_list,
                                      &trans->transaction->switch_commits);
                ret = update_cowonly_root(trans, root);
                if (ret)
                        return ret;
                ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
                if (ret)
                        return ret;
        }

        while (!list_empty(dirty_bgs) || !list_empty(io_bgs)) {
                ret = btrfs_write_dirty_block_groups(trans);
                if (ret)
                        return ret;
                ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
                if (ret)
                        return ret;
        }

        if (!list_empty(&fs_info->dirty_cowonly_roots))
                goto again;

        list_add_tail(&fs_info->extent_root->dirty_list,
                      &trans->transaction->switch_commits);

        /* Update dev-replace pointer once everything is committed */
        fs_info->dev_replace.committed_cursor_left =
                fs_info->dev_replace.cursor_left_last_write_of_item;

        return 0;
}

/*
 * dead roots are old snapshots that need to be deleted.  This adds the
 * given root to the list of dead roots that need to be deleted.
 */
void btrfs_add_dead_root(struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;

        spin_lock(&fs_info->trans_lock);
        if (list_empty(&root->root_list))
                list_add_tail(&root->root_list, &fs_info->dead_roots);
        spin_unlock(&fs_info->trans_lock);
}
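
/*
 * The cleaner kthread later pops roots off fs_info->dead_roots (see
 * btrfs_clean_one_deleted_snapshot()) and actually deletes them via
 * btrfs_drop_snapshot().
 */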

/*
 * Update each fs root recorded as dirty in this transaction: write its root
 * item into the tree of tree roots, free its log tree and update its reloc
 * root, if any.
 */
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_root *gang[8];
        int i;
        int ret;
        int err = 0;

        spin_lock(&fs_info->fs_roots_radix_lock);
        while (1) {
                ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
                                                 (void **)gang, 0,
                                                 ARRAY_SIZE(gang),
                                                 BTRFS_ROOT_TRANS_TAG);
                if (ret == 0)
                        break;
                for (i = 0; i < ret; i++) {
                        struct btrfs_root *root = gang[i];
                        radix_tree_tag_clear(&fs_info->fs_roots_radix,
                                        (unsigned long)root->root_key.objectid,
                                        BTRFS_ROOT_TRANS_TAG);
                        spin_unlock(&fs_info->fs_roots_radix_lock);

                        btrfs_free_log(trans, root);
                        btrfs_update_reloc_root(trans, root);

                        btrfs_save_ino_cache(root, trans);

                        /* see comments in should_cow_block() */
                        clear_bit(BTRFS_ROOT_FORCE_COW, &root->state);
                        smp_mb__after_atomic();

                        if (root->commit_root != root->node) {
                                list_add_tail(&root->dirty_list,
                                        &trans->transaction->switch_commits);
                                btrfs_set_root_node(&root->root_item,
                                                    root->node);
                        }

                        err = btrfs_update_root(trans, fs_info->tree_root,
                                                &root->root_key,
                                                &root->root_item);
                        spin_lock(&fs_info->fs_roots_radix_lock);
                        if (err)
                                break;
                        btrfs_qgroup_free_meta_all_pertrans(root);
                }
        }
        spin_unlock(&fs_info->fs_roots_radix_lock);
        return err;
}

/*
 * defrag a given btree.
 * Every leaf in the btree is read and defragged.
 */
int btrfs_defrag_root(struct btrfs_root *root)
{
        struct btrfs_fs_info *info = root->fs_info;
        struct btrfs_trans_handle *trans;
        int ret;

        if (test_and_set_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state))
                return 0;

        while (1) {
                trans = btrfs_start_transaction(root, 0);
                if (IS_ERR(trans))
                        return PTR_ERR(trans);

                ret = btrfs_defrag_leaves(trans, root);

                btrfs_end_transaction(trans);
                btrfs_btree_balance_dirty(info);
                cond_resched();

                if (btrfs_fs_closing(info) || ret != -EAGAIN)
                        break;

                if (btrfs_defrag_cancelled(info)) {
                        btrfs_debug(info, "defrag_root cancelled");
                        ret = -EAGAIN;
                        break;
                }
        }
        clear_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state);
        return ret;
}

/*
 * Do the special, snapshot-related qgroup accounting work.
 *
 * Does all the needed qgroup inheritance and "dirty hacks", like switching
 * the commit roots and writing all btree blocks to disk inside one
 * transaction, so that qgroup accounting works correctly.
 */
static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *src,
                                   struct btrfs_root *parent,
                                   struct btrfs_qgroup_inherit *inherit,
                                   u64 dst_objectid)
{
        struct btrfs_fs_info *fs_info = src->fs_info;
        int ret;

        /*
         * Save some performance in the case that qgroups are not
         * enabled. If this check races with the ioctl, rescan will
         * kick in anyway.
         */
        if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
                return 0;

        /*
         * Ensure the dirty @src will be committed.  Otherwise, after the
         * upcoming commit_fs_roots() and switch_commit_roots(), any dirty
         * but not recorded root would never be updated again, leaving an
         * outdated root item.
         */
        record_root_in_trans(trans, src, 1);

        /*
         * We are going to commit the transaction; see the comment in
         * btrfs_commit_transaction() for why we lock tree_log_mutex.
         */
        mutex_lock(&fs_info->tree_log_mutex);

        ret = commit_fs_roots(trans);
        if (ret)
                goto out;
        ret = btrfs_qgroup_account_extents(trans);
        if (ret < 0)
                goto out;

        /*
         * Now the qgroup numbers are all up to date; inherit them into the
         * new qgroup.
         */
        ret = btrfs_qgroup_inherit(trans, src->root_key.objectid, dst_objectid,
                                   inherit);
        if (ret < 0)
                goto out;

        /*
         * Now we do a simplified commit transaction, which will:
         * 1) commit all subvolume and extent trees, to ensure that they
         *    have a valid commit_root for the later insert_dir_item() to
         *    account against
         * 2) write all btree blocks onto disk, to make sure later btree
         *    modifications get COWed; otherwise a stale commit_root could
         *    lead to wrong qgroup numbers
         * In this simplified commit, we don't really care about other trees
         * like chunk and root tree, as they won't affect qgroup.
         * And we don't write super to avoid half committed status.
         */
        ret = commit_cowonly_roots(trans);
        if (ret)
                goto out;
        switch_commit_roots(trans->transaction);
        ret = btrfs_write_and_wait_transaction(trans);
        if (ret)
                btrfs_handle_fs_error(fs_info, ret,
                        "Error while writing out transaction for qgroup");

out:
        mutex_unlock(&fs_info->tree_log_mutex);

        /*
         * Force the parent root to be updated, as we already recorded it
         * before, so its last_trans == cur_transid.  Otherwise it won't be
         * committed again onto disk after the later insert_dir_item().
         */
1377         if (!ret)
1378                 record_root_in_trans(trans, parent, 1);
1379         return ret;
1380 }
1381
1382 /*
1383  * new snapshots need to be created at a very specific time in the
1384  * transaction commit.  This does the actual creation.
1385  *
1386  * Note:
1387  * If an error occurs that may affect the commit of the current transaction,
1388  * return the error number. If the error only affects the creation of the
1389  * pending snapshots, return 0.
1390  */
1391 static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
1392                                    struct btrfs_pending_snapshot *pending)
1393 {
1394
1395         struct btrfs_fs_info *fs_info = trans->fs_info;
1396         struct btrfs_key key;
1397         struct btrfs_root_item *new_root_item;
1398         struct btrfs_root *tree_root = fs_info->tree_root;
1399         struct btrfs_root *root = pending->root;
1400         struct btrfs_root *parent_root;
1401         struct btrfs_block_rsv *rsv;
1402         struct inode *parent_inode;
1403         struct btrfs_path *path;
1404         struct btrfs_dir_item *dir_item;
1405         struct dentry *dentry;
1406         struct extent_buffer *tmp;
1407         struct extent_buffer *old;
1408         struct timespec64 cur_time;
1409         int ret = 0;
1410         u64 to_reserve = 0;
1411         u64 index = 0;
1412         u64 objectid;
1413         u64 root_flags;
1414         uuid_le new_uuid;
1415
1416         ASSERT(pending->path);
1417         path = pending->path;
1418
1419         ASSERT(pending->root_item);
1420         new_root_item = pending->root_item;
1421
1422         pending->error = btrfs_find_free_objectid(tree_root, &objectid);
1423         if (pending->error)
1424                 goto no_free_objectid;
1425
1426         /*
1427          * Make the qgroup code skip the new snapshot's qgroupid, as it will
1428          * be accounted for by the later btrfs_qgroup_inherit().
1429          */
1430         btrfs_set_skip_qgroup(trans, objectid);
1431
1432         btrfs_reloc_pre_snapshot(pending, &to_reserve);
1433
1434         if (to_reserve > 0) {
1435                 pending->error = btrfs_block_rsv_add(root,
1436                                                      &pending->block_rsv,
1437                                                      to_reserve,
1438                                                      BTRFS_RESERVE_NO_FLUSH);
1439                 if (pending->error)
1440                         goto clear_skip_qgroup;
1441         }
1442
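             /*
              * Location in the root tree for the new snapshot's root item;
              * the offset is filled in with the transaction id just before
              * the root item is inserted below.
              */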
1443         key.objectid = objectid;
1444         key.offset = (u64)-1;
1445         key.type = BTRFS_ROOT_ITEM_KEY;
1446
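             /*
              * Switch to the block reservation prepared for the snapshot so
              * the allocations below are charged against it; the original
              * rsv is restored at the dir_item_existed label.
              */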
1447         rsv = trans->block_rsv;
1448         trans->block_rsv = &pending->block_rsv;
1449         trans->bytes_reserved = trans->block_rsv->reserved;
1450         trace_btrfs_space_reservation(fs_info, "transaction",
1451                                       trans->transid,
1452                                       trans->bytes_reserved, 1);
1453         dentry = pending->dentry;
1454         parent_inode = pending->dir;
1455         parent_root = BTRFS_I(parent_inode)->root;
1456         record_root_in_trans(trans, parent_root, 0);
1457
1458         cur_time = current_time(parent_inode);
1459
1460         /*
1461          * insert the directory item
1462          */
1463         ret = btrfs_set_inode_index(BTRFS_I(parent_inode), &index);
1464         BUG_ON(ret); /* -ENOMEM */
1465
1466         /* check if there is a file/dir which has the same name. */
1467         dir_item = btrfs_lookup_dir_item(NULL, parent_root, path,
1468                                          btrfs_ino(BTRFS_I(parent_inode)),
1469                                          dentry->d_name.name,
1470                                          dentry->d_name.len, 0);
1471         if (dir_item != NULL && !IS_ERR(dir_item)) {
1472                 pending->error = -EEXIST;
1473                 goto dir_item_existed;
1474         } else if (IS_ERR(dir_item)) {
1475                 ret = PTR_ERR(dir_item);
1476                 btrfs_abort_transaction(trans, ret);
1477                 goto fail;
1478         }
1479         btrfs_release_path(path);
1480
1481         /*
1482          * Pull in the delayed directory update
1483          * and the delayed inode item;
1484          * otherwise we'd corrupt the FS during
1485          * the snapshot.
1486          */
1487         ret = btrfs_run_delayed_items(trans);
1488         if (ret) {      /* Transaction aborted */
1489                 btrfs_abort_transaction(trans, ret);
1490                 goto fail;
1491         }
1492
1493         record_root_in_trans(trans, root, 0);
1494         btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
1495         memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
1496         btrfs_check_and_init_root_item(new_root_item);
1497
1498         root_flags = btrfs_root_flags(new_root_item);
1499         if (pending->readonly)
1500                 root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
1501         else
1502                 root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
1503         btrfs_set_root_flags(new_root_item, root_flags);
1504
1505         btrfs_set_root_generation_v2(new_root_item,
1506                         trans->transid);
1507         uuid_le_gen(&new_uuid);
1508         memcpy(new_root_item->uuid, new_uuid.b, BTRFS_UUID_SIZE);
1509         memcpy(new_root_item->parent_uuid, root->root_item.uuid,
1510                         BTRFS_UUID_SIZE);
1511         if (!(root_flags & BTRFS_ROOT_SUBVOL_RDONLY)) {
1512                 memset(new_root_item->received_uuid, 0,
1513                        sizeof(new_root_item->received_uuid));
1514                 memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
1515                 memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
1516                 btrfs_set_root_stransid(new_root_item, 0);
1517                 btrfs_set_root_rtransid(new_root_item, 0);
1518         }
1519         btrfs_set_stack_timespec_sec(&new_root_item->otime, cur_time.tv_sec);
1520         btrfs_set_stack_timespec_nsec(&new_root_item->otime, cur_time.tv_nsec);
1521         btrfs_set_root_otransid(new_root_item, trans->transid);
1522
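             /*
              * Lock and COW the source root's node, then copy it into a new
              * block that becomes the root of the snapshot.
              */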
1523         old = btrfs_lock_root_node(root);
1524         ret = btrfs_cow_block(trans, root, old, NULL, 0, &old);
1525         if (ret) {
1526                 btrfs_tree_unlock(old);
1527                 free_extent_buffer(old);
1528                 btrfs_abort_transaction(trans, ret);
1529                 goto fail;
1530         }
1531
1532         btrfs_set_lock_blocking_write(old);
1533
1534         ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
1535         /* clean up in any case */
1536         btrfs_tree_unlock(old);
1537         free_extent_buffer(old);
1538         if (ret) {
1539                 btrfs_abort_transaction(trans, ret);
1540                 goto fail;
1541         }
1542         /* see comments in should_cow_block() */
1543         set_bit(BTRFS_ROOT_FORCE_COW, &root->state);
1544         smp_wmb();
1545
1546         btrfs_set_root_node(new_root_item, tmp);
1547         /* record when the snapshot was created in key.offset */
1548         key.offset = trans->transid;
1549         ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
1550         btrfs_tree_unlock(tmp);
1551         free_extent_buffer(tmp);
1552         if (ret) {
1553                 btrfs_abort_transaction(trans, ret);
1554                 goto fail;
1555         }
1556
1557         /*
1558          * insert root back/forward references
1559          */
1560         ret = btrfs_add_root_ref(trans, objectid,
1561                                  parent_root->root_key.objectid,
1562                                  btrfs_ino(BTRFS_I(parent_inode)), index,
1563                                  dentry->d_name.name, dentry->d_name.len);
1564         if (ret) {
1565                 btrfs_abort_transaction(trans, ret);
1566                 goto fail;
1567         }
1568
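             /*
              * Read back the root we just inserted; an offset of (u64)-1
              * finds the most recent root item for this objectid.
              */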
1569         key.offset = (u64)-1;
1570         pending->snap = btrfs_read_fs_root_no_name(fs_info, &key);
1571         if (IS_ERR(pending->snap)) {
1572                 ret = PTR_ERR(pending->snap);
1573                 btrfs_abort_transaction(trans, ret);
1574                 goto fail;
1575         }
1576
1577         ret = btrfs_reloc_post_snapshot(trans, pending);
1578         if (ret) {
1579                 btrfs_abort_transaction(trans, ret);
1580                 goto fail;
1581         }
1582
1583         ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
1584         if (ret) {
1585                 btrfs_abort_transaction(trans, ret);
1586                 goto fail;
1587         }
1588
1589         /*
1590          * Do special qgroup accounting for the snapshot, as we use a qgroup
1591          * hack to make snapshot creation fast.
1592          * To cooperate with that hack we do the hack again here; otherwise
1593          * the snapshot would be greatly slowed down by a subtree qgroup rescan.
1594          */
1595         ret = qgroup_account_snapshot(trans, root, parent_root,
1596                                       pending->inherit, objectid);
1597         if (ret < 0)
1598                 goto fail;
1599
1600         ret = btrfs_insert_dir_item(trans, dentry->d_name.name,
1601                                     dentry->d_name.len, BTRFS_I(parent_inode),
1602                                     &key, BTRFS_FT_DIR, index);
1603         /* We checked for an existing name at the beginning, so this is impossible. */
1604         BUG_ON(ret == -EEXIST || ret == -EOVERFLOW);
1605         if (ret) {
1606                 btrfs_abort_transaction(trans, ret);
1607                 goto fail;
1608         }
1609
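             /*
              * The name is counted twice in the parent directory's size:
              * once for the dir item and once for the dir index item.
              */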
1610         btrfs_i_size_write(BTRFS_I(parent_inode), parent_inode->i_size +
1611                                          dentry->d_name.len * 2);
1612         parent_inode->i_mtime = parent_inode->i_ctime =
1613                 current_time(parent_inode);
1614         ret = btrfs_update_inode_fallback(trans, parent_root, parent_inode);
1615         if (ret) {
1616                 btrfs_abort_transaction(trans, ret);
1617                 goto fail;
1618         }
1619         ret = btrfs_uuid_tree_add(trans, new_uuid.b, BTRFS_UUID_KEY_SUBVOL,
1620                                   objectid);
1621         if (ret) {
1622                 btrfs_abort_transaction(trans, ret);
1623                 goto fail;
1624         }
1625         if (!btrfs_is_empty_uuid(new_root_item->received_uuid)) {
1626                 ret = btrfs_uuid_tree_add(trans, new_root_item->received_uuid,
1627                                           BTRFS_UUID_KEY_RECEIVED_SUBVOL,
1628                                           objectid);
1629                 if (ret && ret != -EEXIST) {
1630                         btrfs_abort_transaction(trans, ret);
1631                         goto fail;
1632                 }
1633         }
1634
1635         ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
1636         if (ret) {
1637                 btrfs_abort_transaction(trans, ret);
1638                 goto fail;
1639         }
1640
1641 fail:
1642         pending->error = ret;
1643 dir_item_existed:
1644         trans->block_rsv = rsv;
1645         trans->bytes_reserved = 0;
1646 clear_skip_qgroup:
1647         btrfs_clear_skip_qgroup(trans);
1648 no_free_objectid:
1649         kfree(new_root_item);
1650         pending->root_item = NULL;
1651         btrfs_free_path(path);
1652         pending->path = NULL;
1653
1654         return ret;
1655 }
1656
1657 /*
1658  * create all the snapshots we've scheduled for creation
1659  */
1660 static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans)
1661 {
1662         struct btrfs_pending_snapshot *pending, *next;
1663         struct list_head *head = &trans->transaction->pending_snapshots;
1664         int ret = 0;
1665
1666         list_for_each_entry_safe(pending, next, head, list) {
1667                 list_del(&pending->list);
1668                 ret = create_pending_snapshot(trans, pending);
1669                 if (ret)
1670                         break;
1671         }
1672         return ret;
1673 }
1674
1675 static void update_super_roots(struct btrfs_fs_info *fs_info)
1676 {
1677         struct btrfs_root_item *root_item;
1678         struct btrfs_super_block *super;
1679
1680         super = fs_info->super_copy;
1681
1682         root_item = &fs_info->chunk_root->root_item;
1683         super->chunk_root = root_item->bytenr;
1684         super->chunk_root_generation = root_item->generation;
1685         super->chunk_root_level = root_item->level;
1686
1687         root_item = &fs_info->tree_root->root_item;
1688         super->root = root_item->bytenr;
1689         super->generation = root_item->generation;
1690         super->root_level = root_item->level;
1691         if (btrfs_test_opt(fs_info, SPACE_CACHE))
1692                 super->cache_generation = root_item->generation;
1693         if (test_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags))
1694                 super->uuid_tree_generation = root_item->generation;
1695 }
1696
1697 int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
1698 {
1699         struct btrfs_transaction *trans;
1700         int ret = 0;
1701
1702         spin_lock(&info->trans_lock);
1703         trans = info->running_transaction;
1704         if (trans)
1705                 ret = (trans->state >= TRANS_STATE_COMMIT_START);
1706         spin_unlock(&info->trans_lock);
1707         return ret;
1708 }
1709
1710 int btrfs_transaction_blocked(struct btrfs_fs_info *info)
1711 {
1712         struct btrfs_transaction *trans;
1713         int ret = 0;
1714
1715         spin_lock(&info->trans_lock);
1716         trans = info->running_transaction;
1717         if (trans)
1718                 ret = is_transaction_blocked(trans);
1719         spin_unlock(&info->trans_lock);
1720         return ret;
1721 }
1722
1723 /*
1724  * wait for the current transaction commit to start and block subsequent
1725  * transaction joins
1726  */
1727 static void wait_current_trans_commit_start(struct btrfs_fs_info *fs_info,
1728                                             struct btrfs_transaction *trans)
1729 {
1730         wait_event(fs_info->transaction_blocked_wait,
1731                    trans->state >= TRANS_STATE_COMMIT_START || trans->aborted);
1732 }
1733
1734 /*
1735  * wait for the current transaction to start and then become unblocked.
1736  * caller holds ref.
1737  */
1738 static void wait_current_trans_commit_start_and_unblock(
1739                                         struct btrfs_fs_info *fs_info,
1740                                         struct btrfs_transaction *trans)
1741 {
1742         wait_event(fs_info->transaction_wait,
1743                    trans->state >= TRANS_STATE_UNBLOCKED || trans->aborted);
1744 }
1745
1746 /*
1747  * commit transactions asynchronously. once btrfs_commit_transaction_async
1748  * returns, any subsequent transaction will not be allowed to join.
1749  */
1750 struct btrfs_async_commit {
1751         struct btrfs_trans_handle *newtrans;
1752         struct work_struct work;
1753 };
1754
1755 static void do_async_commit(struct work_struct *work)
1756 {
1757         struct btrfs_async_commit *ac =
1758                 container_of(work, struct btrfs_async_commit, work);
1759
1760         /*
1761          * We've got freeze protection passed with the transaction.
1762          * Tell lockdep about it.
1763          */
1764         if (ac->newtrans->type & __TRANS_FREEZABLE)
1765                 __sb_writers_acquired(ac->newtrans->fs_info->sb, SB_FREEZE_FS);
1766
1767         current->journal_info = ac->newtrans;
1768
1769         btrfs_commit_transaction(ac->newtrans);
1770         kfree(ac);
1771 }
1772
1773 int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
1774                                    int wait_for_unblock)
1775 {
1776         struct btrfs_fs_info *fs_info = trans->fs_info;
1777         struct btrfs_async_commit *ac;
1778         struct btrfs_transaction *cur_trans;
1779
1780         ac = kmalloc(sizeof(*ac), GFP_NOFS);
1781         if (!ac)
1782                 return -ENOMEM;
1783
1784         INIT_WORK(&ac->work, do_async_commit);
1785         ac->newtrans = btrfs_join_transaction(trans->root);
1786         if (IS_ERR(ac->newtrans)) {
1787                 int err = PTR_ERR(ac->newtrans);
1788                 kfree(ac);
1789                 return err;
1790         }
1791
1792         /* take transaction reference */
1793         cur_trans = trans->transaction;
1794         refcount_inc(&cur_trans->use_count);
1795
1796         btrfs_end_transaction(trans);
1797
1798         /*
1799          * Tell lockdep we've released the freeze rwsem, since the
1800          * async commit thread will be the one to unlock it.
1801          */
1802         if (ac->newtrans->type & __TRANS_FREEZABLE)
1803                 __sb_writers_release(fs_info->sb, SB_FREEZE_FS);
1804
1805         schedule_work(&ac->work);
1806
1807         /* wait for transaction to start and unblock */
1808         if (wait_for_unblock)
1809                 wait_current_trans_commit_start_and_unblock(fs_info, cur_trans);
1810         else
1811                 wait_current_trans_commit_start(fs_info, cur_trans);
1812
1813         if (current->journal_info == trans)
1814                 current->journal_info = NULL;
1815
1816         btrfs_put_transaction(cur_trans);
1817         return 0;
1818 }
1819
1820
1821 static void cleanup_transaction(struct btrfs_trans_handle *trans, int err)
1822 {
1823         struct btrfs_fs_info *fs_info = trans->fs_info;
1824         struct btrfs_transaction *cur_trans = trans->transaction;
1825
1826         WARN_ON(refcount_read(&trans->use_count) > 1);
1827
1828         btrfs_abort_transaction(trans, err);
1829
1830         spin_lock(&fs_info->trans_lock);
1831
1832         /*
1833          * If the transaction is removed from the list, it means this
1834          * transaction has been committed successfully, so it is impossible
1835          * to call the cleanup function.
1836          */
1837         BUG_ON(list_empty(&cur_trans->list));
1838
1839         list_del_init(&cur_trans->list);
1840         if (cur_trans == fs_info->running_transaction) {
1841                 cur_trans->state = TRANS_STATE_COMMIT_DOING;
1842                 spin_unlock(&fs_info->trans_lock);
1843                 wait_event(cur_trans->writer_wait,
1844                            atomic_read(&cur_trans->num_writers) == 1);
1845
1846                 spin_lock(&fs_info->trans_lock);
1847         }
1848         spin_unlock(&fs_info->trans_lock);
1849
1850         btrfs_cleanup_one_transaction(trans->transaction, fs_info);
1851
1852         spin_lock(&fs_info->trans_lock);
1853         if (cur_trans == fs_info->running_transaction)
1854                 fs_info->running_transaction = NULL;
1855         spin_unlock(&fs_info->trans_lock);
1856
1857         if (trans->type & __TRANS_FREEZABLE)
1858                 sb_end_intwrite(fs_info->sb);
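             /*
              * Put the transaction twice: once for the reference it held
              * while on fs_info->trans_list (removed above) and once for
              * our own reference.
              */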
1859         btrfs_put_transaction(cur_trans);
1860         btrfs_put_transaction(cur_trans);
1861
1862         trace_btrfs_transaction_commit(trans->root);
1863
1864         if (current->journal_info == trans)
1865                 current->journal_info = NULL;
1866         btrfs_scrub_cancel(fs_info);
1867
1868         kmem_cache_free(btrfs_trans_handle_cachep, trans);
1869 }
1870
1871 /*
1872  * Release reserved delayed ref space of all pending block groups of the
1873  * transaction and remove them from the list
1874  */
1875 static void btrfs_cleanup_pending_block_groups(struct btrfs_trans_handle *trans)
1876 {
1877         struct btrfs_fs_info *fs_info = trans->fs_info;
1878         struct btrfs_block_group_cache *block_group, *tmp;
1879
1880         list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
1881                 btrfs_delayed_refs_rsv_release(fs_info, 1);
1882                 list_del_init(&block_group->bg_list);
1883         }
1884 }
1885
1886 static inline int btrfs_start_delalloc_flush(struct btrfs_trans_handle *trans)
1887 {
1888         struct btrfs_fs_info *fs_info = trans->fs_info;
1889
1890         /*
1891          * We use writeback_inodes_sb here because if we used
1892          * btrfs_start_delalloc_roots we would deadlock with fs freeze.
1893          * We currently hold the fs freeze lock; if we do an async flush
1894          * we'll do btrfs_join_transaction() and deadlock because we need to
1895          * wait for the fs freeze lock.  Using the direct flushing we benefit
1896          * from already being in a transaction and our join_transaction doesn't
1897          * have to re-take the fs freeze lock.
1898          */
1899         if (btrfs_test_opt(fs_info, FLUSHONCOMMIT)) {
1900                 writeback_inodes_sb(fs_info->sb, WB_REASON_SYNC);
1901         } else {
1902                 struct btrfs_pending_snapshot *pending;
1903                 struct list_head *head = &trans->transaction->pending_snapshots;
1904
1905                 /*
1906                  * Flush delalloc for any root that is going to be snapshotted.
1907                  * This is done to avoid a corrupted version of files, in the
1908                  * snapshots, that had both buffered and direct IO writes (even
1909                  * if they were done sequentially) due to an unordered update of
1910                  * the inode's size on disk.
1911                  */
1912                 list_for_each_entry(pending, head, list) {
1913                         int ret;
1914
1915                         ret = btrfs_start_delalloc_snapshot(pending->root);
1916                         if (ret)
1917                                 return ret;
1918                 }
1919         }
1920         return 0;
1921 }
1922
1923 static inline void btrfs_wait_delalloc_flush(struct btrfs_trans_handle *trans)
1924 {
1925         struct btrfs_fs_info *fs_info = trans->fs_info;
1926
1927         if (btrfs_test_opt(fs_info, FLUSHONCOMMIT)) {
1928                 btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
1929         } else {
1930                 struct btrfs_pending_snapshot *pending;
1931                 struct list_head *head = &trans->transaction->pending_snapshots;
1932
1933                 /*
1934                  * Wait for any delalloc that we started previously for the roots
1935                  * that are going to be snapshotted. This is to avoid a corrupted
1936                  * version of files in the snapshots that had both buffered and
1937                  * direct IO writes (even if they were done sequentially).
1938                  */
1939                 list_for_each_entry(pending, head, list)
1940                         btrfs_wait_ordered_extents(pending->root,
1941                                                    U64_MAX, 0, U64_MAX);
1942         }
1943 }
1944
1945 int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
1946 {
1947         struct btrfs_fs_info *fs_info = trans->fs_info;
1948         struct btrfs_transaction *cur_trans = trans->transaction;
1949         struct btrfs_transaction *prev_trans = NULL;
1950         int ret;
1951
1952         /* Stop the commit early if ->aborted is set */
1953         if (unlikely(READ_ONCE(cur_trans->aborted))) {
1954                 ret = cur_trans->aborted;
1955                 btrfs_end_transaction(trans);
1956                 return ret;
1957         }
1958
1959         btrfs_trans_release_metadata(trans);
1960         trans->block_rsv = NULL;
1961
1962         /* make a pass through all the delayed refs we have so far;
1963          * any running procs may add more while we are here
1964          */
1965         ret = btrfs_run_delayed_refs(trans, 0);
1966         if (ret) {
1967                 btrfs_end_transaction(trans);
1968                 return ret;
1969         }
1970
1971         cur_trans = trans->transaction;
1972
1973         /*
1974          * set the flushing flag so procs in this transaction have to
1975          * start sending their work down.
1976          */
1977         cur_trans->delayed_refs.flushing = 1;
1978         smp_wmb();
1979
1980         btrfs_create_pending_block_groups(trans);
1981
1982         ret = btrfs_run_delayed_refs(trans, 0);
1983         if (ret) {
1984                 btrfs_end_transaction(trans);
1985                 return ret;
1986         }
1987
1988         if (!test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &cur_trans->flags)) {
1989                 int run_it = 0;
1990
1991                 /* this mutex is also taken before trying to set
1992                  * block groups readonly.  We need to make sure
1993                  * that nobody has set a block group readonly
1994                  * after extents from that block group have been
1995                  * allocated for cache files.  btrfs_set_block_group_ro
1996                  * will wait for the transaction to commit if it
1997                  * finds BTRFS_TRANS_DIRTY_BG_RUN set.
1998                  *
1999                  * The BTRFS_TRANS_DIRTY_BG_RUN flag is also used to make sure
2000                  * only one process starts all the block group IO.  It wouldn't
2001                  * hurt to have more than one go through, but there's no
2002                  * real advantage to it either.
2003                  */
2004                 mutex_lock(&fs_info->ro_block_group_mutex);
2005                 if (!test_and_set_bit(BTRFS_TRANS_DIRTY_BG_RUN,
2006                                       &cur_trans->flags))
2007                         run_it = 1;
2008                 mutex_unlock(&fs_info->ro_block_group_mutex);
2009
2010                 if (run_it) {
2011                         ret = btrfs_start_dirty_block_groups(trans);
2012                         if (ret) {
2013                                 btrfs_end_transaction(trans);
2014                                 return ret;
2015                         }
2016                 }
2017         }
2018
2019         spin_lock(&fs_info->trans_lock);
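             /*
              * If another task has already moved this transaction to
              * COMMIT_START or beyond, just end our handle and wait for
              * that commit to finish.
              */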
2020         if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
2021                 spin_unlock(&fs_info->trans_lock);
2022                 refcount_inc(&cur_trans->use_count);
2023                 ret = btrfs_end_transaction(trans);
2024
2025                 wait_for_commit(cur_trans);
2026
2027                 if (unlikely(cur_trans->aborted))
2028                         ret = cur_trans->aborted;
2029
2030                 btrfs_put_transaction(cur_trans);
2031
2032                 return ret;
2033         }
2034
2035         cur_trans->state = TRANS_STATE_COMMIT_START;
2036         wake_up(&fs_info->transaction_blocked_wait);
2037
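             /*
              * If there is a previous transaction still on the list, wait
              * for it to finish committing so transactions reach disk in
              * order.
              */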
2038         if (cur_trans->list.prev != &fs_info->trans_list) {
2039                 prev_trans = list_entry(cur_trans->list.prev,
2040                                         struct btrfs_transaction, list);
2041                 if (prev_trans->state != TRANS_STATE_COMPLETED) {
2042                         refcount_inc(&prev_trans->use_count);
2043                         spin_unlock(&fs_info->trans_lock);
2044
2045                         wait_for_commit(prev_trans);
2046                         ret = prev_trans->aborted;
2047
2048                         btrfs_put_transaction(prev_trans);
2049                         if (ret)
2050                                 goto cleanup_transaction;
2051                 } else {
2052                         spin_unlock(&fs_info->trans_lock);
2053                 }
2054         } else {
2055                 spin_unlock(&fs_info->trans_lock);
2056                 /*
2057                  * The previous transaction was aborted and was already removed
2058                  * from the list of transactions at fs_info->trans_list. So we
2059                  * abort to prevent writing a new superblock that reflects a
2060                  * corrupt state (pointing to trees with unwritten nodes/leaves).
2061                  */
2062                 if (test_bit(BTRFS_FS_STATE_TRANS_ABORTED, &fs_info->fs_state)) {
2063                         ret = -EROFS;
2064                         goto cleanup_transaction;
2065                 }
2066         }
2067
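             /*
              * We no longer count as an external writer, so the wait on
              * extwriter_counter_read() below can reach zero.
              */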
2068         extwriter_counter_dec(cur_trans, trans->type);
2069
2070         ret = btrfs_start_delalloc_flush(trans);
2071         if (ret)
2072                 goto cleanup_transaction;
2073
2074         ret = btrfs_run_delayed_items(trans);
2075         if (ret)
2076                 goto cleanup_transaction;
2077
2078         wait_event(cur_trans->writer_wait,
2079                    extwriter_counter_read(cur_trans) == 0);
2080
2081         /* some pending stuff might have been added after the previous flush. */
2082         ret = btrfs_run_delayed_items(trans);
2083         if (ret)
2084                 goto cleanup_transaction;
2085
2086         btrfs_wait_delalloc_flush(trans);
2087
2088         btrfs_scrub_pause(fs_info);
2089         /*
2090          * Ok now we need to make sure to block out any other joins while we
2091          * commit the transaction.  We could have started a join before setting
2092          * COMMIT_DOING, so make sure to wait for num_writers to be 1 again.
2093          */
2094         spin_lock(&fs_info->trans_lock);
2095         cur_trans->state = TRANS_STATE_COMMIT_DOING;
2096         spin_unlock(&fs_info->trans_lock);
2097         wait_event(cur_trans->writer_wait,
2098                    atomic_read(&cur_trans->num_writers) == 1);
2099
2100         /* ->aborted might be set after the previous check, so check it */
2101         if (unlikely(READ_ONCE(cur_trans->aborted))) {
2102                 ret = cur_trans->aborted;
2103                 goto scrub_continue;
2104         }
2105         /*
2106          * the reloc mutex makes sure that we stop
2107          * the balancing code from coming in and moving
2108          * extents around in the middle of the commit
2109          */
2110         mutex_lock(&fs_info->reloc_mutex);
2111
2112         /*
2113          * We needn't worry about the delayed items because we will
2114          * deal with them in create_pending_snapshot(), which is the
2115          * core function of the snapshot creation.
2116          */
2117         ret = create_pending_snapshots(trans);
2118         if (ret) {
2119                 mutex_unlock(&fs_info->reloc_mutex);
2120                 goto scrub_continue;
2121         }
2122
2123         /*
2124          * We insert the dir indexes of the snapshots and update the inode
2125          * of the snapshots' parents after the snapshot creation, so there
2126          * are some delayed items which are not dealt with. Now deal with
2127          * them.
2128          *
2129          * We needn't worry that this operation will corrupt the snapshots,
2130          * because all the trees which are snapshotted will be forced to COW
2131          * the nodes and leaves.
2132          */
2133         ret = btrfs_run_delayed_items(trans);
2134         if (ret) {
2135                 mutex_unlock(&fs_info->reloc_mutex);
2136                 goto scrub_continue;
2137         }
2138
2139         ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
2140         if (ret) {
2141                 mutex_unlock(&fs_info->reloc_mutex);
2142                 goto scrub_continue;
2143         }
2144
2145         /*
2146          * make sure none of the code above managed to slip in a
2147          * delayed item
2148          */
2149         btrfs_assert_delayed_root_empty(fs_info);
2150
2151         WARN_ON(cur_trans != trans->transaction);
2152
2153         /* commit_cowonly_roots() is responsible for getting the
2154          * various roots consistent with each other.  Every pointer
2155          * in the tree of tree roots has to point to the most up to date
2156          * root for every subvolume and other tree.  So, we have to keep
2157          * the tree logging code from jumping in and changing any
2158          * of the trees.
2159          *
2160          * At this point in the commit, there can't be any tree-log
2161          * writers, but a little lower down we drop the trans mutex
2162          * and let new people in.  By holding the tree_log_mutex
2163          * from now until after the super is written, we avoid races
2164          * with the tree-log code.
2165          */
2166         mutex_lock(&fs_info->tree_log_mutex);
2167
2168         ret = commit_fs_roots(trans);
2169         if (ret) {
2170                 mutex_unlock(&fs_info->tree_log_mutex);
2171                 mutex_unlock(&fs_info->reloc_mutex);
2172                 goto scrub_continue;
2173         }
2174
2175         /*
2176          * Since the transaction is done, we can apply the pending changes
2177          * before the next transaction.
2178          */
2179         btrfs_apply_pending_changes(fs_info);
2180
2181         /* commit_fs_roots() gets rid of all the tree log roots, so it is
2182          * now safe to free the log root tree
2183          */
2184         btrfs_free_log_root_tree(trans, fs_info);
2185
2186         /*
2187          * commit_fs_roots() can call btrfs_save_ino_cache(), which generates
2188          * new delayed refs. Must handle them or qgroup can be wrong.
2189          */
2190         ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
2191         if (ret) {
2192                 mutex_unlock(&fs_info->tree_log_mutex);
2193                 mutex_unlock(&fs_info->reloc_mutex);
2194                 goto scrub_continue;
2195         }
2196
2197         /*
2198          * Since the fs roots are all committed, we can get quite accurate
2199          * new_roots. So let's do quota accounting.
2200          */
2201         ret = btrfs_qgroup_account_extents(trans);
2202         if (ret < 0) {
2203                 mutex_unlock(&fs_info->tree_log_mutex);
2204                 mutex_unlock(&fs_info->reloc_mutex);
2205                 goto scrub_continue;
2206         }
2207
2208         ret = commit_cowonly_roots(trans);
2209         if (ret) {
2210                 mutex_unlock(&fs_info->tree_log_mutex);
2211                 mutex_unlock(&fs_info->reloc_mutex);
2212                 goto scrub_continue;
2213         }
2214
2215         /*
2216          * The tasks which save the space cache and inode cache may also
2217          * update ->aborted, check it.
2218          */
2219         if (unlikely(READ_ONCE(cur_trans->aborted))) {
2220                 ret = cur_trans->aborted;
2221                 mutex_unlock(&fs_info->tree_log_mutex);
2222                 mutex_unlock(&fs_info->reloc_mutex);
2223                 goto scrub_continue;
2224         }
2225
2226         btrfs_prepare_extent_commit(fs_info);
2227
2228         cur_trans = fs_info->running_transaction;
2229
2230         btrfs_set_root_node(&fs_info->tree_root->root_item,
2231                             fs_info->tree_root->node);
2232         list_add_tail(&fs_info->tree_root->dirty_list,
2233                       &cur_trans->switch_commits);
2234
2235         btrfs_set_root_node(&fs_info->chunk_root->root_item,
2236                             fs_info->chunk_root->node);
2237         list_add_tail(&fs_info->chunk_root->dirty_list,
2238                       &cur_trans->switch_commits);
2239
2240         switch_commit_roots(cur_trans);
2241
2242         ASSERT(list_empty(&cur_trans->dirty_bgs));
2243         ASSERT(list_empty(&cur_trans->io_bgs));
2244         update_super_roots(fs_info);
2245
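             /*
              * The tree log is not valid across a full transaction commit,
              * so clear the log root in the superblock copy we are about to
              * write.
              */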
2246         btrfs_set_super_log_root(fs_info->super_copy, 0);
2247         btrfs_set_super_log_root_level(fs_info->super_copy, 0);
2248         memcpy(fs_info->super_for_commit, fs_info->super_copy,
2249                sizeof(*fs_info->super_copy));
2250
2251         btrfs_commit_device_sizes(cur_trans);
2252
2253         clear_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags);
2254         clear_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags);
2255
2256         btrfs_trans_release_chunk_metadata(trans);
2257
2258         spin_lock(&fs_info->trans_lock);
2259         cur_trans->state = TRANS_STATE_UNBLOCKED;
2260         fs_info->running_transaction = NULL;
2261         spin_unlock(&fs_info->trans_lock);
2262         mutex_unlock(&fs_info->reloc_mutex);
2263
2264         wake_up(&fs_info->transaction_wait);
2265
2266         ret = btrfs_write_and_wait_transaction(trans);
2267         if (ret) {
2268                 btrfs_handle_fs_error(fs_info, ret,
2269                                       "Error while writing out transaction");
2270                 mutex_unlock(&fs_info->tree_log_mutex);
2271                 goto scrub_continue;
2272         }
2273
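             /* A max_mirrors of 0 means write every superblock copy on each device */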
2274         ret = write_all_supers(fs_info, 0);
2275         /*
2276          * the super is written, we can safely allow the tree-loggers
2277          * to go about their business
2278          */
2279         mutex_unlock(&fs_info->tree_log_mutex);
2280         if (ret)
2281                 goto scrub_continue;
2282
2283         btrfs_finish_extent_commit(trans);
2284
2285         if (test_bit(BTRFS_TRANS_HAVE_FREE_BGS, &cur_trans->flags))
2286                 btrfs_clear_space_info_full(fs_info);
2287
2288         fs_info->last_trans_committed = cur_trans->transid;
2289         /*
2290          * We needn't acquire the lock here because there is no other task
2291          * which can change it.
2292          */
2293         cur_trans->state = TRANS_STATE_COMPLETED;
2294         wake_up(&cur_trans->commit_wait);
2295         clear_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags);
2296
2297         spin_lock(&fs_info->trans_lock);
2298         list_del_init(&cur_trans->list);
2299         spin_unlock(&fs_info->trans_lock);
2300
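             /*
              * Drop both the reference for the transaction's place on
              * fs_info->trans_list (removed above) and our own reference.
              */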
2301         btrfs_put_transaction(cur_trans);
2302         btrfs_put_transaction(cur_trans);
2303
2304         if (trans->type & __TRANS_FREEZABLE)
2305                 sb_end_intwrite(fs_info->sb);
2306
2307         trace_btrfs_transaction_commit(trans->root);
2308
2309         btrfs_scrub_continue(fs_info);
2310
2311         if (current->journal_info == trans)
2312                 current->journal_info = NULL;
2313
2314         kmem_cache_free(btrfs_trans_handle_cachep, trans);
2315
2316         return ret;
2317
2318 scrub_continue:
2319         btrfs_scrub_continue(fs_info);
2320 cleanup_transaction:
2321         btrfs_trans_release_metadata(trans);
2322         btrfs_cleanup_pending_block_groups(trans);
2323         btrfs_trans_release_chunk_metadata(trans);
2324         trans->block_rsv = NULL;
2325         btrfs_warn(fs_info, "Skipping commit of aborted transaction.");
2326         if (current->journal_info == trans)
2327                 current->journal_info = NULL;
2328         cleanup_transaction(trans, ret);
2329
2330         return ret;
2331 }
2332
2333 /*
2334  * Return < 0 on error,
2335  * 0 if there are no more dead_roots at the time of the call,
2336  * 1 if there are more to be processed; call me again.
2337  *
2338  * A return value of 1 means there are certainly more snapshots to delete, but
2339  * if a new one comes in during processing, it may return 0. We don't mind,
2340  * because btrfs_commit_super will poke the cleaner thread and it will process
2341  * it a few seconds later.
2342  */
2343 int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root)
2344 {
2345         int ret;
2346         struct btrfs_fs_info *fs_info = root->fs_info;
2347
2348         spin_lock(&fs_info->trans_lock);
2349         if (list_empty(&fs_info->dead_roots)) {
2350                 spin_unlock(&fs_info->trans_lock);
2351                 return 0;
2352         }
2353         root = list_first_entry(&fs_info->dead_roots,
2354                         struct btrfs_root, root_list);
2355         list_del_init(&root->root_list);
2356         spin_unlock(&fs_info->trans_lock);
2357
2358         btrfs_debug(fs_info, "cleaner removing %llu", root->root_key.objectid);
2359
2360         btrfs_kill_all_delayed_nodes(root);
2361
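             /*
              * Roots predating the mixed backref format are dropped without
              * updating refs; newer roots pass update_ref == 1.
              */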
2362         if (btrfs_header_backref_rev(root->node) <
2363                         BTRFS_MIXED_BACKREF_REV)
2364                 ret = btrfs_drop_snapshot(root, NULL, 0, 0);
2365         else
2366                 ret = btrfs_drop_snapshot(root, NULL, 1, 0);
2367
2368         return (ret < 0) ? 0 : 1;
2369 }
2370
2371 void btrfs_apply_pending_changes(struct btrfs_fs_info *fs_info)
2372 {
2373         unsigned long prev;
2374         unsigned long bit;
2375
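             /* Atomically fetch and clear all pending change bits */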
2376         prev = xchg(&fs_info->pending_changes, 0);
2377         if (!prev)
2378                 return;
2379
2380         bit = 1 << BTRFS_PENDING_SET_INODE_MAP_CACHE;
2381         if (prev & bit)
2382                 btrfs_set_opt(fs_info->mount_opt, INODE_MAP_CACHE);
2383         prev &= ~bit;
2384
2385         bit = 1 << BTRFS_PENDING_CLEAR_INODE_MAP_CACHE;
2386         if (prev & bit)
2387                 btrfs_clear_opt(fs_info->mount_opt, INODE_MAP_CACHE);
2388         prev &= ~bit;
2389
2390         bit = 1 << BTRFS_PENDING_COMMIT;
2391         if (prev & bit)
2392                 btrfs_debug(fs_info, "pending commit done");
2393         prev &= ~bit;
2394
2395         if (prev)
2396                 btrfs_warn(fs_info,
2397                         "unknown pending changes left 0x%lx, ignoring", prev);
2398 }