m68k: kexec: Include <linux/reboot.h>
[linux-block.git] / fs / btrfs / transaction.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2007 Oracle.  All rights reserved.
4  */
5
6 #include <linux/fs.h>
7 #include <linux/slab.h>
8 #include <linux/sched.h>
9 #include <linux/sched/mm.h>
10 #include <linux/writeback.h>
11 #include <linux/pagemap.h>
12 #include <linux/blkdev.h>
13 #include <linux/uuid.h>
14 #include <linux/timekeeping.h>
15 #include "misc.h"
16 #include "ctree.h"
17 #include "disk-io.h"
18 #include "transaction.h"
19 #include "locking.h"
20 #include "tree-log.h"
21 #include "volumes.h"
22 #include "dev-replace.h"
23 #include "qgroup.h"
24 #include "block-group.h"
25 #include "space-info.h"
26 #include "zoned.h"
27 #include "fs.h"
28 #include "accessors.h"
29 #include "extent-tree.h"
30 #include "root-tree.h"
31 #include "defrag.h"
32 #include "dir-item.h"
33 #include "uuid-tree.h"
34 #include "ioctl.h"
35 #include "relocation.h"
36 #include "scrub.h"
37
38 static struct kmem_cache *btrfs_trans_handle_cachep;
39
40 #define BTRFS_ROOT_TRANS_TAG 0
41
42 /*
43  * Transaction states and transitions
44  *
45  * No running transaction (fs tree blocks are not modified)
46  * |
47  * | To next stage:
48  * |  Call start_transaction() variants. Except btrfs_join_transaction_nostart().
49  * V
50  * Transaction N [[TRANS_STATE_RUNNING]]
51  * |
52  * | New trans handles can be attached to transaction N by calling all
53  * | start_transaction() variants.
54  * |
55  * | To next stage:
56  * |  Call btrfs_commit_transaction() on any trans handle attached to
57  * |  transaction N
58  * V
59  * Transaction N [[TRANS_STATE_COMMIT_START]]
60  * |
61  * | Will wait for previous running transaction to completely finish if there
62  * | is one
63  * |
64  * | Then one of the following happes:
65  * | - Wait for all other trans handle holders to release.
66  * |   The btrfs_commit_transaction() caller will do the commit work.
67  * | - Wait for current transaction to be committed by others.
68  * |   Other btrfs_commit_transaction() caller will do the commit work.
69  * |
70  * | At this stage, only btrfs_join_transaction*() variants can attach
71  * | to this running transaction.
72  * | All other variants will wait for current one to finish and attach to
73  * | transaction N+1.
74  * |
75  * | To next stage:
76  * |  Caller is chosen to commit transaction N, and all other trans handle
77  * |  haven been released.
78  * V
79  * Transaction N [[TRANS_STATE_COMMIT_DOING]]
80  * |
81  * | The heavy lifting transaction work is started.
82  * | From running delayed refs (modifying extent tree) to creating pending
83  * | snapshots, running qgroups.
84  * | In short, modify supporting trees to reflect modifications of subvolume
85  * | trees.
86  * |
87  * | At this stage, all start_transaction() calls will wait for this
88  * | transaction to finish and attach to transaction N+1.
89  * |
90  * | To next stage:
91  * |  Until all supporting trees are updated.
92  * V
93  * Transaction N [[TRANS_STATE_UNBLOCKED]]
94  * |                                                Transaction N+1
95  * | All needed trees are modified, thus we only    [[TRANS_STATE_RUNNING]]
96  * | need to write them back to disk and update     |
97  * | super blocks.                                  |
98  * |                                                |
99  * | At this stage, new transaction is allowed to   |
100  * | start.                                         |
101  * | All new start_transaction() calls will be      |
102  * | attached to transid N+1.                       |
103  * |                                                |
104  * | To next stage:                                 |
105  * |  Until all tree blocks are super blocks are    |
106  * |  written to block devices                      |
107  * V                                                |
108  * Transaction N [[TRANS_STATE_COMPLETED]]          V
109  *   All tree blocks and super blocks are written.  Transaction N+1
110  *   This transaction is finished and all its       [[TRANS_STATE_COMMIT_START]]
111  *   data structures will be cleaned up.            | Life goes on
112  */
113 static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
114         [TRANS_STATE_RUNNING]           = 0U,
115         [TRANS_STATE_COMMIT_START]      = (__TRANS_START | __TRANS_ATTACH),
116         [TRANS_STATE_COMMIT_DOING]      = (__TRANS_START |
117                                            __TRANS_ATTACH |
118                                            __TRANS_JOIN |
119                                            __TRANS_JOIN_NOSTART),
120         [TRANS_STATE_UNBLOCKED]         = (__TRANS_START |
121                                            __TRANS_ATTACH |
122                                            __TRANS_JOIN |
123                                            __TRANS_JOIN_NOLOCK |
124                                            __TRANS_JOIN_NOSTART),
125         [TRANS_STATE_SUPER_COMMITTED]   = (__TRANS_START |
126                                            __TRANS_ATTACH |
127                                            __TRANS_JOIN |
128                                            __TRANS_JOIN_NOLOCK |
129                                            __TRANS_JOIN_NOSTART),
130         [TRANS_STATE_COMPLETED]         = (__TRANS_START |
131                                            __TRANS_ATTACH |
132                                            __TRANS_JOIN |
133                                            __TRANS_JOIN_NOLOCK |
134                                            __TRANS_JOIN_NOSTART),
135 };
136
137 void btrfs_put_transaction(struct btrfs_transaction *transaction)
138 {
139         WARN_ON(refcount_read(&transaction->use_count) == 0);
140         if (refcount_dec_and_test(&transaction->use_count)) {
141                 BUG_ON(!list_empty(&transaction->list));
142                 WARN_ON(!RB_EMPTY_ROOT(
143                                 &transaction->delayed_refs.href_root.rb_root));
144                 WARN_ON(!RB_EMPTY_ROOT(
145                                 &transaction->delayed_refs.dirty_extent_root));
146                 if (transaction->delayed_refs.pending_csums)
147                         btrfs_err(transaction->fs_info,
148                                   "pending csums is %llu",
149                                   transaction->delayed_refs.pending_csums);
150                 /*
151                  * If any block groups are found in ->deleted_bgs then it's
152                  * because the transaction was aborted and a commit did not
153                  * happen (things failed before writing the new superblock
154                  * and calling btrfs_finish_extent_commit()), so we can not
155                  * discard the physical locations of the block groups.
156                  */
157                 while (!list_empty(&transaction->deleted_bgs)) {
158                         struct btrfs_block_group *cache;
159
160                         cache = list_first_entry(&transaction->deleted_bgs,
161                                                  struct btrfs_block_group,
162                                                  bg_list);
163                         list_del_init(&cache->bg_list);
164                         btrfs_unfreeze_block_group(cache);
165                         btrfs_put_block_group(cache);
166                 }
167                 WARN_ON(!list_empty(&transaction->dev_update_list));
168                 kfree(transaction);
169         }
170 }
171
172 static noinline void switch_commit_roots(struct btrfs_trans_handle *trans)
173 {
174         struct btrfs_transaction *cur_trans = trans->transaction;
175         struct btrfs_fs_info *fs_info = trans->fs_info;
176         struct btrfs_root *root, *tmp;
177
178         /*
179          * At this point no one can be using this transaction to modify any tree
180          * and no one can start another transaction to modify any tree either.
181          */
182         ASSERT(cur_trans->state == TRANS_STATE_COMMIT_DOING);
183
184         down_write(&fs_info->commit_root_sem);
185
186         if (test_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags))
187                 fs_info->last_reloc_trans = trans->transid;
188
189         list_for_each_entry_safe(root, tmp, &cur_trans->switch_commits,
190                                  dirty_list) {
191                 list_del_init(&root->dirty_list);
192                 free_extent_buffer(root->commit_root);
193                 root->commit_root = btrfs_root_node(root);
194                 extent_io_tree_release(&root->dirty_log_pages);
195                 btrfs_qgroup_clean_swapped_blocks(root);
196         }
197
198         /* We can free old roots now. */
199         spin_lock(&cur_trans->dropped_roots_lock);
200         while (!list_empty(&cur_trans->dropped_roots)) {
201                 root = list_first_entry(&cur_trans->dropped_roots,
202                                         struct btrfs_root, root_list);
203                 list_del_init(&root->root_list);
204                 spin_unlock(&cur_trans->dropped_roots_lock);
205                 btrfs_free_log(trans, root);
206                 btrfs_drop_and_free_fs_root(fs_info, root);
207                 spin_lock(&cur_trans->dropped_roots_lock);
208         }
209         spin_unlock(&cur_trans->dropped_roots_lock);
210
211         up_write(&fs_info->commit_root_sem);
212 }
213
214 static inline void extwriter_counter_inc(struct btrfs_transaction *trans,
215                                          unsigned int type)
216 {
217         if (type & TRANS_EXTWRITERS)
218                 atomic_inc(&trans->num_extwriters);
219 }
220
221 static inline void extwriter_counter_dec(struct btrfs_transaction *trans,
222                                          unsigned int type)
223 {
224         if (type & TRANS_EXTWRITERS)
225                 atomic_dec(&trans->num_extwriters);
226 }
227
228 static inline void extwriter_counter_init(struct btrfs_transaction *trans,
229                                           unsigned int type)
230 {
231         atomic_set(&trans->num_extwriters, ((type & TRANS_EXTWRITERS) ? 1 : 0));
232 }
233
234 static inline int extwriter_counter_read(struct btrfs_transaction *trans)
235 {
236         return atomic_read(&trans->num_extwriters);
237 }
238
239 /*
240  * To be called after doing the chunk btree updates right after allocating a new
241  * chunk (after btrfs_chunk_alloc_add_chunk_item() is called), when removing a
242  * chunk after all chunk btree updates and after finishing the second phase of
243  * chunk allocation (btrfs_create_pending_block_groups()) in case some block
244  * group had its chunk item insertion delayed to the second phase.
245  */
246 void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
247 {
248         struct btrfs_fs_info *fs_info = trans->fs_info;
249
250         if (!trans->chunk_bytes_reserved)
251                 return;
252
253         btrfs_block_rsv_release(fs_info, &fs_info->chunk_block_rsv,
254                                 trans->chunk_bytes_reserved, NULL);
255         trans->chunk_bytes_reserved = 0;
256 }
257
258 /*
259  * either allocate a new transaction or hop into the existing one
260  */
261 static noinline int join_transaction(struct btrfs_fs_info *fs_info,
262                                      unsigned int type)
263 {
264         struct btrfs_transaction *cur_trans;
265
266         spin_lock(&fs_info->trans_lock);
267 loop:
268         /* The file system has been taken offline. No new transactions. */
269         if (BTRFS_FS_ERROR(fs_info)) {
270                 spin_unlock(&fs_info->trans_lock);
271                 return -EROFS;
272         }
273
274         cur_trans = fs_info->running_transaction;
275         if (cur_trans) {
276                 if (TRANS_ABORTED(cur_trans)) {
277                         spin_unlock(&fs_info->trans_lock);
278                         return cur_trans->aborted;
279                 }
280                 if (btrfs_blocked_trans_types[cur_trans->state] & type) {
281                         spin_unlock(&fs_info->trans_lock);
282                         return -EBUSY;
283                 }
284                 refcount_inc(&cur_trans->use_count);
285                 atomic_inc(&cur_trans->num_writers);
286                 extwriter_counter_inc(cur_trans, type);
287                 spin_unlock(&fs_info->trans_lock);
288                 btrfs_lockdep_acquire(fs_info, btrfs_trans_num_writers);
289                 btrfs_lockdep_acquire(fs_info, btrfs_trans_num_extwriters);
290                 return 0;
291         }
292         spin_unlock(&fs_info->trans_lock);
293
294         /*
295          * If we are ATTACH, we just want to catch the current transaction,
296          * and commit it. If there is no transaction, just return ENOENT.
297          */
298         if (type == TRANS_ATTACH)
299                 return -ENOENT;
300
301         /*
302          * JOIN_NOLOCK only happens during the transaction commit, so
303          * it is impossible that ->running_transaction is NULL
304          */
305         BUG_ON(type == TRANS_JOIN_NOLOCK);
306
307         cur_trans = kmalloc(sizeof(*cur_trans), GFP_NOFS);
308         if (!cur_trans)
309                 return -ENOMEM;
310
311         btrfs_lockdep_acquire(fs_info, btrfs_trans_num_writers);
312         btrfs_lockdep_acquire(fs_info, btrfs_trans_num_extwriters);
313
314         spin_lock(&fs_info->trans_lock);
315         if (fs_info->running_transaction) {
316                 /*
317                  * someone started a transaction after we unlocked.  Make sure
318                  * to redo the checks above
319                  */
320                 btrfs_lockdep_release(fs_info, btrfs_trans_num_extwriters);
321                 btrfs_lockdep_release(fs_info, btrfs_trans_num_writers);
322                 kfree(cur_trans);
323                 goto loop;
324         } else if (BTRFS_FS_ERROR(fs_info)) {
325                 spin_unlock(&fs_info->trans_lock);
326                 btrfs_lockdep_release(fs_info, btrfs_trans_num_extwriters);
327                 btrfs_lockdep_release(fs_info, btrfs_trans_num_writers);
328                 kfree(cur_trans);
329                 return -EROFS;
330         }
331
332         cur_trans->fs_info = fs_info;
333         atomic_set(&cur_trans->pending_ordered, 0);
334         init_waitqueue_head(&cur_trans->pending_wait);
335         atomic_set(&cur_trans->num_writers, 1);
336         extwriter_counter_init(cur_trans, type);
337         init_waitqueue_head(&cur_trans->writer_wait);
338         init_waitqueue_head(&cur_trans->commit_wait);
339         cur_trans->state = TRANS_STATE_RUNNING;
340         /*
341          * One for this trans handle, one so it will live on until we
342          * commit the transaction.
343          */
344         refcount_set(&cur_trans->use_count, 2);
345         cur_trans->flags = 0;
346         cur_trans->start_time = ktime_get_seconds();
347
348         memset(&cur_trans->delayed_refs, 0, sizeof(cur_trans->delayed_refs));
349
350         cur_trans->delayed_refs.href_root = RB_ROOT_CACHED;
351         cur_trans->delayed_refs.dirty_extent_root = RB_ROOT;
352         atomic_set(&cur_trans->delayed_refs.num_entries, 0);
353
354         /*
355          * although the tree mod log is per file system and not per transaction,
356          * the log must never go across transaction boundaries.
357          */
358         smp_mb();
359         if (!list_empty(&fs_info->tree_mod_seq_list))
360                 WARN(1, KERN_ERR "BTRFS: tree_mod_seq_list not empty when creating a fresh transaction\n");
361         if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log))
362                 WARN(1, KERN_ERR "BTRFS: tree_mod_log rb tree not empty when creating a fresh transaction\n");
363         atomic64_set(&fs_info->tree_mod_seq, 0);
364
365         spin_lock_init(&cur_trans->delayed_refs.lock);
366
367         INIT_LIST_HEAD(&cur_trans->pending_snapshots);
368         INIT_LIST_HEAD(&cur_trans->dev_update_list);
369         INIT_LIST_HEAD(&cur_trans->switch_commits);
370         INIT_LIST_HEAD(&cur_trans->dirty_bgs);
371         INIT_LIST_HEAD(&cur_trans->io_bgs);
372         INIT_LIST_HEAD(&cur_trans->dropped_roots);
373         mutex_init(&cur_trans->cache_write_mutex);
374         spin_lock_init(&cur_trans->dirty_bgs_lock);
375         INIT_LIST_HEAD(&cur_trans->deleted_bgs);
376         spin_lock_init(&cur_trans->dropped_roots_lock);
377         INIT_LIST_HEAD(&cur_trans->releasing_ebs);
378         spin_lock_init(&cur_trans->releasing_ebs_lock);
379         list_add_tail(&cur_trans->list, &fs_info->trans_list);
380         extent_io_tree_init(fs_info, &cur_trans->dirty_pages,
381                         IO_TREE_TRANS_DIRTY_PAGES);
382         extent_io_tree_init(fs_info, &cur_trans->pinned_extents,
383                         IO_TREE_FS_PINNED_EXTENTS);
384         fs_info->generation++;
385         cur_trans->transid = fs_info->generation;
386         fs_info->running_transaction = cur_trans;
387         cur_trans->aborted = 0;
388         spin_unlock(&fs_info->trans_lock);
389
390         return 0;
391 }
392
393 /*
394  * This does all the record keeping required to make sure that a shareable root
395  * is properly recorded in a given transaction.  This is required to make sure
396  * the old root from before we joined the transaction is deleted when the
397  * transaction commits.
398  */
399 static int record_root_in_trans(struct btrfs_trans_handle *trans,
400                                struct btrfs_root *root,
401                                int force)
402 {
403         struct btrfs_fs_info *fs_info = root->fs_info;
404         int ret = 0;
405
406         if ((test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
407             root->last_trans < trans->transid) || force) {
408                 WARN_ON(!force && root->commit_root != root->node);
409
410                 /*
411                  * see below for IN_TRANS_SETUP usage rules
412                  * we have the reloc mutex held now, so there
413                  * is only one writer in this function
414                  */
415                 set_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);
416
417                 /* make sure readers find IN_TRANS_SETUP before
418                  * they find our root->last_trans update
419                  */
420                 smp_wmb();
421
422                 spin_lock(&fs_info->fs_roots_radix_lock);
423                 if (root->last_trans == trans->transid && !force) {
424                         spin_unlock(&fs_info->fs_roots_radix_lock);
425                         return 0;
426                 }
427                 radix_tree_tag_set(&fs_info->fs_roots_radix,
428                                    (unsigned long)root->root_key.objectid,
429                                    BTRFS_ROOT_TRANS_TAG);
430                 spin_unlock(&fs_info->fs_roots_radix_lock);
431                 root->last_trans = trans->transid;
432
433                 /* this is pretty tricky.  We don't want to
434                  * take the relocation lock in btrfs_record_root_in_trans
435                  * unless we're really doing the first setup for this root in
436                  * this transaction.
437                  *
438                  * Normally we'd use root->last_trans as a flag to decide
439                  * if we want to take the expensive mutex.
440                  *
441                  * But, we have to set root->last_trans before we
442                  * init the relocation root, otherwise, we trip over warnings
443                  * in ctree.c.  The solution used here is to flag ourselves
444                  * with root IN_TRANS_SETUP.  When this is 1, we're still
445                  * fixing up the reloc trees and everyone must wait.
446                  *
447                  * When this is zero, they can trust root->last_trans and fly
448                  * through btrfs_record_root_in_trans without having to take the
449                  * lock.  smp_wmb() makes sure that all the writes above are
450                  * done before we pop in the zero below
451                  */
452                 ret = btrfs_init_reloc_root(trans, root);
453                 smp_mb__before_atomic();
454                 clear_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);
455         }
456         return ret;
457 }
458
459
460 void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
461                             struct btrfs_root *root)
462 {
463         struct btrfs_fs_info *fs_info = root->fs_info;
464         struct btrfs_transaction *cur_trans = trans->transaction;
465
466         /* Add ourselves to the transaction dropped list */
467         spin_lock(&cur_trans->dropped_roots_lock);
468         list_add_tail(&root->root_list, &cur_trans->dropped_roots);
469         spin_unlock(&cur_trans->dropped_roots_lock);
470
471         /* Make sure we don't try to update the root at commit time */
472         spin_lock(&fs_info->fs_roots_radix_lock);
473         radix_tree_tag_clear(&fs_info->fs_roots_radix,
474                              (unsigned long)root->root_key.objectid,
475                              BTRFS_ROOT_TRANS_TAG);
476         spin_unlock(&fs_info->fs_roots_radix_lock);
477 }
478
479 int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
480                                struct btrfs_root *root)
481 {
482         struct btrfs_fs_info *fs_info = root->fs_info;
483         int ret;
484
485         if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
486                 return 0;
487
488         /*
489          * see record_root_in_trans for comments about IN_TRANS_SETUP usage
490          * and barriers
491          */
492         smp_rmb();
493         if (root->last_trans == trans->transid &&
494             !test_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state))
495                 return 0;
496
497         mutex_lock(&fs_info->reloc_mutex);
498         ret = record_root_in_trans(trans, root, 0);
499         mutex_unlock(&fs_info->reloc_mutex);
500
501         return ret;
502 }
503
504 static inline int is_transaction_blocked(struct btrfs_transaction *trans)
505 {
506         return (trans->state >= TRANS_STATE_COMMIT_START &&
507                 trans->state < TRANS_STATE_UNBLOCKED &&
508                 !TRANS_ABORTED(trans));
509 }
510
511 /* wait for commit against the current transaction to become unblocked
512  * when this is done, it is safe to start a new transaction, but the current
513  * transaction might not be fully on disk.
514  */
515 static void wait_current_trans(struct btrfs_fs_info *fs_info)
516 {
517         struct btrfs_transaction *cur_trans;
518
519         spin_lock(&fs_info->trans_lock);
520         cur_trans = fs_info->running_transaction;
521         if (cur_trans && is_transaction_blocked(cur_trans)) {
522                 refcount_inc(&cur_trans->use_count);
523                 spin_unlock(&fs_info->trans_lock);
524
525                 btrfs_might_wait_for_state(fs_info, BTRFS_LOCKDEP_TRANS_UNBLOCKED);
526                 wait_event(fs_info->transaction_wait,
527                            cur_trans->state >= TRANS_STATE_UNBLOCKED ||
528                            TRANS_ABORTED(cur_trans));
529                 btrfs_put_transaction(cur_trans);
530         } else {
531                 spin_unlock(&fs_info->trans_lock);
532         }
533 }
534
535 static int may_wait_transaction(struct btrfs_fs_info *fs_info, int type)
536 {
537         if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
538                 return 0;
539
540         if (type == TRANS_START)
541                 return 1;
542
543         return 0;
544 }
545
546 static inline bool need_reserve_reloc_root(struct btrfs_root *root)
547 {
548         struct btrfs_fs_info *fs_info = root->fs_info;
549
550         if (!fs_info->reloc_ctl ||
551             !test_bit(BTRFS_ROOT_SHAREABLE, &root->state) ||
552             root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
553             root->reloc_root)
554                 return false;
555
556         return true;
557 }
558
559 static struct btrfs_trans_handle *
560 start_transaction(struct btrfs_root *root, unsigned int num_items,
561                   unsigned int type, enum btrfs_reserve_flush_enum flush,
562                   bool enforce_qgroups)
563 {
564         struct btrfs_fs_info *fs_info = root->fs_info;
565         struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
566         struct btrfs_trans_handle *h;
567         struct btrfs_transaction *cur_trans;
568         u64 num_bytes = 0;
569         u64 qgroup_reserved = 0;
570         bool reloc_reserved = false;
571         bool do_chunk_alloc = false;
572         int ret;
573
574         if (BTRFS_FS_ERROR(fs_info))
575                 return ERR_PTR(-EROFS);
576
577         if (current->journal_info) {
578                 WARN_ON(type & TRANS_EXTWRITERS);
579                 h = current->journal_info;
580                 refcount_inc(&h->use_count);
581                 WARN_ON(refcount_read(&h->use_count) > 2);
582                 h->orig_rsv = h->block_rsv;
583                 h->block_rsv = NULL;
584                 goto got_it;
585         }
586
587         /*
588          * Do the reservation before we join the transaction so we can do all
589          * the appropriate flushing if need be.
590          */
591         if (num_items && root != fs_info->chunk_root) {
592                 struct btrfs_block_rsv *rsv = &fs_info->trans_block_rsv;
593                 u64 delayed_refs_bytes = 0;
594
595                 qgroup_reserved = num_items * fs_info->nodesize;
596                 ret = btrfs_qgroup_reserve_meta_pertrans(root, qgroup_reserved,
597                                 enforce_qgroups);
598                 if (ret)
599                         return ERR_PTR(ret);
600
601                 /*
602                  * We want to reserve all the bytes we may need all at once, so
603                  * we only do 1 enospc flushing cycle per transaction start.  We
604                  * accomplish this by simply assuming we'll do 2 x num_items
605                  * worth of delayed refs updates in this trans handle, and
606                  * refill that amount for whatever is missing in the reserve.
607                  */
608                 num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_items);
609                 if (flush == BTRFS_RESERVE_FLUSH_ALL &&
610                     btrfs_block_rsv_full(delayed_refs_rsv) == 0) {
611                         delayed_refs_bytes = num_bytes;
612                         num_bytes <<= 1;
613                 }
614
615                 /*
616                  * Do the reservation for the relocation root creation
617                  */
618                 if (need_reserve_reloc_root(root)) {
619                         num_bytes += fs_info->nodesize;
620                         reloc_reserved = true;
621                 }
622
623                 ret = btrfs_block_rsv_add(fs_info, rsv, num_bytes, flush);
624                 if (ret)
625                         goto reserve_fail;
626                 if (delayed_refs_bytes) {
627                         btrfs_migrate_to_delayed_refs_rsv(fs_info, rsv,
628                                                           delayed_refs_bytes);
629                         num_bytes -= delayed_refs_bytes;
630                 }
631
632                 if (rsv->space_info->force_alloc)
633                         do_chunk_alloc = true;
634         } else if (num_items == 0 && flush == BTRFS_RESERVE_FLUSH_ALL &&
635                    !btrfs_block_rsv_full(delayed_refs_rsv)) {
636                 /*
637                  * Some people call with btrfs_start_transaction(root, 0)
638                  * because they can be throttled, but have some other mechanism
639                  * for reserving space.  We still want these guys to refill the
640                  * delayed block_rsv so just add 1 items worth of reservation
641                  * here.
642                  */
643                 ret = btrfs_delayed_refs_rsv_refill(fs_info, flush);
644                 if (ret)
645                         goto reserve_fail;
646         }
647 again:
648         h = kmem_cache_zalloc(btrfs_trans_handle_cachep, GFP_NOFS);
649         if (!h) {
650                 ret = -ENOMEM;
651                 goto alloc_fail;
652         }
653
654         /*
655          * If we are JOIN_NOLOCK we're already committing a transaction and
656          * waiting on this guy, so we don't need to do the sb_start_intwrite
657          * because we're already holding a ref.  We need this because we could
658          * have raced in and did an fsync() on a file which can kick a commit
659          * and then we deadlock with somebody doing a freeze.
660          *
661          * If we are ATTACH, it means we just want to catch the current
662          * transaction and commit it, so we needn't do sb_start_intwrite(). 
663          */
664         if (type & __TRANS_FREEZABLE)
665                 sb_start_intwrite(fs_info->sb);
666
667         if (may_wait_transaction(fs_info, type))
668                 wait_current_trans(fs_info);
669
670         do {
671                 ret = join_transaction(fs_info, type);
672                 if (ret == -EBUSY) {
673                         wait_current_trans(fs_info);
674                         if (unlikely(type == TRANS_ATTACH ||
675                                      type == TRANS_JOIN_NOSTART))
676                                 ret = -ENOENT;
677                 }
678         } while (ret == -EBUSY);
679
680         if (ret < 0)
681                 goto join_fail;
682
683         cur_trans = fs_info->running_transaction;
684
685         h->transid = cur_trans->transid;
686         h->transaction = cur_trans;
687         refcount_set(&h->use_count, 1);
688         h->fs_info = root->fs_info;
689
690         h->type = type;
691         INIT_LIST_HEAD(&h->new_bgs);
692
693         smp_mb();
694         if (cur_trans->state >= TRANS_STATE_COMMIT_START &&
695             may_wait_transaction(fs_info, type)) {
696                 current->journal_info = h;
697                 btrfs_commit_transaction(h);
698                 goto again;
699         }
700
701         if (num_bytes) {
702                 trace_btrfs_space_reservation(fs_info, "transaction",
703                                               h->transid, num_bytes, 1);
704                 h->block_rsv = &fs_info->trans_block_rsv;
705                 h->bytes_reserved = num_bytes;
706                 h->reloc_reserved = reloc_reserved;
707         }
708
709 got_it:
710         if (!current->journal_info)
711                 current->journal_info = h;
712
713         /*
714          * If the space_info is marked ALLOC_FORCE then we'll get upgraded to
715          * ALLOC_FORCE the first run through, and then we won't allocate for
716          * anybody else who races in later.  We don't care about the return
717          * value here.
718          */
719         if (do_chunk_alloc && num_bytes) {
720                 u64 flags = h->block_rsv->space_info->flags;
721
722                 btrfs_chunk_alloc(h, btrfs_get_alloc_profile(fs_info, flags),
723                                   CHUNK_ALLOC_NO_FORCE);
724         }
725
726         /*
727          * btrfs_record_root_in_trans() needs to alloc new extents, and may
728          * call btrfs_join_transaction() while we're also starting a
729          * transaction.
730          *
731          * Thus it need to be called after current->journal_info initialized,
732          * or we can deadlock.
733          */
734         ret = btrfs_record_root_in_trans(h, root);
735         if (ret) {
736                 /*
737                  * The transaction handle is fully initialized and linked with
738                  * other structures so it needs to be ended in case of errors,
739                  * not just freed.
740                  */
741                 btrfs_end_transaction(h);
742                 return ERR_PTR(ret);
743         }
744
745         return h;
746
747 join_fail:
748         if (type & __TRANS_FREEZABLE)
749                 sb_end_intwrite(fs_info->sb);
750         kmem_cache_free(btrfs_trans_handle_cachep, h);
751 alloc_fail:
752         if (num_bytes)
753                 btrfs_block_rsv_release(fs_info, &fs_info->trans_block_rsv,
754                                         num_bytes, NULL);
755 reserve_fail:
756         btrfs_qgroup_free_meta_pertrans(root, qgroup_reserved);
757         return ERR_PTR(ret);
758 }
759
760 struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
761                                                    unsigned int num_items)
762 {
763         return start_transaction(root, num_items, TRANS_START,
764                                  BTRFS_RESERVE_FLUSH_ALL, true);
765 }
766
767 struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
768                                         struct btrfs_root *root,
769                                         unsigned int num_items)
770 {
771         return start_transaction(root, num_items, TRANS_START,
772                                  BTRFS_RESERVE_FLUSH_ALL_STEAL, false);
773 }
774
775 struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
776 {
777         return start_transaction(root, 0, TRANS_JOIN, BTRFS_RESERVE_NO_FLUSH,
778                                  true);
779 }
780
781 struct btrfs_trans_handle *btrfs_join_transaction_spacecache(struct btrfs_root *root)
782 {
783         return start_transaction(root, 0, TRANS_JOIN_NOLOCK,
784                                  BTRFS_RESERVE_NO_FLUSH, true);
785 }
786
787 /*
788  * Similar to regular join but it never starts a transaction when none is
789  * running or after waiting for the current one to finish.
790  */
791 struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root)
792 {
793         return start_transaction(root, 0, TRANS_JOIN_NOSTART,
794                                  BTRFS_RESERVE_NO_FLUSH, true);
795 }
796
797 /*
798  * btrfs_attach_transaction() - catch the running transaction
799  *
800  * It is used when we want to commit the current the transaction, but
801  * don't want to start a new one.
802  *
803  * Note: If this function return -ENOENT, it just means there is no
804  * running transaction. But it is possible that the inactive transaction
805  * is still in the memory, not fully on disk. If you hope there is no
806  * inactive transaction in the fs when -ENOENT is returned, you should
807  * invoke
808  *     btrfs_attach_transaction_barrier()
809  */
810 struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
811 {
812         return start_transaction(root, 0, TRANS_ATTACH,
813                                  BTRFS_RESERVE_NO_FLUSH, true);
814 }
815
816 /*
817  * btrfs_attach_transaction_barrier() - catch the running transaction
818  *
819  * It is similar to the above function, the difference is this one
820  * will wait for all the inactive transactions until they fully
821  * complete.
822  */
823 struct btrfs_trans_handle *
824 btrfs_attach_transaction_barrier(struct btrfs_root *root)
825 {
826         struct btrfs_trans_handle *trans;
827
828         trans = start_transaction(root, 0, TRANS_ATTACH,
829                                   BTRFS_RESERVE_NO_FLUSH, true);
830         if (trans == ERR_PTR(-ENOENT))
831                 btrfs_wait_for_commit(root->fs_info, 0);
832
833         return trans;
834 }
835
836 /* Wait for a transaction commit to reach at least the given state. */
837 static noinline void wait_for_commit(struct btrfs_transaction *commit,
838                                      const enum btrfs_trans_state min_state)
839 {
840         struct btrfs_fs_info *fs_info = commit->fs_info;
841         u64 transid = commit->transid;
842         bool put = false;
843
844         /*
845          * At the moment this function is called with min_state either being
846          * TRANS_STATE_COMPLETED or TRANS_STATE_SUPER_COMMITTED.
847          */
848         if (min_state == TRANS_STATE_COMPLETED)
849                 btrfs_might_wait_for_state(fs_info, BTRFS_LOCKDEP_TRANS_COMPLETED);
850         else
851                 btrfs_might_wait_for_state(fs_info, BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED);
852
853         while (1) {
854                 wait_event(commit->commit_wait, commit->state >= min_state);
855                 if (put)
856                         btrfs_put_transaction(commit);
857
858                 if (min_state < TRANS_STATE_COMPLETED)
859                         break;
860
861                 /*
862                  * A transaction isn't really completed until all of the
863                  * previous transactions are completed, but with fsync we can
864                  * end up with SUPER_COMMITTED transactions before a COMPLETED
865                  * transaction. Wait for those.
866                  */
867
868                 spin_lock(&fs_info->trans_lock);
869                 commit = list_first_entry_or_null(&fs_info->trans_list,
870                                                   struct btrfs_transaction,
871                                                   list);
872                 if (!commit || commit->transid > transid) {
873                         spin_unlock(&fs_info->trans_lock);
874                         break;
875                 }
876                 refcount_inc(&commit->use_count);
877                 put = true;
878                 spin_unlock(&fs_info->trans_lock);
879         }
880 }
881
882 int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid)
883 {
884         struct btrfs_transaction *cur_trans = NULL, *t;
885         int ret = 0;
886
887         if (transid) {
888                 if (transid <= fs_info->last_trans_committed)
889                         goto out;
890
891                 /* find specified transaction */
892                 spin_lock(&fs_info->trans_lock);
893                 list_for_each_entry(t, &fs_info->trans_list, list) {
894                         if (t->transid == transid) {
895                                 cur_trans = t;
896                                 refcount_inc(&cur_trans->use_count);
897                                 ret = 0;
898                                 break;
899                         }
900                         if (t->transid > transid) {
901                                 ret = 0;
902                                 break;
903                         }
904                 }
905                 spin_unlock(&fs_info->trans_lock);
906
907                 /*
908                  * The specified transaction doesn't exist, or we
909                  * raced with btrfs_commit_transaction
910                  */
911                 if (!cur_trans) {
912                         if (transid > fs_info->last_trans_committed)
913                                 ret = -EINVAL;
914                         goto out;
915                 }
916         } else {
917                 /* find newest transaction that is committing | committed */
918                 spin_lock(&fs_info->trans_lock);
919                 list_for_each_entry_reverse(t, &fs_info->trans_list,
920                                             list) {
921                         if (t->state >= TRANS_STATE_COMMIT_START) {
922                                 if (t->state == TRANS_STATE_COMPLETED)
923                                         break;
924                                 cur_trans = t;
925                                 refcount_inc(&cur_trans->use_count);
926                                 break;
927                         }
928                 }
929                 spin_unlock(&fs_info->trans_lock);
930                 if (!cur_trans)
931                         goto out;  /* nothing committing|committed */
932         }
933
934         wait_for_commit(cur_trans, TRANS_STATE_COMPLETED);
935         btrfs_put_transaction(cur_trans);
936 out:
937         return ret;
938 }
939
940 void btrfs_throttle(struct btrfs_fs_info *fs_info)
941 {
942         wait_current_trans(fs_info);
943 }
944
945 static bool should_end_transaction(struct btrfs_trans_handle *trans)
946 {
947         struct btrfs_fs_info *fs_info = trans->fs_info;
948
949         if (btrfs_check_space_for_delayed_refs(fs_info))
950                 return true;
951
952         return !!btrfs_block_rsv_check(&fs_info->global_block_rsv, 50);
953 }
954
955 bool btrfs_should_end_transaction(struct btrfs_trans_handle *trans)
956 {
957         struct btrfs_transaction *cur_trans = trans->transaction;
958
959         if (cur_trans->state >= TRANS_STATE_COMMIT_START ||
960             test_bit(BTRFS_DELAYED_REFS_FLUSHING, &cur_trans->delayed_refs.flags))
961                 return true;
962
963         return should_end_transaction(trans);
964 }
965
966 static void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans)
967
968 {
969         struct btrfs_fs_info *fs_info = trans->fs_info;
970
971         if (!trans->block_rsv) {
972                 ASSERT(!trans->bytes_reserved);
973                 return;
974         }
975
976         if (!trans->bytes_reserved)
977                 return;
978
979         ASSERT(trans->block_rsv == &fs_info->trans_block_rsv);
980         trace_btrfs_space_reservation(fs_info, "transaction",
981                                       trans->transid, trans->bytes_reserved, 0);
982         btrfs_block_rsv_release(fs_info, trans->block_rsv,
983                                 trans->bytes_reserved, NULL);
984         trans->bytes_reserved = 0;
985 }
986
987 static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
988                                    int throttle)
989 {
990         struct btrfs_fs_info *info = trans->fs_info;
991         struct btrfs_transaction *cur_trans = trans->transaction;
992         int err = 0;
993
994         if (refcount_read(&trans->use_count) > 1) {
995                 refcount_dec(&trans->use_count);
996                 trans->block_rsv = trans->orig_rsv;
997                 return 0;
998         }
999
1000         btrfs_trans_release_metadata(trans);
1001         trans->block_rsv = NULL;
1002
1003         btrfs_create_pending_block_groups(trans);
1004
1005         btrfs_trans_release_chunk_metadata(trans);
1006
1007         if (trans->type & __TRANS_FREEZABLE)
1008                 sb_end_intwrite(info->sb);
1009
1010         WARN_ON(cur_trans != info->running_transaction);
1011         WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
1012         atomic_dec(&cur_trans->num_writers);
1013         extwriter_counter_dec(cur_trans, trans->type);
1014
1015         cond_wake_up(&cur_trans->writer_wait);
1016
1017         btrfs_lockdep_release(info, btrfs_trans_num_extwriters);
1018         btrfs_lockdep_release(info, btrfs_trans_num_writers);
1019
1020         btrfs_put_transaction(cur_trans);
1021
1022         if (current->journal_info == trans)
1023                 current->journal_info = NULL;
1024
1025         if (throttle)
1026                 btrfs_run_delayed_iputs(info);
1027
1028         if (TRANS_ABORTED(trans) || BTRFS_FS_ERROR(info)) {
1029                 wake_up_process(info->transaction_kthread);
1030                 if (TRANS_ABORTED(trans))
1031                         err = trans->aborted;
1032                 else
1033                         err = -EROFS;
1034         }
1035
1036         kmem_cache_free(btrfs_trans_handle_cachep, trans);
1037         return err;
1038 }
1039
1040 int btrfs_end_transaction(struct btrfs_trans_handle *trans)
1041 {
1042         return __btrfs_end_transaction(trans, 0);
1043 }
1044
1045 int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans)
1046 {
1047         return __btrfs_end_transaction(trans, 1);
1048 }
1049
1050 /*
1051  * when btree blocks are allocated, they have some corresponding bits set for
1052  * them in one of two extent_io trees.  This is used to make sure all of
1053  * those extents are sent to disk but does not wait on them
1054  */
1055 int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
1056                                struct extent_io_tree *dirty_pages, int mark)
1057 {
1058         int err = 0;
1059         int werr = 0;
1060         struct address_space *mapping = fs_info->btree_inode->i_mapping;
1061         struct extent_state *cached_state = NULL;
1062         u64 start = 0;
1063         u64 end;
1064
1065         atomic_inc(&BTRFS_I(fs_info->btree_inode)->sync_writers);
1066         while (!find_first_extent_bit(dirty_pages, start, &start, &end,
1067                                       mark, &cached_state)) {
1068                 bool wait_writeback = false;
1069
1070                 err = convert_extent_bit(dirty_pages, start, end,
1071                                          EXTENT_NEED_WAIT,
1072                                          mark, &cached_state);
1073                 /*
1074                  * convert_extent_bit can return -ENOMEM, which is most of the
1075                  * time a temporary error. So when it happens, ignore the error
1076                  * and wait for writeback of this range to finish - because we
1077                  * failed to set the bit EXTENT_NEED_WAIT for the range, a call
1078                  * to __btrfs_wait_marked_extents() would not know that
1079                  * writeback for this range started and therefore wouldn't
1080                  * wait for it to finish - we don't want to commit a
1081                  * superblock that points to btree nodes/leafs for which
1082                  * writeback hasn't finished yet (and without errors).
1083                  * We cleanup any entries left in the io tree when committing
1084                  * the transaction (through extent_io_tree_release()).
1085                  */
1086                 if (err == -ENOMEM) {
1087                         err = 0;
1088                         wait_writeback = true;
1089                 }
1090                 if (!err)
1091                         err = filemap_fdatawrite_range(mapping, start, end);
1092                 if (err)
1093                         werr = err;
1094                 else if (wait_writeback)
1095                         werr = filemap_fdatawait_range(mapping, start, end);
1096                 free_extent_state(cached_state);
1097                 cached_state = NULL;
1098                 cond_resched();
1099                 start = end + 1;
1100         }
1101         atomic_dec(&BTRFS_I(fs_info->btree_inode)->sync_writers);
1102         return werr;
1103 }
1104
1105 /*
1106  * when btree blocks are allocated, they have some corresponding bits set for
1107  * them in one of two extent_io trees.  This is used to make sure all of
1108  * those extents are on disk for transaction or log commit.  We wait
1109  * on all the pages and clear them from the dirty pages state tree
1110  */
1111 static int __btrfs_wait_marked_extents(struct btrfs_fs_info *fs_info,
1112                                        struct extent_io_tree *dirty_pages)
1113 {
1114         int err = 0;
1115         int werr = 0;
1116         struct address_space *mapping = fs_info->btree_inode->i_mapping;
1117         struct extent_state *cached_state = NULL;
1118         u64 start = 0;
1119         u64 end;
1120
1121         while (!find_first_extent_bit(dirty_pages, start, &start, &end,
1122                                       EXTENT_NEED_WAIT, &cached_state)) {
1123                 /*
1124                  * Ignore -ENOMEM errors returned by clear_extent_bit().
1125                  * When committing the transaction, we'll remove any entries
1126                  * left in the io tree. For a log commit, we don't remove them
1127                  * after committing the log because the tree can be accessed
1128                  * concurrently - we do it only at transaction commit time when
1129                  * it's safe to do it (through extent_io_tree_release()).
1130                  */
1131                 err = clear_extent_bit(dirty_pages, start, end,
1132                                        EXTENT_NEED_WAIT, &cached_state);
1133                 if (err == -ENOMEM)
1134                         err = 0;
1135                 if (!err)
1136                         err = filemap_fdatawait_range(mapping, start, end);
1137                 if (err)
1138                         werr = err;
1139                 free_extent_state(cached_state);
1140                 cached_state = NULL;
1141                 cond_resched();
1142                 start = end + 1;
1143         }
1144         if (err)
1145                 werr = err;
1146         return werr;
1147 }
1148
1149 static int btrfs_wait_extents(struct btrfs_fs_info *fs_info,
1150                        struct extent_io_tree *dirty_pages)
1151 {
1152         bool errors = false;
1153         int err;
1154
1155         err = __btrfs_wait_marked_extents(fs_info, dirty_pages);
1156         if (test_and_clear_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags))
1157                 errors = true;
1158
1159         if (errors && !err)
1160                 err = -EIO;
1161         return err;
1162 }
1163
1164 int btrfs_wait_tree_log_extents(struct btrfs_root *log_root, int mark)
1165 {
1166         struct btrfs_fs_info *fs_info = log_root->fs_info;
1167         struct extent_io_tree *dirty_pages = &log_root->dirty_log_pages;
1168         bool errors = false;
1169         int err;
1170
1171         ASSERT(log_root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);
1172
1173         err = __btrfs_wait_marked_extents(fs_info, dirty_pages);
1174         if ((mark & EXTENT_DIRTY) &&
1175             test_and_clear_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags))
1176                 errors = true;
1177
1178         if ((mark & EXTENT_NEW) &&
1179             test_and_clear_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags))
1180                 errors = true;
1181
1182         if (errors && !err)
1183                 err = -EIO;
1184         return err;
1185 }
1186
1187 /*
1188  * When btree blocks are allocated the corresponding extents are marked dirty.
1189  * This function ensures such extents are persisted on disk for transaction or
1190  * log commit.
1191  *
1192  * @trans: transaction whose dirty pages we'd like to write
1193  */
1194 static int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans)
1195 {
1196         int ret;
1197         int ret2;
1198         struct extent_io_tree *dirty_pages = &trans->transaction->dirty_pages;
1199         struct btrfs_fs_info *fs_info = trans->fs_info;
1200         struct blk_plug plug;
1201
1202         blk_start_plug(&plug);
1203         ret = btrfs_write_marked_extents(fs_info, dirty_pages, EXTENT_DIRTY);
1204         blk_finish_plug(&plug);
1205         ret2 = btrfs_wait_extents(fs_info, dirty_pages);
1206
1207         extent_io_tree_release(&trans->transaction->dirty_pages);
1208
1209         if (ret)
1210                 return ret;
1211         else if (ret2)
1212                 return ret2;
1213         else
1214                 return 0;
1215 }
1216
1217 /*
1218  * this is used to update the root pointer in the tree of tree roots.
1219  *
1220  * But, in the case of the extent allocation tree, updating the root
1221  * pointer may allocate blocks which may change the root of the extent
1222  * allocation tree.
1223  *
1224  * So, this loops and repeats and makes sure the cowonly root didn't
1225  * change while the root pointer was being updated in the metadata.
1226  */
1227 static int update_cowonly_root(struct btrfs_trans_handle *trans,
1228                                struct btrfs_root *root)
1229 {
1230         int ret;
1231         u64 old_root_bytenr;
1232         u64 old_root_used;
1233         struct btrfs_fs_info *fs_info = root->fs_info;
1234         struct btrfs_root *tree_root = fs_info->tree_root;
1235
1236         old_root_used = btrfs_root_used(&root->root_item);
1237
1238         while (1) {
1239                 old_root_bytenr = btrfs_root_bytenr(&root->root_item);
1240                 if (old_root_bytenr == root->node->start &&
1241                     old_root_used == btrfs_root_used(&root->root_item))
1242                         break;
1243
1244                 btrfs_set_root_node(&root->root_item, root->node);
1245                 ret = btrfs_update_root(trans, tree_root,
1246                                         &root->root_key,
1247                                         &root->root_item);
1248                 if (ret)
1249                         return ret;
1250
1251                 old_root_used = btrfs_root_used(&root->root_item);
1252         }
1253
1254         return 0;
1255 }
1256
1257 /*
1258  * update all the cowonly tree roots on disk
1259  *
1260  * The error handling in this function may not be obvious. Any of the
1261  * failures will cause the file system to go offline. We still need
1262  * to clean up the delayed refs.
1263  */
1264 static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans)
1265 {
1266         struct btrfs_fs_info *fs_info = trans->fs_info;
1267         struct list_head *dirty_bgs = &trans->transaction->dirty_bgs;
1268         struct list_head *io_bgs = &trans->transaction->io_bgs;
1269         struct list_head *next;
1270         struct extent_buffer *eb;
1271         int ret;
1272
1273         /*
1274          * At this point no one can be using this transaction to modify any tree
1275          * and no one can start another transaction to modify any tree either.
1276          */
1277         ASSERT(trans->transaction->state == TRANS_STATE_COMMIT_DOING);
1278
1279         eb = btrfs_lock_root_node(fs_info->tree_root);
1280         ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
1281                               0, &eb, BTRFS_NESTING_COW);
1282         btrfs_tree_unlock(eb);
1283         free_extent_buffer(eb);
1284
1285         if (ret)
1286                 return ret;
1287
1288         ret = btrfs_run_dev_stats(trans);
1289         if (ret)
1290                 return ret;
1291         ret = btrfs_run_dev_replace(trans);
1292         if (ret)
1293                 return ret;
1294         ret = btrfs_run_qgroups(trans);
1295         if (ret)
1296                 return ret;
1297
1298         ret = btrfs_setup_space_cache(trans);
1299         if (ret)
1300                 return ret;
1301
1302 again:
1303         while (!list_empty(&fs_info->dirty_cowonly_roots)) {
1304                 struct btrfs_root *root;
1305                 next = fs_info->dirty_cowonly_roots.next;
1306                 list_del_init(next);
1307                 root = list_entry(next, struct btrfs_root, dirty_list);
1308                 clear_bit(BTRFS_ROOT_DIRTY, &root->state);
1309
1310                 list_add_tail(&root->dirty_list,
1311                               &trans->transaction->switch_commits);
1312                 ret = update_cowonly_root(trans, root);
1313                 if (ret)
1314                         return ret;
1315         }
1316
1317         /* Now flush any delayed refs generated by updating all of the roots */
1318         ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
1319         if (ret)
1320                 return ret;
1321
1322         while (!list_empty(dirty_bgs) || !list_empty(io_bgs)) {
1323                 ret = btrfs_write_dirty_block_groups(trans);
1324                 if (ret)
1325                         return ret;
1326
1327                 /*
1328                  * We're writing the dirty block groups, which could generate
1329                  * delayed refs, which could generate more dirty block groups,
1330                  * so we want to keep this flushing in this loop to make sure
1331                  * everything gets run.
1332                  */
1333                 ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
1334                 if (ret)
1335                         return ret;
1336         }
1337
1338         if (!list_empty(&fs_info->dirty_cowonly_roots))
1339                 goto again;
1340
1341         /* Update dev-replace pointer once everything is committed */
1342         fs_info->dev_replace.committed_cursor_left =
1343                 fs_info->dev_replace.cursor_left_last_write_of_item;
1344
1345         return 0;
1346 }
1347
1348 /*
1349  * If we had a pending drop we need to see if there are any others left in our
1350  * dead roots list, and if not clear our bit and wake any waiters.
1351  */
1352 void btrfs_maybe_wake_unfinished_drop(struct btrfs_fs_info *fs_info)
1353 {
1354         /*
1355          * We put the drop in progress roots at the front of the list, so if the
1356          * first entry doesn't have UNFINISHED_DROP set we can wake everybody
1357          * up.
1358          */
1359         spin_lock(&fs_info->trans_lock);
1360         if (!list_empty(&fs_info->dead_roots)) {
1361                 struct btrfs_root *root = list_first_entry(&fs_info->dead_roots,
1362                                                            struct btrfs_root,
1363                                                            root_list);
1364                 if (test_bit(BTRFS_ROOT_UNFINISHED_DROP, &root->state)) {
1365                         spin_unlock(&fs_info->trans_lock);
1366                         return;
1367                 }
1368         }
1369         spin_unlock(&fs_info->trans_lock);
1370
1371         btrfs_wake_unfinished_drop(fs_info);
1372 }
1373
1374 /*
1375  * dead roots are old snapshots that need to be deleted.  This allocates
1376  * a dirty root struct and adds it into the list of dead roots that need to
1377  * be deleted
1378  */
1379 void btrfs_add_dead_root(struct btrfs_root *root)
1380 {
1381         struct btrfs_fs_info *fs_info = root->fs_info;
1382
1383         spin_lock(&fs_info->trans_lock);
1384         if (list_empty(&root->root_list)) {
1385                 btrfs_grab_root(root);
1386
1387                 /* We want to process the partially complete drops first. */
1388                 if (test_bit(BTRFS_ROOT_UNFINISHED_DROP, &root->state))
1389                         list_add(&root->root_list, &fs_info->dead_roots);
1390                 else
1391                         list_add_tail(&root->root_list, &fs_info->dead_roots);
1392         }
1393         spin_unlock(&fs_info->trans_lock);
1394 }
1395
1396 /*
1397  * Update each subvolume root and its relocation root, if it exists, in the tree
1398  * of tree roots. Also free log roots if they exist.
1399  */
1400 static noinline int commit_fs_roots(struct btrfs_trans_handle *trans)
1401 {
1402         struct btrfs_fs_info *fs_info = trans->fs_info;
1403         struct btrfs_root *gang[8];
1404         int i;
1405         int ret;
1406
1407         /*
1408          * At this point no one can be using this transaction to modify any tree
1409          * and no one can start another transaction to modify any tree either.
1410          */
1411         ASSERT(trans->transaction->state == TRANS_STATE_COMMIT_DOING);
1412
1413         spin_lock(&fs_info->fs_roots_radix_lock);
1414         while (1) {
1415                 ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
1416                                                  (void **)gang, 0,
1417                                                  ARRAY_SIZE(gang),
1418                                                  BTRFS_ROOT_TRANS_TAG);
1419                 if (ret == 0)
1420                         break;
1421                 for (i = 0; i < ret; i++) {
1422                         struct btrfs_root *root = gang[i];
1423                         int ret2;
1424
1425                         /*
1426                          * At this point we can neither have tasks logging inodes
1427                          * from a root nor trying to commit a log tree.
1428                          */
1429                         ASSERT(atomic_read(&root->log_writers) == 0);
1430                         ASSERT(atomic_read(&root->log_commit[0]) == 0);
1431                         ASSERT(atomic_read(&root->log_commit[1]) == 0);
1432
1433                         radix_tree_tag_clear(&fs_info->fs_roots_radix,
1434                                         (unsigned long)root->root_key.objectid,
1435                                         BTRFS_ROOT_TRANS_TAG);
1436                         spin_unlock(&fs_info->fs_roots_radix_lock);
1437
1438                         btrfs_free_log(trans, root);
1439                         ret2 = btrfs_update_reloc_root(trans, root);
1440                         if (ret2)
1441                                 return ret2;
1442
1443                         /* see comments in should_cow_block() */
1444                         clear_bit(BTRFS_ROOT_FORCE_COW, &root->state);
1445                         smp_mb__after_atomic();
1446
1447                         if (root->commit_root != root->node) {
1448                                 list_add_tail(&root->dirty_list,
1449                                         &trans->transaction->switch_commits);
1450                                 btrfs_set_root_node(&root->root_item,
1451                                                     root->node);
1452                         }
1453
1454                         ret2 = btrfs_update_root(trans, fs_info->tree_root,
1455                                                 &root->root_key,
1456                                                 &root->root_item);
1457                         if (ret2)
1458                                 return ret2;
1459                         spin_lock(&fs_info->fs_roots_radix_lock);
1460                         btrfs_qgroup_free_meta_all_pertrans(root);
1461                 }
1462         }
1463         spin_unlock(&fs_info->fs_roots_radix_lock);
1464         return 0;
1465 }
1466
1467 /*
1468  * defrag a given btree.
1469  * Every leaf in the btree is read and defragged.
1470  */
1471 int btrfs_defrag_root(struct btrfs_root *root)
1472 {
1473         struct btrfs_fs_info *info = root->fs_info;
1474         struct btrfs_trans_handle *trans;
1475         int ret;
1476
1477         if (test_and_set_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state))
1478                 return 0;
1479
1480         while (1) {
1481                 trans = btrfs_start_transaction(root, 0);
1482                 if (IS_ERR(trans)) {
1483                         ret = PTR_ERR(trans);
1484                         break;
1485                 }
1486
1487                 ret = btrfs_defrag_leaves(trans, root);
1488
1489                 btrfs_end_transaction(trans);
1490                 btrfs_btree_balance_dirty(info);
1491                 cond_resched();
1492
1493                 if (btrfs_fs_closing(info) || ret != -EAGAIN)
1494                         break;
1495
1496                 if (btrfs_defrag_cancelled(info)) {
1497                         btrfs_debug(info, "defrag_root cancelled");
1498                         ret = -EAGAIN;
1499                         break;
1500                 }
1501         }
1502         clear_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state);
1503         return ret;
1504 }
1505
1506 /*
1507  * Do all special snapshot related qgroup dirty hack.
1508  *
1509  * Will do all needed qgroup inherit and dirty hack like switch commit
1510  * roots inside one transaction and write all btree into disk, to make
1511  * qgroup works.
1512  */
1513 static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
1514                                    struct btrfs_root *src,
1515                                    struct btrfs_root *parent,
1516                                    struct btrfs_qgroup_inherit *inherit,
1517                                    u64 dst_objectid)
1518 {
1519         struct btrfs_fs_info *fs_info = src->fs_info;
1520         int ret;
1521
1522         /*
1523          * Save some performance in the case that qgroups are not
1524          * enabled. If this check races with the ioctl, rescan will
1525          * kick in anyway.
1526          */
1527         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
1528                 return 0;
1529
1530         /*
1531          * Ensure dirty @src will be committed.  Or, after coming
1532          * commit_fs_roots() and switch_commit_roots(), any dirty but not
1533          * recorded root will never be updated again, causing an outdated root
1534          * item.
1535          */
1536         ret = record_root_in_trans(trans, src, 1);
1537         if (ret)
1538                 return ret;
1539
1540         /*
1541          * btrfs_qgroup_inherit relies on a consistent view of the usage for the
1542          * src root, so we must run the delayed refs here.
1543          *
1544          * However this isn't particularly fool proof, because there's no
1545          * synchronization keeping us from changing the tree after this point
1546          * before we do the qgroup_inherit, or even from making changes while
1547          * we're doing the qgroup_inherit.  But that's a problem for the future,
1548          * for now flush the delayed refs to narrow the race window where the
1549          * qgroup counters could end up wrong.
1550          */
1551         ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
1552         if (ret) {
1553                 btrfs_abort_transaction(trans, ret);
1554                 return ret;
1555         }
1556
1557         ret = commit_fs_roots(trans);
1558         if (ret)
1559                 goto out;
1560         ret = btrfs_qgroup_account_extents(trans);
1561         if (ret < 0)
1562                 goto out;
1563
1564         /* Now qgroup are all updated, we can inherit it to new qgroups */
1565         ret = btrfs_qgroup_inherit(trans, src->root_key.objectid, dst_objectid,
1566                                    inherit);
1567         if (ret < 0)
1568                 goto out;
1569
1570         /*
1571          * Now we do a simplified commit transaction, which will:
1572          * 1) commit all subvolume and extent tree
1573          *    To ensure all subvolume and extent tree have a valid
1574          *    commit_root to accounting later insert_dir_item()
1575          * 2) write all btree blocks onto disk
1576          *    This is to make sure later btree modification will be cowed
1577          *    Or commit_root can be populated and cause wrong qgroup numbers
1578          * In this simplified commit, we don't really care about other trees
1579          * like chunk and root tree, as they won't affect qgroup.
1580          * And we don't write super to avoid half committed status.
1581          */
1582         ret = commit_cowonly_roots(trans);
1583         if (ret)
1584                 goto out;
1585         switch_commit_roots(trans);
1586         ret = btrfs_write_and_wait_transaction(trans);
1587         if (ret)
1588                 btrfs_handle_fs_error(fs_info, ret,
1589                         "Error while writing out transaction for qgroup");
1590
1591 out:
1592         /*
1593          * Force parent root to be updated, as we recorded it before so its
1594          * last_trans == cur_transid.
1595          * Or it won't be committed again onto disk after later
1596          * insert_dir_item()
1597          */
1598         if (!ret)
1599                 ret = record_root_in_trans(trans, parent, 1);
1600         return ret;
1601 }
1602
1603 /*
1604  * new snapshots need to be created at a very specific time in the
1605  * transaction commit.  This does the actual creation.
1606  *
1607  * Note:
1608  * If the error which may affect the commitment of the current transaction
1609  * happens, we should return the error number. If the error which just affect
1610  * the creation of the pending snapshots, just return 0.
1611  */
1612 static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
1613                                    struct btrfs_pending_snapshot *pending)
1614 {
1615
1616         struct btrfs_fs_info *fs_info = trans->fs_info;
1617         struct btrfs_key key;
1618         struct btrfs_root_item *new_root_item;
1619         struct btrfs_root *tree_root = fs_info->tree_root;
1620         struct btrfs_root *root = pending->root;
1621         struct btrfs_root *parent_root;
1622         struct btrfs_block_rsv *rsv;
1623         struct inode *parent_inode = pending->dir;
1624         struct btrfs_path *path;
1625         struct btrfs_dir_item *dir_item;
1626         struct extent_buffer *tmp;
1627         struct extent_buffer *old;
1628         struct timespec64 cur_time;
1629         int ret = 0;
1630         u64 to_reserve = 0;
1631         u64 index = 0;
1632         u64 objectid;
1633         u64 root_flags;
1634         unsigned int nofs_flags;
1635         struct fscrypt_name fname;
1636
1637         ASSERT(pending->path);
1638         path = pending->path;
1639
1640         ASSERT(pending->root_item);
1641         new_root_item = pending->root_item;
1642
1643         /*
1644          * We're inside a transaction and must make sure that any potential
1645          * allocations with GFP_KERNEL in fscrypt won't recurse back to
1646          * filesystem.
1647          */
1648         nofs_flags = memalloc_nofs_save();
1649         pending->error = fscrypt_setup_filename(parent_inode,
1650                                                 &pending->dentry->d_name, 0,
1651                                                 &fname);
1652         memalloc_nofs_restore(nofs_flags);
1653         if (pending->error)
1654                 goto free_pending;
1655
1656         pending->error = btrfs_get_free_objectid(tree_root, &objectid);
1657         if (pending->error)
1658                 goto free_fname;
1659
1660         /*
1661          * Make qgroup to skip current new snapshot's qgroupid, as it is
1662          * accounted by later btrfs_qgroup_inherit().
1663          */
1664         btrfs_set_skip_qgroup(trans, objectid);
1665
1666         btrfs_reloc_pre_snapshot(pending, &to_reserve);
1667
1668         if (to_reserve > 0) {
1669                 pending->error = btrfs_block_rsv_add(fs_info,
1670                                                      &pending->block_rsv,
1671                                                      to_reserve,
1672                                                      BTRFS_RESERVE_NO_FLUSH);
1673                 if (pending->error)
1674                         goto clear_skip_qgroup;
1675         }
1676
1677         key.objectid = objectid;
1678         key.offset = (u64)-1;
1679         key.type = BTRFS_ROOT_ITEM_KEY;
1680
1681         rsv = trans->block_rsv;
1682         trans->block_rsv = &pending->block_rsv;
1683         trans->bytes_reserved = trans->block_rsv->reserved;
1684         trace_btrfs_space_reservation(fs_info, "transaction",
1685                                       trans->transid,
1686                                       trans->bytes_reserved, 1);
1687         parent_root = BTRFS_I(parent_inode)->root;
1688         ret = record_root_in_trans(trans, parent_root, 0);
1689         if (ret)
1690                 goto fail;
1691         cur_time = current_time(parent_inode);
1692
1693         /*
1694          * insert the directory item
1695          */
1696         ret = btrfs_set_inode_index(BTRFS_I(parent_inode), &index);
1697         BUG_ON(ret); /* -ENOMEM */
1698
1699         /* check if there is a file/dir which has the same name. */
1700         dir_item = btrfs_lookup_dir_item(NULL, parent_root, path,
1701                                          btrfs_ino(BTRFS_I(parent_inode)),
1702                                          &fname.disk_name, 0);
1703         if (dir_item != NULL && !IS_ERR(dir_item)) {
1704                 pending->error = -EEXIST;
1705                 goto dir_item_existed;
1706         } else if (IS_ERR(dir_item)) {
1707                 ret = PTR_ERR(dir_item);
1708                 btrfs_abort_transaction(trans, ret);
1709                 goto fail;
1710         }
1711         btrfs_release_path(path);
1712
1713         /*
1714          * pull in the delayed directory update
1715          * and the delayed inode item
1716          * otherwise we corrupt the FS during
1717          * snapshot
1718          */
1719         ret = btrfs_run_delayed_items(trans);
1720         if (ret) {      /* Transaction aborted */
1721                 btrfs_abort_transaction(trans, ret);
1722                 goto fail;
1723         }
1724
1725         ret = record_root_in_trans(trans, root, 0);
1726         if (ret) {
1727                 btrfs_abort_transaction(trans, ret);
1728                 goto fail;
1729         }
1730         btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
1731         memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
1732         btrfs_check_and_init_root_item(new_root_item);
1733
1734         root_flags = btrfs_root_flags(new_root_item);
1735         if (pending->readonly)
1736                 root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
1737         else
1738                 root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
1739         btrfs_set_root_flags(new_root_item, root_flags);
1740
1741         btrfs_set_root_generation_v2(new_root_item,
1742                         trans->transid);
1743         generate_random_guid(new_root_item->uuid);
1744         memcpy(new_root_item->parent_uuid, root->root_item.uuid,
1745                         BTRFS_UUID_SIZE);
1746         if (!(root_flags & BTRFS_ROOT_SUBVOL_RDONLY)) {
1747                 memset(new_root_item->received_uuid, 0,
1748                        sizeof(new_root_item->received_uuid));
1749                 memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
1750                 memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
1751                 btrfs_set_root_stransid(new_root_item, 0);
1752                 btrfs_set_root_rtransid(new_root_item, 0);
1753         }
1754         btrfs_set_stack_timespec_sec(&new_root_item->otime, cur_time.tv_sec);
1755         btrfs_set_stack_timespec_nsec(&new_root_item->otime, cur_time.tv_nsec);
1756         btrfs_set_root_otransid(new_root_item, trans->transid);
1757
1758         old = btrfs_lock_root_node(root);
1759         ret = btrfs_cow_block(trans, root, old, NULL, 0, &old,
1760                               BTRFS_NESTING_COW);
1761         if (ret) {
1762                 btrfs_tree_unlock(old);
1763                 free_extent_buffer(old);
1764                 btrfs_abort_transaction(trans, ret);
1765                 goto fail;
1766         }
1767
1768         ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
1769         /* clean up in any case */
1770         btrfs_tree_unlock(old);
1771         free_extent_buffer(old);
1772         if (ret) {
1773                 btrfs_abort_transaction(trans, ret);
1774                 goto fail;
1775         }
1776         /* see comments in should_cow_block() */
1777         set_bit(BTRFS_ROOT_FORCE_COW, &root->state);
1778         smp_wmb();
1779
1780         btrfs_set_root_node(new_root_item, tmp);
1781         /* record when the snapshot was created in key.offset */
1782         key.offset = trans->transid;
1783         ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
1784         btrfs_tree_unlock(tmp);
1785         free_extent_buffer(tmp);
1786         if (ret) {
1787                 btrfs_abort_transaction(trans, ret);
1788                 goto fail;
1789         }
1790
1791         /*
1792          * insert root back/forward references
1793          */
1794         ret = btrfs_add_root_ref(trans, objectid,
1795                                  parent_root->root_key.objectid,
1796                                  btrfs_ino(BTRFS_I(parent_inode)), index,
1797                                  &fname.disk_name);
1798         if (ret) {
1799                 btrfs_abort_transaction(trans, ret);
1800                 goto fail;
1801         }
1802
1803         key.offset = (u64)-1;
1804         pending->snap = btrfs_get_new_fs_root(fs_info, objectid, pending->anon_dev);
1805         if (IS_ERR(pending->snap)) {
1806                 ret = PTR_ERR(pending->snap);
1807                 pending->snap = NULL;
1808                 btrfs_abort_transaction(trans, ret);
1809                 goto fail;
1810         }
1811
1812         ret = btrfs_reloc_post_snapshot(trans, pending);
1813         if (ret) {
1814                 btrfs_abort_transaction(trans, ret);
1815                 goto fail;
1816         }
1817
1818         /*
1819          * Do special qgroup accounting for snapshot, as we do some qgroup
1820          * snapshot hack to do fast snapshot.
1821          * To co-operate with that hack, we do hack again.
1822          * Or snapshot will be greatly slowed down by a subtree qgroup rescan
1823          */
1824         ret = qgroup_account_snapshot(trans, root, parent_root,
1825                                       pending->inherit, objectid);
1826         if (ret < 0)
1827                 goto fail;
1828
1829         ret = btrfs_insert_dir_item(trans, &fname.disk_name,
1830                                     BTRFS_I(parent_inode), &key, BTRFS_FT_DIR,
1831                                     index);
1832         /* We have check then name at the beginning, so it is impossible. */
1833         BUG_ON(ret == -EEXIST || ret == -EOVERFLOW);
1834         if (ret) {
1835                 btrfs_abort_transaction(trans, ret);
1836                 goto fail;
1837         }
1838
1839         btrfs_i_size_write(BTRFS_I(parent_inode), parent_inode->i_size +
1840                                                   fname.disk_name.len * 2);
1841         parent_inode->i_mtime = current_time(parent_inode);
1842         parent_inode->i_ctime = parent_inode->i_mtime;
1843         ret = btrfs_update_inode_fallback(trans, parent_root, BTRFS_I(parent_inode));
1844         if (ret) {
1845                 btrfs_abort_transaction(trans, ret);
1846                 goto fail;
1847         }
1848         ret = btrfs_uuid_tree_add(trans, new_root_item->uuid,
1849                                   BTRFS_UUID_KEY_SUBVOL,
1850                                   objectid);
1851         if (ret) {
1852                 btrfs_abort_transaction(trans, ret);
1853                 goto fail;
1854         }
1855         if (!btrfs_is_empty_uuid(new_root_item->received_uuid)) {
1856                 ret = btrfs_uuid_tree_add(trans, new_root_item->received_uuid,
1857                                           BTRFS_UUID_KEY_RECEIVED_SUBVOL,
1858                                           objectid);
1859                 if (ret && ret != -EEXIST) {
1860                         btrfs_abort_transaction(trans, ret);
1861                         goto fail;
1862                 }
1863         }
1864
1865 fail:
1866         pending->error = ret;
1867 dir_item_existed:
1868         trans->block_rsv = rsv;
1869         trans->bytes_reserved = 0;
1870 clear_skip_qgroup:
1871         btrfs_clear_skip_qgroup(trans);
1872 free_fname:
1873         fscrypt_free_filename(&fname);
1874 free_pending:
1875         kfree(new_root_item);
1876         pending->root_item = NULL;
1877         btrfs_free_path(path);
1878         pending->path = NULL;
1879
1880         return ret;
1881 }
1882
1883 /*
1884  * create all the snapshots we've scheduled for creation
1885  */
1886 static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans)
1887 {
1888         struct btrfs_pending_snapshot *pending, *next;
1889         struct list_head *head = &trans->transaction->pending_snapshots;
1890         int ret = 0;
1891
1892         list_for_each_entry_safe(pending, next, head, list) {
1893                 list_del(&pending->list);
1894                 ret = create_pending_snapshot(trans, pending);
1895                 if (ret)
1896                         break;
1897         }
1898         return ret;
1899 }
1900
1901 static void update_super_roots(struct btrfs_fs_info *fs_info)
1902 {
1903         struct btrfs_root_item *root_item;
1904         struct btrfs_super_block *super;
1905
1906         super = fs_info->super_copy;
1907
1908         root_item = &fs_info->chunk_root->root_item;
1909         super->chunk_root = root_item->bytenr;
1910         super->chunk_root_generation = root_item->generation;
1911         super->chunk_root_level = root_item->level;
1912
1913         root_item = &fs_info->tree_root->root_item;
1914         super->root = root_item->bytenr;
1915         super->generation = root_item->generation;
1916         super->root_level = root_item->level;
1917         if (btrfs_test_opt(fs_info, SPACE_CACHE))
1918                 super->cache_generation = root_item->generation;
1919         else if (test_bit(BTRFS_FS_CLEANUP_SPACE_CACHE_V1, &fs_info->flags))
1920                 super->cache_generation = 0;
1921         if (test_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags))
1922                 super->uuid_tree_generation = root_item->generation;
1923 }
1924
1925 int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
1926 {
1927         struct btrfs_transaction *trans;
1928         int ret = 0;
1929
1930         spin_lock(&info->trans_lock);
1931         trans = info->running_transaction;
1932         if (trans)
1933                 ret = (trans->state >= TRANS_STATE_COMMIT_START);
1934         spin_unlock(&info->trans_lock);
1935         return ret;
1936 }
1937
1938 int btrfs_transaction_blocked(struct btrfs_fs_info *info)
1939 {
1940         struct btrfs_transaction *trans;
1941         int ret = 0;
1942
1943         spin_lock(&info->trans_lock);
1944         trans = info->running_transaction;
1945         if (trans)
1946                 ret = is_transaction_blocked(trans);
1947         spin_unlock(&info->trans_lock);
1948         return ret;
1949 }
1950
1951 void btrfs_commit_transaction_async(struct btrfs_trans_handle *trans)
1952 {
1953         struct btrfs_fs_info *fs_info = trans->fs_info;
1954         struct btrfs_transaction *cur_trans;
1955
1956         /* Kick the transaction kthread. */
1957         set_bit(BTRFS_FS_COMMIT_TRANS, &fs_info->flags);
1958         wake_up_process(fs_info->transaction_kthread);
1959
1960         /* take transaction reference */
1961         cur_trans = trans->transaction;
1962         refcount_inc(&cur_trans->use_count);
1963
1964         btrfs_end_transaction(trans);
1965
1966         /*
1967          * Wait for the current transaction commit to start and block
1968          * subsequent transaction joins
1969          */
1970         btrfs_might_wait_for_state(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_START);
1971         wait_event(fs_info->transaction_blocked_wait,
1972                    cur_trans->state >= TRANS_STATE_COMMIT_START ||
1973                    TRANS_ABORTED(cur_trans));
1974         btrfs_put_transaction(cur_trans);
1975 }
1976
1977 static void cleanup_transaction(struct btrfs_trans_handle *trans, int err)
1978 {
1979         struct btrfs_fs_info *fs_info = trans->fs_info;
1980         struct btrfs_transaction *cur_trans = trans->transaction;
1981
1982         WARN_ON(refcount_read(&trans->use_count) > 1);
1983
1984         btrfs_abort_transaction(trans, err);
1985
1986         spin_lock(&fs_info->trans_lock);
1987
1988         /*
1989          * If the transaction is removed from the list, it means this
1990          * transaction has been committed successfully, so it is impossible
1991          * to call the cleanup function.
1992          */
1993         BUG_ON(list_empty(&cur_trans->list));
1994
1995         if (cur_trans == fs_info->running_transaction) {
1996                 cur_trans->state = TRANS_STATE_COMMIT_DOING;
1997                 spin_unlock(&fs_info->trans_lock);
1998
1999                 /*
2000                  * The thread has already released the lockdep map as reader
2001                  * already in btrfs_commit_transaction().
2002                  */
2003                 btrfs_might_wait_for_event(fs_info, btrfs_trans_num_writers);
2004                 wait_event(cur_trans->writer_wait,
2005                            atomic_read(&cur_trans->num_writers) == 1);
2006
2007                 spin_lock(&fs_info->trans_lock);
2008         }
2009
2010         /*
2011          * Now that we know no one else is still using the transaction we can
2012          * remove the transaction from the list of transactions. This avoids
2013          * the transaction kthread from cleaning up the transaction while some
2014          * other task is still using it, which could result in a use-after-free
2015          * on things like log trees, as it forces the transaction kthread to
2016          * wait for this transaction to be cleaned up by us.
2017          */
2018         list_del_init(&cur_trans->list);
2019
2020         spin_unlock(&fs_info->trans_lock);
2021
2022         btrfs_cleanup_one_transaction(trans->transaction, fs_info);
2023
2024         spin_lock(&fs_info->trans_lock);
2025         if (cur_trans == fs_info->running_transaction)
2026                 fs_info->running_transaction = NULL;
2027         spin_unlock(&fs_info->trans_lock);
2028
2029         if (trans->type & __TRANS_FREEZABLE)
2030                 sb_end_intwrite(fs_info->sb);
2031         btrfs_put_transaction(cur_trans);
2032         btrfs_put_transaction(cur_trans);
2033
2034         trace_btrfs_transaction_commit(fs_info);
2035
2036         if (current->journal_info == trans)
2037                 current->journal_info = NULL;
2038         btrfs_scrub_cancel(fs_info);
2039
2040         kmem_cache_free(btrfs_trans_handle_cachep, trans);
2041 }
2042
2043 /*
2044  * Release reserved delayed ref space of all pending block groups of the
2045  * transaction and remove them from the list
2046  */
2047 static void btrfs_cleanup_pending_block_groups(struct btrfs_trans_handle *trans)
2048 {
2049        struct btrfs_fs_info *fs_info = trans->fs_info;
2050        struct btrfs_block_group *block_group, *tmp;
2051
2052        list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
2053                btrfs_delayed_refs_rsv_release(fs_info, 1);
2054                list_del_init(&block_group->bg_list);
2055        }
2056 }
2057
2058 static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
2059 {
2060         /*
2061          * We use try_to_writeback_inodes_sb() here because if we used
2062          * btrfs_start_delalloc_roots we would deadlock with fs freeze.
2063          * Currently are holding the fs freeze lock, if we do an async flush
2064          * we'll do btrfs_join_transaction() and deadlock because we need to
2065          * wait for the fs freeze lock.  Using the direct flushing we benefit
2066          * from already being in a transaction and our join_transaction doesn't
2067          * have to re-take the fs freeze lock.
2068          *
2069          * Note that try_to_writeback_inodes_sb() will only trigger writeback
2070          * if it can read lock sb->s_umount. It will always be able to lock it,
2071          * except when the filesystem is being unmounted or being frozen, but in
2072          * those cases sync_filesystem() is called, which results in calling
2073          * writeback_inodes_sb() while holding a write lock on sb->s_umount.
2074          * Note that we don't call writeback_inodes_sb() directly, because it
2075          * will emit a warning if sb->s_umount is not locked.
2076          */
2077         if (btrfs_test_opt(fs_info, FLUSHONCOMMIT))
2078                 try_to_writeback_inodes_sb(fs_info->sb, WB_REASON_SYNC);
2079         return 0;
2080 }
2081
2082 static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info)
2083 {
2084         if (btrfs_test_opt(fs_info, FLUSHONCOMMIT))
2085                 btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
2086 }
2087
2088 /*
2089  * Add a pending snapshot associated with the given transaction handle to the
2090  * respective handle. This must be called after the transaction commit started
2091  * and while holding fs_info->trans_lock.
2092  * This serves to guarantee a caller of btrfs_commit_transaction() that it can
2093  * safely free the pending snapshot pointer in case btrfs_commit_transaction()
2094  * returns an error.
2095  */
2096 static void add_pending_snapshot(struct btrfs_trans_handle *trans)
2097 {
2098         struct btrfs_transaction *cur_trans = trans->transaction;
2099
2100         if (!trans->pending_snapshot)
2101                 return;
2102
2103         lockdep_assert_held(&trans->fs_info->trans_lock);
2104         ASSERT(cur_trans->state >= TRANS_STATE_COMMIT_START);
2105
2106         list_add(&trans->pending_snapshot->list, &cur_trans->pending_snapshots);
2107 }
2108
2109 static void update_commit_stats(struct btrfs_fs_info *fs_info, ktime_t interval)
2110 {
2111         fs_info->commit_stats.commit_count++;
2112         fs_info->commit_stats.last_commit_dur = interval;
2113         fs_info->commit_stats.max_commit_dur =
2114                         max_t(u64, fs_info->commit_stats.max_commit_dur, interval);
2115         fs_info->commit_stats.total_commit_dur += interval;
2116 }
2117
2118 int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
2119 {
2120         struct btrfs_fs_info *fs_info = trans->fs_info;
2121         struct btrfs_transaction *cur_trans = trans->transaction;
2122         struct btrfs_transaction *prev_trans = NULL;
2123         int ret;
2124         ktime_t start_time;
2125         ktime_t interval;
2126
2127         ASSERT(refcount_read(&trans->use_count) == 1);
2128         btrfs_trans_state_lockdep_acquire(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_START);
2129
2130         clear_bit(BTRFS_FS_NEED_TRANS_COMMIT, &fs_info->flags);
2131
2132         /* Stop the commit early if ->aborted is set */
2133         if (TRANS_ABORTED(cur_trans)) {
2134                 ret = cur_trans->aborted;
2135                 goto lockdep_trans_commit_start_release;
2136         }
2137
2138         btrfs_trans_release_metadata(trans);
2139         trans->block_rsv = NULL;
2140
2141         /*
2142          * We only want one transaction commit doing the flushing so we do not
2143          * waste a bunch of time on lock contention on the extent root node.
2144          */
2145         if (!test_and_set_bit(BTRFS_DELAYED_REFS_FLUSHING,
2146                               &cur_trans->delayed_refs.flags)) {
2147                 /*
2148                  * Make a pass through all the delayed refs we have so far.
2149                  * Any running threads may add more while we are here.
2150                  */
2151                 ret = btrfs_run_delayed_refs(trans, 0);
2152                 if (ret)
2153                         goto lockdep_trans_commit_start_release;
2154         }
2155
2156         btrfs_create_pending_block_groups(trans);
2157
2158         if (!test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &cur_trans->flags)) {
2159                 int run_it = 0;
2160
2161                 /* this mutex is also taken before trying to set
2162                  * block groups readonly.  We need to make sure
2163                  * that nobody has set a block group readonly
2164                  * after a extents from that block group have been
2165                  * allocated for cache files.  btrfs_set_block_group_ro
2166                  * will wait for the transaction to commit if it
2167                  * finds BTRFS_TRANS_DIRTY_BG_RUN set.
2168                  *
2169                  * The BTRFS_TRANS_DIRTY_BG_RUN flag is also used to make sure
2170                  * only one process starts all the block group IO.  It wouldn't
2171                  * hurt to have more than one go through, but there's no
2172                  * real advantage to it either.
2173                  */
2174                 mutex_lock(&fs_info->ro_block_group_mutex);
2175                 if (!test_and_set_bit(BTRFS_TRANS_DIRTY_BG_RUN,
2176                                       &cur_trans->flags))
2177                         run_it = 1;
2178                 mutex_unlock(&fs_info->ro_block_group_mutex);
2179
2180                 if (run_it) {
2181                         ret = btrfs_start_dirty_block_groups(trans);
2182                         if (ret)
2183                                 goto lockdep_trans_commit_start_release;
2184                 }
2185         }
2186
2187         spin_lock(&fs_info->trans_lock);
2188         if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
2189                 enum btrfs_trans_state want_state = TRANS_STATE_COMPLETED;
2190
2191                 add_pending_snapshot(trans);
2192
2193                 spin_unlock(&fs_info->trans_lock);
2194                 refcount_inc(&cur_trans->use_count);
2195
2196                 if (trans->in_fsync)
2197                         want_state = TRANS_STATE_SUPER_COMMITTED;
2198
2199                 btrfs_trans_state_lockdep_release(fs_info,
2200                                                   BTRFS_LOCKDEP_TRANS_COMMIT_START);
2201                 ret = btrfs_end_transaction(trans);
2202                 wait_for_commit(cur_trans, want_state);
2203
2204                 if (TRANS_ABORTED(cur_trans))
2205                         ret = cur_trans->aborted;
2206
2207                 btrfs_put_transaction(cur_trans);
2208
2209                 return ret;
2210         }
2211
2212         cur_trans->state = TRANS_STATE_COMMIT_START;
2213         wake_up(&fs_info->transaction_blocked_wait);
2214         btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_START);
2215
2216         if (cur_trans->list.prev != &fs_info->trans_list) {
2217                 enum btrfs_trans_state want_state = TRANS_STATE_COMPLETED;
2218
2219                 if (trans->in_fsync)
2220                         want_state = TRANS_STATE_SUPER_COMMITTED;
2221
2222                 prev_trans = list_entry(cur_trans->list.prev,
2223                                         struct btrfs_transaction, list);
2224                 if (prev_trans->state < want_state) {
2225                         refcount_inc(&prev_trans->use_count);
2226                         spin_unlock(&fs_info->trans_lock);
2227
2228                         wait_for_commit(prev_trans, want_state);
2229
2230                         ret = READ_ONCE(prev_trans->aborted);
2231
2232                         btrfs_put_transaction(prev_trans);
2233                         if (ret)
2234                                 goto lockdep_release;
2235                 } else {
2236                         spin_unlock(&fs_info->trans_lock);
2237                 }
2238         } else {
2239                 spin_unlock(&fs_info->trans_lock);
2240                 /*
2241                  * The previous transaction was aborted and was already removed
2242                  * from the list of transactions at fs_info->trans_list. So we
2243                  * abort to prevent writing a new superblock that reflects a
2244                  * corrupt state (pointing to trees with unwritten nodes/leafs).
2245                  */
2246                 if (BTRFS_FS_ERROR(fs_info)) {
2247                         ret = -EROFS;
2248                         goto lockdep_release;
2249                 }
2250         }
2251
2252         /*
2253          * Get the time spent on the work done by the commit thread and not
2254          * the time spent waiting on a previous commit
2255          */
2256         start_time = ktime_get_ns();
2257
2258         extwriter_counter_dec(cur_trans, trans->type);
2259
2260         ret = btrfs_start_delalloc_flush(fs_info);
2261         if (ret)
2262                 goto lockdep_release;
2263
2264         ret = btrfs_run_delayed_items(trans);
2265         if (ret)
2266                 goto lockdep_release;
2267
2268         /*
2269          * The thread has started/joined the transaction thus it holds the
2270          * lockdep map as a reader. It has to release it before acquiring the
2271          * lockdep map as a writer.
2272          */
2273         btrfs_lockdep_release(fs_info, btrfs_trans_num_extwriters);
2274         btrfs_might_wait_for_event(fs_info, btrfs_trans_num_extwriters);
2275         wait_event(cur_trans->writer_wait,
2276                    extwriter_counter_read(cur_trans) == 0);
2277
2278         /* some pending stuffs might be added after the previous flush. */
2279         ret = btrfs_run_delayed_items(trans);
2280         if (ret) {
2281                 btrfs_lockdep_release(fs_info, btrfs_trans_num_writers);
2282                 goto cleanup_transaction;
2283         }
2284
2285         btrfs_wait_delalloc_flush(fs_info);
2286
2287         /*
2288          * Wait for all ordered extents started by a fast fsync that joined this
2289          * transaction. Otherwise if this transaction commits before the ordered
2290          * extents complete we lose logged data after a power failure.
2291          */
2292         btrfs_might_wait_for_event(fs_info, btrfs_trans_pending_ordered);
2293         wait_event(cur_trans->pending_wait,
2294                    atomic_read(&cur_trans->pending_ordered) == 0);
2295
2296         btrfs_scrub_pause(fs_info);
2297         /*
2298          * Ok now we need to make sure to block out any other joins while we
2299          * commit the transaction.  We could have started a join before setting
2300          * COMMIT_DOING so make sure to wait for num_writers to == 1 again.
2301          */
2302         spin_lock(&fs_info->trans_lock);
2303         add_pending_snapshot(trans);
2304         cur_trans->state = TRANS_STATE_COMMIT_DOING;
2305         spin_unlock(&fs_info->trans_lock);
2306
2307         /*
2308          * The thread has started/joined the transaction thus it holds the
2309          * lockdep map as a reader. It has to release it before acquiring the
2310          * lockdep map as a writer.
2311          */
2312         btrfs_lockdep_release(fs_info, btrfs_trans_num_writers);
2313         btrfs_might_wait_for_event(fs_info, btrfs_trans_num_writers);
2314         wait_event(cur_trans->writer_wait,
2315                    atomic_read(&cur_trans->num_writers) == 1);
2316
2317         /*
2318          * Make lockdep happy by acquiring the state locks after
2319          * btrfs_trans_num_writers is released. If we acquired the state locks
2320          * before releasing the btrfs_trans_num_writers lock then lockdep would
2321          * complain because we did not follow the reverse order unlocking rule.
2322          */
2323         btrfs_trans_state_lockdep_acquire(fs_info, BTRFS_LOCKDEP_TRANS_COMPLETED);
2324         btrfs_trans_state_lockdep_acquire(fs_info, BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED);
2325         btrfs_trans_state_lockdep_acquire(fs_info, BTRFS_LOCKDEP_TRANS_UNBLOCKED);
2326
2327         /*
2328          * We've started the commit, clear the flag in case we were triggered to
2329          * do an async commit but somebody else started before the transaction
2330          * kthread could do the work.
2331          */
2332         clear_bit(BTRFS_FS_COMMIT_TRANS, &fs_info->flags);
2333
2334         if (TRANS_ABORTED(cur_trans)) {
2335                 ret = cur_trans->aborted;
2336                 btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_UNBLOCKED);
2337                 goto scrub_continue;
2338         }
2339         /*
2340          * the reloc mutex makes sure that we stop
2341          * the balancing code from coming in and moving
2342          * extents around in the middle of the commit
2343          */
2344         mutex_lock(&fs_info->reloc_mutex);
2345
2346         /*
2347          * We needn't worry about the delayed items because we will
2348          * deal with them in create_pending_snapshot(), which is the
2349          * core function of the snapshot creation.
2350          */
2351         ret = create_pending_snapshots(trans);
2352         if (ret)
2353                 goto unlock_reloc;
2354
2355         /*
2356          * We insert the dir indexes of the snapshots and update the inode
2357          * of the snapshots' parents after the snapshot creation, so there
2358          * are some delayed items which are not dealt with. Now deal with
2359          * them.
2360          *
2361          * We needn't worry that this operation will corrupt the snapshots,
2362          * because all the tree which are snapshoted will be forced to COW
2363          * the nodes and leaves.
2364          */
2365         ret = btrfs_run_delayed_items(trans);
2366         if (ret)
2367                 goto unlock_reloc;
2368
2369         ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
2370         if (ret)
2371                 goto unlock_reloc;
2372
2373         /*
2374          * make sure none of the code above managed to slip in a
2375          * delayed item
2376          */
2377         btrfs_assert_delayed_root_empty(fs_info);
2378
2379         WARN_ON(cur_trans != trans->transaction);
2380
2381         ret = commit_fs_roots(trans);
2382         if (ret)
2383                 goto unlock_reloc;
2384
2385         /* commit_fs_roots gets rid of all the tree log roots, it is now
2386          * safe to free the root of tree log roots
2387          */
2388         btrfs_free_log_root_tree(trans, fs_info);
2389
2390         /*
2391          * Since fs roots are all committed, we can get a quite accurate
2392          * new_roots. So let's do quota accounting.
2393          */
2394         ret = btrfs_qgroup_account_extents(trans);
2395         if (ret < 0)
2396                 goto unlock_reloc;
2397
2398         ret = commit_cowonly_roots(trans);
2399         if (ret)
2400                 goto unlock_reloc;
2401
2402         /*
2403          * The tasks which save the space cache and inode cache may also
2404          * update ->aborted, check it.
2405          */
2406         if (TRANS_ABORTED(cur_trans)) {
2407                 ret = cur_trans->aborted;
2408                 goto unlock_reloc;
2409         }
2410
2411         cur_trans = fs_info->running_transaction;
2412
2413         btrfs_set_root_node(&fs_info->tree_root->root_item,
2414                             fs_info->tree_root->node);
2415         list_add_tail(&fs_info->tree_root->dirty_list,
2416                       &cur_trans->switch_commits);
2417
2418         btrfs_set_root_node(&fs_info->chunk_root->root_item,
2419                             fs_info->chunk_root->node);
2420         list_add_tail(&fs_info->chunk_root->dirty_list,
2421                       &cur_trans->switch_commits);
2422
2423         if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
2424                 btrfs_set_root_node(&fs_info->block_group_root->root_item,
2425                                     fs_info->block_group_root->node);
2426                 list_add_tail(&fs_info->block_group_root->dirty_list,
2427                               &cur_trans->switch_commits);
2428         }
2429
2430         switch_commit_roots(trans);
2431
2432         ASSERT(list_empty(&cur_trans->dirty_bgs));
2433         ASSERT(list_empty(&cur_trans->io_bgs));
2434         update_super_roots(fs_info);
2435
2436         btrfs_set_super_log_root(fs_info->super_copy, 0);
2437         btrfs_set_super_log_root_level(fs_info->super_copy, 0);
2438         memcpy(fs_info->super_for_commit, fs_info->super_copy,
2439                sizeof(*fs_info->super_copy));
2440
2441         btrfs_commit_device_sizes(cur_trans);
2442
2443         clear_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags);
2444         clear_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags);
2445
2446         btrfs_trans_release_chunk_metadata(trans);
2447
2448         /*
2449          * Before changing the transaction state to TRANS_STATE_UNBLOCKED and
2450          * setting fs_info->running_transaction to NULL, lock tree_log_mutex to
2451          * make sure that before we commit our superblock, no other task can
2452          * start a new transaction and commit a log tree before we commit our
2453          * superblock. Anyone trying to commit a log tree locks this mutex before
2454          * writing its superblock.
2455          */
2456         mutex_lock(&fs_info->tree_log_mutex);
2457
2458         spin_lock(&fs_info->trans_lock);
2459         cur_trans->state = TRANS_STATE_UNBLOCKED;
2460         fs_info->running_transaction = NULL;
2461         spin_unlock(&fs_info->trans_lock);
2462         mutex_unlock(&fs_info->reloc_mutex);
2463
2464         wake_up(&fs_info->transaction_wait);
2465         btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_UNBLOCKED);
2466
2467         /* If we have features changed, wake up the cleaner to update sysfs. */
2468         if (test_bit(BTRFS_FS_FEATURE_CHANGED, &fs_info->flags) &&
2469             fs_info->cleaner_kthread)
2470                 wake_up_process(fs_info->cleaner_kthread);
2471
2472         ret = btrfs_write_and_wait_transaction(trans);
2473         if (ret) {
2474                 btrfs_handle_fs_error(fs_info, ret,
2475                                       "Error while writing out transaction");
2476                 mutex_unlock(&fs_info->tree_log_mutex);
2477                 goto scrub_continue;
2478         }
2479
2480         /*
2481          * At this point, we should have written all the tree blocks allocated
2482          * in this transaction. So it's now safe to free the redirtyied extent
2483          * buffers.
2484          */
2485         btrfs_free_redirty_list(cur_trans);
2486
2487         ret = write_all_supers(fs_info, 0);
2488         /*
2489          * the super is written, we can safely allow the tree-loggers
2490          * to go about their business
2491          */
2492         mutex_unlock(&fs_info->tree_log_mutex);
2493         if (ret)
2494                 goto scrub_continue;
2495
2496         /*
2497          * We needn't acquire the lock here because there is no other task
2498          * which can change it.
2499          */
2500         cur_trans->state = TRANS_STATE_SUPER_COMMITTED;
2501         wake_up(&cur_trans->commit_wait);
2502         btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED);
2503
2504         btrfs_finish_extent_commit(trans);
2505
2506         if (test_bit(BTRFS_TRANS_HAVE_FREE_BGS, &cur_trans->flags))
2507                 btrfs_clear_space_info_full(fs_info);
2508
2509         fs_info->last_trans_committed = cur_trans->transid;
2510         /*
2511          * We needn't acquire the lock here because there is no other task
2512          * which can change it.
2513          */
2514         cur_trans->state = TRANS_STATE_COMPLETED;
2515         wake_up(&cur_trans->commit_wait);
2516         btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMPLETED);
2517
2518         spin_lock(&fs_info->trans_lock);
2519         list_del_init(&cur_trans->list);
2520         spin_unlock(&fs_info->trans_lock);
2521
2522         btrfs_put_transaction(cur_trans);
2523         btrfs_put_transaction(cur_trans);
2524
2525         if (trans->type & __TRANS_FREEZABLE)
2526                 sb_end_intwrite(fs_info->sb);
2527
2528         trace_btrfs_transaction_commit(fs_info);
2529
2530         interval = ktime_get_ns() - start_time;
2531
2532         btrfs_scrub_continue(fs_info);
2533
2534         if (current->journal_info == trans)
2535                 current->journal_info = NULL;
2536
2537         kmem_cache_free(btrfs_trans_handle_cachep, trans);
2538
2539         update_commit_stats(fs_info, interval);
2540
2541         return ret;
2542
2543 unlock_reloc:
2544         mutex_unlock(&fs_info->reloc_mutex);
2545         btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_UNBLOCKED);
2546 scrub_continue:
2547         btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED);
2548         btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMPLETED);
2549         btrfs_scrub_continue(fs_info);
2550 cleanup_transaction:
2551         btrfs_trans_release_metadata(trans);
2552         btrfs_cleanup_pending_block_groups(trans);
2553         btrfs_trans_release_chunk_metadata(trans);
2554         trans->block_rsv = NULL;
2555         btrfs_warn(fs_info, "Skipping commit of aborted transaction.");
2556         if (current->journal_info == trans)
2557                 current->journal_info = NULL;
2558         cleanup_transaction(trans, ret);
2559
2560         return ret;
2561
2562 lockdep_release:
2563         btrfs_lockdep_release(fs_info, btrfs_trans_num_extwriters);
2564         btrfs_lockdep_release(fs_info, btrfs_trans_num_writers);
2565         goto cleanup_transaction;
2566
2567 lockdep_trans_commit_start_release:
2568         btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_START);
2569         btrfs_end_transaction(trans);
2570         return ret;
2571 }
2572
2573 /*
2574  * return < 0 if error
2575  * 0 if there are no more dead_roots at the time of call
2576  * 1 there are more to be processed, call me again
2577  *
2578  * The return value indicates there are certainly more snapshots to delete, but
2579  * if there comes a new one during processing, it may return 0. We don't mind,
2580  * because btrfs_commit_super will poke cleaner thread and it will process it a
2581  * few seconds later.
2582  */
2583 int btrfs_clean_one_deleted_snapshot(struct btrfs_fs_info *fs_info)
2584 {
2585         struct btrfs_root *root;
2586         int ret;
2587
2588         spin_lock(&fs_info->trans_lock);
2589         if (list_empty(&fs_info->dead_roots)) {
2590                 spin_unlock(&fs_info->trans_lock);
2591                 return 0;
2592         }
2593         root = list_first_entry(&fs_info->dead_roots,
2594                         struct btrfs_root, root_list);
2595         list_del_init(&root->root_list);
2596         spin_unlock(&fs_info->trans_lock);
2597
2598         btrfs_debug(fs_info, "cleaner removing %llu", root->root_key.objectid);
2599
2600         btrfs_kill_all_delayed_nodes(root);
2601
2602         if (btrfs_header_backref_rev(root->node) <
2603                         BTRFS_MIXED_BACKREF_REV)
2604                 ret = btrfs_drop_snapshot(root, 0, 0);
2605         else
2606                 ret = btrfs_drop_snapshot(root, 1, 0);
2607
2608         btrfs_put_root(root);
2609         return (ret < 0) ? 0 : 1;
2610 }
2611
2612 /*
2613  * We only mark the transaction aborted and then set the file system read-only.
2614  * This will prevent new transactions from starting or trying to join this
2615  * one.
2616  *
2617  * This means that error recovery at the call site is limited to freeing
2618  * any local memory allocations and passing the error code up without
2619  * further cleanup. The transaction should complete as it normally would
2620  * in the call path but will return -EIO.
2621  *
2622  * We'll complete the cleanup in btrfs_end_transaction and
2623  * btrfs_commit_transaction.
2624  */
2625 void __cold __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
2626                                       const char *function,
2627                                       unsigned int line, int errno, bool first_hit)
2628 {
2629         struct btrfs_fs_info *fs_info = trans->fs_info;
2630
2631         WRITE_ONCE(trans->aborted, errno);
2632         WRITE_ONCE(trans->transaction->aborted, errno);
2633         if (first_hit && errno == -ENOSPC)
2634                 btrfs_dump_space_info_for_trans_abort(fs_info);
2635         /* Wake up anybody who may be waiting on this transaction */
2636         wake_up(&fs_info->transaction_wait);
2637         wake_up(&fs_info->transaction_blocked_wait);
2638         __btrfs_handle_fs_error(fs_info, function, line, errno, NULL);
2639 }
2640
2641 int __init btrfs_transaction_init(void)
2642 {
2643         btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle",
2644                         sizeof(struct btrfs_trans_handle), 0,
2645                         SLAB_TEMPORARY | SLAB_MEM_SPREAD, NULL);
2646         if (!btrfs_trans_handle_cachep)
2647                 return -ENOMEM;
2648         return 0;
2649 }
2650
2651 void __cold btrfs_transaction_exit(void)
2652 {
2653         kmem_cache_destroy(btrfs_trans_handle_cachep);
2654 }