Commit | Line | Data |
---|---|---|
c1d7c514 | 1 | // SPDX-License-Identifier: GPL-2.0 |
6cbd5570 CM |
2 | /* |
3 | * Copyright (C) 2007 Oracle. All rights reserved. | |
6cbd5570 CM |
4 | */ |
5 | ||
79154b1b | 6 | #include <linux/fs.h> |
5a0e3ad6 | 7 | #include <linux/slab.h> |
34088780 | 8 | #include <linux/sched.h> |
ab3c5c18 | 9 | #include <linux/sched/mm.h> |
d3c2fdcf | 10 | #include <linux/writeback.h> |
5f39d397 | 11 | #include <linux/pagemap.h> |
5f2cc086 | 12 | #include <linux/blkdev.h> |
8ea05e3a | 13 | #include <linux/uuid.h> |
e55958c8 | 14 | #include <linux/timekeeping.h> |
602cbe91 | 15 | #include "misc.h" |
79154b1b CM |
16 | #include "ctree.h" |
17 | #include "disk-io.h" | |
18 | #include "transaction.h" | |
925baedd | 19 | #include "locking.h" |
e02119d5 | 20 | #include "tree-log.h" |
733f4fbb | 21 | #include "volumes.h" |
8dabb742 | 22 | #include "dev-replace.h" |
fcebe456 | 23 | #include "qgroup.h" |
aac0023c | 24 | #include "block-group.h" |
9c343784 | 25 | #include "space-info.h" |
c7f13d42 | 26 | #include "fs.h" |
07e81dc9 | 27 | #include "accessors.h" |
a0231804 | 28 | #include "extent-tree.h" |
45c40c8f | 29 | #include "root-tree.h" |
f2b39277 | 30 | #include "dir-item.h" |
c7a03b52 | 31 | #include "uuid-tree.h" |
7572dec8 | 32 | #include "ioctl.h" |
67707479 | 33 | #include "relocation.h" |
2fc6822c | 34 | #include "scrub.h" |
79154b1b | 35 | |
956504a3 JB |
36 | static struct kmem_cache *btrfs_trans_handle_cachep; |
37 | ||
61c047b5 QW |
38 | /* |
39 | * Transaction states and transitions | |
40 | * | |
41 | * No running transaction (fs tree blocks are not modified) | |
42 | * | | |
43 | * | To next stage: | |
44 | * | Call any start_transaction() variant except btrfs_join_transaction_nostart(). | |
45 | * V | |
46 | * Transaction N [[TRANS_STATE_RUNNING]] | |
47 | * | | |
48 | * | New trans handles can be attached to transaction N by calling any of |
49 | * | the start_transaction() variants. | |
50 | * | | |
51 | * | To next stage: | |
52 | * | Call btrfs_commit_transaction() on any trans handle attached to | |
53 | * | transaction N | |
54 | * V | |
77d20c68 JB |
55 | * Transaction N [[TRANS_STATE_COMMIT_PREP]] |
56 | * | | |
57 | * | If there are simultaneous calls to btrfs_commit_transaction(), one will win | |
58 | * | the race and the rest will wait for the winner to commit the transaction. | |
59 | * | | |
60 | * | The winner will wait for the previous running transaction to completely finish | |
61 | * | if there is one. | |
61c047b5 | 62 | * | |
77d20c68 | 63 | * Transaction N [[TRANS_STATE_COMMIT_START]] |
61c047b5 | 64 | * | |
77d20c68 | 65 | * | Then one of the following happens: |
61c047b5 QW |
66 | * | - Wait for all other trans handle holders to release. |
67 | * | The btrfs_commit_transaction() caller will do the commit work. | |
68 | * | - Wait for current transaction to be committed by others. | |
69 | * | Other btrfs_commit_transaction() caller will do the commit work. | |
70 | * | | |
71 | * | At this stage, only btrfs_join_transaction*() variants can attach | |
72 | * | to this running transaction. | |
73 | * | All other variants will wait for the current one to finish and attach to | |
74 | * | transaction N+1. | |
75 | * | | |
76 | * | To next stage: | |
77 | * | Caller is chosen to commit transaction N, and all other trans handles | |
78 | * | have been released. | |
79 | * V | |
80 | * Transaction N [[TRANS_STATE_COMMIT_DOING]] | |
81 | * | | |
82 | * | The heavy lifting transaction work is started. | |
83 | * | From running delayed refs (modifying extent tree) to creating pending | |
84 | * | snapshots, running qgroups. | |
85 | * | In short, modify supporting trees to reflect modifications of subvolume | |
86 | * | trees. | |
87 | * | | |
88 | * | At this stage, all start_transaction() calls will wait for this | |
89 | * | transaction to finish and attach to transaction N+1. | |
90 | * | | |
91 | * | To next stage: | |
92 | * | Until all supporting trees are updated. | |
93 | * V | |
94 | * Transaction N [[TRANS_STATE_UNBLOCKED]] | |
95 | * | Transaction N+1 | |
96 | * | All needed trees are modified, thus we only [[TRANS_STATE_RUNNING]] | |
97 | * | need to write them back to disk and update | | |
98 | * | super blocks. | | |
99 | * | | | |
100 | * | At this stage, a new transaction is allowed to | |
101 | * | start. | | |
102 | * | All new start_transaction() calls will be | | |
103 | * | attached to transid N+1. | | |
104 | * | | | |
105 | * | To next stage: | | |
106 | * | Until all tree blocks and super blocks are | |
107 | * | written to block devices | | |
108 | * V | | |
109 | * Transaction N [[TRANS_STATE_COMPLETED]] V | |
110 | * All tree blocks and super blocks are written. Transaction N+1 | |
111 | * This transaction is finished and all its [[TRANS_STATE_COMMIT_START]] | |
112 | * data structures will be cleaned up. | Life goes on | |
113 | */ | |
e8c9f186 | 114 | static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = { |
4a9d8bde | 115 | [TRANS_STATE_RUNNING] = 0U, |
77d20c68 | 116 | [TRANS_STATE_COMMIT_PREP] = 0U, |
bcf3a3e7 NB |
117 | [TRANS_STATE_COMMIT_START] = (__TRANS_START | __TRANS_ATTACH), |
118 | [TRANS_STATE_COMMIT_DOING] = (__TRANS_START | | |
4a9d8bde | 119 | __TRANS_ATTACH | |
a6d155d2 FM |
120 | __TRANS_JOIN | |
121 | __TRANS_JOIN_NOSTART), | |
bcf3a3e7 | 122 | [TRANS_STATE_UNBLOCKED] = (__TRANS_START | |
4a9d8bde MX |
123 | __TRANS_ATTACH | |
124 | __TRANS_JOIN | | |
a6d155d2 FM |
125 | __TRANS_JOIN_NOLOCK | |
126 | __TRANS_JOIN_NOSTART), | |
d0c2f4fa FM |
127 | [TRANS_STATE_SUPER_COMMITTED] = (__TRANS_START | |
128 | __TRANS_ATTACH | | |
129 | __TRANS_JOIN | | |
130 | __TRANS_JOIN_NOLOCK | | |
131 | __TRANS_JOIN_NOSTART), | |
bcf3a3e7 | 132 | [TRANS_STATE_COMPLETED] = (__TRANS_START | |
4a9d8bde MX |
133 | __TRANS_ATTACH | |
134 | __TRANS_JOIN | | |
a6d155d2 FM |
135 | __TRANS_JOIN_NOLOCK | |
136 | __TRANS_JOIN_NOSTART), | |
4a9d8bde MX |
137 | }; |
138 | ||
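To make the state machine and blocked-type table above concrete, here is a minimal sketch, not part of transaction.c, of how an internal caller typically drives a transaction; example_update() and the item count are assumptions, while btrfs_start_transaction(), btrfs_end_transaction() and btrfs_commit_transaction() are the real entry points defined or used in this file.

/*
 * Illustrative sketch only, not part of transaction.c: a typical internal
 * caller walks the states above by starting (or joining) a transaction,
 * modifying trees, and then detaching or committing.
 */
static int example_update(struct btrfs_root *root)
{
        struct btrfs_trans_handle *trans;
        int ret;

        /* Reserve space for one item's worth of tree modifications. */
        trans = btrfs_start_transaction(root, 1);
        if (IS_ERR(trans))
                return PTR_ERR(trans);

        /* ... modify btrees through helpers that take @trans ... */

        /* Detach; transaction N stays in TRANS_STATE_RUNNING for others. */
        ret = btrfs_end_transaction(trans);

        /*
         * Alternatively, force the COMMIT_PREP -> ... -> COMPLETED path
         * described above with: ret = btrfs_commit_transaction(trans);
         */
        return ret;
}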
724e2315 | 139 | void btrfs_put_transaction(struct btrfs_transaction *transaction) |
79154b1b | 140 | { |
9b64f57d ER |
141 | WARN_ON(refcount_read(&transaction->use_count) == 0); |
142 | if (refcount_dec_and_test(&transaction->use_count)) { | |
a4abeea4 | 143 | BUG_ON(!list_empty(&transaction->list)); |
928ed134 | 144 | WARN_ON(!xa_empty(&transaction->delayed_refs.head_refs)); |
3cce39a8 | 145 | WARN_ON(!xa_empty(&transaction->delayed_refs.dirty_extents)); |
1262133b | 146 | if (transaction->delayed_refs.pending_csums) |
ab8d0fc4 JM |
147 | btrfs_err(transaction->fs_info, |
148 | "pending csums is %llu", | |
149 | transaction->delayed_refs.pending_csums); | |
7785a663 FM |
150 | /* |
151 | * If any block groups are found in ->deleted_bgs then it's | |
152 | * because the transaction was aborted and a commit did not | |
153 | * happen (things failed before writing the new superblock | |
154 | * and calling btrfs_finish_extent_commit()), so we can not | |
155 | * discard the physical locations of the block groups. | |
156 | */ | |
157 | while (!list_empty(&transaction->deleted_bgs)) { | |
32da5386 | 158 | struct btrfs_block_group *cache; |
7785a663 FM |
159 | |
160 | cache = list_first_entry(&transaction->deleted_bgs, | |
32da5386 | 161 | struct btrfs_block_group, |
7785a663 | 162 | bg_list); |
7511e29c BB |
163 | /* |
164 | * Not strictly necessary to lock, as no other task will be using a | |
165 | * block_group on the deleted_bgs list during a transaction abort. | |
166 | */ | |
167 | spin_lock(&transaction->fs_info->unused_bgs_lock); | |
7785a663 | 168 | list_del_init(&cache->bg_list); |
7511e29c | 169 | spin_unlock(&transaction->fs_info->unused_bgs_lock); |
6b7304af | 170 | btrfs_unfreeze_block_group(cache); |
7785a663 FM |
171 | btrfs_put_block_group(cache); |
172 | } | |
bbbf7243 | 173 | WARN_ON(!list_empty(&transaction->dev_update_list)); |
4b5faeac | 174 | kfree(transaction); |
78fae27e | 175 | } |
79154b1b CM |
176 | } |
177 | ||
889bfa39 | 178 | static noinline void switch_commit_roots(struct btrfs_trans_handle *trans) |
817d52f8 | 179 | { |
889bfa39 | 180 | struct btrfs_transaction *cur_trans = trans->transaction; |
16916a88 | 181 | struct btrfs_fs_info *fs_info = trans->fs_info; |
9e351cc8 JB |
182 | struct btrfs_root *root, *tmp; |
183 | ||
dfba78dc FM |
184 | /* |
185 | * At this point no one can be using this transaction to modify any tree | |
186 | * and no one can start another transaction to modify any tree either. | |
187 | */ | |
188 | ASSERT(cur_trans->state == TRANS_STATE_COMMIT_DOING); | |
189 | ||
9e351cc8 | 190 | down_write(&fs_info->commit_root_sem); |
d96b3424 FM |
191 | |
192 | if (test_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags)) | |
193 | fs_info->last_reloc_trans = trans->transid; | |
194 | ||
889bfa39 | 195 | list_for_each_entry_safe(root, tmp, &cur_trans->switch_commits, |
9e351cc8 JB |
196 | dirty_list) { |
197 | list_del_init(&root->dirty_list); | |
198 | free_extent_buffer(root->commit_root); | |
199 | root->commit_root = btrfs_root_node(root); | |
e965835c | 200 | btrfs_extent_io_tree_release(&root->dirty_log_pages); |
370a11b8 | 201 | btrfs_qgroup_clean_swapped_blocks(root); |
9e351cc8 | 202 | } |
2b9dbef2 JB |
203 | |
204 | /* We can free old roots now. */ | |
889bfa39 JB |
205 | spin_lock(&cur_trans->dropped_roots_lock); |
206 | while (!list_empty(&cur_trans->dropped_roots)) { | |
207 | root = list_first_entry(&cur_trans->dropped_roots, | |
2b9dbef2 JB |
208 | struct btrfs_root, root_list); |
209 | list_del_init(&root->root_list); | |
889bfa39 JB |
210 | spin_unlock(&cur_trans->dropped_roots_lock); |
211 | btrfs_free_log(trans, root); | |
2b9dbef2 | 212 | btrfs_drop_and_free_fs_root(fs_info, root); |
889bfa39 | 213 | spin_lock(&cur_trans->dropped_roots_lock); |
2b9dbef2 | 214 | } |
889bfa39 | 215 | spin_unlock(&cur_trans->dropped_roots_lock); |
27d56e62 | 216 | |
9e351cc8 | 217 | up_write(&fs_info->commit_root_sem); |
817d52f8 JB |
218 | } |
219 | ||
0860adfd MX |
220 | static inline void extwriter_counter_inc(struct btrfs_transaction *trans, |
221 | unsigned int type) | |
222 | { | |
223 | if (type & TRANS_EXTWRITERS) | |
224 | atomic_inc(&trans->num_extwriters); | |
225 | } | |
226 | ||
227 | static inline void extwriter_counter_dec(struct btrfs_transaction *trans, | |
228 | unsigned int type) | |
229 | { | |
230 | if (type & TRANS_EXTWRITERS) | |
231 | atomic_dec(&trans->num_extwriters); | |
232 | } | |
233 | ||
234 | static inline void extwriter_counter_init(struct btrfs_transaction *trans, | |
235 | unsigned int type) | |
236 | { | |
237 | atomic_set(&trans->num_extwriters, ((type & TRANS_EXTWRITERS) ? 1 : 0)); | |
238 | } | |
239 | ||
240 | static inline int extwriter_counter_read(struct btrfs_transaction *trans) | |
241 | { | |
242 | return atomic_read(&trans->num_extwriters); | |
178260b2 MX |
243 | } |
244 | ||
fb6dea26 | 245 | /* |
79bd3712 FM |
246 | * To be called after doing the chunk btree updates right after allocating a new |
247 | * chunk (after btrfs_chunk_alloc_add_chunk_item() is called), when removing a | |
248 | * chunk after all chunk btree updates and after finishing the second phase of | |
249 | * chunk allocation (btrfs_create_pending_block_groups()) in case some block | |
250 | * group had its chunk item insertion delayed to the second phase. | |
fb6dea26 JB |
251 | */ |
252 | void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans) | |
253 | { | |
254 | struct btrfs_fs_info *fs_info = trans->fs_info; | |
255 | ||
256 | if (!trans->chunk_bytes_reserved) | |
257 | return; | |
258 | ||
fb6dea26 | 259 | btrfs_block_rsv_release(fs_info, &fs_info->chunk_block_rsv, |
63f018be | 260 | trans->chunk_bytes_reserved, NULL); |
fb6dea26 JB |
261 | trans->chunk_bytes_reserved = 0; |
262 | } | |
263 | ||
d352ac68 CM |
264 | /* |
265 | * either allocate a new transaction or hop into the existing one | |
266 | */ | |
2ff7e61e JM |
267 | static noinline int join_transaction(struct btrfs_fs_info *fs_info, |
268 | unsigned int type) | |
79154b1b CM |
269 | { |
270 | struct btrfs_transaction *cur_trans; | |
a4abeea4 | 271 | |
19ae4e81 | 272 | spin_lock(&fs_info->trans_lock); |
d43317dc | 273 | loop: |
49b25e05 | 274 | /* The file system has been taken offline. No new transactions. */ |
84961539 | 275 | if (BTRFS_FS_ERROR(fs_info)) { |
19ae4e81 | 276 | spin_unlock(&fs_info->trans_lock); |
49b25e05 JM |
277 | return -EROFS; |
278 | } | |
279 | ||
19ae4e81 | 280 | cur_trans = fs_info->running_transaction; |
a4abeea4 | 281 | if (cur_trans) { |
bf31f87f | 282 | if (TRANS_ABORTED(cur_trans)) { |
e2f0943c FM |
283 | const int abort_error = cur_trans->aborted; |
284 | ||
19ae4e81 | 285 | spin_unlock(&fs_info->trans_lock); |
e2f0943c | 286 | return abort_error; |
871383be | 287 | } |
4a9d8bde | 288 | if (btrfs_blocked_trans_types[cur_trans->state] & type) { |
178260b2 MX |
289 | spin_unlock(&fs_info->trans_lock); |
290 | return -EBUSY; | |
291 | } | |
9b64f57d | 292 | refcount_inc(&cur_trans->use_count); |
13c5a93e | 293 | atomic_inc(&cur_trans->num_writers); |
0860adfd | 294 | extwriter_counter_inc(cur_trans, type); |
19ae4e81 | 295 | spin_unlock(&fs_info->trans_lock); |
e1489b4f | 296 | btrfs_lockdep_acquire(fs_info, btrfs_trans_num_writers); |
5a9ba670 | 297 | btrfs_lockdep_acquire(fs_info, btrfs_trans_num_extwriters); |
a4abeea4 | 298 | return 0; |
79154b1b | 299 | } |
19ae4e81 | 300 | spin_unlock(&fs_info->trans_lock); |
a4abeea4 | 301 | |
354aa0fb | 302 | /* |
4490e803 FM |
303 | * If we are ATTACH or TRANS_JOIN_NOSTART, we just want to catch the |
304 | * current transaction, and commit it. If there is no transaction, just | |
305 | * return ENOENT. | |
354aa0fb | 306 | */ |
4490e803 | 307 | if (type == TRANS_ATTACH || type == TRANS_JOIN_NOSTART) |
354aa0fb MX |
308 | return -ENOENT; |
309 | ||
4a9d8bde MX |
310 | /* |
311 | * JOIN_NOLOCK only happens during the transaction commit, so | |
312 | * it is impossible that ->running_transaction is NULL | |
313 | */ | |
314 | BUG_ON(type == TRANS_JOIN_NOLOCK); | |
315 | ||
4b5faeac | 316 | cur_trans = kmalloc(sizeof(*cur_trans), GFP_NOFS); |
a4abeea4 JB |
317 | if (!cur_trans) |
318 | return -ENOMEM; | |
d43317dc | 319 | |
e1489b4f | 320 | btrfs_lockdep_acquire(fs_info, btrfs_trans_num_writers); |
5a9ba670 | 321 | btrfs_lockdep_acquire(fs_info, btrfs_trans_num_extwriters); |
e1489b4f | 322 | |
19ae4e81 JS |
323 | spin_lock(&fs_info->trans_lock); |
324 | if (fs_info->running_transaction) { | |
d43317dc CM |
325 | /* |
326 | * someone started a transaction after we unlocked. Make sure | |
4a9d8bde | 327 | * to redo the checks above |
d43317dc | 328 | */ |
5a9ba670 | 329 | btrfs_lockdep_release(fs_info, btrfs_trans_num_extwriters); |
e1489b4f | 330 | btrfs_lockdep_release(fs_info, btrfs_trans_num_writers); |
4b5faeac | 331 | kfree(cur_trans); |
d43317dc | 332 | goto loop; |
84961539 | 333 | } else if (BTRFS_FS_ERROR(fs_info)) { |
e4b50e14 | 334 | spin_unlock(&fs_info->trans_lock); |
5a9ba670 | 335 | btrfs_lockdep_release(fs_info, btrfs_trans_num_extwriters); |
e1489b4f | 336 | btrfs_lockdep_release(fs_info, btrfs_trans_num_writers); |
4b5faeac | 337 | kfree(cur_trans); |
7b8b92af | 338 | return -EROFS; |
79154b1b | 339 | } |
d43317dc | 340 | |
ab8d0fc4 | 341 | cur_trans->fs_info = fs_info; |
48778179 FM |
342 | atomic_set(&cur_trans->pending_ordered, 0); |
343 | init_waitqueue_head(&cur_trans->pending_wait); | |
a4abeea4 | 344 | atomic_set(&cur_trans->num_writers, 1); |
0860adfd | 345 | extwriter_counter_init(cur_trans, type); |
a4abeea4 JB |
346 | init_waitqueue_head(&cur_trans->writer_wait); |
347 | init_waitqueue_head(&cur_trans->commit_wait); | |
4a9d8bde | 348 | cur_trans->state = TRANS_STATE_RUNNING; |
a4abeea4 JB |
349 | /* |
350 | * One for this trans handle, one so it will live on until we | |
351 | * commit the transaction. | |
352 | */ | |
9b64f57d | 353 | refcount_set(&cur_trans->use_count, 2); |
3204d33c | 354 | cur_trans->flags = 0; |
afd48513 | 355 | cur_trans->start_time = ktime_get_seconds(); |
a4abeea4 | 356 | |
a099d0fd AM |
357 | memset(&cur_trans->delayed_refs, 0, sizeof(cur_trans->delayed_refs)); |
358 | ||
928ed134 | 359 | xa_init(&cur_trans->delayed_refs.head_refs); |
3cce39a8 | 360 | xa_init(&cur_trans->delayed_refs.dirty_extents); |
20b297d6 JS |
361 | |
362 | /* | |
363 | * although the tree mod log is per file system and not per transaction, | |
364 | * the log must never go across transaction boundaries. | |
365 | */ | |
366 | smp_mb(); | |
31b1a2bd | 367 | if (!list_empty(&fs_info->tree_mod_seq_list)) |
5d163e0e | 368 | WARN(1, KERN_ERR "BTRFS: tree_mod_seq_list not empty when creating a fresh transaction\n"); |
31b1a2bd | 369 | if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log)) |
5d163e0e | 370 | WARN(1, KERN_ERR "BTRFS: tree_mod_log rb tree not empty when creating a fresh transaction\n"); |
fc36ed7e | 371 | atomic64_set(&fs_info->tree_mod_seq, 0); |
20b297d6 | 372 | |
a4abeea4 JB |
373 | spin_lock_init(&cur_trans->delayed_refs.lock); |
374 | ||
375 | INIT_LIST_HEAD(&cur_trans->pending_snapshots); | |
bbbf7243 | 376 | INIT_LIST_HEAD(&cur_trans->dev_update_list); |
9e351cc8 | 377 | INIT_LIST_HEAD(&cur_trans->switch_commits); |
ce93ec54 | 378 | INIT_LIST_HEAD(&cur_trans->dirty_bgs); |
1bbc621e | 379 | INIT_LIST_HEAD(&cur_trans->io_bgs); |
2b9dbef2 | 380 | INIT_LIST_HEAD(&cur_trans->dropped_roots); |
1bbc621e | 381 | mutex_init(&cur_trans->cache_write_mutex); |
ce93ec54 | 382 | spin_lock_init(&cur_trans->dirty_bgs_lock); |
e33e17ee | 383 | INIT_LIST_HEAD(&cur_trans->deleted_bgs); |
2b9dbef2 | 384 | spin_lock_init(&cur_trans->dropped_roots_lock); |
19ae4e81 | 385 | list_add_tail(&cur_trans->list, &fs_info->trans_list); |
e965835c FM |
386 | btrfs_extent_io_tree_init(fs_info, &cur_trans->dirty_pages, |
387 | IO_TREE_TRANS_DIRTY_PAGES); | |
388 | btrfs_extent_io_tree_init(fs_info, &cur_trans->pinned_extents, | |
389 | IO_TREE_FS_PINNED_EXTENTS); | |
4a4f8fe2 | 390 | btrfs_set_fs_generation(fs_info, fs_info->generation + 1); |
19ae4e81 JS |
391 | cur_trans->transid = fs_info->generation; |
392 | fs_info->running_transaction = cur_trans; | |
49b25e05 | 393 | cur_trans->aborted = 0; |
19ae4e81 | 394 | spin_unlock(&fs_info->trans_lock); |
15ee9bc7 | 395 | |
79154b1b CM |
396 | return 0; |
397 | } | |
398 | ||
d352ac68 | 399 | /* |
92a7cc42 QW |
400 | * This does all the record keeping required to make sure that a shareable root |
401 | * is properly recorded in a given transaction. This is required to make sure | |
402 | * the old root from before we joined the transaction is deleted when the | |
403 | * transaction commits. | |
d352ac68 | 404 | */ |
7585717f | 405 | static int record_root_in_trans(struct btrfs_trans_handle *trans, |
6426c7ad QW |
406 | struct btrfs_root *root, |
407 | int force) | |
6702ed49 | 408 | { |
0b246afa | 409 | struct btrfs_fs_info *fs_info = root->fs_info; |
03a7e111 | 410 | int ret = 0; |
0b246afa | 411 | |
92a7cc42 | 412 | if ((test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && |
ca84529a | 413 | btrfs_get_root_last_trans(root) < trans->transid) || force) { |
4d31778a | 414 | WARN_ON(!force && root->commit_root != root->node); |
5d4f98a2 | 415 | |
7585717f | 416 | /* |
27cdeb70 | 417 | * see below for IN_TRANS_SETUP usage rules |
7585717f CM |
418 | * we have the reloc mutex held now, so there |
419 | * is only one writer in this function | |
420 | */ | |
27cdeb70 | 421 | set_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state); |
7585717f | 422 | |
27cdeb70 | 423 | /* make sure readers find IN_TRANS_SETUP before |
7585717f CM |
424 | * they find our root->last_trans update |
425 | */ | |
426 | smp_wmb(); | |
427 | ||
fc7cbcd4 | 428 | spin_lock(&fs_info->fs_roots_radix_lock); |
ca84529a | 429 | if (btrfs_get_root_last_trans(root) == trans->transid && !force) { |
fc7cbcd4 | 430 | spin_unlock(&fs_info->fs_roots_radix_lock); |
a4abeea4 JB |
431 | return 0; |
432 | } | |
fc7cbcd4 | 433 | radix_tree_tag_set(&fs_info->fs_roots_radix, |
e094f480 | 434 | (unsigned long)btrfs_root_id(root), |
fc7cbcd4 DS |
435 | BTRFS_ROOT_TRANS_TAG); |
436 | spin_unlock(&fs_info->fs_roots_radix_lock); | |
ca84529a | 437 | btrfs_set_root_last_trans(root, trans->transid); |
7585717f CM |
438 | |
439 | /* this is pretty tricky. We don't want to | |
440 | * take the relocation lock in btrfs_record_root_in_trans | |
441 | * unless we're really doing the first setup for this root in | |
442 | * this transaction. | |
443 | * | |
444 | * Normally we'd use root->last_trans as a flag to decide | |
445 | * if we want to take the expensive mutex. | |
446 | * | |
447 | * But, we have to set root->last_trans before we | |
448 | * init the relocation root, otherwise, we trip over warnings | |
449 | * in ctree.c. The solution used here is to flag ourselves | |
27cdeb70 | 450 | * with root IN_TRANS_SETUP. When this is 1, we're still |
7585717f CM |
451 | * fixing up the reloc trees and everyone must wait. |
452 | * | |
453 | * When this is zero, they can trust root->last_trans and fly | |
454 | * through btrfs_record_root_in_trans without having to take the | |
455 | * lock. smp_wmb() makes sure that all the writes above are | |
456 | * done before we pop in the zero below | |
457 | */ | |
03a7e111 | 458 | ret = btrfs_init_reloc_root(trans, root); |
c7548af6 | 459 | smp_mb__before_atomic(); |
27cdeb70 | 460 | clear_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state); |
5d4f98a2 | 461 | } |
03a7e111 | 462 | return ret; |
5d4f98a2 | 463 | } |
bcc63abb | 464 | |
7585717f | 465 | |
2b9dbef2 JB |
466 | void btrfs_add_dropped_root(struct btrfs_trans_handle *trans, |
467 | struct btrfs_root *root) | |
468 | { | |
0b246afa | 469 | struct btrfs_fs_info *fs_info = root->fs_info; |
2b9dbef2 JB |
470 | struct btrfs_transaction *cur_trans = trans->transaction; |
471 | ||
472 | /* Add ourselves to the transaction dropped list */ | |
473 | spin_lock(&cur_trans->dropped_roots_lock); | |
474 | list_add_tail(&root->root_list, &cur_trans->dropped_roots); | |
475 | spin_unlock(&cur_trans->dropped_roots_lock); | |
476 | ||
477 | /* Make sure we don't try to update the root at commit time */ | |
fc7cbcd4 DS |
478 | spin_lock(&fs_info->fs_roots_radix_lock); |
479 | radix_tree_tag_clear(&fs_info->fs_roots_radix, | |
e094f480 | 480 | (unsigned long)btrfs_root_id(root), |
fc7cbcd4 DS |
481 | BTRFS_ROOT_TRANS_TAG); |
482 | spin_unlock(&fs_info->fs_roots_radix_lock); | |
2b9dbef2 JB |
483 | } |
484 | ||
7585717f CM |
485 | int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans, |
486 | struct btrfs_root *root) | |
487 | { | |
0b246afa | 488 | struct btrfs_fs_info *fs_info = root->fs_info; |
1409e6cc | 489 | int ret; |
0b246afa | 490 | |
92a7cc42 | 491 | if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) |
7585717f CM |
492 | return 0; |
493 | ||
494 | /* | |
27cdeb70 | 495 | * see record_root_in_trans for comments about IN_TRANS_SETUP usage |
7585717f CM |
496 | * and barriers |
497 | */ | |
498 | smp_rmb(); | |
ca84529a | 499 | if (btrfs_get_root_last_trans(root) == trans->transid && |
27cdeb70 | 500 | !test_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state)) |
7585717f CM |
501 | return 0; |
502 | ||
0b246afa | 503 | mutex_lock(&fs_info->reloc_mutex); |
1409e6cc | 504 | ret = record_root_in_trans(trans, root, 0); |
0b246afa | 505 | mutex_unlock(&fs_info->reloc_mutex); |
7585717f | 506 | |
1409e6cc | 507 | return ret; |
7585717f CM |
508 | } |
509 | ||
4a9d8bde MX |
510 | static inline int is_transaction_blocked(struct btrfs_transaction *trans) |
511 | { | |
3296bf56 | 512 | return (trans->state >= TRANS_STATE_COMMIT_START && |
501407aa | 513 | trans->state < TRANS_STATE_UNBLOCKED && |
bf31f87f | 514 | !TRANS_ABORTED(trans)); |
4a9d8bde MX |
515 | } |
516 | ||
d352ac68 CM |
517 | /* wait for commit against the current transaction to become unblocked |
518 | * when this is done, it is safe to start a new transaction, but the current | |
519 | * transaction might not be fully on disk. | |
520 | */ | |
2ff7e61e | 521 | static void wait_current_trans(struct btrfs_fs_info *fs_info) |
79154b1b | 522 | { |
f9295749 | 523 | struct btrfs_transaction *cur_trans; |
79154b1b | 524 | |
0b246afa JM |
525 | spin_lock(&fs_info->trans_lock); |
526 | cur_trans = fs_info->running_transaction; | |
4a9d8bde | 527 | if (cur_trans && is_transaction_blocked(cur_trans)) { |
9b64f57d | 528 | refcount_inc(&cur_trans->use_count); |
0b246afa | 529 | spin_unlock(&fs_info->trans_lock); |
72d63ed6 | 530 | |
3e738c53 | 531 | btrfs_might_wait_for_state(fs_info, BTRFS_LOCKDEP_TRANS_UNBLOCKED); |
0b246afa | 532 | wait_event(fs_info->transaction_wait, |
501407aa | 533 | cur_trans->state >= TRANS_STATE_UNBLOCKED || |
bf31f87f | 534 | TRANS_ABORTED(cur_trans)); |
724e2315 | 535 | btrfs_put_transaction(cur_trans); |
a4abeea4 | 536 | } else { |
0b246afa | 537 | spin_unlock(&fs_info->trans_lock); |
f9295749 | 538 | } |
37d1aeee CM |
539 | } |
540 | ||
f963e012 | 541 | static bool may_wait_transaction(struct btrfs_fs_info *fs_info, int type) |
a22285a6 | 542 | { |
0b246afa | 543 | if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) |
f963e012 | 544 | return false; |
a4abeea4 | 545 | |
92e2f7e3 | 546 | if (type == TRANS_START) |
f963e012 | 547 | return true; |
a4abeea4 | 548 | |
f963e012 | 549 | return false; |
a22285a6 YZ |
550 | } |
551 | ||
20dd2cbf MX |
552 | static inline bool need_reserve_reloc_root(struct btrfs_root *root) |
553 | { | |
0b246afa JM |
554 | struct btrfs_fs_info *fs_info = root->fs_info; |
555 | ||
556 | if (!fs_info->reloc_ctl || | |
92a7cc42 | 557 | !test_bit(BTRFS_ROOT_SHAREABLE, &root->state) || |
e094f480 | 558 | btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID || |
20dd2cbf MX |
559 | root->reloc_root) |
560 | return false; | |
561 | ||
562 | return true; | |
563 | } | |
564 | ||
28270e25 FM |
565 | static int btrfs_reserve_trans_metadata(struct btrfs_fs_info *fs_info, |
566 | enum btrfs_reserve_flush_enum flush, | |
567 | u64 num_bytes, | |
568 | u64 *delayed_refs_bytes) | |
569 | { | |
28270e25 | 570 | struct btrfs_space_info *si = fs_info->trans_block_rsv.space_info; |
2f6397e4 | 571 | u64 bytes = num_bytes + *delayed_refs_bytes; |
28270e25 FM |
572 | int ret; |
573 | ||
28270e25 FM |
574 | /* |
575 | * We want to reserve all the bytes we may need all at once, so we only | |
576 | * do 1 enospc flushing cycle per transaction start. | |
577 | */ | |
578 | ret = btrfs_reserve_metadata_bytes(fs_info, si, bytes, flush); | |
28270e25 FM |
579 | |
580 | /* | |
581 | * If we are doing an emergency flush, which can steal from the global block |
582 | * reserve, then attempt to not reserve space for the delayed refs, as | |
583 | * we will consume space for them from the global block reserve. | |
584 | */ | |
2f6397e4 | 585 | if (ret && flush == BTRFS_RESERVE_FLUSH_ALL_STEAL) { |
28270e25 FM |
586 | bytes -= *delayed_refs_bytes; |
587 | *delayed_refs_bytes = 0; | |
588 | ret = btrfs_reserve_metadata_bytes(fs_info, si, bytes, flush); | |
589 | } | |
590 | ||
591 | return ret; | |
592 | } | |
593 | ||
08e007d2 | 594 | static struct btrfs_trans_handle * |
5aed1dd8 | 595 | start_transaction(struct btrfs_root *root, unsigned int num_items, |
003d7c59 JM |
596 | unsigned int type, enum btrfs_reserve_flush_enum flush, |
597 | bool enforce_qgroups) | |
37d1aeee | 598 | { |
0b246afa | 599 | struct btrfs_fs_info *fs_info = root->fs_info; |
ba2c4d4e | 600 | struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv; |
28270e25 | 601 | struct btrfs_block_rsv *trans_rsv = &fs_info->trans_block_rsv; |
a22285a6 YZ |
602 | struct btrfs_trans_handle *h; |
603 | struct btrfs_transaction *cur_trans; | |
b5009945 | 604 | u64 num_bytes = 0; |
c5567237 | 605 | u64 qgroup_reserved = 0; |
28270e25 | 606 | u64 delayed_refs_bytes = 0; |
20dd2cbf | 607 | bool reloc_reserved = false; |
9c343784 | 608 | bool do_chunk_alloc = false; |
20dd2cbf | 609 | int ret; |
acce952b | 610 | |
84961539 | 611 | if (BTRFS_FS_ERROR(fs_info)) |
acce952b | 612 | return ERR_PTR(-EROFS); |
2a1eb461 | 613 | |
46c4e71e | 614 | if (current->journal_info) { |
0860adfd | 615 | WARN_ON(type & TRANS_EXTWRITERS); |
2a1eb461 | 616 | h = current->journal_info; |
b50fff81 DS |
617 | refcount_inc(&h->use_count); |
618 | WARN_ON(refcount_read(&h->use_count) > 2); | |
2a1eb461 JB |
619 | h->orig_rsv = h->block_rsv; |
620 | h->block_rsv = NULL; | |
621 | goto got_it; | |
622 | } | |
b5009945 JB |
623 | |
624 | /* | |
625 | * Do the reservation before we join the transaction so we can do all | |
626 | * the appropriate flushing if need be. | |
627 | */ | |
003d7c59 | 628 | if (num_items && root != fs_info->chunk_root) { |
0b246afa | 629 | qgroup_reserved = num_items * fs_info->nodesize; |
a6496849 BB |
630 | /* |
631 | * Use prealloc for now, as there might be a currently running | |
632 | * transaction that could free this reserved space prematurely | |
633 | * by committing. | |
634 | */ | |
635 | ret = btrfs_qgroup_reserve_meta_prealloc(root, qgroup_reserved, | |
636 | enforce_qgroups, false); | |
7174109c QW |
637 | if (ret) |
638 | return ERR_PTR(ret); | |
c5567237 | 639 | |
28270e25 | 640 | num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_items); |
ba2c4d4e | 641 | /* |
28270e25 FM |
642 | * If we plan to insert/update/delete "num_items" from a btree, |
643 | * we will also generate delayed refs for extent buffers in the | |
644 | * respective btree paths, so reserve space for the delayed refs | |
645 | * that will be generated by the caller as it modifies btrees. | |
646 | * Try to reserve them to avoid excessive use of the global | |
647 | * block reserve. | |
ba2c4d4e | 648 | */ |
28270e25 | 649 | delayed_refs_bytes = btrfs_calc_delayed_ref_bytes(fs_info, num_items); |
ba2c4d4e | 650 | |
20dd2cbf MX |
651 | /* |
652 | * Do the reservation for the relocation root creation | |
653 | */ | |
ee39b432 | 654 | if (need_reserve_reloc_root(root)) { |
0b246afa | 655 | num_bytes += fs_info->nodesize; |
20dd2cbf MX |
656 | reloc_reserved = true; |
657 | } | |
658 | ||
28270e25 FM |
659 | ret = btrfs_reserve_trans_metadata(fs_info, flush, num_bytes, |
660 | &delayed_refs_bytes); | |
ba2c4d4e JB |
661 | if (ret) |
662 | goto reserve_fail; | |
9c343784 | 663 | |
28270e25 FM |
664 | btrfs_block_rsv_add_bytes(trans_rsv, num_bytes, true); |
665 | ||
666 | if (trans_rsv->space_info->force_alloc) | |
9c343784 | 667 | do_chunk_alloc = true; |
ba2c4d4e | 668 | } else if (num_items == 0 && flush == BTRFS_RESERVE_FLUSH_ALL && |
748f553c | 669 | !btrfs_block_rsv_full(delayed_refs_rsv)) { |
ba2c4d4e JB |
670 | /* |
671 | * Some people call with btrfs_start_transaction(root, 0) | |
672 | * because they can be throttled, but have some other mechanism | |
673 | * for reserving space. We still want these guys to refill the | |
674 | * delayed block_rsv so just add 1 items worth of reservation | |
675 | * here. | |
676 | */ | |
677 | ret = btrfs_delayed_refs_rsv_refill(fs_info, flush); | |
b5009945 | 678 | if (ret) |
843fcf35 | 679 | goto reserve_fail; |
b5009945 | 680 | } |
a22285a6 | 681 | again: |
f2f767e7 | 682 | h = kmem_cache_zalloc(btrfs_trans_handle_cachep, GFP_NOFS); |
843fcf35 MX |
683 | if (!h) { |
684 | ret = -ENOMEM; | |
685 | goto alloc_fail; | |
686 | } | |
37d1aeee | 687 | |
98114659 JB |
688 | /* |
689 | * If we are JOIN_NOLOCK we're already committing a transaction and | |
690 | * waiting on this guy, so we don't need to do the sb_start_intwrite | |
691 | * because we're already holding a ref. We need this because we could | |
692 | * have raced in and done an fsync() on a file which can kick a commit |
693 | * and then we deadlock with somebody doing a freeze. | |
354aa0fb MX |
694 | * |
695 | * If we are ATTACH, it means we just want to catch the current | |
696 | * transaction and commit it, so we needn't do sb_start_intwrite(). | |
98114659 | 697 | */ |
0860adfd | 698 | if (type & __TRANS_FREEZABLE) |
0b246afa | 699 | sb_start_intwrite(fs_info->sb); |
b2b5ef5c | 700 | |
2ff7e61e JM |
701 | if (may_wait_transaction(fs_info, type)) |
702 | wait_current_trans(fs_info); | |
a22285a6 | 703 | |
a4abeea4 | 704 | do { |
2ff7e61e | 705 | ret = join_transaction(fs_info, type); |
178260b2 | 706 | if (ret == -EBUSY) { |
2ff7e61e | 707 | wait_current_trans(fs_info); |
a6d155d2 FM |
708 | if (unlikely(type == TRANS_ATTACH || |
709 | type == TRANS_JOIN_NOSTART)) | |
178260b2 MX |
710 | ret = -ENOENT; |
711 | } | |
a4abeea4 JB |
712 | } while (ret == -EBUSY); |
713 | ||
a43f7f82 | 714 | if (ret < 0) |
843fcf35 | 715 | goto join_fail; |
0f7d52f4 | 716 | |
0b246afa | 717 | cur_trans = fs_info->running_transaction; |
a22285a6 YZ |
718 | |
719 | h->transid = cur_trans->transid; | |
720 | h->transaction = cur_trans; | |
b50fff81 | 721 | refcount_set(&h->use_count, 1); |
64b63580 | 722 | h->fs_info = root->fs_info; |
7174109c | 723 | |
a698d075 | 724 | h->type = type; |
ea658bad | 725 | INIT_LIST_HEAD(&h->new_bgs); |
28270e25 | 726 | btrfs_init_metadata_block_rsv(fs_info, &h->delayed_rsv, BTRFS_BLOCK_RSV_DELOPS); |
b7ec40d7 | 727 | |
a22285a6 | 728 | smp_mb(); |
3296bf56 | 729 | if (cur_trans->state >= TRANS_STATE_COMMIT_START && |
2ff7e61e | 730 | may_wait_transaction(fs_info, type)) { |
abdd2e80 | 731 | current->journal_info = h; |
3a45bb20 | 732 | btrfs_commit_transaction(h); |
a22285a6 YZ |
733 | goto again; |
734 | } | |
735 | ||
b5009945 | 736 | if (num_bytes) { |
0b246afa | 737 | trace_btrfs_space_reservation(fs_info, "transaction", |
2bcc0328 | 738 | h->transid, num_bytes, 1); |
28270e25 | 739 | h->block_rsv = trans_rsv; |
b5009945 | 740 | h->bytes_reserved = num_bytes; |
28270e25 FM |
741 | if (delayed_refs_bytes > 0) { |
742 | trace_btrfs_space_reservation(fs_info, | |
743 | "local_delayed_refs_rsv", | |
744 | h->transid, | |
745 | delayed_refs_bytes, 1); | |
746 | h->delayed_refs_bytes_reserved = delayed_refs_bytes; | |
747 | btrfs_block_rsv_add_bytes(&h->delayed_rsv, delayed_refs_bytes, true); | |
748 | delayed_refs_bytes = 0; | |
749 | } | |
20dd2cbf | 750 | h->reloc_reserved = reloc_reserved; |
a22285a6 | 751 | } |
9ed74f2d | 752 | |
2a1eb461 | 753 | got_it: |
bcf3a3e7 | 754 | if (!current->journal_info) |
a22285a6 | 755 | current->journal_info = h; |
fcc99734 | 756 | |
9c343784 JB |
757 | /* |
758 | * If the space_info is marked ALLOC_FORCE then we'll get upgraded to | |
759 | * ALLOC_FORCE the first run through, and then we won't allocate for | |
760 | * anybody else who races in later. We don't care about the return | |
761 | * value here. | |
762 | */ | |
763 | if (do_chunk_alloc && num_bytes) { | |
098a442d NA |
764 | struct btrfs_space_info *space_info = h->block_rsv->space_info; |
765 | u64 flags = space_info->flags; | |
9c343784 | 766 | |
098a442d | 767 | btrfs_chunk_alloc(h, space_info, btrfs_get_alloc_profile(fs_info, flags), |
9c343784 JB |
768 | CHUNK_ALLOC_NO_FORCE); |
769 | } | |
770 | ||
fcc99734 QW |
771 | /* |
772 | * btrfs_record_root_in_trans() needs to alloc new extents, and may | |
773 | * call btrfs_join_transaction() while we're also starting a | |
774 | * transaction. | |
775 | * | |
776 | * Thus it needs to be called after current->journal_info is initialized, |
777 | * or we can deadlock. | |
778 | */ | |
68075ea8 JB |
779 | ret = btrfs_record_root_in_trans(h, root); |
780 | if (ret) { | |
781 | /* | |
782 | * The transaction handle is fully initialized and linked with | |
783 | * other structures so it needs to be ended in case of errors, | |
784 | * not just freed. | |
785 | */ | |
786 | btrfs_end_transaction(h); | |
211de933 | 787 | goto reserve_fail; |
68075ea8 | 788 | } |
211de933 BB |
789 | /* |
790 | * Now that we have found a transaction to be a part of, convert the | |
791 | * qgroup reservation from prealloc to pertrans. A different transaction | |
792 | * can't race in and free our pertrans out from under us. | |
793 | */ | |
794 | if (qgroup_reserved) | |
795 | btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved); | |
fcc99734 | 796 | |
79154b1b | 797 | return h; |
843fcf35 MX |
798 | |
799 | join_fail: | |
0860adfd | 800 | if (type & __TRANS_FREEZABLE) |
0b246afa | 801 | sb_end_intwrite(fs_info->sb); |
843fcf35 MX |
802 | kmem_cache_free(btrfs_trans_handle_cachep, h); |
803 | alloc_fail: | |
804 | if (num_bytes) | |
28270e25 FM |
805 | btrfs_block_rsv_release(fs_info, trans_rsv, num_bytes, NULL); |
806 | if (delayed_refs_bytes) | |
7de9ca1f | 807 | btrfs_space_info_free_bytes_may_use(trans_rsv->space_info, delayed_refs_bytes); |
843fcf35 | 808 | reserve_fail: |
a6496849 | 809 | btrfs_qgroup_free_meta_prealloc(root, qgroup_reserved); |
843fcf35 | 810 | return ERR_PTR(ret); |
79154b1b CM |
811 | } |
812 | ||
f9295749 | 813 | struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root, |
5aed1dd8 | 814 | unsigned int num_items) |
f9295749 | 815 | { |
08e007d2 | 816 | return start_transaction(root, num_items, TRANS_START, |
003d7c59 | 817 | BTRFS_RESERVE_FLUSH_ALL, true); |
f9295749 | 818 | } |
003d7c59 | 819 | |
8eab77ff FM |
820 | struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv( |
821 | struct btrfs_root *root, | |
7f9fe614 | 822 | unsigned int num_items) |
8eab77ff | 823 | { |
7f9fe614 JB |
824 | return start_transaction(root, num_items, TRANS_START, |
825 | BTRFS_RESERVE_FLUSH_ALL_STEAL, false); | |
8eab77ff | 826 | } |
8407aa46 | 827 | |
7a7eaa40 | 828 | struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root) |
f9295749 | 829 | { |
003d7c59 JM |
830 | return start_transaction(root, 0, TRANS_JOIN, BTRFS_RESERVE_NO_FLUSH, |
831 | true); | |
f9295749 CM |
832 | } |
833 | ||
8d510121 | 834 | struct btrfs_trans_handle *btrfs_join_transaction_spacecache(struct btrfs_root *root) |
0af3d00b | 835 | { |
575a75d6 | 836 | return start_transaction(root, 0, TRANS_JOIN_NOLOCK, |
003d7c59 | 837 | BTRFS_RESERVE_NO_FLUSH, true); |
0af3d00b JB |
838 | } |
839 | ||
a6d155d2 FM |
840 | /* |
841 | * Similar to regular join but it never starts a transaction when none is | |
19288951 FM |
842 | * running or when there's a running one at a state >= TRANS_STATE_UNBLOCKED. |
843 | * This is similar to btrfs_attach_transaction() but it allows the join to | |
844 | * happen if the transaction commit already started but it's not yet in the | |
845 | * "doing" phase (the state is < TRANS_STATE_COMMIT_DOING). | |
a6d155d2 FM |
846 | */ |
847 | struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root) | |
848 | { | |
849 | return start_transaction(root, 0, TRANS_JOIN_NOSTART, | |
850 | BTRFS_RESERVE_NO_FLUSH, true); | |
851 | } | |
852 | ||
d4edf39b | 853 | /* |
9580503b | 854 | * Catch the running transaction. |
d4edf39b MX |
855 | * |
856 | * It is used when we want to commit the current transaction, but |
857 | * don't want to start a new one. | |
858 | * | |
859 | * Note: If this function returns -ENOENT, it just means there is no |
860 | * running transaction. But it is possible that the inactive transaction | |
861 | * is still in memory, not fully on disk. If you need to ensure there is no |
862 | * inactive transaction in the fs when -ENOENT is returned, you should | |
863 | * invoke | |
864 | * btrfs_attach_transaction_barrier() | |
865 | */ | |
354aa0fb | 866 | struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root) |
60376ce4 | 867 | { |
575a75d6 | 868 | return start_transaction(root, 0, TRANS_ATTACH, |
003d7c59 | 869 | BTRFS_RESERVE_NO_FLUSH, true); |
60376ce4 JB |
870 | } |
871 | ||
d4edf39b | 872 | /* |
9580503b | 873 | * Catch the running transaction. |
d4edf39b | 874 | * |
52042d8e | 875 | * It is similar to the above function, the difference being that this one |
d4edf39b MX |
876 | * will wait for all the inactive transactions until they fully |
877 | * complete. | |
878 | */ | |
879 | struct btrfs_trans_handle * | |
880 | btrfs_attach_transaction_barrier(struct btrfs_root *root) | |
881 | { | |
882 | struct btrfs_trans_handle *trans; | |
883 | ||
575a75d6 | 884 | trans = start_transaction(root, 0, TRANS_ATTACH, |
003d7c59 | 885 | BTRFS_RESERVE_NO_FLUSH, true); |
b28ff3a7 FM |
886 | if (trans == ERR_PTR(-ENOENT)) { |
887 | int ret; | |
888 | ||
889 | ret = btrfs_wait_for_commit(root->fs_info, 0); | |
890 | if (ret) | |
891 | return ERR_PTR(ret); | |
892 | } | |
d4edf39b MX |
893 | |
894 | return trans; | |
895 | } | |
896 | ||
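A minimal sketch, not part of this file, of the attach pattern the comments above recommend when a caller must be sure no inactive transaction is left only in memory; example_sync_current_transaction() is a hypothetical caller, while btrfs_attach_transaction_barrier() and btrfs_commit_transaction() are the real functions.

/*
 * Hedged sketch, not part of this file: commit whatever transaction is
 * running without starting a new one, and wait out inactive ones.
 */
static int example_sync_current_transaction(struct btrfs_root *root)
{
        struct btrfs_trans_handle *trans;

        trans = btrfs_attach_transaction_barrier(root);
        if (IS_ERR(trans)) {
                /* -ENOENT: nothing running and older commits are on disk. */
                if (PTR_ERR(trans) == -ENOENT)
                        return 0;
                return PTR_ERR(trans);
        }
        return btrfs_commit_transaction(trans);
}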
d0c2f4fa FM |
897 | /* Wait for a transaction commit to reach at least the given state. */ |
898 | static noinline void wait_for_commit(struct btrfs_transaction *commit, | |
899 | const enum btrfs_trans_state min_state) | |
89ce8a63 | 900 | { |
5fd76bf3 OS |
901 | struct btrfs_fs_info *fs_info = commit->fs_info; |
902 | u64 transid = commit->transid; | |
903 | bool put = false; | |
904 | ||
3e738c53 IA |
905 | /* |
906 | * At the moment this function is called with min_state either being | |
907 | * TRANS_STATE_COMPLETED or TRANS_STATE_SUPER_COMMITTED. | |
908 | */ | |
909 | if (min_state == TRANS_STATE_COMPLETED) | |
910 | btrfs_might_wait_for_state(fs_info, BTRFS_LOCKDEP_TRANS_COMPLETED); | |
911 | else | |
912 | btrfs_might_wait_for_state(fs_info, BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED); | |
913 | ||
5fd76bf3 OS |
914 | while (1) { |
915 | wait_event(commit->commit_wait, commit->state >= min_state); | |
916 | if (put) | |
917 | btrfs_put_transaction(commit); | |
918 | ||
919 | if (min_state < TRANS_STATE_COMPLETED) | |
920 | break; | |
921 | ||
922 | /* | |
923 | * A transaction isn't really completed until all of the | |
924 | * previous transactions are completed, but with fsync we can | |
925 | * end up with SUPER_COMMITTED transactions before a COMPLETED | |
926 | * transaction. Wait for those. | |
927 | */ | |
928 | ||
929 | spin_lock(&fs_info->trans_lock); | |
930 | commit = list_first_entry_or_null(&fs_info->trans_list, | |
931 | struct btrfs_transaction, | |
932 | list); | |
933 | if (!commit || commit->transid > transid) { | |
934 | spin_unlock(&fs_info->trans_lock); | |
935 | break; | |
936 | } | |
937 | refcount_inc(&commit->use_count); | |
938 | put = true; | |
939 | spin_unlock(&fs_info->trans_lock); | |
940 | } | |
89ce8a63 CM |
941 | } |
942 | ||
2ff7e61e | 943 | int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid) |
46204592 SW |
944 | { |
945 | struct btrfs_transaction *cur_trans = NULL, *t; | |
8cd2807f | 946 | int ret = 0; |
46204592 | 947 | |
46204592 | 948 | if (transid) { |
0124855f | 949 | if (transid <= btrfs_get_last_trans_committed(fs_info)) |
a4abeea4 | 950 | goto out; |
46204592 SW |
951 | |
952 | /* find specified transaction */ | |
0b246afa JM |
953 | spin_lock(&fs_info->trans_lock); |
954 | list_for_each_entry(t, &fs_info->trans_list, list) { | |
46204592 SW |
955 | if (t->transid == transid) { |
956 | cur_trans = t; | |
9b64f57d | 957 | refcount_inc(&cur_trans->use_count); |
8cd2807f | 958 | ret = 0; |
46204592 SW |
959 | break; |
960 | } | |
8cd2807f MX |
961 | if (t->transid > transid) { |
962 | ret = 0; | |
46204592 | 963 | break; |
8cd2807f | 964 | } |
46204592 | 965 | } |
0b246afa | 966 | spin_unlock(&fs_info->trans_lock); |
42383020 SW |
967 | |
968 | /* | |
969 | * The specified transaction doesn't exist, or we | |
970 | * raced with btrfs_commit_transaction | |
971 | */ | |
972 | if (!cur_trans) { | |
0124855f | 973 | if (transid > btrfs_get_last_trans_committed(fs_info)) |
42383020 | 974 | ret = -EINVAL; |
8cd2807f | 975 | goto out; |
42383020 | 976 | } |
46204592 SW |
977 | } else { |
978 | /* find newest transaction that is committing | committed */ | |
0b246afa JM |
979 | spin_lock(&fs_info->trans_lock); |
980 | list_for_each_entry_reverse(t, &fs_info->trans_list, | |
46204592 | 981 | list) { |
4a9d8bde MX |
982 | if (t->state >= TRANS_STATE_COMMIT_START) { |
983 | if (t->state == TRANS_STATE_COMPLETED) | |
3473f3c0 | 984 | break; |
46204592 | 985 | cur_trans = t; |
9b64f57d | 986 | refcount_inc(&cur_trans->use_count); |
46204592 SW |
987 | break; |
988 | } | |
989 | } | |
0b246afa | 990 | spin_unlock(&fs_info->trans_lock); |
46204592 | 991 | if (!cur_trans) |
a4abeea4 | 992 | goto out; /* nothing committing|committed */ |
46204592 SW |
993 | } |
994 | ||
d0c2f4fa | 995 | wait_for_commit(cur_trans, TRANS_STATE_COMPLETED); |
bf7ecbe9 | 996 | ret = cur_trans->aborted; |
724e2315 | 997 | btrfs_put_transaction(cur_trans); |
a4abeea4 | 998 | out: |
46204592 SW |
999 | return ret; |
1000 | } | |
1001 | ||
2ff7e61e | 1002 | void btrfs_throttle(struct btrfs_fs_info *fs_info) |
37d1aeee | 1003 | { |
92e2f7e3 | 1004 | wait_current_trans(fs_info); |
37d1aeee CM |
1005 | } |
1006 | ||
a2633b6a | 1007 | bool btrfs_should_end_transaction(struct btrfs_trans_handle *trans) |
8929ecfa YZ |
1008 | { |
1009 | struct btrfs_transaction *cur_trans = trans->transaction; | |
8929ecfa | 1010 | |
3296bf56 | 1011 | if (cur_trans->state >= TRANS_STATE_COMMIT_START || |
e19eb11f | 1012 | test_bit(BTRFS_DELAYED_REFS_FLUSHING, &cur_trans->delayed_refs.flags)) |
a2633b6a | 1013 | return true; |
8929ecfa | 1014 | |
04fb3285 FM |
1015 | if (btrfs_check_space_for_delayed_refs(trans->fs_info)) |
1016 | return true; | |
1017 | ||
1018 | return !!btrfs_block_rsv_check(&trans->fs_info->global_block_rsv, 50); | |
8929ecfa YZ |
1019 | } |
1020 | ||
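A minimal sketch, not part of this file, of how a long-running operation might use btrfs_should_end_transaction(); example_process_many_items() and more_work_to_do() are hypothetical, the transaction calls are the ones defined in this file.

/*
 * Hedged sketch, not part of this file: poll btrfs_should_end_transaction()
 * and restart the handle between batches so one handle does not hold up the
 * commit for too long.
 */
static int example_process_many_items(struct btrfs_root *root)
{
        struct btrfs_trans_handle *trans;
        int ret;

        trans = btrfs_start_transaction(root, 1);
        if (IS_ERR(trans))
                return PTR_ERR(trans);

        while (more_work_to_do()) {            /* hypothetical helper */
                /* ... modify one batch of items using @trans ... */

                if (btrfs_should_end_transaction(trans)) {
                        ret = btrfs_end_transaction(trans);
                        if (ret)
                                return ret;
                        trans = btrfs_start_transaction(root, 1);
                        if (IS_ERR(trans))
                                return PTR_ERR(trans);
                }
        }
        return btrfs_end_transaction(trans);
}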
dc60c525 NB |
1021 | static void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans) |
1022 | ||
0e34693f | 1023 | { |
dc60c525 NB |
1024 | struct btrfs_fs_info *fs_info = trans->fs_info; |
1025 | ||
0e34693f NB |
1026 | if (!trans->block_rsv) { |
1027 | ASSERT(!trans->bytes_reserved); | |
28270e25 | 1028 | ASSERT(!trans->delayed_refs_bytes_reserved); |
0e34693f NB |
1029 | return; |
1030 | } | |
1031 | ||
28270e25 FM |
1032 | if (!trans->bytes_reserved) { |
1033 | ASSERT(!trans->delayed_refs_bytes_reserved); | |
0e34693f | 1034 | return; |
28270e25 | 1035 | } |
0e34693f NB |
1036 | |
1037 | ASSERT(trans->block_rsv == &fs_info->trans_block_rsv); | |
1038 | trace_btrfs_space_reservation(fs_info, "transaction", | |
1039 | trans->transid, trans->bytes_reserved, 0); | |
1040 | btrfs_block_rsv_release(fs_info, trans->block_rsv, | |
63f018be | 1041 | trans->bytes_reserved, NULL); |
0e34693f | 1042 | trans->bytes_reserved = 0; |
28270e25 FM |
1043 | |
1044 | if (!trans->delayed_refs_bytes_reserved) | |
1045 | return; | |
1046 | ||
1047 | trace_btrfs_space_reservation(fs_info, "local_delayed_refs_rsv", | |
1048 | trans->transid, | |
1049 | trans->delayed_refs_bytes_reserved, 0); | |
1050 | btrfs_block_rsv_release(fs_info, &trans->delayed_rsv, | |
1051 | trans->delayed_refs_bytes_reserved, NULL); | |
1052 | trans->delayed_refs_bytes_reserved = 0; | |
0e34693f NB |
1053 | } |
1054 | ||
89ce8a63 | 1055 | static int __btrfs_end_transaction(struct btrfs_trans_handle *trans, |
3a45bb20 | 1056 | int throttle) |
79154b1b | 1057 | { |
3a45bb20 | 1058 | struct btrfs_fs_info *info = trans->fs_info; |
8929ecfa | 1059 | struct btrfs_transaction *cur_trans = trans->transaction; |
fdee5e55 | 1060 | int ret = 0; |
c3e69d58 | 1061 | |
b50fff81 DS |
1062 | if (refcount_read(&trans->use_count) > 1) { |
1063 | refcount_dec(&trans->use_count); | |
2a1eb461 JB |
1064 | trans->block_rsv = trans->orig_rsv; |
1065 | return 0; | |
1066 | } | |
1067 | ||
dc60c525 | 1068 | btrfs_trans_release_metadata(trans); |
4c13d758 | 1069 | trans->block_rsv = NULL; |
c5567237 | 1070 | |
119e80df | 1071 | btrfs_create_pending_block_groups(trans); |
ea658bad | 1072 | |
4fbcdf66 FM |
1073 | btrfs_trans_release_chunk_metadata(trans); |
1074 | ||
0860adfd | 1075 | if (trans->type & __TRANS_FREEZABLE) |
0b246afa | 1076 | sb_end_intwrite(info->sb); |
6df7881a | 1077 | |
8929ecfa | 1078 | WARN_ON(cur_trans != info->running_transaction); |
13c5a93e JB |
1079 | WARN_ON(atomic_read(&cur_trans->num_writers) < 1); |
1080 | atomic_dec(&cur_trans->num_writers); | |
0860adfd | 1081 | extwriter_counter_dec(cur_trans, trans->type); |
89ce8a63 | 1082 | |
093258e6 | 1083 | cond_wake_up(&cur_trans->writer_wait); |
e1489b4f | 1084 | |
5a9ba670 | 1085 | btrfs_lockdep_release(info, btrfs_trans_num_extwriters); |
e1489b4f IA |
1086 | btrfs_lockdep_release(info, btrfs_trans_num_writers); |
1087 | ||
724e2315 | 1088 | btrfs_put_transaction(cur_trans); |
9ed74f2d JB |
1089 | |
1090 | if (current->journal_info == trans) | |
1091 | current->journal_info = NULL; | |
ab78c84d | 1092 | |
24bbcf04 | 1093 | if (throttle) |
2ff7e61e | 1094 | btrfs_run_delayed_iputs(info); |
24bbcf04 | 1095 | |
84961539 | 1096 | if (TRANS_ABORTED(trans) || BTRFS_FS_ERROR(info)) { |
4e121c06 | 1097 | wake_up_process(info->transaction_kthread); |
fbabd4a3 | 1098 | if (TRANS_ABORTED(trans)) |
fdee5e55 | 1099 | ret = trans->aborted; |
fbabd4a3 | 1100 | else |
fdee5e55 | 1101 | ret = -EROFS; |
4e121c06 | 1102 | } |
49b25e05 | 1103 | |
4edc2ca3 | 1104 | kmem_cache_free(btrfs_trans_handle_cachep, trans); |
fdee5e55 | 1105 | return ret; |
79154b1b CM |
1106 | } |
1107 | ||
3a45bb20 | 1108 | int btrfs_end_transaction(struct btrfs_trans_handle *trans) |
89ce8a63 | 1109 | { |
3a45bb20 | 1110 | return __btrfs_end_transaction(trans, 0); |
89ce8a63 CM |
1111 | } |
1112 | ||
3a45bb20 | 1113 | int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans) |
89ce8a63 | 1114 | { |
3a45bb20 | 1115 | return __btrfs_end_transaction(trans, 1); |
16cdcec7 MX |
1116 | } |
1117 | ||
d352ac68 CM |
1118 | /* |
1119 | * when btree blocks are allocated, they have some corresponding bits set for | |
1120 | * them in one of two extent_io trees. This is used to make sure all of | |
690587d1 | 1121 | * those extents are sent to disk but does not wait on them |
d352ac68 | 1122 | */ |
2ff7e61e | 1123 | int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info, |
8cef4e16 | 1124 | struct extent_io_tree *dirty_pages, int mark) |
79154b1b | 1125 | { |
ce875311 | 1126 | int ret = 0; |
0b246afa | 1127 | struct address_space *mapping = fs_info->btree_inode->i_mapping; |
e6138876 | 1128 | struct extent_state *cached_state = NULL; |
777e6bd7 | 1129 | u64 start = 0; |
5f39d397 | 1130 | u64 end; |
7c4452b9 | 1131 | |
66da9c1b FM |
1132 | while (btrfs_find_first_extent_bit(dirty_pages, start, &start, &end, |
1133 | mark, &cached_state)) { | |
663dfbb0 FM |
1134 | bool wait_writeback = false; |
1135 | ||
94bd699a FM |
1136 | ret = btrfs_convert_extent_bit(dirty_pages, start, end, |
1137 | EXTENT_NEED_WAIT, | |
1138 | mark, &cached_state); | |
663dfbb0 FM |
1139 | /* |
1140 | * convert_extent_bit can return -ENOMEM, which is most of the | |
1141 | * time a temporary error. So when it happens, ignore the error | |
1142 | * and wait for writeback of this range to finish - because we | |
1143 | * failed to set the bit EXTENT_NEED_WAIT for the range, a call | |
bf89d38f JM |
1144 | * to __btrfs_wait_marked_extents() would not know that |
1145 | * writeback for this range started and therefore wouldn't | |
1146 | * wait for it to finish - we don't want to commit a | |
1147 | * superblock that points to btree nodes/leafs for which | |
1148 | * writeback hasn't finished yet (and without errors). | |
663dfbb0 | 1149 | * We cleanup any entries left in the io tree when committing |
41e7acd3 | 1150 | * the transaction (through extent_io_tree_release()). |
663dfbb0 | 1151 | */ |
ce875311 AJ |
1152 | if (ret == -ENOMEM) { |
1153 | ret = 0; | |
663dfbb0 FM |
1154 | wait_writeback = true; |
1155 | } | |
ce875311 AJ |
1156 | if (!ret) |
1157 | ret = filemap_fdatawrite_range(mapping, start, end); | |
1158 | if (!ret && wait_writeback) | |
5e121ae6 | 1159 | btrfs_btree_wait_writeback_range(fs_info, start, end); |
b351161f | 1160 | btrfs_free_extent_state(cached_state); |
ce875311 | 1161 | if (ret) |
9a7b68d3 | 1162 | break; |
663dfbb0 | 1163 | cached_state = NULL; |
1728366e JB |
1164 | cond_resched(); |
1165 | start = end + 1; | |
7c4452b9 | 1166 | } |
ce875311 | 1167 | return ret; |
690587d1 CM |
1168 | } |
1169 | ||
1170 | /* | |
1171 | * when btree blocks are allocated, they have some corresponding bits set for | |
1172 | * them in one of two extent_io trees. This is used to make sure all of | |
1173 | * those extents are on disk for transaction or log commit. We wait | |
1174 | * on all the pages and clear them from the dirty pages state tree | |
1175 | */ | |
bf89d38f JM |
1176 | static int __btrfs_wait_marked_extents(struct btrfs_fs_info *fs_info, |
1177 | struct extent_io_tree *dirty_pages) | |
690587d1 | 1178 | { |
e6138876 | 1179 | struct extent_state *cached_state = NULL; |
690587d1 CM |
1180 | u64 start = 0; |
1181 | u64 end; | |
1e8a4237 | 1182 | int ret = 0; |
777e6bd7 | 1183 | |
66da9c1b FM |
1184 | while (btrfs_find_first_extent_bit(dirty_pages, start, &start, &end, |
1185 | EXTENT_NEED_WAIT, &cached_state)) { | |
663dfbb0 FM |
1186 | /* |
1187 | * Ignore -ENOMEM errors returned by clear_extent_bit(). | |
1188 | * When committing the transaction, we'll remove any entries | |
1189 | * left in the io tree. For a log commit, we don't remove them | |
1190 | * after committing the log because the tree can be accessed | |
1191 | * concurrently - we do it only at transaction commit time when | |
41e7acd3 | 1192 | * it's safe to do it (through extent_io_tree_release()). |
663dfbb0 | 1193 | */ |
9d222562 FM |
1194 | ret = btrfs_clear_extent_bit(dirty_pages, start, end, |
1195 | EXTENT_NEED_WAIT, &cached_state); | |
1e8a4237 AJ |
1196 | if (ret == -ENOMEM) |
1197 | ret = 0; | |
1198 | if (!ret) | |
5e121ae6 | 1199 | btrfs_btree_wait_writeback_range(fs_info, start, end); |
b351161f | 1200 | btrfs_free_extent_state(cached_state); |
1e8a4237 | 1201 | if (ret) |
9a7b68d3 | 1202 | break; |
e38e2ed7 | 1203 | cached_state = NULL; |
1728366e JB |
1204 | cond_resched(); |
1205 | start = end + 1; | |
777e6bd7 | 1206 | } |
1e8a4237 | 1207 | return ret; |
bf89d38f | 1208 | } |
656f30db | 1209 | |
b9fae2eb | 1210 | static int btrfs_wait_extents(struct btrfs_fs_info *fs_info, |
bf89d38f JM |
1211 | struct extent_io_tree *dirty_pages) |
1212 | { | |
1213 | bool errors = false; | |
1214 | int err; | |
656f30db | 1215 | |
bf89d38f JM |
1216 | err = __btrfs_wait_marked_extents(fs_info, dirty_pages); |
1217 | if (test_and_clear_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags)) | |
1218 | errors = true; | |
1219 | ||
1220 | if (errors && !err) | |
1221 | err = -EIO; | |
1222 | return err; | |
1223 | } | |
656f30db | 1224 | |
bf89d38f JM |
1225 | int btrfs_wait_tree_log_extents(struct btrfs_root *log_root, int mark) |
1226 | { | |
1227 | struct btrfs_fs_info *fs_info = log_root->fs_info; | |
1228 | struct extent_io_tree *dirty_pages = &log_root->dirty_log_pages; | |
1229 | bool errors = false; | |
1230 | int err; | |
656f30db | 1231 | |
e094f480 | 1232 | ASSERT(btrfs_root_id(log_root) == BTRFS_TREE_LOG_OBJECTID); |
bf89d38f JM |
1233 | |
1234 | err = __btrfs_wait_marked_extents(fs_info, dirty_pages); | |
1235 | if ((mark & EXTENT_DIRTY) && | |
1236 | test_and_clear_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags)) | |
1237 | errors = true; | |
1238 | ||
1239 | if ((mark & EXTENT_NEW) && | |
1240 | test_and_clear_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags)) | |
1241 | errors = true; | |
1242 | ||
1243 | if (errors && !err) | |
1244 | err = -EIO; | |
1245 | return err; | |
79154b1b CM |
1246 | } |
1247 | ||
690587d1 | 1248 | /* |
c9b577c0 NB |
1249 | * When btree blocks are allocated the corresponding extents are marked dirty. |
1250 | * This function ensures such extents are persisted on disk for transaction or | |
1251 | * log commit. | |
1252 | * | |
1253 | * @trans: transaction whose dirty pages we'd like to write | |
690587d1 | 1254 | */ |
70458a58 | 1255 | static int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans) |
690587d1 CM |
1256 | { |
1257 | int ret; | |
1258 | int ret2; | |
c9b577c0 | 1259 | struct extent_io_tree *dirty_pages = &trans->transaction->dirty_pages; |
70458a58 | 1260 | struct btrfs_fs_info *fs_info = trans->fs_info; |
c6adc9cc | 1261 | struct blk_plug plug; |
690587d1 | 1262 | |
c6adc9cc | 1263 | blk_start_plug(&plug); |
c9b577c0 | 1264 | ret = btrfs_write_marked_extents(fs_info, dirty_pages, EXTENT_DIRTY); |
c6adc9cc | 1265 | blk_finish_plug(&plug); |
bf89d38f | 1266 | ret2 = btrfs_wait_extents(fs_info, dirty_pages); |
bf0da8c1 | 1267 | |
e965835c | 1268 | btrfs_extent_io_tree_release(&trans->transaction->dirty_pages); |
c9b577c0 | 1269 | |
bf0da8c1 CM |
1270 | if (ret) |
1271 | return ret; | |
c9b577c0 | 1272 | else if (ret2) |
bf0da8c1 | 1273 | return ret2; |
c9b577c0 NB |
1274 | else |
1275 | return 0; | |
d0c803c4 CM |
1276 | } |
1277 | ||
d352ac68 CM |
1278 | /* |
1279 | * this is used to update the root pointer in the tree of tree roots. | |
1280 | * | |
1281 | * But, in the case of the extent allocation tree, updating the root | |
1282 | * pointer may allocate blocks which may change the root of the extent | |
1283 | * allocation tree. | |
1284 | * | |
1285 | * So, this loops and repeats and makes sure the cowonly root didn't | |
1286 | * change while the root pointer was being updated in the metadata. | |
1287 | */ | |
0b86a832 CM |
1288 | static int update_cowonly_root(struct btrfs_trans_handle *trans, |
1289 | struct btrfs_root *root) | |
79154b1b CM |
1290 | { |
1291 | int ret; | |
0b86a832 | 1292 | u64 old_root_bytenr; |
86b9f2ec | 1293 | u64 old_root_used; |
0b246afa JM |
1294 | struct btrfs_fs_info *fs_info = root->fs_info; |
1295 | struct btrfs_root *tree_root = fs_info->tree_root; | |
79154b1b | 1296 | |
86b9f2ec | 1297 | old_root_used = btrfs_root_used(&root->root_item); |
56bec294 | 1298 | |
d397712b | 1299 | while (1) { |
0b86a832 | 1300 | old_root_bytenr = btrfs_root_bytenr(&root->root_item); |
86b9f2ec | 1301 | if (old_root_bytenr == root->node->start && |
ea526d18 | 1302 | old_root_used == btrfs_root_used(&root->root_item)) |
79154b1b | 1303 | break; |
87ef2bb4 | 1304 | |
5d4f98a2 | 1305 | btrfs_set_root_node(&root->root_item, root->node); |
79154b1b | 1306 | ret = btrfs_update_root(trans, tree_root, |
0b86a832 CM |
1307 | &root->root_key, |
1308 | &root->root_item); | |
49b25e05 JM |
1309 | if (ret) |
1310 | return ret; | |
56bec294 | 1311 | |
86b9f2ec | 1312 | old_root_used = btrfs_root_used(&root->root_item); |
0b86a832 | 1313 | } |
276e680d | 1314 | |
0b86a832 CM |
1315 | return 0; |
1316 | } | |
1317 | ||
d352ac68 CM |
1318 | /* |
1319 | * update all the cowonly tree roots on disk | |
49b25e05 JM |
1320 | * |
1321 | * The error handling in this function may not be obvious. Any of the | |
1322 | * failures will cause the file system to go offline. We still need | |
1323 | * to clean up the delayed refs. | |
d352ac68 | 1324 | */ |
9386d8bc | 1325 | static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans) |
0b86a832 | 1326 | { |
9386d8bc | 1327 | struct btrfs_fs_info *fs_info = trans->fs_info; |
ea526d18 | 1328 | struct list_head *dirty_bgs = &trans->transaction->dirty_bgs; |
1bbc621e | 1329 | struct list_head *io_bgs = &trans->transaction->io_bgs; |
84234f3a | 1330 | struct extent_buffer *eb; |
56bec294 | 1331 | int ret; |
84234f3a | 1332 | |
dfba78dc FM |
1333 | /* |
1334 | * At this point no one can be using this transaction to modify any tree | |
1335 | * and no one can start another transaction to modify any tree either. | |
1336 | */ | |
1337 | ASSERT(trans->transaction->state == TRANS_STATE_COMMIT_DOING); | |
1338 | ||
84234f3a | 1339 | eb = btrfs_lock_root_node(fs_info->tree_root); |
49b25e05 | 1340 | ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL, |
9631e4cc | 1341 | 0, &eb, BTRFS_NESTING_COW); |
84234f3a YZ |
1342 | btrfs_tree_unlock(eb); |
1343 | free_extent_buffer(eb); | |
0b86a832 | 1344 | |
49b25e05 JM |
1345 | if (ret) |
1346 | return ret; | |
87ef2bb4 | 1347 | |
196c9d8d | 1348 | ret = btrfs_run_dev_stats(trans); |
c16ce190 JB |
1349 | if (ret) |
1350 | return ret; | |
2b584c68 | 1351 | ret = btrfs_run_dev_replace(trans); |
c16ce190 JB |
1352 | if (ret) |
1353 | return ret; | |
280f8bd2 | 1354 | ret = btrfs_run_qgroups(trans); |
c16ce190 JB |
1355 | if (ret) |
1356 | return ret; | |
546adb0d | 1357 | |
bbebb3e0 | 1358 | ret = btrfs_setup_space_cache(trans); |
dcdf7f6d JB |
1359 | if (ret) |
1360 | return ret; | |
1361 | ||
ea526d18 | 1362 | again: |
d397712b | 1363 | while (!list_empty(&fs_info->dirty_cowonly_roots)) { |
2ff7e61e | 1364 | struct btrfs_root *root; |
d26d16a4 FM |
1365 | |
1366 | root = list_first_entry(&fs_info->dirty_cowonly_roots, | |
1367 | struct btrfs_root, dirty_list); | |
e7070be1 | 1368 | clear_bit(BTRFS_ROOT_DIRTY, &root->state); |
d26d16a4 FM |
1369 | list_move_tail(&root->dirty_list, |
1370 | &trans->transaction->switch_commits); | |
87ef2bb4 | 1371 | |
49b25e05 JM |
1372 | ret = update_cowonly_root(trans, root); |
1373 | if (ret) | |
1374 | return ret; | |
79154b1b | 1375 | } |
276e680d | 1376 | |
488bc2a2 | 1377 | /* Now flush any delayed refs generated by updating all of the roots */ |
8a526c44 | 1378 | ret = btrfs_run_delayed_refs(trans, U64_MAX); |
488bc2a2 JB |
1379 | if (ret) |
1380 | return ret; | |
1381 | ||
1bbc621e | 1382 | while (!list_empty(dirty_bgs) || !list_empty(io_bgs)) { |
5742d15f | 1383 | ret = btrfs_write_dirty_block_groups(trans); |
ea526d18 JB |
1384 | if (ret) |
1385 | return ret; | |
488bc2a2 JB |
1386 | |
1387 | /* | |
1388 | * We're writing the dirty block groups, which could generate | |
1389 | * delayed refs, which could generate more dirty block groups, | |
1390 | * so we want to keep this flushing in this loop to make sure | |
1391 | * everything gets run. | |
1392 | */ | |
8a526c44 | 1393 | ret = btrfs_run_delayed_refs(trans, U64_MAX); |
ea526d18 JB |
1394 | if (ret) |
1395 | return ret; | |
1396 | } | |
1397 | ||
1398 | if (!list_empty(&fs_info->dirty_cowonly_roots)) | |
1399 | goto again; | |
1400 | ||
9f6cbcbb DS |
1401 | /* Update dev-replace pointer once everything is committed */ |
1402 | fs_info->dev_replace.committed_cursor_left = | |
1403 | fs_info->dev_replace.cursor_left_last_write_of_item; | |
8dabb742 | 1404 | |
79154b1b CM |
1405 | return 0; |
1406 | } | |
1407 | ||
b4be6aef JB |
1408 | /* |
1409 | * If we had a pending drop we need to see if there are any others left in our | |
1410 | * dead roots list, and if not clear our bit and wake any waiters. | |
1411 | */ | |
1412 | void btrfs_maybe_wake_unfinished_drop(struct btrfs_fs_info *fs_info) | |
1413 | { | |
1414 | /* | |
1415 | * We put the drop in progress roots at the front of the list, so if the | |
1416 | * first entry doesn't have UNFINISHED_DROP set we can wake everybody | |
1417 | * up. | |
1418 | */ | |
1419 | spin_lock(&fs_info->trans_lock); | |
1420 | if (!list_empty(&fs_info->dead_roots)) { | |
1421 | struct btrfs_root *root = list_first_entry(&fs_info->dead_roots, | |
1422 | struct btrfs_root, | |
1423 | root_list); | |
1424 | if (test_bit(BTRFS_ROOT_UNFINISHED_DROP, &root->state)) { | |
1425 | spin_unlock(&fs_info->trans_lock); | |
1426 | return; | |
1427 | } | |
1428 | } | |
1429 | spin_unlock(&fs_info->trans_lock); | |
1430 | ||
1431 | btrfs_wake_unfinished_drop(fs_info); | |
1432 | } | |
1433 | ||
d352ac68 CM |
1434 | /* |
1435 | * Dead roots are old snapshots that need to be deleted. This adds the | |
1436 | * given root to the list of dead roots that need to be cleaned up and | |
1437 | * deleted by the cleaner thread. | |
1438 | */ | |
cfad392b | 1439 | void btrfs_add_dead_root(struct btrfs_root *root) |
5eda7b5e | 1440 | { |
0b246afa JM |
1441 | struct btrfs_fs_info *fs_info = root->fs_info; |
1442 | ||
1443 | spin_lock(&fs_info->trans_lock); | |
dc9492c1 JB |
1444 | if (list_empty(&root->root_list)) { |
1445 | btrfs_grab_root(root); | |
b4be6aef JB |
1446 | |
1447 | /* We want to process the partially complete drops first. */ | |
1448 | if (test_bit(BTRFS_ROOT_UNFINISHED_DROP, &root->state)) | |
1449 | list_add(&root->root_list, &fs_info->dead_roots); | |
1450 | else | |
1451 | list_add_tail(&root->root_list, &fs_info->dead_roots); | |
dc9492c1 | 1452 | } |
0b246afa | 1453 | spin_unlock(&fs_info->trans_lock); |
5eda7b5e CM |
1454 | } |
1455 | ||
d352ac68 | 1456 | /* |
dfba78dc FM |
1457 | * Update each subvolume root and its relocation root, if it exists, in the tree |
1458 | * of tree roots. Also free log roots if they exist. | |
d352ac68 | 1459 | */ |
7e4443d9 | 1460 | static noinline int commit_fs_roots(struct btrfs_trans_handle *trans) |
0f7d52f4 | 1461 | { |
7e4443d9 | 1462 | struct btrfs_fs_info *fs_info = trans->fs_info; |
fc7cbcd4 DS |
1463 | struct btrfs_root *gang[8]; |
1464 | int i; | |
1465 | int ret; | |
54aa1f4d | 1466 | |
dfba78dc FM |
1467 | /* |
1468 | * At this point no one can be using this transaction to modify any tree | |
1469 | * and no one can start another transaction to modify any tree either. | |
1470 | */ | |
1471 | ASSERT(trans->transaction->state == TRANS_STATE_COMMIT_DOING); | |
1472 | ||
fc7cbcd4 DS |
1473 | spin_lock(&fs_info->fs_roots_radix_lock); |
1474 | while (1) { | |
1475 | ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix, | |
1476 | (void **)gang, 0, | |
1477 | ARRAY_SIZE(gang), | |
1478 | BTRFS_ROOT_TRANS_TAG); | |
1479 | if (ret == 0) | |
1480 | break; | |
1481 | for (i = 0; i < ret; i++) { | |
1482 | struct btrfs_root *root = gang[i]; | |
1483 | int ret2; | |
1484 | ||
1485 | /* | |
1486 | * At this point we can neither have tasks logging inodes | |
1487 | * from a root nor trying to commit a log tree. | |
1488 | */ | |
1489 | ASSERT(atomic_read(&root->log_writers) == 0); | |
1490 | ASSERT(atomic_read(&root->log_commit[0]) == 0); | |
1491 | ASSERT(atomic_read(&root->log_commit[1]) == 0); | |
1492 | ||
1493 | radix_tree_tag_clear(&fs_info->fs_roots_radix, | |
e094f480 | 1494 | (unsigned long)btrfs_root_id(root), |
fc7cbcd4 | 1495 | BTRFS_ROOT_TRANS_TAG); |
6e68de0b | 1496 | btrfs_qgroup_free_meta_all_pertrans(root); |
fc7cbcd4 DS |
1497 | spin_unlock(&fs_info->fs_roots_radix_lock); |
1498 | ||
1499 | btrfs_free_log(trans, root); | |
1500 | ret2 = btrfs_update_reloc_root(trans, root); | |
1501 | if (ret2) | |
1502 | return ret2; | |
1503 | ||
1504 | /* see comments in should_cow_block() */ | |
1505 | clear_bit(BTRFS_ROOT_FORCE_COW, &root->state); | |
1506 | smp_mb__after_atomic(); | |
1507 | ||
1508 | if (root->commit_root != root->node) { | |
1509 | list_add_tail(&root->dirty_list, | |
1510 | &trans->transaction->switch_commits); | |
1511 | btrfs_set_root_node(&root->root_item, | |
1512 | root->node); | |
1513 | } | |
48b36a60 | 1514 | |
fc7cbcd4 DS |
1515 | ret2 = btrfs_update_root(trans, fs_info->tree_root, |
1516 | &root->root_key, | |
1517 | &root->root_item); | |
1518 | if (ret2) | |
1519 | return ret2; | |
1520 | spin_lock(&fs_info->fs_roots_radix_lock); | |
0f7d52f4 CM |
1521 | } |
1522 | } | |
fc7cbcd4 | 1523 | spin_unlock(&fs_info->fs_roots_radix_lock); |
4f4317c1 | 1524 | return 0; |
0f7d52f4 CM |
1525 | } |
1526 | ||
6426c7ad QW |
1527 | /* |
1528 | * Do the special snapshot-related qgroup dirty hack. | |
1529 | * | |
1530 | * This does the needed qgroup inherit and the dirty hack of switching | |
1531 | * commit roots inside one transaction and writing all btree blocks to | |
1532 | * disk, so that the qgroup numbers stay correct. | |
1533 | */ | |
1534 | static int qgroup_account_snapshot(struct btrfs_trans_handle *trans, | |
1535 | struct btrfs_root *src, | |
1536 | struct btrfs_root *parent, | |
1537 | struct btrfs_qgroup_inherit *inherit, | |
1538 | u64 dst_objectid) | |
1539 | { | |
1540 | struct btrfs_fs_info *fs_info = src->fs_info; | |
1541 | int ret; | |
1542 | ||
1543 | /* | |
5343cd93 BB |
1544 | * Save some performance in the case that qgroups are not enabled. If |
1545 | * this check races with the ioctl, rescan will kick in anyway. | |
6426c7ad | 1546 | */ |
182940f4 | 1547 | if (!btrfs_qgroup_full_accounting(fs_info)) |
6426c7ad | 1548 | return 0; |
6426c7ad | 1549 | |
4d31778a | 1550 | /* |
52042d8e | 1551 | * Ensure the dirty @src root will be committed. Otherwise, after the | |
4d31778a QW |
1552 | * upcoming commit_fs_roots() and switch_commit_roots(), any dirty but | |
1553 | * unrecorded root would never be updated again, leaving an outdated | |
1554 | * root item. | |
1555 | */ | |
1c442d22 JB |
1556 | ret = record_root_in_trans(trans, src, 1); |
1557 | if (ret) | |
1558 | return ret; | |
4d31778a | 1559 | |
2a4d84c1 JB |
1560 | /* |
1561 | * btrfs_qgroup_inherit relies on a consistent view of the usage for the | |
1562 | * src root, so we must run the delayed refs here. | |
1563 | * | |
1564 | * However this isn't particularly foolproof, because there's no | |
1565 | * synchronization keeping us from changing the tree after this point | |
1566 | * before we do the qgroup_inherit, or even from making changes while | |
1567 | * we're doing the qgroup_inherit. But that's a problem for the future, | |
1568 | * for now flush the delayed refs to narrow the race window where the | |
1569 | * qgroup counters could end up wrong. | |
1570 | */ | |
8a526c44 | 1571 | ret = btrfs_run_delayed_refs(trans, U64_MAX); |
2a4d84c1 JB |
1572 | if (ret) { |
1573 | btrfs_abort_transaction(trans, ret); | |
44365827 | 1574 | return ret; |
2a4d84c1 JB |
1575 | } |
1576 | ||
7e4443d9 | 1577 | ret = commit_fs_roots(trans); |
6426c7ad QW |
1578 | if (ret) |
1579 | goto out; | |
460fb20a | 1580 | ret = btrfs_qgroup_account_extents(trans); |
6426c7ad QW |
1581 | if (ret < 0) |
1582 | goto out; | |
1583 | ||
1584 | /* Now qgroup are all updated, we can inherit it to new qgroups */ | |
e094f480 JB |
1585 | ret = btrfs_qgroup_inherit(trans, btrfs_root_id(src), dst_objectid, |
1586 | btrfs_root_id(parent), inherit); | |
6426c7ad QW |
1587 | if (ret < 0) |
1588 | goto out; | |
1589 | ||
1590 | /* | |
1591 | * Now we do a simplified transaction commit, which will: | |
1592 | * 1) commit all subvolume and extent trees, so that every subvolume | |
1593 | *    and extent tree has a valid commit_root for the later | |
1594 | *    insert_dir_item() to account against | |
1595 | * 2) write all btree blocks to disk, to make sure later btree | |
1596 | *    modifications are COWed; otherwise a stale commit_root could | |
1597 | *    lead to wrong qgroup numbers | |
1598 | * In this simplified commit we don't care about other trees, like the | |
1599 | * chunk and root trees, as they don't affect qgroups. | |
1600 | * We also don't write the super block, to avoid a half-committed state. | |
1601 | */ | |
9386d8bc | 1602 | ret = commit_cowonly_roots(trans); |
6426c7ad QW |
1603 | if (ret) |
1604 | goto out; | |
889bfa39 | 1605 | switch_commit_roots(trans); |
70458a58 | 1606 | ret = btrfs_write_and_wait_transaction(trans); |
6426c7ad | 1607 | if (ret) |
f7af3934 | 1608 | btrfs_handle_fs_error(fs_info, ret, |
6426c7ad QW |
1609 | "Error while writing out transaction for qgroup"); |
1610 | ||
1611 | out: | |
6426c7ad QW |
1612 | /* |
1613 | * Force the parent root to be updated, as we recorded it before so its | |
1614 | * last_trans == cur_transid. | |
1615 | * Otherwise it won't be committed to disk again after the later | |
1616 | * insert_dir_item(). | |
1617 | */ | |
1618 | if (!ret) | |
1c442d22 | 1619 | ret = record_root_in_trans(trans, parent, 1); |
6426c7ad QW |
1620 | return ret; |
1621 | } | |
1622 | ||
d352ac68 CM |
1623 | /* |
1624 | * new snapshots need to be created at a very specific time in the | |
aec8030a MX |
1625 | * transaction commit. This does the actual creation. |
1626 | * | |
1627 | * Note: | |
1628 | * If an error occurs that may affect the commit of the current transaction, | |
1629 | * return the error number. If an error only affects the creation of the | |
1630 | * pending snapshots, return 0. | |
d352ac68 | 1631 | */ |
80b6794d | 1632 | static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, |
3063d29f CM |
1633 | struct btrfs_pending_snapshot *pending) |
1634 | { | |
08d50ca3 NB |
1635 | |
1636 | struct btrfs_fs_info *fs_info = trans->fs_info; | |
3063d29f | 1637 | struct btrfs_key key; |
80b6794d | 1638 | struct btrfs_root_item *new_root_item; |
3063d29f CM |
1639 | struct btrfs_root *tree_root = fs_info->tree_root; |
1640 | struct btrfs_root *root = pending->root; | |
6bdb72de | 1641 | struct btrfs_root *parent_root; |
98c9942a | 1642 | struct btrfs_block_rsv *rsv; |
cb9a1f5f | 1643 | struct btrfs_inode *parent_inode = pending->dir; |
42874b3d MX |
1644 | struct btrfs_path *path; |
1645 | struct btrfs_dir_item *dir_item; | |
3063d29f | 1646 | struct extent_buffer *tmp; |
925baedd | 1647 | struct extent_buffer *old; |
95582b00 | 1648 | struct timespec64 cur_time; |
aec8030a | 1649 | int ret = 0; |
d68fc57b | 1650 | u64 to_reserve = 0; |
6bdb72de | 1651 | u64 index = 0; |
a22285a6 | 1652 | u64 objectid; |
b83cc969 | 1653 | u64 root_flags; |
ab3c5c18 STD |
1654 | unsigned int nofs_flags; |
1655 | struct fscrypt_name fname; | |
3063d29f | 1656 | |
8546b570 DS |
1657 | ASSERT(pending->path); |
1658 | path = pending->path; | |
42874b3d | 1659 | |
b0c0ea63 DS |
1660 | ASSERT(pending->root_item); |
1661 | new_root_item = pending->root_item; | |
a22285a6 | 1662 | |
ab3c5c18 STD |
1663 | /* |
1664 | * We're inside a transaction and must make sure that any potential | |
1665 | * allocations with GFP_KERNEL in fscrypt won't recurse back to | |
1666 | * filesystem. | |
1667 | */ | |
1668 | nofs_flags = memalloc_nofs_save(); | |
cb9a1f5f | 1669 | pending->error = fscrypt_setup_filename(&parent_inode->vfs_inode, |
ab3c5c18 STD |
1670 | &pending->dentry->d_name, 0, |
1671 | &fname); | |
1672 | memalloc_nofs_restore(nofs_flags); | |
1673 | if (pending->error) | |
1674 | goto free_pending; | |
ab3c5c18 | 1675 | |
543068a2 | 1676 | pending->error = btrfs_get_free_objectid(tree_root, &objectid); |
aec8030a | 1677 | if (pending->error) |
ab3c5c18 | 1678 | goto free_fname; |
3063d29f | 1679 | |
d6726335 QW |
1680 | /* |
1681 | * Make qgroup to skip current new snapshot's qgroupid, as it is | |
1682 | * accounted by later btrfs_qgroup_inherit(). | |
1683 | */ | |
1684 | btrfs_set_skip_qgroup(trans, objectid); | |
1685 | ||
147d256e | 1686 | btrfs_reloc_pre_snapshot(pending, &to_reserve); |
d68fc57b YZ |
1687 | |
1688 | if (to_reserve > 0) { | |
9270501c | 1689 | pending->error = btrfs_block_rsv_add(fs_info, |
aec8030a MX |
1690 | &pending->block_rsv, |
1691 | to_reserve, | |
1692 | BTRFS_RESERVE_NO_FLUSH); | |
1693 | if (pending->error) | |
d6726335 | 1694 | goto clear_skip_qgroup; |
d68fc57b YZ |
1695 | } |
1696 | ||
3063d29f | 1697 | key.objectid = objectid; |
a22285a6 | 1698 | key.type = BTRFS_ROOT_ITEM_KEY; |
dba6ae0b | 1699 | key.offset = (u64)-1; |
3063d29f | 1700 | |
6fa9700e | 1701 | rsv = trans->block_rsv; |
a22285a6 | 1702 | trans->block_rsv = &pending->block_rsv; |
2382c5cc | 1703 | trans->bytes_reserved = trans->block_rsv->reserved; |
0b246afa | 1704 | trace_btrfs_space_reservation(fs_info, "transaction", |
88d3a5aa JB |
1705 | trans->transid, |
1706 | trans->bytes_reserved, 1); | |
cb9a1f5f | 1707 | parent_root = parent_inode->root; |
f0118cb6 JB |
1708 | ret = record_root_in_trans(trans, parent_root, 0); |
1709 | if (ret) | |
1710 | goto fail; | |
cb9a1f5f | 1711 | cur_time = current_time(&parent_inode->vfs_inode); |
04b285f3 | 1712 | |
3063d29f CM |
1713 | /* |
1714 | * insert the directory item | |
1715 | */ | |
cb9a1f5f | 1716 | ret = btrfs_set_inode_index(parent_inode, &index); |
df9f2782 FM |
1717 | if (ret) { |
1718 | btrfs_abort_transaction(trans, ret); | |
1719 | goto fail; | |
1720 | } | |
42874b3d MX |
1721 | |
1722 | /* check if there is a file/dir which has the same name. */ | |
1723 | dir_item = btrfs_lookup_dir_item(NULL, parent_root, path, | |
cb9a1f5f | 1724 | btrfs_ino(parent_inode), |
6db75318 | 1725 | &fname.disk_name, 0); |
42874b3d | 1726 | if (dir_item != NULL && !IS_ERR(dir_item)) { |
fe66a05a | 1727 | pending->error = -EEXIST; |
aec8030a | 1728 | goto dir_item_existed; |
42874b3d MX |
1729 | } else if (IS_ERR(dir_item)) { |
1730 | ret = PTR_ERR(dir_item); | |
66642832 | 1731 | btrfs_abort_transaction(trans, ret); |
8732d44f | 1732 | goto fail; |
79787eaa | 1733 | } |
42874b3d | 1734 | btrfs_release_path(path); |
52c26179 | 1735 | |
6ed05643 | 1736 | ret = btrfs_create_qgroup(trans, objectid); |
8049ba5d | 1737 | if (ret && ret != -EEXIST) { |
6ed05643 BB |
1738 | btrfs_abort_transaction(trans, ret); |
1739 | goto fail; | |
1740 | } | |
1741 | ||
e999376f CM |
1742 | /* |
1743 | * Pull in the delayed directory update | |
1744 | * and the delayed inode item, | |
1745 | * otherwise we would corrupt the FS | |
1746 | * during snapshot creation. | |
1747 | */ | |
e5c304e6 | 1748 | ret = btrfs_run_delayed_items(trans); |
8732d44f | 1749 | if (ret) { /* Transaction aborted */ |
66642832 | 1750 | btrfs_abort_transaction(trans, ret); |
8732d44f MX |
1751 | goto fail; |
1752 | } | |
e999376f | 1753 | |
f0118cb6 JB |
1754 | ret = record_root_in_trans(trans, root, 0); |
1755 | if (ret) { | |
1756 | btrfs_abort_transaction(trans, ret); | |
1757 | goto fail; | |
1758 | } | |
6bdb72de SW |
1759 | btrfs_set_root_last_snapshot(&root->root_item, trans->transid); |
1760 | memcpy(new_root_item, &root->root_item, sizeof(*new_root_item)); | |
08fe4db1 | 1761 | btrfs_check_and_init_root_item(new_root_item); |
6bdb72de | 1762 | |
b83cc969 LZ |
1763 | root_flags = btrfs_root_flags(new_root_item); |
1764 | if (pending->readonly) | |
1765 | root_flags |= BTRFS_ROOT_SUBVOL_RDONLY; | |
1766 | else | |
1767 | root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY; | |
1768 | btrfs_set_root_flags(new_root_item, root_flags); | |
1769 | ||
8ea05e3a AB |
1770 | btrfs_set_root_generation_v2(new_root_item, |
1771 | trans->transid); | |
807fc790 | 1772 | generate_random_guid(new_root_item->uuid); |
8ea05e3a AB |
1773 | memcpy(new_root_item->parent_uuid, root->root_item.uuid, |
1774 | BTRFS_UUID_SIZE); | |
70023da2 SB |
1775 | if (!(root_flags & BTRFS_ROOT_SUBVOL_RDONLY)) { |
1776 | memset(new_root_item->received_uuid, 0, | |
1777 | sizeof(new_root_item->received_uuid)); | |
1778 | memset(&new_root_item->stime, 0, sizeof(new_root_item->stime)); | |
1779 | memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime)); | |
1780 | btrfs_set_root_stransid(new_root_item, 0); | |
1781 | btrfs_set_root_rtransid(new_root_item, 0); | |
1782 | } | |
3cae210f QW |
1783 | btrfs_set_stack_timespec_sec(&new_root_item->otime, cur_time.tv_sec); |
1784 | btrfs_set_stack_timespec_nsec(&new_root_item->otime, cur_time.tv_nsec); | |
8ea05e3a | 1785 | btrfs_set_root_otransid(new_root_item, trans->transid); |
8ea05e3a | 1786 | |
6bdb72de | 1787 | old = btrfs_lock_root_node(root); |
9631e4cc JB |
1788 | ret = btrfs_cow_block(trans, root, old, NULL, 0, &old, |
1789 | BTRFS_NESTING_COW); | |
79787eaa JM |
1790 | if (ret) { |
1791 | btrfs_tree_unlock(old); | |
1792 | free_extent_buffer(old); | |
66642832 | 1793 | btrfs_abort_transaction(trans, ret); |
8732d44f | 1794 | goto fail; |
79787eaa | 1795 | } |
49b25e05 | 1796 | |
49b25e05 | 1797 | ret = btrfs_copy_root(trans, root, old, &tmp, objectid); |
79787eaa | 1798 | /* clean up in any case */ |
6bdb72de SW |
1799 | btrfs_tree_unlock(old); |
1800 | free_extent_buffer(old); | |
8732d44f | 1801 | if (ret) { |
66642832 | 1802 | btrfs_abort_transaction(trans, ret); |
8732d44f MX |
1803 | goto fail; |
1804 | } | |
f1ebcc74 | 1805 | /* see comments in should_cow_block() */ |
27cdeb70 | 1806 | set_bit(BTRFS_ROOT_FORCE_COW, &root->state); |
f1ebcc74 LB |
1807 | smp_wmb(); |
1808 | ||
6bdb72de | 1809 | btrfs_set_root_node(new_root_item, tmp); |
a22285a6 YZ |
1810 | /* record when the snapshot was created in key.offset */ |
1811 | key.offset = trans->transid; | |
1812 | ret = btrfs_insert_root(trans, tree_root, &key, new_root_item); | |
6bdb72de SW |
1813 | btrfs_tree_unlock(tmp); |
1814 | free_extent_buffer(tmp); | |
8732d44f | 1815 | if (ret) { |
66642832 | 1816 | btrfs_abort_transaction(trans, ret); |
8732d44f MX |
1817 | goto fail; |
1818 | } | |
6bdb72de | 1819 | |
a22285a6 YZ |
1820 | /* |
1821 | * insert root back/forward references | |
1822 | */ | |
6025c19f | 1823 | ret = btrfs_add_root_ref(trans, objectid, |
e094f480 | 1824 | btrfs_root_id(parent_root), |
cb9a1f5f | 1825 | btrfs_ino(parent_inode), index, |
6db75318 | 1826 | &fname.disk_name); |
8732d44f | 1827 | if (ret) { |
66642832 | 1828 | btrfs_abort_transaction(trans, ret); |
8732d44f MX |
1829 | goto fail; |
1830 | } | |
0660b5af | 1831 | |
a22285a6 | 1832 | key.offset = (u64)-1; |
e2b54eaf | 1833 | pending->snap = btrfs_get_new_fs_root(fs_info, objectid, &pending->anon_dev); |
79787eaa JM |
1834 | if (IS_ERR(pending->snap)) { |
1835 | ret = PTR_ERR(pending->snap); | |
2d892ccd | 1836 | pending->snap = NULL; |
66642832 | 1837 | btrfs_abort_transaction(trans, ret); |
8732d44f | 1838 | goto fail; |
79787eaa | 1839 | } |
d68fc57b | 1840 | |
49b25e05 | 1841 | ret = btrfs_reloc_post_snapshot(trans, pending); |
8732d44f | 1842 | if (ret) { |
66642832 | 1843 | btrfs_abort_transaction(trans, ret); |
8732d44f MX |
1844 | goto fail; |
1845 | } | |
361048f5 | 1846 | |
6426c7ad QW |
1847 | /* |
1848 | * Do special qgroup accounting for the snapshot, as we use a qgroup | |
1849 | * hack to make snapshot creation fast. | |
1850 | * To cooperate with that hack we apply it again here, otherwise the | |
1851 | * snapshot would be greatly slowed down by a subtree qgroup rescan. | |
1852 | */ | |
5343cd93 BB |
1853 | if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL) |
1854 | ret = qgroup_account_snapshot(trans, root, parent_root, | |
1855 | pending->inherit, objectid); | |
1856 | else if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE) | |
e094f480 JB |
1857 | ret = btrfs_qgroup_inherit(trans, btrfs_root_id(root), objectid, |
1858 | btrfs_root_id(parent_root), pending->inherit); | |
6426c7ad QW |
1859 | if (ret < 0) |
1860 | goto fail; | |
1861 | ||
6db75318 | 1862 | ret = btrfs_insert_dir_item(trans, &fname.disk_name, |
cb9a1f5f | 1863 | parent_inode, &key, BTRFS_FT_DIR, |
6db75318 | 1864 | index); |
8732d44f | 1865 | if (ret) { |
66642832 | 1866 | btrfs_abort_transaction(trans, ret); |
8732d44f MX |
1867 | goto fail; |
1868 | } | |
42874b3d | 1869 | |
cb9a1f5f | 1870 | btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size + |
6db75318 | 1871 | fname.disk_name.len * 2); |
cb9a1f5f DS |
1872 | inode_set_mtime_to_ts(&parent_inode->vfs_inode, |
1873 | inode_set_ctime_current(&parent_inode->vfs_inode)); | |
1874 | ret = btrfs_update_inode_fallback(trans, parent_inode); | |
dd5f9615 | 1875 | if (ret) { |
66642832 | 1876 | btrfs_abort_transaction(trans, ret); |
dd5f9615 SB |
1877 | goto fail; |
1878 | } | |
807fc790 AS |
1879 | ret = btrfs_uuid_tree_add(trans, new_root_item->uuid, |
1880 | BTRFS_UUID_KEY_SUBVOL, | |
cdb345a8 | 1881 | objectid); |
dd5f9615 | 1882 | if (ret) { |
66642832 | 1883 | btrfs_abort_transaction(trans, ret); |
dd5f9615 SB |
1884 | goto fail; |
1885 | } | |
1886 | if (!btrfs_is_empty_uuid(new_root_item->received_uuid)) { | |
cdb345a8 | 1887 | ret = btrfs_uuid_tree_add(trans, new_root_item->received_uuid, |
dd5f9615 SB |
1888 | BTRFS_UUID_KEY_RECEIVED_SUBVOL, |
1889 | objectid); | |
1890 | if (ret && ret != -EEXIST) { | |
66642832 | 1891 | btrfs_abort_transaction(trans, ret); |
dd5f9615 SB |
1892 | goto fail; |
1893 | } | |
1894 | } | |
d6726335 | 1895 | |
3063d29f | 1896 | fail: |
aec8030a MX |
1897 | pending->error = ret; |
1898 | dir_item_existed: | |
98c9942a | 1899 | trans->block_rsv = rsv; |
2382c5cc | 1900 | trans->bytes_reserved = 0; |
d6726335 QW |
1901 | clear_skip_qgroup: |
1902 | btrfs_clear_skip_qgroup(trans); | |
ab3c5c18 STD |
1903 | free_fname: |
1904 | fscrypt_free_filename(&fname); | |
1905 | free_pending: | |
6fa9700e | 1906 | kfree(new_root_item); |
b0c0ea63 | 1907 | pending->root_item = NULL; |
42874b3d | 1908 | btrfs_free_path(path); |
8546b570 DS |
1909 | pending->path = NULL; |
1910 | ||
49b25e05 | 1911 | return ret; |
3063d29f CM |
1912 | } |
1913 | ||
d352ac68 CM |
1914 | /* |
1915 | * create all the snapshots we've scheduled for creation | |
1916 | */ | |
08d50ca3 | 1917 | static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans) |
3de4586c | 1918 | { |
aec8030a | 1919 | struct btrfs_pending_snapshot *pending, *next; |
3de4586c | 1920 | struct list_head *head = &trans->transaction->pending_snapshots; |
aec8030a | 1921 | int ret = 0; |
3de4586c | 1922 | |
aec8030a MX |
1923 | list_for_each_entry_safe(pending, next, head, list) { |
1924 | list_del(&pending->list); | |
08d50ca3 | 1925 | ret = create_pending_snapshot(trans, pending); |
aec8030a MX |
1926 | if (ret) |
1927 | break; | |
1928 | } | |
1929 | return ret; | |
3de4586c CM |
1930 | } |
1931 | ||
2ff7e61e | 1932 | static void update_super_roots(struct btrfs_fs_info *fs_info) |
5d4f98a2 YZ |
1933 | { |
1934 | struct btrfs_root_item *root_item; | |
1935 | struct btrfs_super_block *super; | |
1936 | ||
0b246afa | 1937 | super = fs_info->super_copy; |
5d4f98a2 | 1938 | |
0b246afa | 1939 | root_item = &fs_info->chunk_root->root_item; |
093e037c DS |
1940 | super->chunk_root = root_item->bytenr; |
1941 | super->chunk_root_generation = root_item->generation; | |
1942 | super->chunk_root_level = root_item->level; | |
5d4f98a2 | 1943 | |
0b246afa | 1944 | root_item = &fs_info->tree_root->root_item; |
093e037c DS |
1945 | super->root = root_item->bytenr; |
1946 | super->generation = root_item->generation; | |
1947 | super->root_level = root_item->level; | |
0b246afa | 1948 | if (btrfs_test_opt(fs_info, SPACE_CACHE)) |
093e037c | 1949 | super->cache_generation = root_item->generation; |
94846229 BB |
1950 | else if (test_bit(BTRFS_FS_CLEANUP_SPACE_CACHE_V1, &fs_info->flags)) |
1951 | super->cache_generation = 0; | |
0b246afa | 1952 | if (test_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags)) |
093e037c | 1953 | super->uuid_tree_generation = root_item->generation; |
5d4f98a2 YZ |
1954 | } |
1955 | ||
8929ecfa YZ |
1956 | int btrfs_transaction_blocked(struct btrfs_fs_info *info) |
1957 | { | |
4a9d8bde | 1958 | struct btrfs_transaction *trans; |
8929ecfa | 1959 | int ret = 0; |
4a9d8bde | 1960 | |
a4abeea4 | 1961 | spin_lock(&info->trans_lock); |
4a9d8bde MX |
1962 | trans = info->running_transaction; |
1963 | if (trans) | |
1964 | ret = is_transaction_blocked(trans); | |
a4abeea4 | 1965 | spin_unlock(&info->trans_lock); |
8929ecfa YZ |
1966 | return ret; |
1967 | } | |
1968 | ||
fdfbf020 | 1969 | void btrfs_commit_transaction_async(struct btrfs_trans_handle *trans) |
bb9c12c9 | 1970 | { |
3a45bb20 | 1971 | struct btrfs_fs_info *fs_info = trans->fs_info; |
bb9c12c9 SW |
1972 | struct btrfs_transaction *cur_trans; |
1973 | ||
fdfbf020 JB |
1974 | /* Kick the transaction kthread. */ |
1975 | set_bit(BTRFS_FS_COMMIT_TRANS, &fs_info->flags); | |
1976 | wake_up_process(fs_info->transaction_kthread); | |
bb9c12c9 SW |
1977 | |
1978 | /* take transaction reference */ | |
bb9c12c9 | 1979 | cur_trans = trans->transaction; |
9b64f57d | 1980 | refcount_inc(&cur_trans->use_count); |
bb9c12c9 | 1981 | |
3a45bb20 | 1982 | btrfs_end_transaction(trans); |
6fc4e354 | 1983 | |
ae5d29d4 DS |
1984 | /* |
1985 | * Wait for the current transaction commit to start and block | |
1986 | * subsequent transaction joins | |
1987 | */ | |
77d20c68 | 1988 | btrfs_might_wait_for_state(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_PREP); |
ae5d29d4 DS |
1989 | wait_event(fs_info->transaction_blocked_wait, |
1990 | cur_trans->state >= TRANS_STATE_COMMIT_START || | |
1991 | TRANS_ABORTED(cur_trans)); | |
724e2315 | 1992 | btrfs_put_transaction(cur_trans); |
bb9c12c9 SW |
1993 | } |
1994 | ||
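A hypothetical caller-side sketch (not from this file) of the async commit helper above: join the running transaction and hand the handle to btrfs_commit_transaction_async(), which consumes it and returns without waiting for the commit to finish.

static int example_async_commit(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	/* The handle is released inside btrfs_commit_transaction_async(). */
	btrfs_commit_transaction_async(trans);
	return 0;
}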
ded980eb FM |
1995 | /* |
1996 | * If there is a running transaction, commit it, or if it's already committing, | |
1997 | * wait for its commit to complete. Does not start and commit a new transaction | |
1998 | * if there isn't one running. | |
1999 | */ | |
2000 | int btrfs_commit_current_transaction(struct btrfs_root *root) | |
2001 | { | |
2002 | struct btrfs_trans_handle *trans; | |
2003 | ||
2004 | trans = btrfs_attach_transaction_barrier(root); | |
2005 | if (IS_ERR(trans)) { | |
2006 | int ret = PTR_ERR(trans); | |
2007 | ||
2008 | return (ret == -ENOENT) ? 0 : ret; | |
2009 | } | |
2010 | ||
2011 | return btrfs_commit_transaction(trans); | |
2012 | } | |
2013 | ||
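A hypothetical usage sketch (not from this file): a code path that must ensure the currently running transaction, if any, is fully committed before proceeding can simply call the helper above.

static int example_flush_current_transaction(struct btrfs_fs_info *fs_info)
{
	/*
	 * Any root belonging to this filesystem works; the helper only uses
	 * it to attach to (or skip) the running transaction.
	 */
	return btrfs_commit_current_transaction(fs_info->tree_root);
}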
97cb39bb | 2014 | static void cleanup_transaction(struct btrfs_trans_handle *trans, int err) |
49b25e05 | 2015 | { |
97cb39bb | 2016 | struct btrfs_fs_info *fs_info = trans->fs_info; |
49b25e05 JM |
2017 | struct btrfs_transaction *cur_trans = trans->transaction; |
2018 | ||
b50fff81 | 2019 | WARN_ON(refcount_read(&trans->use_count) > 1); |
49b25e05 | 2020 | |
66642832 | 2021 | btrfs_abort_transaction(trans, err); |
7b8b92af | 2022 | |
0b246afa | 2023 | spin_lock(&fs_info->trans_lock); |
66b6135b | 2024 | |
25d8c284 MX |
2025 | /* |
2026 | * If the transaction is removed from the list, it means this | |
2027 | * transaction has been committed successfully, so it is impossible | |
2028 | * to call the cleanup function. | |
2029 | */ | |
2030 | BUG_ON(list_empty(&cur_trans->list)); | |
66b6135b | 2031 | |
0b246afa | 2032 | if (cur_trans == fs_info->running_transaction) { |
4a9d8bde | 2033 | cur_trans->state = TRANS_STATE_COMMIT_DOING; |
0b246afa | 2034 | spin_unlock(&fs_info->trans_lock); |
e1489b4f IA |
2035 | |
2036 | /* | |
2037 | * The thread has already released the lockdep map as reader | |
2038 | * already in btrfs_commit_transaction(). | |
2039 | */ | |
2040 | btrfs_might_wait_for_event(fs_info, btrfs_trans_num_writers); | |
f094ac32 LB |
2041 | wait_event(cur_trans->writer_wait, |
2042 | atomic_read(&cur_trans->num_writers) == 1); | |
2043 | ||
0b246afa | 2044 | spin_lock(&fs_info->trans_lock); |
d7096fc3 | 2045 | } |
061dde82 FM |
2046 | |
2047 | /* | |
2048 | * Now that we know no one else is still using the transaction we can | |
2049 | * remove the transaction from the list of transactions. This avoids | |
2050 | * the transaction kthread from cleaning up the transaction while some | |
2051 | * other task is still using it, which could result in a use-after-free | |
2052 | * on things like log trees, as it forces the transaction kthread to | |
2053 | * wait for this transaction to be cleaned up by us. | |
2054 | */ | |
2055 | list_del_init(&cur_trans->list); | |
2056 | ||
0b246afa | 2057 | spin_unlock(&fs_info->trans_lock); |
49b25e05 | 2058 | |
c3a5888e | 2059 | btrfs_cleanup_one_transaction(trans->transaction); |
49b25e05 | 2060 | |
0b246afa JM |
2061 | spin_lock(&fs_info->trans_lock); |
2062 | if (cur_trans == fs_info->running_transaction) | |
2063 | fs_info->running_transaction = NULL; | |
2064 | spin_unlock(&fs_info->trans_lock); | |
4a9d8bde | 2065 | |
e0228285 | 2066 | if (trans->type & __TRANS_FREEZABLE) |
0b246afa | 2067 | sb_end_intwrite(fs_info->sb); |
724e2315 JB |
2068 | btrfs_put_transaction(cur_trans); |
2069 | btrfs_put_transaction(cur_trans); | |
49b25e05 | 2070 | |
2e4e97ab | 2071 | trace_btrfs_transaction_commit(fs_info); |
49b25e05 | 2072 | |
49b25e05 JM |
2073 | if (current->journal_info == trans) |
2074 | current->journal_info = NULL; | |
2d82a40a FM |
2075 | |
2076 | /* | |
2077 | * If relocation is running, we can't cancel scrub because that will | |
2078 | * result in a deadlock. Before relocating a block group, relocation | |
2079 | * pauses scrub, then starts and commits a transaction before unpausing | |
2080 | * scrub. If the transaction commit is being done by the relocation | |
2081 | * task or triggered by another task and the relocation task is waiting | |
2082 | * for the commit, and we end up here due to an error in the commit | |
2083 | * path, then calling btrfs_scrub_cancel() will deadlock, as we are | |
2084 | * asking for scrub to stop while having it asked to be paused higher | |
2085 | * above in relocation code. | |
2086 | */ | |
2087 | if (!test_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags)) | |
2088 | btrfs_scrub_cancel(fs_info); | |
49b25e05 JM |
2089 | |
2090 | kmem_cache_free(btrfs_trans_handle_cachep, trans); | |
2091 | } | |
2092 | ||
c7cc64a9 DS |
2093 | /* |
2094 | * Release reserved delayed ref space of all pending block groups of the | |
2095 | * transaction and remove them from the list | |
2096 | */ | |
2097 | static void btrfs_cleanup_pending_block_groups(struct btrfs_trans_handle *trans) | |
2098 | { | |
2099 | struct btrfs_fs_info *fs_info = trans->fs_info; | |
32da5386 | 2100 | struct btrfs_block_group *block_group, *tmp; |
c7cc64a9 DS |
2101 | |
2102 | list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) { | |
9ef17228 | 2103 | btrfs_dec_delayed_refs_rsv_bg_inserts(fs_info); |
7511e29c BB |
2104 | /* |
2105 | * Not strictly necessary to lock, as no other task will be using a | |
2106 | * block_group on the new_bgs list during a transaction abort. | |
2107 | */ | |
2108 | spin_lock(&fs_info->unused_bgs_lock); | |
c7cc64a9 | 2109 | list_del_init(&block_group->bg_list); |
7cbce3cb | 2110 | btrfs_put_block_group(block_group); |
7511e29c | 2111 | spin_unlock(&fs_info->unused_bgs_lock); |
c7cc64a9 DS |
2112 | } |
2113 | } | |
2114 | ||
88090ad3 | 2115 | static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info) |
82436617 | 2116 | { |
ce8ea7cc | 2117 | /* |
a0f0cf83 | 2118 | * We use try_to_writeback_inodes_sb() here because if we used |
ce8ea7cc JB |
2119 | * btrfs_start_delalloc_roots we would deadlock with fs freeze. |
2120 | * We currently hold the fs freeze lock; if we do an async flush | |
2121 | * we'll do btrfs_join_transaction() and deadlock because we need to | |
2122 | * wait for the fs freeze lock. Using the direct flushing we benefit | |
2123 | * from already being in a transaction and our join_transaction doesn't | |
2124 | * have to re-take the fs freeze lock. | |
a0f0cf83 FM |
2125 | * |
2126 | * Note that try_to_writeback_inodes_sb() will only trigger writeback | |
2127 | * if it can read lock sb->s_umount. It will always be able to lock it, | |
2128 | * except when the filesystem is being unmounted or being frozen, but in | |
2129 | * those cases sync_filesystem() is called, which results in calling | |
2130 | * writeback_inodes_sb() while holding a write lock on sb->s_umount. | |
2131 | * Note that we don't call writeback_inodes_sb() directly, because it | |
2132 | * will emit a warning if sb->s_umount is not locked. | |
ce8ea7cc | 2133 | */ |
88090ad3 | 2134 | if (btrfs_test_opt(fs_info, FLUSHONCOMMIT)) |
a0f0cf83 | 2135 | try_to_writeback_inodes_sb(fs_info->sb, WB_REASON_SYNC); |
82436617 MX |
2136 | return 0; |
2137 | } | |
2138 | ||
88090ad3 | 2139 | static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info) |
82436617 | 2140 | { |
88090ad3 | 2141 | if (btrfs_test_opt(fs_info, FLUSHONCOMMIT)) |
42317ab4 | 2142 | btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL); |
82436617 MX |
2143 | } |
2144 | ||
28b21c55 FM |
2145 | /* |
2146 | * Add the pending snapshot associated with the given transaction handle to | |
2147 | * the transaction's list of pending snapshots. This must be called after the | |
2148 | * transaction commit has started and while holding fs_info->trans_lock. | |
2149 | * This serves to guarantee a caller of btrfs_commit_transaction() that it can | |
2150 | * safely free the pending snapshot pointer in case btrfs_commit_transaction() | |
2151 | * returns an error. | |
2152 | */ | |
2153 | static void add_pending_snapshot(struct btrfs_trans_handle *trans) | |
2154 | { | |
2155 | struct btrfs_transaction *cur_trans = trans->transaction; | |
2156 | ||
2157 | if (!trans->pending_snapshot) | |
2158 | return; | |
2159 | ||
2160 | lockdep_assert_held(&trans->fs_info->trans_lock); | |
77d20c68 | 2161 | ASSERT(cur_trans->state >= TRANS_STATE_COMMIT_PREP); |
28b21c55 FM |
2162 | |
2163 | list_add(&trans->pending_snapshot->list, &cur_trans->pending_snapshots); | |
2164 | } | |
2165 | ||
e55958c8 IA |
2166 | static void update_commit_stats(struct btrfs_fs_info *fs_info, ktime_t interval) |
2167 | { | |
2168 | fs_info->commit_stats.commit_count++; | |
2169 | fs_info->commit_stats.last_commit_dur = interval; | |
2170 | fs_info->commit_stats.max_commit_dur = | |
2171 | max_t(u64, fs_info->commit_stats.max_commit_dur, interval); | |
2172 | fs_info->commit_stats.total_commit_dur += interval; | |
2173 | } | |
2174 | ||
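An illustrative sketch (not kernel code) of how the counters maintained by update_commit_stats() above could be turned into an average commit duration; div64_u64() is the 64-bit division helper from <linux/math64.h>, and the function name is made up.

static u64 example_avg_commit_dur_ns(const struct btrfs_fs_info *fs_info)
{
	u64 count = fs_info->commit_stats.commit_count;

	if (!count)
		return 0;
	/* total_commit_dur and the result are in nanoseconds (ktime deltas). */
	return div64_u64(fs_info->commit_stats.total_commit_dur, count);
}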
3a45bb20 | 2175 | int btrfs_commit_transaction(struct btrfs_trans_handle *trans) |
79154b1b | 2176 | { |
3a45bb20 | 2177 | struct btrfs_fs_info *fs_info = trans->fs_info; |
49b25e05 | 2178 | struct btrfs_transaction *cur_trans = trans->transaction; |
8fd17795 | 2179 | struct btrfs_transaction *prev_trans = NULL; |
25287e0a | 2180 | int ret; |
e55958c8 IA |
2181 | ktime_t start_time; |
2182 | ktime_t interval; | |
79154b1b | 2183 | |
35b814f3 | 2184 | ASSERT(refcount_read(&trans->use_count) == 1); |
77d20c68 | 2185 | btrfs_trans_state_lockdep_acquire(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_PREP); |
35b814f3 | 2186 | |
c52cc7b7 JB |
2187 | clear_bit(BTRFS_FS_NEED_TRANS_COMMIT, &fs_info->flags); |
2188 | ||
8d25a086 | 2189 | /* Stop the commit early if ->aborted is set */ |
bf31f87f | 2190 | if (TRANS_ABORTED(cur_trans)) { |
25287e0a | 2191 | ret = cur_trans->aborted; |
3e738c53 | 2192 | goto lockdep_trans_commit_start_release; |
25287e0a | 2193 | } |
49b25e05 | 2194 | |
f45c752b JB |
2195 | btrfs_trans_release_metadata(trans); |
2196 | trans->block_rsv = NULL; | |
2197 | ||
56bec294 | 2198 | /* |
e19eb11f JB |
2199 | * We only want one transaction commit doing the flushing so we do not |
2200 | * waste a bunch of time on lock contention on the extent root node. | |
56bec294 | 2201 | */ |
e19eb11f JB |
2202 | if (!test_and_set_bit(BTRFS_DELAYED_REFS_FLUSHING, |
2203 | &cur_trans->delayed_refs.flags)) { | |
2204 | /* | |
2205 | * Make a pass through all the delayed refs we have so far. | |
2206 | * Any running threads may add more while we are here. | |
2207 | */ | |
2208 | ret = btrfs_run_delayed_refs(trans, 0); | |
3e738c53 IA |
2209 | if (ret) |
2210 | goto lockdep_trans_commit_start_release; | |
e19eb11f | 2211 | } |
56bec294 | 2212 | |
119e80df | 2213 | btrfs_create_pending_block_groups(trans); |
ea658bad | 2214 | |
3204d33c | 2215 | if (!test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &cur_trans->flags)) { |
1bbc621e CM |
2216 | int run_it = 0; |
2217 | ||
2218 | /* this mutex is also taken before trying to set | |
2219 | * block groups readonly. We need to make sure | |
2220 | * that nobody has set a block group readonly | |
2221 | * after extents from that block group have been | |
2222 | * allocated for cache files. btrfs_set_block_group_ro | |
2223 | * will wait for the transaction to commit if it | |
3204d33c | 2224 | * finds BTRFS_TRANS_DIRTY_BG_RUN set. |
1bbc621e | 2225 | * |
3204d33c JB |
2226 | * The BTRFS_TRANS_DIRTY_BG_RUN flag is also used to make sure |
2227 | * only one process starts all the block group IO. It wouldn't | |
1bbc621e CM |
2228 | * hurt to have more than one go through, but there's no |
2229 | * real advantage to it either. | |
2230 | */ | |
0b246afa | 2231 | mutex_lock(&fs_info->ro_block_group_mutex); |
3204d33c JB |
2232 | if (!test_and_set_bit(BTRFS_TRANS_DIRTY_BG_RUN, |
2233 | &cur_trans->flags)) | |
1bbc621e | 2234 | run_it = 1; |
0b246afa | 2235 | mutex_unlock(&fs_info->ro_block_group_mutex); |
1bbc621e | 2236 | |
f9cacae3 | 2237 | if (run_it) { |
21217054 | 2238 | ret = btrfs_start_dirty_block_groups(trans); |
3e738c53 IA |
2239 | if (ret) |
2240 | goto lockdep_trans_commit_start_release; | |
f9cacae3 | 2241 | } |
1bbc621e CM |
2242 | } |
2243 | ||
0b246afa | 2244 | spin_lock(&fs_info->trans_lock); |
77d20c68 | 2245 | if (cur_trans->state >= TRANS_STATE_COMMIT_PREP) { |
d0c2f4fa FM |
2246 | enum btrfs_trans_state want_state = TRANS_STATE_COMPLETED; |
2247 | ||
28b21c55 FM |
2248 | add_pending_snapshot(trans); |
2249 | ||
0b246afa | 2250 | spin_unlock(&fs_info->trans_lock); |
9b64f57d | 2251 | refcount_inc(&cur_trans->use_count); |
ccd467d6 | 2252 | |
d0c2f4fa FM |
2253 | if (trans->in_fsync) |
2254 | want_state = TRANS_STATE_SUPER_COMMITTED; | |
3e738c53 IA |
2255 | |
2256 | btrfs_trans_state_lockdep_release(fs_info, | |
77d20c68 | 2257 | BTRFS_LOCKDEP_TRANS_COMMIT_PREP); |
d0c2f4fa FM |
2258 | ret = btrfs_end_transaction(trans); |
2259 | wait_for_commit(cur_trans, want_state); | |
15ee9bc7 | 2260 | |
bf31f87f | 2261 | if (TRANS_ABORTED(cur_trans)) |
b4924a0f LB |
2262 | ret = cur_trans->aborted; |
2263 | ||
724e2315 | 2264 | btrfs_put_transaction(cur_trans); |
15ee9bc7 | 2265 | |
49b25e05 | 2266 | return ret; |
79154b1b | 2267 | } |
4313b399 | 2268 | |
77d20c68 | 2269 | cur_trans->state = TRANS_STATE_COMMIT_PREP; |
0b246afa | 2270 | wake_up(&fs_info->transaction_blocked_wait); |
77d20c68 | 2271 | btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_PREP); |
bb9c12c9 | 2272 | |
d887f03f | 2273 | if (!list_is_first(&cur_trans->list, &fs_info->trans_list)) { |
d0c2f4fa FM |
2274 | enum btrfs_trans_state want_state = TRANS_STATE_COMPLETED; |
2275 | ||
2276 | if (trans->in_fsync) | |
2277 | want_state = TRANS_STATE_SUPER_COMMITTED; | |
2278 | ||
d887f03f | 2279 | prev_trans = list_prev_entry(cur_trans, list); |
d0c2f4fa | 2280 | if (prev_trans->state < want_state) { |
9b64f57d | 2281 | refcount_inc(&prev_trans->use_count); |
0b246afa | 2282 | spin_unlock(&fs_info->trans_lock); |
ccd467d6 | 2283 | |
d0c2f4fa FM |
2284 | wait_for_commit(prev_trans, want_state); |
2285 | ||
bf31f87f | 2286 | ret = READ_ONCE(prev_trans->aborted); |
ccd467d6 | 2287 | |
724e2315 | 2288 | btrfs_put_transaction(prev_trans); |
1f9b8c8f | 2289 | if (ret) |
e1489b4f | 2290 | goto lockdep_release; |
77d20c68 | 2291 | spin_lock(&fs_info->trans_lock); |
ccd467d6 | 2292 | } |
a4abeea4 | 2293 | } else { |
cb2d3dad FM |
2294 | /* |
2295 | * The previous transaction was aborted and was already removed | |
2296 | * from the list of transactions at fs_info->trans_list. So we | |
2297 | * abort to prevent writing a new superblock that reflects a | |
2298 | * corrupt state (pointing to trees with unwritten nodes/leafs). | |
2299 | */ | |
84961539 | 2300 | if (BTRFS_FS_ERROR(fs_info)) { |
77d20c68 | 2301 | spin_unlock(&fs_info->trans_lock); |
cb2d3dad | 2302 | ret = -EROFS; |
e1489b4f | 2303 | goto lockdep_release; |
cb2d3dad | 2304 | } |
ccd467d6 | 2305 | } |
15ee9bc7 | 2306 | |
77d20c68 JB |
2307 | cur_trans->state = TRANS_STATE_COMMIT_START; |
2308 | wake_up(&fs_info->transaction_blocked_wait); | |
2309 | spin_unlock(&fs_info->trans_lock); | |
2310 | ||
e55958c8 IA |
2311 | /* |
2312 | * Get the time spent on the work done by the commit thread and not | |
2313 | * the time spent waiting on a previous commit | |
2314 | */ | |
2315 | start_time = ktime_get_ns(); | |
2316 | ||
0860adfd MX |
2317 | extwriter_counter_dec(cur_trans, trans->type); |
2318 | ||
88090ad3 | 2319 | ret = btrfs_start_delalloc_flush(fs_info); |
82436617 | 2320 | if (ret) |
e1489b4f | 2321 | goto lockdep_release; |
82436617 | 2322 | |
e5c304e6 | 2323 | ret = btrfs_run_delayed_items(trans); |
581227d0 | 2324 | if (ret) |
e1489b4f | 2325 | goto lockdep_release; |
15ee9bc7 | 2326 | |
5a9ba670 IA |
2327 | /* |
2328 | * The thread has started/joined the transaction thus it holds the | |
2329 | * lockdep map as a reader. It has to release it before acquiring the | |
2330 | * lockdep map as a writer. | |
2331 | */ | |
2332 | btrfs_lockdep_release(fs_info, btrfs_trans_num_extwriters); | |
2333 | btrfs_might_wait_for_event(fs_info, btrfs_trans_num_extwriters); | |
581227d0 MX |
2334 | wait_event(cur_trans->writer_wait, |
2335 | extwriter_counter_read(cur_trans) == 0); | |
15ee9bc7 | 2336 | |
581227d0 | 2337 | /* some pending stuff might be added after the previous flush. */ |
e5c304e6 | 2338 | ret = btrfs_run_delayed_items(trans); |
e1489b4f IA |
2339 | if (ret) { |
2340 | btrfs_lockdep_release(fs_info, btrfs_trans_num_writers); | |
ca469637 | 2341 | goto cleanup_transaction; |
e1489b4f | 2342 | } |
ca469637 | 2343 | |
88090ad3 | 2344 | btrfs_wait_delalloc_flush(fs_info); |
cb7ab021 | 2345 | |
48778179 FM |
2346 | /* |
2347 | * Wait for all ordered extents started by a fast fsync that joined this | |
2348 | * transaction. Otherwise if this transaction commits before the ordered | |
2349 | * extents complete we lose logged data after a power failure. | |
2350 | */ | |
8b53779e | 2351 | btrfs_might_wait_for_event(fs_info, btrfs_trans_pending_ordered); |
48778179 FM |
2352 | wait_event(cur_trans->pending_wait, |
2353 | atomic_read(&cur_trans->pending_ordered) == 0); | |
2354 | ||
2ff7e61e | 2355 | btrfs_scrub_pause(fs_info); |
ed0ca140 JB |
2356 | /* |
2357 | * Ok now we need to make sure to block out any other joins while we | |
2358 | * commit the transaction. We could have started a join before setting | |
4a9d8bde | 2359 | * COMMIT_DOING so make sure to wait for num_writers to be 1 again. | |
ed0ca140 | 2360 | */ |
0b246afa | 2361 | spin_lock(&fs_info->trans_lock); |
28b21c55 | 2362 | add_pending_snapshot(trans); |
4a9d8bde | 2363 | cur_trans->state = TRANS_STATE_COMMIT_DOING; |
0b246afa | 2364 | spin_unlock(&fs_info->trans_lock); |
e1489b4f IA |
2365 | |
2366 | /* | |
2367 | * The thread has started/joined the transaction thus it holds the | |
2368 | * lockdep map as a reader. It has to release it before acquiring the | |
2369 | * lockdep map as a writer. | |
2370 | */ | |
2371 | btrfs_lockdep_release(fs_info, btrfs_trans_num_writers); | |
2372 | btrfs_might_wait_for_event(fs_info, btrfs_trans_num_writers); | |
ed0ca140 JB |
2373 | wait_event(cur_trans->writer_wait, |
2374 | atomic_read(&cur_trans->num_writers) == 1); | |
2375 | ||
3e738c53 IA |
2376 | /* |
2377 | * Make lockdep happy by acquiring the state locks after | |
2378 | * btrfs_trans_num_writers is released. If we acquired the state locks | |
2379 | * before releasing the btrfs_trans_num_writers lock then lockdep would | |
2380 | * complain because we did not follow the reverse order unlocking rule. | |
2381 | */ | |
2382 | btrfs_trans_state_lockdep_acquire(fs_info, BTRFS_LOCKDEP_TRANS_COMPLETED); | |
2383 | btrfs_trans_state_lockdep_acquire(fs_info, BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED); | |
2384 | btrfs_trans_state_lockdep_acquire(fs_info, BTRFS_LOCKDEP_TRANS_UNBLOCKED); | |
2385 | ||
fdfbf020 JB |
2386 | /* |
2387 | * We've started the commit, clear the flag in case we were triggered to | |
2388 | * do an async commit but somebody else started before the transaction | |
2389 | * kthread could do the work. | |
2390 | */ | |
2391 | clear_bit(BTRFS_FS_COMMIT_TRANS, &fs_info->flags); | |
2392 | ||
bf31f87f | 2393 | if (TRANS_ABORTED(cur_trans)) { |
2cba30f1 | 2394 | ret = cur_trans->aborted; |
3e738c53 | 2395 | btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_UNBLOCKED); |
6cf7f77e | 2396 | goto scrub_continue; |
2cba30f1 | 2397 | } |
7585717f CM |
2398 | /* |
2399 | * the reloc mutex makes sure that we stop | |
2400 | * the balancing code from coming in and moving | |
2401 | * extents around in the middle of the commit | |
2402 | */ | |
0b246afa | 2403 | mutex_lock(&fs_info->reloc_mutex); |
7585717f | 2404 | |
42874b3d MX |
2405 | /* |
2406 | * We needn't worry about the delayed items because we will | |
2407 | * deal with them in create_pending_snapshot(), which is the | |
2408 | * core function of the snapshot creation. | |
2409 | */ | |
08d50ca3 | 2410 | ret = create_pending_snapshots(trans); |
56e9f6ea DS |
2411 | if (ret) |
2412 | goto unlock_reloc; | |
3063d29f | 2413 | |
42874b3d MX |
2414 | /* |
2415 | * We insert the dir indexes of the snapshots and update the inode | |
2416 | * of the snapshots' parents after the snapshot creation, so there | |
2417 | * are some delayed items which are not dealt with. Now deal with | |
2418 | * them. | |
2419 | * | |
2420 | * We needn't worry that this operation will corrupt the snapshots, | |
2421 | * because all the trees which are snapshotted will be forced to COW | |
2422 | * the nodes and leaves. | |
2423 | */ | |
e5c304e6 | 2424 | ret = btrfs_run_delayed_items(trans); |
56e9f6ea DS |
2425 | if (ret) |
2426 | goto unlock_reloc; | |
16cdcec7 | 2427 | |
8a526c44 | 2428 | ret = btrfs_run_delayed_refs(trans, U64_MAX); |
56e9f6ea DS |
2429 | if (ret) |
2430 | goto unlock_reloc; | |
56bec294 | 2431 | |
e999376f CM |
2432 | /* |
2433 | * make sure none of the code above managed to slip in a | |
2434 | * delayed item | |
2435 | */ | |
ccdf9b30 | 2436 | btrfs_assert_delayed_root_empty(fs_info); |
e999376f | 2437 | |
2c90e5d6 | 2438 | WARN_ON(cur_trans != trans->transaction); |
dc17ff8f | 2439 | |
7e4443d9 | 2440 | ret = commit_fs_roots(trans); |
56e9f6ea | 2441 | if (ret) |
dfba78dc | 2442 | goto unlock_reloc; |
54aa1f4d | 2443 | |
5d4f98a2 | 2444 | /* commit_fs_roots gets rid of all the tree log roots, so it is now |
e02119d5 CM |
2445 | * safe to free the root of the tree log roots | |
2446 | */ | |
0b246afa | 2447 | btrfs_free_log_root_tree(trans, fs_info); |
e02119d5 | 2448 | |
0ed4792a QW |
2449 | /* |
2450 | * Since fs roots are all committed, we can get a quite accurate | |
2451 | * new_roots. So let's do quota accounting. | |
2452 | */ | |
460fb20a | 2453 | ret = btrfs_qgroup_account_extents(trans); |
56e9f6ea | 2454 | if (ret < 0) |
dfba78dc | 2455 | goto unlock_reloc; |
0ed4792a | 2456 | |
9386d8bc | 2457 | ret = commit_cowonly_roots(trans); |
56e9f6ea | 2458 | if (ret) |
dfba78dc | 2459 | goto unlock_reloc; |
54aa1f4d | 2460 | |
2cba30f1 MX |
2461 | /* |
2462 | * The tasks which save the space cache and inode cache may also | |
2463 | * update ->aborted, check it. | |
2464 | */ | |
bf31f87f | 2465 | if (TRANS_ABORTED(cur_trans)) { |
2cba30f1 | 2466 | ret = cur_trans->aborted; |
dfba78dc | 2467 | goto unlock_reloc; |
2cba30f1 MX |
2468 | } |
2469 | ||
0b246afa | 2470 | cur_trans = fs_info->running_transaction; |
5d4f98a2 | 2471 | |
0b246afa JM |
2472 | btrfs_set_root_node(&fs_info->tree_root->root_item, |
2473 | fs_info->tree_root->node); | |
2474 | list_add_tail(&fs_info->tree_root->dirty_list, | |
9e351cc8 | 2475 | &cur_trans->switch_commits); |
5d4f98a2 | 2476 | |
0b246afa JM |
2477 | btrfs_set_root_node(&fs_info->chunk_root->root_item, |
2478 | fs_info->chunk_root->node); | |
2479 | list_add_tail(&fs_info->chunk_root->dirty_list, | |
9e351cc8 JB |
2480 | &cur_trans->switch_commits); |
2481 | ||
f7238e50 JB |
2482 | if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) { |
2483 | btrfs_set_root_node(&fs_info->block_group_root->root_item, | |
2484 | fs_info->block_group_root->node); | |
2485 | list_add_tail(&fs_info->block_group_root->dirty_list, | |
2486 | &cur_trans->switch_commits); | |
2487 | } | |
2488 | ||
889bfa39 | 2489 | switch_commit_roots(trans); |
5d4f98a2 | 2490 | |
ce93ec54 | 2491 | ASSERT(list_empty(&cur_trans->dirty_bgs)); |
1bbc621e | 2492 | ASSERT(list_empty(&cur_trans->io_bgs)); |
2ff7e61e | 2493 | update_super_roots(fs_info); |
e02119d5 | 2494 | |
0b246afa JM |
2495 | btrfs_set_super_log_root(fs_info->super_copy, 0); |
2496 | btrfs_set_super_log_root_level(fs_info->super_copy, 0); | |
2497 | memcpy(fs_info->super_for_commit, fs_info->super_copy, | |
2498 | sizeof(*fs_info->super_copy)); | |
ccd467d6 | 2499 | |
bbbf7243 | 2500 | btrfs_commit_device_sizes(cur_trans); |
935e5cc9 | 2501 | |
0b246afa JM |
2502 | clear_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags); |
2503 | clear_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags); | |
656f30db | 2504 | |
4fbcdf66 FM |
2505 | btrfs_trans_release_chunk_metadata(trans); |
2506 | ||
dfba78dc FM |
2507 | /* |
2508 | * Before changing the transaction state to TRANS_STATE_UNBLOCKED and | |
2509 | * setting fs_info->running_transaction to NULL, lock tree_log_mutex to | |
2510 | * make sure that before we commit our superblock, no other task can | |
2511 | * start a new transaction and commit a log tree before we commit our | |
2512 | * superblock. Anyone trying to commit a log tree locks this mutex before | |
2513 | * writing its superblock. | |
2514 | */ | |
2515 | mutex_lock(&fs_info->tree_log_mutex); | |
2516 | ||
0b246afa | 2517 | spin_lock(&fs_info->trans_lock); |
4a9d8bde | 2518 | cur_trans->state = TRANS_STATE_UNBLOCKED; |
0b246afa JM |
2519 | fs_info->running_transaction = NULL; |
2520 | spin_unlock(&fs_info->trans_lock); | |
2521 | mutex_unlock(&fs_info->reloc_mutex); | |
b7ec40d7 | 2522 | |
0b246afa | 2523 | wake_up(&fs_info->transaction_wait); |
3e738c53 | 2524 | btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_UNBLOCKED); |
e6dcd2dc | 2525 | |
b7625f46 QW |
2526 | /* If any features changed, wake up the cleaner to update sysfs. */ | |
2527 | if (test_bit(BTRFS_FS_FEATURE_CHANGED, &fs_info->flags) && | |
2528 | fs_info->cleaner_kthread) | |
2529 | wake_up_process(fs_info->cleaner_kthread); | |
2530 | ||
70458a58 | 2531 | ret = btrfs_write_and_wait_transaction(trans); |
49b25e05 | 2532 | if (ret) { |
0b246afa JM |
2533 | btrfs_handle_fs_error(fs_info, ret, |
2534 | "Error while writing out transaction"); | |
2535 | mutex_unlock(&fs_info->tree_log_mutex); | |
6cf7f77e | 2536 | goto scrub_continue; |
49b25e05 JM |
2537 | } |
2538 | ||
eece6a9c | 2539 | ret = write_all_supers(fs_info, 0); |
e02119d5 CM |
2540 | /* |
2541 | * the super is written, we can safely allow the tree-loggers | |
2542 | * to go about their business | |
2543 | */ | |
0b246afa | 2544 | mutex_unlock(&fs_info->tree_log_mutex); |
c1f32b7c AJ |
2545 | if (ret) |
2546 | goto scrub_continue; | |
e02119d5 | 2547 | |
d0c2f4fa FM |
2548 | /* |
2549 | * We needn't acquire the lock here because there is no other task | |
2550 | * which can change it. | |
2551 | */ | |
2552 | cur_trans->state = TRANS_STATE_SUPER_COMMITTED; | |
2553 | wake_up(&cur_trans->commit_wait); | |
3e738c53 | 2554 | btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED); |
d0c2f4fa | 2555 | |
25983713 FM |
2556 | ret = btrfs_finish_extent_commit(trans); |
2557 | if (ret) | |
2558 | goto scrub_continue; | |
4313b399 | 2559 | |
3204d33c | 2560 | if (test_bit(BTRFS_TRANS_HAVE_FREE_BGS, &cur_trans->flags)) |
0b246afa | 2561 | btrfs_clear_space_info_full(fs_info); |
13212b54 | 2562 | |
0124855f | 2563 | btrfs_set_last_trans_committed(fs_info, cur_trans->transid); |
4a9d8bde MX |
2564 | /* |
2565 | * We needn't acquire the lock here because there is no other task | |
2566 | * which can change it. | |
2567 | */ | |
2568 | cur_trans->state = TRANS_STATE_COMPLETED; | |
2c90e5d6 | 2569 | wake_up(&cur_trans->commit_wait); |
3e738c53 | 2570 | btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMPLETED); |
3de4586c | 2571 | |
0b246afa | 2572 | spin_lock(&fs_info->trans_lock); |
13c5a93e | 2573 | list_del_init(&cur_trans->list); |
0b246afa | 2574 | spin_unlock(&fs_info->trans_lock); |
a4abeea4 | 2575 | |
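/*
 * Two references are dropped here: most likely the one held by this trans
 * handle and the one that kept cur_trans on fs_info's transaction list,
 * which it was just removed from above.
 */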
724e2315 JB |
2576 | btrfs_put_transaction(cur_trans); |
2577 | btrfs_put_transaction(cur_trans); | |
58176a96 | 2578 | |
0860adfd | 2579 | if (trans->type & __TRANS_FREEZABLE) |
0b246afa | 2580 | sb_end_intwrite(fs_info->sb); |
b2b5ef5c | 2581 | |
2e4e97ab | 2582 | trace_btrfs_transaction_commit(fs_info); |
1abe9b8a | 2583 | |
e55958c8 IA |
2584 | interval = ktime_get_ns() - start_time; |
2585 | ||
2ff7e61e | 2586 | btrfs_scrub_continue(fs_info); |
a2de733c | 2587 | |
9ed74f2d JB |
2588 | if (current->journal_info == trans) |
2589 | current->journal_info = NULL; | |
2590 | ||
2c90e5d6 | 2591 | kmem_cache_free(btrfs_trans_handle_cachep, trans); |
24bbcf04 | 2592 | |
e55958c8 IA |
2593 | update_commit_stats(fs_info, interval); |
2594 | ||
79154b1b | 2595 | return ret; |
49b25e05 | 2596 | |
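/*
 * Error unwinding: the labels below are ordered so that jumping to an earlier
 * one falls through into the later ones, i.e. a failure while the reloc mutex
 * is still held also resumes scrub and then runs the common transaction
 * cleanup.
 */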
56e9f6ea DS |
2597 | unlock_reloc: |
2598 | mutex_unlock(&fs_info->reloc_mutex); | |
3e738c53 | 2599 | btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_UNBLOCKED); |
6cf7f77e | 2600 | scrub_continue: |
3e738c53 IA |
2601 | btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED); |
2602 | btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMPLETED); | |
2ff7e61e | 2603 | btrfs_scrub_continue(fs_info); |
49b25e05 | 2604 | cleanup_transaction: |
dc60c525 | 2605 | btrfs_trans_release_metadata(trans); |
c7cc64a9 | 2606 | btrfs_cleanup_pending_block_groups(trans); |
4fbcdf66 | 2607 | btrfs_trans_release_chunk_metadata(trans); |
0e721106 | 2608 | trans->block_rsv = NULL; |
0b246afa | 2609 | btrfs_warn(fs_info, "Skipping commit of aborted transaction."); |
49b25e05 JM |
2610 | if (current->journal_info == trans) |
2611 | current->journal_info = NULL; | |
97cb39bb | 2612 | cleanup_transaction(trans, ret); |
49b25e05 JM |
2613 | |
2614 | return ret; | |
e1489b4f IA |
2615 | |
2616 | lockdep_release: | |
5a9ba670 | 2617 | btrfs_lockdep_release(fs_info, btrfs_trans_num_extwriters); |
e1489b4f IA |
2618 | btrfs_lockdep_release(fs_info, btrfs_trans_num_writers); |
2619 | goto cleanup_transaction; | |
3e738c53 IA |
2620 | |
2621 | lockdep_trans_commit_start_release: | |
77d20c68 | 2622 | btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_PREP); |
3e738c53 IA |
2623 | btrfs_end_transaction(trans); |
2624 | return ret; | |
79154b1b CM |
2625 | } |
2626 | ||
d352ac68 | 2627 | /* |
9d1a2a3a DS |
2628 | * Return 0 if there are no more dead roots at the time of the call, or if
2629 | * dropping the snapshot failed (errors are not propagated to the caller).
2630 | * Return 1 if there are more dead roots to be processed; call again.
2631 | *
2632 | * A return value of 1 means there are certainly more snapshots to delete, but
2633 | * if a new one shows up during processing, we may still return 0. We don't mind,
2634 | * because btrfs_commit_super() will poke the cleaner thread and it will process
2635 | * it a few seconds later.
d352ac68 | 2636 | */ |
33c44184 | 2637 | int btrfs_clean_one_deleted_snapshot(struct btrfs_fs_info *fs_info) |
e9d0b13b | 2638 | { |
33c44184 | 2639 | struct btrfs_root *root; |
9d1a2a3a | 2640 | int ret; |
5d4f98a2 | 2641 | |
a4abeea4 | 2642 | spin_lock(&fs_info->trans_lock); |
9d1a2a3a DS |
2643 | if (list_empty(&fs_info->dead_roots)) { |
2644 | spin_unlock(&fs_info->trans_lock); | |
2645 | return 0; | |
2646 | } | |
2647 | root = list_first_entry(&fs_info->dead_roots, | |
2648 | struct btrfs_root, root_list); | |
cfad392b | 2649 | list_del_init(&root->root_list); |
a4abeea4 | 2650 | spin_unlock(&fs_info->trans_lock); |
e9d0b13b | 2651 | |
e094f480 | 2652 | btrfs_debug(fs_info, "cleaner removing %llu", btrfs_root_id(root)); |
76dda93c | 2653 | |
9d1a2a3a | 2654 | btrfs_kill_all_delayed_nodes(root); |
16cdcec7 | 2655 | |
9d1a2a3a DS |
2656 | if (btrfs_header_backref_rev(root->node) < |
2657 | BTRFS_MIXED_BACKREF_REV) | |
0078a9f9 | 2658 | ret = btrfs_drop_snapshot(root, 0, 0); |
9d1a2a3a | 2659 | else |
0078a9f9 | 2660 | ret = btrfs_drop_snapshot(root, 1, 0); |
32471dc2 | 2661 | |
dc9492c1 | 2662 | btrfs_put_root(root); |
6596a928 | 2663 | return (ret < 0) ? 0 : 1; |
e9d0b13b | 2664 | } |
572d9ab7 | 2665 | |
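/*
 * Illustrative sketch (hypothetical caller, not part of this file): given the
 * return contract above, a caller such as the cleaner thread can simply keep
 * calling the function until it reports that nothing is left to do:
 *
 *	while (btrfs_clean_one_deleted_snapshot(fs_info))
 *		cond_resched();
 */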
fccf0c84 JB |
2666 | /* |
2667 | * We only mark the transaction aborted and then set the file system read-only. | |
2668 | * This will prevent new transactions from starting or trying to join this | |
2669 | * one. | |
2670 | * | |
2671 | * This means that error recovery at the call site is limited to freeing | |
2672 | * any local memory allocations and passing the error code up without | |
2673 | * further cleanup. The transaction should complete as it normally would | |
2674 | * in the call path but will return -EIO. | |
2675 | * | |
2676 | * We'll complete the cleanup in btrfs_end_transaction and | |
2677 | * btrfs_commit_transaction. | |
2678 | */ | |
2679 | void __cold __btrfs_abort_transaction(struct btrfs_trans_handle *trans, | |
2680 | const char *function, | |
ed164802 | 2681 | unsigned int line, int error, bool first_hit) |
fccf0c84 JB |
2682 | { |
2683 | struct btrfs_fs_info *fs_info = trans->fs_info; | |
2684 | ||
ed164802 DS |
2685 | WRITE_ONCE(trans->aborted, error); |
2686 | WRITE_ONCE(trans->transaction->aborted, error); | |
2687 | if (first_hit && error == -ENOSPC) | |
fccf0c84 JB |
2688 | btrfs_dump_space_info_for_trans_abort(fs_info); |
2689 | /* Wake up anybody who may be waiting on this transaction */ | |
2690 | wake_up(&fs_info->transaction_wait); | |
2691 | wake_up(&fs_info->transaction_blocked_wait); | |
ed164802 | 2692 | __btrfs_handle_fs_error(fs_info, function, line, error, NULL); |
fccf0c84 JB |
2693 | } |
2694 | ||
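/*
 * Call sites in btrfs normally go through the btrfs_abort_transaction()
 * wrapper macro, which supplies __func__, __LINE__ and the first_hit flag.
 * A typical (illustrative) pattern, with a hypothetical helper:
 *
 *	ret = do_some_tree_modification(trans);
 *	if (ret) {
 *		btrfs_abort_transaction(trans, ret);
 *		return ret;
 *	}
 */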
956504a3 JB |
2695 | int __init btrfs_transaction_init(void) |
2696 | { | |
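/*
 * KMEM_CACHE() derives the slab cache's name, size and alignment from
 * struct btrfs_trans_handle; SLAB_TEMPORARY hints that the objects are
 * short-lived.
 */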
ef5a05c5 | 2697 | btrfs_trans_handle_cachep = KMEM_CACHE(btrfs_trans_handle, SLAB_TEMPORARY); |
956504a3 JB |
2698 | if (!btrfs_trans_handle_cachep) |
2699 | return -ENOMEM; | |
2700 | return 0; | |
2701 | } | |
2702 | ||
2703 | void __cold btrfs_transaction_exit(void) | |
2704 | { | |
2705 | kmem_cache_destroy(btrfs_trans_handle_cachep); | |
2706 | } |