// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/sched/mm.h>
#include "misc.h"
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"
#include "compression.h"
#include "delalloc-space.h"
#include "qgroup.h"

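/* Slab cache used to allocate struct btrfs_ordered_extent. */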
static struct kmem_cache *btrfs_ordered_extent_cache;

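/*
 * Return the first byte past the end of the ordered extent, saturating at
 * (u64)-1 if file_offset + num_bytes would overflow.
 */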
static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->num_bytes < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->num_bytes;
}

/*
 * Returns NULL if the insertion worked, otherwise returns the node already
 * found in the tree at that offset.
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

55 | ||
d352ac68 CM |
56 | /* |
57 | * look for a given offset in the tree, and if it can't be found return the | |
58 | * first lesser offset | |
59 | */ | |
e6dcd2dc CM |
60 | static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset, |
61 | struct rb_node **prev_ret) | |
dc17ff8f | 62 | { |
d397712b | 63 | struct rb_node *n = root->rb_node; |
dc17ff8f | 64 | struct rb_node *prev = NULL; |
e6dcd2dc CM |
65 | struct rb_node *test; |
66 | struct btrfs_ordered_extent *entry; | |
67 | struct btrfs_ordered_extent *prev_entry = NULL; | |
dc17ff8f | 68 | |
d397712b | 69 | while (n) { |
e6dcd2dc | 70 | entry = rb_entry(n, struct btrfs_ordered_extent, rb_node); |
dc17ff8f CM |
71 | prev = n; |
72 | prev_entry = entry; | |
dc17ff8f | 73 | |
e6dcd2dc | 74 | if (file_offset < entry->file_offset) |
dc17ff8f | 75 | n = n->rb_left; |
e6dcd2dc | 76 | else if (file_offset >= entry_end(entry)) |
dc17ff8f CM |
77 | n = n->rb_right; |
78 | else | |
79 | return n; | |
80 | } | |
81 | if (!prev_ret) | |
82 | return NULL; | |
83 | ||
d397712b | 84 | while (prev && file_offset >= entry_end(prev_entry)) { |
e6dcd2dc CM |
85 | test = rb_next(prev); |
86 | if (!test) | |
87 | break; | |
88 | prev_entry = rb_entry(test, struct btrfs_ordered_extent, | |
89 | rb_node); | |
90 | if (file_offset < entry_end(prev_entry)) | |
91 | break; | |
92 | ||
93 | prev = test; | |
94 | } | |
95 | if (prev) | |
96 | prev_entry = rb_entry(prev, struct btrfs_ordered_extent, | |
97 | rb_node); | |
d397712b | 98 | while (prev && file_offset < entry_end(prev_entry)) { |
e6dcd2dc CM |
99 | test = rb_prev(prev); |
100 | if (!test) | |
101 | break; | |
102 | prev_entry = rb_entry(test, struct btrfs_ordered_extent, | |
103 | rb_node); | |
104 | prev = test; | |
dc17ff8f CM |
105 | } |
106 | *prev_ret = prev; | |
107 | return NULL; | |
108 | } | |
109 | ||
/*
 * helper to check if a given offset is inside a given entry
 */
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
	if (file_offset < entry->file_offset ||
	    entry->file_offset + entry->num_bytes <= file_offset)
		return 0;
	return 1;
}

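/*
 * Check whether the byte range [file_offset, file_offset + len) overlaps the
 * given ordered extent.
 */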
static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->num_bytes <= file_offset)
		return 0;
	return 1;
}

/*
 * look for the first ordered struct that contains this offset, otherwise
 * the first one less than this offset
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
{
	struct rb_root *root = &tree->tree;
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (offset_in_entry(entry, file_offset))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		tree->last = ret;
	return ret;
}

/*
 * Allocate and add a new ordered_extent into the per-inode tree.
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
				      u64 disk_bytenr, u64 num_bytes,
				      u64 disk_num_bytes, int type, int dio,
				      int compress_type)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;
	int ret;

	if (type == BTRFS_ORDERED_NOCOW || type == BTRFS_ORDERED_PREALLOC) {
		/* For nocow write, we can release the qgroup rsv right now */
		ret = btrfs_qgroup_free_data(inode, NULL, file_offset,
					     num_bytes);
		if (ret < 0)
			return ret;
		ret = 0;
	} else {
		/*
		 * The ordered extent has reserved qgroup space, release now
		 * and pass the reserved number for qgroup_record to free.
		 */
		ret = btrfs_qgroup_release_data(inode, file_offset, num_bytes);
		if (ret < 0)
			return ret;
	}
	tree = &BTRFS_I(inode)->ordered_tree;
	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
	if (!entry)
		return -ENOMEM;

	entry->file_offset = file_offset;
	entry->disk_bytenr = disk_bytenr;
	entry->num_bytes = num_bytes;
	entry->disk_num_bytes = disk_num_bytes;
	entry->bytes_left = num_bytes;
	entry->inode = igrab(inode);
	entry->compress_type = compress_type;
	entry->truncated_len = (u64)-1;
	entry->qgroup_rsv = ret;
	if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
		set_bit(type, &entry->flags);

	if (dio) {
		percpu_counter_add_batch(&fs_info->dio_bytes, num_bytes,
					 fs_info->delalloc_batch);
		set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);
	}

	/* one ref for the tree */
	refcount_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->root_extent_list);
	INIT_LIST_HEAD(&entry->work_list);
	init_completion(&entry->completion);

	trace_btrfs_ordered_extent_add(inode, entry);

	spin_lock_irq(&tree->lock);
	node = tree_insert(&tree->tree, file_offset,
			   &entry->rb_node);
	if (node)
		btrfs_panic(fs_info, -EEXIST,
				"inconsistency in ordered tree at offset %llu",
				file_offset);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &root->ordered_extents);
	root->nr_ordered_extents++;
	if (root->nr_ordered_extents == 1) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(!list_empty(&root->ordered_root));
		list_add_tail(&root->ordered_root, &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);

	/*
	 * We don't need the count_max_extents here, we can assume that all of
	 * that work has been done at higher layers, so this is truly the
	 * smallest the extent is going to get.
	 */
	spin_lock(&BTRFS_I(inode)->lock);
	btrfs_mod_outstanding_extents(BTRFS_I(inode), 1);
	spin_unlock(&BTRFS_I(inode)->lock);

	return 0;
}

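/*
 * Thin wrappers around __btrfs_add_ordered_extent() for the regular,
 * direct-IO and compressed write paths respectively.
 */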
int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
			     u64 disk_bytenr, u64 num_bytes, u64 disk_num_bytes,
			     int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
					  num_bytes, disk_num_bytes, type, 0,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
				 u64 disk_bytenr, u64 num_bytes,
				 u64 disk_num_bytes, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
					  num_bytes, disk_num_bytes, type, 1,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
				      u64 disk_bytenr, u64 num_bytes,
				      u64 disk_num_bytes, int type,
				      int compress_type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
					  num_bytes, disk_num_bytes, type, 0,
					  compress_type);
}

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiples.
 */
void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;

	tree = &BTRFS_I(entry->inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	list_add_tail(&sum->list, &entry->list);
	spin_unlock_irq(&tree->lock);
}

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO may span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 *
 * file_offset is updated to one byte past the range that is recorded as
 * complete.  This allows you to walk forward in the file.
 */
int btrfs_dec_test_first_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 *file_offset, u64 io_size, int uptodate)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	int ret;
	unsigned long flags;
	u64 dec_end;
	u64 dec_start;
	u64 to_dec;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	node = tree_search(tree, *file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, *file_offset)) {
		ret = 1;
		goto out;
	}

	dec_start = max(*file_offset, entry->file_offset);
	dec_end = min(*file_offset + io_size,
		      entry->file_offset + entry->num_bytes);
	*file_offset = dec_end;
	if (dec_start > dec_end) {
		btrfs_crit(fs_info, "bad ordering dec_start %llu end %llu",
			   dec_start, dec_end);
	}
	to_dec = dec_end - dec_start;
	if (to_dec > entry->bytes_left) {
		btrfs_crit(fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, to_dec);
	}
	entry->bytes_left -= to_dec;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0) {
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/* test_and_set_bit implies a barrier */
		cond_wake_up_nomb(&entry->wait);
	} else {
		ret = 1;
	}
out:
	if (!ret && cached && entry) {
		*cached = entry;
		refcount_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO should not span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */
int btrfs_dec_test_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	int ret;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

	node = tree_search(tree, file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
	if (!offset_in_entry(entry, file_offset)) {
		ret = 1;
		goto out;
	}

	if (io_size > entry->bytes_left) {
		btrfs_crit(BTRFS_I(inode)->root->fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, io_size);
	}
	entry->bytes_left -= io_size;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0) {
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/* test_and_set_bit implies a barrier */
		cond_wake_up_nomb(&entry->wait);
	} else {
		ret = 1;
	}
out:
	if (!ret && cached && entry) {
		*cached = entry;
		refcount_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}

/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(entry->inode, entry);

	if (refcount_dec_and_test(&entry->refs)) {
		ASSERT(list_empty(&entry->root_extent_list));
		ASSERT(RB_EMPTY_NODE(&entry->rb_node));
		if (entry->inode)
			btrfs_add_delayed_iput(entry->inode);
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kvfree(sum);
		}
		kmem_cache_free(btrfs_ordered_extent_cache, entry);
	}
}

/*
 * remove an ordered extent from the tree.  No references are dropped
 * and waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct inode *inode,
				 struct btrfs_ordered_extent *entry)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
	struct btrfs_root *root = btrfs_inode->root;
	struct rb_node *node;

	/* This is paired with btrfs_add_ordered_extent. */
	spin_lock(&btrfs_inode->lock);
	btrfs_mod_outstanding_extents(btrfs_inode, -1);
	spin_unlock(&btrfs_inode->lock);
	if (root != fs_info->tree_root)
		btrfs_delalloc_release_metadata(btrfs_inode, entry->num_bytes,
						false);

	if (test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		percpu_counter_add_batch(&fs_info->dio_bytes, -entry->num_bytes,
					 fs_info->delalloc_batch);

	tree = &btrfs_inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = &entry->rb_node;
	rb_erase(node, &tree->tree);
	RB_CLEAR_NODE(node);
	if (tree->last == node)
		tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);
	root->nr_ordered_extents--;

	trace_btrfs_ordered_extent_remove(inode, entry);

	if (!root->nr_ordered_extents) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(list_empty(&root->ordered_root));
		list_del_init(&root->ordered_root);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
	wake_up(&entry->wait);
}

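/*
 * Work callback queued by btrfs_wait_ordered_extents(): wait for one ordered
 * extent to complete and then signal its completion.
 */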
static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered;

	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
	btrfs_start_ordered_extent(ordered->inode, ordered, 1);
	complete(&ordered->completion);
}

/*
 * wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
 */
u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
			       const u64 range_start, const u64 range_len)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	LIST_HEAD(splice);
	LIST_HEAD(skipped);
	LIST_HEAD(works);
	struct btrfs_ordered_extent *ordered, *next;
	u64 count = 0;
	const u64 range_end = range_start + range_len;

	mutex_lock(&root->ordered_extent_mutex);
	spin_lock(&root->ordered_extent_lock);
	list_splice_init(&root->ordered_extents, &splice);
	while (!list_empty(&splice) && nr) {
		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
					   root_extent_list);

		if (range_end <= ordered->disk_bytenr ||
		    ordered->disk_bytenr + ordered->disk_num_bytes <= range_start) {
			list_move_tail(&ordered->root_extent_list, &skipped);
			cond_resched_lock(&root->ordered_extent_lock);
			continue;
		}

		list_move_tail(&ordered->root_extent_list,
			       &root->ordered_extents);
		refcount_inc(&ordered->refs);
		spin_unlock(&root->ordered_extent_lock);

		btrfs_init_work(&ordered->flush_work,
				btrfs_run_ordered_extent_work, NULL, NULL);
		list_add_tail(&ordered->work_list, &works);
		btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);

		cond_resched();
		spin_lock(&root->ordered_extent_lock);
		if (nr != U64_MAX)
			nr--;
		count++;
	}
	list_splice_tail(&skipped, &root->ordered_extents);
	list_splice_tail(&splice, &root->ordered_extents);
	spin_unlock(&root->ordered_extent_lock);

	list_for_each_entry_safe(ordered, next, &works, work_list) {
		list_del_init(&ordered->work_list);
		wait_for_completion(&ordered->completion);
		btrfs_put_ordered_extent(ordered);
		cond_resched();
	}
	mutex_unlock(&root->ordered_extent_mutex);

	return count;
}

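/*
 * Wait for ordered extents on all roots of the filesystem, limiting the total
 * number waited on to @nr and restricting them to the given disk byte range.
 */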
void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
			      const u64 range_start, const u64 range_len)
{
	struct btrfs_root *root;
	struct list_head splice;
	u64 done;

	INIT_LIST_HEAD(&splice);

	mutex_lock(&fs_info->ordered_operations_mutex);
	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice) && nr) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		root = btrfs_grab_root(root);
		BUG_ON(!root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);

		done = btrfs_wait_ordered_extents(root, nr,
						  range_start, range_len);
		btrfs_put_root(root);

		spin_lock(&fs_info->ordered_root_lock);
		if (nr != U64_MAX) {
			nr -= done;
		}
	}
	list_splice_tail(&splice, &fs_info->ordered_roots);
	spin_unlock(&fs_info->ordered_root_lock);
	mutex_unlock(&fs_info->ordered_operations_mutex);
}

/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry,
				int wait)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->num_bytes - 1;

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * pages in the range can be dirty, clean or writeback.  We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for the flusher thread to find them
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->i_mapping, start, end);
	if (wait) {
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
	}
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	int ret = 0;
	int ret_wb = 0;
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = INT_LIMIT(loff_t);
	} else {
		orig_end = start + len - 1;
		if (orig_end > INT_LIMIT(loff_t))
			orig_end = INT_LIMIT(loff_t);
	}

	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
	ret = btrfs_fdatawrite_range(inode, start, orig_end);
	if (ret)
		return ret;

	/*
	 * If we have a writeback error don't return immediately.  Wait first
	 * for any ordered extents that haven't completed yet.  This is to make
	 * sure no one can dirty the same page ranges and call writepages()
	 * before the ordered extents complete - to avoid failures (-EEXIST)
	 * when adding the new ordered extents to the ordered tree.
	 */
	ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end);

	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->num_bytes <= start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(inode, ordered, 1);
		end = ordered->file_offset;
		/*
		 * If the ordered extent had an error save the error but don't
		 * exit without waiting first for all other ordered extents in
		 * the range to complete.
		 */
		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
			ret = -EIO;
		btrfs_put_ordered_extent(ordered);
		if (end == 0 || end == start)
			break;
		end--;
	}
	return ret_wb ? ret_wb : ret;
}

/*
 * find an ordered extent corresponding to file_offset.  return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset))
		entry = NULL;
	if (entry)
		refcount_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/* Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
		struct btrfs_inode *inode, u64 file_offset, u64 len)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		node = tree_search(tree, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry)
		refcount_inc(&entry->refs);
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * lookup and return any extent before 'file_offset'.  NULL is returned
 * if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	refcount_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * search the ordered extents for one corresponding to 'offset' and
 * try to find a checksum.  This is used because we allow pages to
 * be reclaimed before their checksum is actually put into the btree
 */
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
			   u8 *sum, int len)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ordered_sum *ordered_sum;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	unsigned long num_sectors;
	unsigned long i;
	u32 sectorsize = btrfs_inode_sectorsize(inode);
	const u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	int index = 0;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	if (!ordered)
		return 0;

	spin_lock_irq(&tree->lock);
	list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
		if (disk_bytenr >= ordered_sum->bytenr &&
		    disk_bytenr < ordered_sum->bytenr + ordered_sum->len) {
			i = (disk_bytenr - ordered_sum->bytenr) >>
			    inode->i_sb->s_blocksize_bits;
			num_sectors = ordered_sum->len >>
				      inode->i_sb->s_blocksize_bits;
			num_sectors = min_t(int, len - index, num_sectors - i);
			memcpy(sum + index, ordered_sum->sums + i * csum_size,
			       num_sectors * csum_size);

			index += (int)num_sectors * csum_size;
			if (index == len)
				goto out;
			disk_bytenr += num_sectors * sectorsize;
		}
	}
out:
	spin_unlock_irq(&tree->lock);
	btrfs_put_ordered_extent(ordered);
	return index;
}

/*
 * btrfs_lock_and_flush_ordered_range - Lock the passed range and ensure all
 * pending ordered extents in it are run to completion.
 *
 * @inode:        Inode whose ordered tree is to be searched
 * @start:        Beginning of range to flush
 * @end:          Last byte of range to lock
 * @cached_state: If passed, will return the extent state responsible for the
 * locked range. It's the caller's responsibility to free the cached state.
 *
 * This function always returns with the given range locked, ensuring that
 * after it's called no ordered extent can be pending.
 */
void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
					u64 end,
					struct extent_state **cached_state)
{
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cache = NULL;
	struct extent_state **cachedp = &cache;

	if (cached_state)
		cachedp = cached_state;

	while (1) {
		lock_extent_bits(&inode->io_tree, start, end, cachedp);
		ordered = btrfs_lookup_ordered_range(inode, start,
						     end - start + 1);
		if (!ordered) {
			/*
			 * If no external cached_state has been passed then
			 * decrement the extra ref taken for cachedp since we
			 * aren't exposing it outside of this function
			 */
			if (!cached_state)
				refcount_dec(&cache->refs);
			break;
		}
		unlock_extent_cached(&inode->io_tree, start, end, cachedp);
		btrfs_start_ordered_extent(&inode->vfs_inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
	}
}

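/* Create the slab cache used for ordered extents; torn down in ordered_data_exit(). */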
int __init ordered_data_init(void)
{
	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
				     sizeof(struct btrfs_ordered_extent), 0,
				     SLAB_MEM_SPREAD,
				     NULL);
	if (!btrfs_ordered_extent_cache)
		return -ENOMEM;

	return 0;
}

void __cold ordered_data_exit(void)
{
	kmem_cache_destroy(btrfs_ordered_extent_cache);
}