// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/sched/mm.h>
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"
#include "compression.h"
#include "delalloc-space.h"

static struct kmem_cache *btrfs_ordered_extent_cache;

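/*
 * Return the end offset (file_offset + len) of an ordered extent, saturating
 * to (u64)-1 if the addition overflows.
 */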
static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->len < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->len;
}

/* returns NULL if the insertion worked, or it returns the node it did find
 * in the tree
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

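/*
 * Helper for the insertion path: panic when a new ordered extent collides
 * with an existing one at the same offset, which means the tree is corrupted.
 */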
static void ordered_data_tree_panic(struct inode *inode, int errno,
				    u64 offset)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	btrfs_panic(fs_info, errno,
		    "Inconsistency in ordered tree at offset %llu", offset);
}

/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}

/*
 * helper to check if a given offset is inside a given entry
 */
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
	if (file_offset < entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}

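/*
 * helper to check if [file_offset, file_offset + len) overlaps any part of
 * the given entry
 */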
static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}

136 | /* |
137 | * look find the first ordered struct that has this offset, otherwise | |
138 | * the first one less than this offset | |
139 | */ | |
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
{
	struct rb_root *root = &tree->tree;
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (offset_in_entry(entry, file_offset))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		tree->last = ret;
	return ret;
}

/* allocate and add a new ordered_extent into the per-inode tree.
 * file_offset is the logical offset in the file
 *
 * start is the disk block number of an extent already reserved in the
 * extent allocation tree
 *
 * len is the length of the extent
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int dio, int compress_type)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;

	tree = &BTRFS_I(inode)->ordered_tree;
	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
	if (!entry)
		return -ENOMEM;

	entry->file_offset = file_offset;
	entry->start = start;
	entry->len = len;
	entry->disk_len = disk_len;
	entry->bytes_left = len;
	entry->inode = igrab(inode);
	entry->compress_type = compress_type;
	entry->truncated_len = (u64)-1;
	if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
		set_bit(type, &entry->flags);

	if (dio) {
		percpu_counter_add_batch(&fs_info->dio_bytes, len,
					 fs_info->delalloc_batch);
		set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);
	}

	/* one ref for the tree */
	refcount_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->root_extent_list);
	INIT_LIST_HEAD(&entry->work_list);
	init_completion(&entry->completion);
	INIT_LIST_HEAD(&entry->log_list);
	INIT_LIST_HEAD(&entry->trans_list);

	trace_btrfs_ordered_extent_add(inode, entry);

	spin_lock_irq(&tree->lock);
	node = tree_insert(&tree->tree, file_offset,
			   &entry->rb_node);
	if (node)
		ordered_data_tree_panic(inode, -EEXIST, file_offset);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &root->ordered_extents);
	root->nr_ordered_extents++;
	if (root->nr_ordered_extents == 1) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(!list_empty(&root->ordered_root));
		list_add_tail(&root->ordered_root, &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);

	/*
	 * We don't need the count_max_extents here, we can assume that all of
	 * that work has been done at higher layers, so this is truly the
	 * smallest the extent is going to get.
	 */
	spin_lock(&BTRFS_I(inode)->lock);
	btrfs_mod_outstanding_extents(BTRFS_I(inode), 1);
	spin_unlock(&BTRFS_I(inode)->lock);

	return 0;
}

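/*
 * Thin wrappers around __btrfs_add_ordered_extent() for the regular, direct
 * IO and compressed write paths respectively.
 */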
int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
			     u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
				 u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 1,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int compress_type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  compress_type);
}

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiples.
 */
void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;

	tree = &BTRFS_I(entry->inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	list_add_tail(&sum->list, &entry->list);
	spin_unlock_irq(&tree->lock);
}

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO may span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 *
 * file_offset is updated to one byte past the range that is recorded as
 * complete.  This allows you to walk forward in the file.
 */
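/*
 * Illustrative usage sketch (hypothetical caller, not lifted from real endio
 * code): walk a completed byte range that may cover several ordered extents,
 * finishing each extent that becomes fully written.  finish_one() is a
 * made-up placeholder:
 *
 *	u64 cur = start, prev;
 *	struct btrfs_ordered_extent *ordered;
 *
 *	while (cur < start + len) {
 *		ordered = NULL;
 *		prev = cur;
 *		if (btrfs_dec_test_first_ordered_pending(inode, &ordered, &cur,
 *							 start + len - cur, 1))
 *			finish_one(ordered);
 *		if (cur == prev)
 *			break;
 *	}
 */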
int btrfs_dec_test_first_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 *file_offset, u64 io_size, int uptodate)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	int ret;
	unsigned long flags;
	u64 dec_end;
	u64 dec_start;
	u64 to_dec;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	node = tree_search(tree, *file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, *file_offset)) {
		ret = 1;
		goto out;
	}

	dec_start = max(*file_offset, entry->file_offset);
	dec_end = min(*file_offset + io_size, entry->file_offset +
		      entry->len);
	*file_offset = dec_end;
	if (dec_start > dec_end) {
		btrfs_crit(fs_info, "bad ordering dec_start %llu end %llu",
			   dec_start, dec_end);
	}
	to_dec = dec_end - dec_start;
	if (to_dec > entry->bytes_left) {
		btrfs_crit(fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, to_dec);
	}
	entry->bytes_left -= to_dec;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0) {
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/* test_and_set_bit implies a barrier */
		cond_wake_up_nomb(&entry->wait);
	} else {
		ret = 1;
	}
out:
	if (!ret && cached && entry) {
		*cached = entry;
		refcount_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO should not span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */
int btrfs_dec_test_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	int ret;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

	node = tree_search(tree, file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
	if (!offset_in_entry(entry, file_offset)) {
		ret = 1;
		goto out;
	}

	if (io_size > entry->bytes_left) {
		btrfs_crit(BTRFS_I(inode)->root->fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, io_size);
	}
	entry->bytes_left -= io_size;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0) {
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/* test_and_set_bit implies a barrier */
		cond_wake_up_nomb(&entry->wait);
	} else {
		ret = 1;
	}
out:
	if (!ret && cached && entry) {
		*cached = entry;
		refcount_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}

/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(entry->inode, entry);

	if (refcount_dec_and_test(&entry->refs)) {
		ASSERT(list_empty(&entry->log_list));
		ASSERT(list_empty(&entry->trans_list));
		ASSERT(list_empty(&entry->root_extent_list));
		ASSERT(RB_EMPTY_NODE(&entry->rb_node));
		if (entry->inode)
			btrfs_add_delayed_iput(entry->inode);
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kvfree(sum);
		}
		kmem_cache_free(btrfs_ordered_extent_cache, entry);
	}
}

/*
 * remove an ordered extent from the tree.  No references are dropped,
 * but any waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct inode *inode,
				 struct btrfs_ordered_extent *entry)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
	struct btrfs_root *root = btrfs_inode->root;
	struct rb_node *node;

	/* This is paired with btrfs_add_ordered_extent. */
	spin_lock(&btrfs_inode->lock);
	btrfs_mod_outstanding_extents(btrfs_inode, -1);
	spin_unlock(&btrfs_inode->lock);
	if (root != fs_info->tree_root)
		btrfs_delalloc_release_metadata(btrfs_inode, entry->len, false);

	if (test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		percpu_counter_add_batch(&fs_info->dio_bytes, -entry->len,
					 fs_info->delalloc_batch);

	tree = &btrfs_inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = &entry->rb_node;
	rb_erase(node, &tree->tree);
	RB_CLEAR_NODE(node);
	if (tree->last == node)
		tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);
	root->nr_ordered_extents--;

	trace_btrfs_ordered_extent_remove(inode, entry);

	if (!root->nr_ordered_extents) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(list_empty(&root->ordered_root));
		list_del_init(&root->ordered_root);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
	wake_up(&entry->wait);
}

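/*
 * Worker callback queued by btrfs_wait_ordered_extents(): start the ordered
 * extent and wait for it to finish, then signal completion to the waiter.
 */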
static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered;

	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
	btrfs_start_ordered_extent(ordered->inode, ordered, 1);
	complete(&ordered->completion);
}

/*
 * wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
 */
u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
			       const u64 range_start, const u64 range_len)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	LIST_HEAD(splice);
	LIST_HEAD(skipped);
	LIST_HEAD(works);
	struct btrfs_ordered_extent *ordered, *next;
	u64 count = 0;
	const u64 range_end = range_start + range_len;

	mutex_lock(&root->ordered_extent_mutex);
	spin_lock(&root->ordered_extent_lock);
	list_splice_init(&root->ordered_extents, &splice);
	while (!list_empty(&splice) && nr) {
		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
					   root_extent_list);

		if (range_end <= ordered->start ||
		    ordered->start + ordered->disk_len <= range_start) {
			list_move_tail(&ordered->root_extent_list, &skipped);
			cond_resched_lock(&root->ordered_extent_lock);
			continue;
		}

		list_move_tail(&ordered->root_extent_list,
			       &root->ordered_extents);
		refcount_inc(&ordered->refs);
		spin_unlock(&root->ordered_extent_lock);

		btrfs_init_work(&ordered->flush_work,
				btrfs_flush_delalloc_helper,
				btrfs_run_ordered_extent_work, NULL, NULL);
		list_add_tail(&ordered->work_list, &works);
		btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);

		cond_resched();
		spin_lock(&root->ordered_extent_lock);
		if (nr != U64_MAX)
			nr--;
		count++;
	}
	list_splice_tail(&skipped, &root->ordered_extents);
	list_splice_tail(&splice, &root->ordered_extents);
	spin_unlock(&root->ordered_extent_lock);

	list_for_each_entry_safe(ordered, next, &works, work_list) {
		list_del_init(&ordered->work_list);
		wait_for_completion(&ordered->completion);
		btrfs_put_ordered_extent(ordered);
		cond_resched();
	}
	mutex_unlock(&root->ordered_extent_mutex);

	return count;
}

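/*
 * Wait for ordered extents on every root that currently has any, limited to
 * at most 'nr' extents overall and to those overlapping the given range.
 */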
u64 btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
			     const u64 range_start, const u64 range_len)
{
	struct btrfs_root *root;
	struct list_head splice;
	u64 total_done = 0;
	u64 done;

	INIT_LIST_HEAD(&splice);

	mutex_lock(&fs_info->ordered_operations_mutex);
	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice) && nr) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		root = btrfs_grab_fs_root(root);
		BUG_ON(!root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);

		done = btrfs_wait_ordered_extents(root, nr,
						  range_start, range_len);
		btrfs_put_fs_root(root);
		total_done += done;

		spin_lock(&fs_info->ordered_root_lock);
		if (nr != U64_MAX) {
			nr -= done;
		}
	}
	list_splice_tail(&splice, &fs_info->ordered_roots);
	spin_unlock(&fs_info->ordered_root_lock);
	mutex_unlock(&fs_info->ordered_operations_mutex);

	return total_done;
}

/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry,
				int wait)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->len - 1;

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * pages in the range can be dirty, clean or writeback.  We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for the flusher thread to find them
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->i_mapping, start, end);
	if (wait) {
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
	}
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	int ret = 0;
	int ret_wb = 0;
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = INT_LIMIT(loff_t);
	} else {
		orig_end = start + len - 1;
		if (orig_end > INT_LIMIT(loff_t))
			orig_end = INT_LIMIT(loff_t);
	}

	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
	ret = btrfs_fdatawrite_range(inode, start, orig_end);
	if (ret)
		return ret;

	/*
	 * If we have a writeback error don't return immediately. Wait first
	 * for any ordered extents that haven't completed yet. This is to make
	 * sure no one can dirty the same page ranges and call writepages()
	 * before the ordered extents complete - to avoid failures (-EEXIST)
	 * when adding the new ordered extents to the ordered tree.
	 */
	ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end);

	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->len <= start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(inode, ordered, 1);
		end = ordered->file_offset;
		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
			ret = -EIO;
		btrfs_put_ordered_extent(ordered);
		if (ret || end == 0 || end == start)
			break;
		end--;
	}
	return ret_wb ? ret_wb : ret;
}

/*
 * find an ordered extent corresponding to file_offset.  return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset))
		entry = NULL;
	if (entry)
		refcount_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/* Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
		struct btrfs_inode *inode, u64 file_offset, u64 len)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		node = tree_search(tree, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry)
		refcount_inc(&entry->refs);
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * lookup and return any extent before 'file_offset'.  NULL is returned
 * if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	refcount_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * After an extent is done, call this to conditionally update the on disk
 * i_size.  i_size is updated to cover any fully written part of the file.
 */
int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
				struct btrfs_ordered_extent *ordered)
{
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	u64 disk_i_size;
	u64 new_i_size;
	u64 i_size = i_size_read(inode);
	struct rb_node *node;
	struct rb_node *prev = NULL;
	struct btrfs_ordered_extent *test;
	int ret = 1;
	u64 orig_offset = offset;

	spin_lock_irq(&tree->lock);
	if (ordered) {
		offset = entry_end(ordered);
		if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags))
			offset = min(offset,
				     ordered->file_offset +
				     ordered->truncated_len);
	} else {
		offset = ALIGN(offset, btrfs_inode_sectorsize(inode));
	}
	disk_i_size = BTRFS_I(inode)->disk_i_size;

	/*
	 * truncate file.
	 * If ordered is not NULL, then this is called from endio and
	 * disk_i_size will be updated by either truncate itself or any
	 * in-flight IOs which are inside the disk_i_size.
	 *
	 * Because btrfs_setsize() may set i_size with disk_i_size if truncate
	 * fails somehow, we need to make sure we have a precise disk_i_size by
	 * updating it as usual.
	 *
	 */
	if (!ordered && disk_i_size > i_size) {
		BTRFS_I(inode)->disk_i_size = orig_offset;
		ret = 0;
		goto out;
	}

	/*
	 * if the disk i_size is already at the inode->i_size, or
	 * this ordered extent is inside the disk i_size, we're done
	 */
	if (disk_i_size == i_size)
		goto out;

	/*
	 * We still need to update disk_i_size if outstanding_isize is greater
	 * than disk_i_size.
	 */
	if (offset <= disk_i_size &&
	    (!ordered || ordered->outstanding_isize <= disk_i_size))
		goto out;

	/*
	 * walk backward from this ordered extent to disk_i_size.
	 * if we find an ordered extent then we can't update disk i_size
	 * yet
	 */
	if (ordered) {
		node = rb_prev(&ordered->rb_node);
	} else {
		prev = tree_search(tree, offset);
		/*
		 * we insert file extents without involving the ordered struct,
		 * so there should be no ordered struct covering this offset
		 */
		if (prev) {
			test = rb_entry(prev, struct btrfs_ordered_extent,
					rb_node);
			BUG_ON(offset_in_entry(test, offset));
		}
		node = prev;
	}
	for (; node; node = rb_prev(node)) {
		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);

		/* We treat this entry as if it doesn't exist */
		if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags))
			continue;

		if (entry_end(test) <= disk_i_size)
			break;
		if (test->file_offset >= i_size)
			break;

		/*
		 * We don't update disk_i_size now, so record this pending
		 * i_size, otherwise we would lose track of the real i_size.
		 */
		if (test->outstanding_isize < offset)
			test->outstanding_isize = offset;
		if (ordered &&
		    ordered->outstanding_isize > test->outstanding_isize)
			test->outstanding_isize = ordered->outstanding_isize;
		goto out;
	}
	new_i_size = min_t(u64, offset, i_size);

	/*
	 * Some ordered extents may have completed before the current one, and
	 * we hold the real i_size in ->outstanding_isize.
	 */
	if (ordered && ordered->outstanding_isize > new_i_size)
		new_i_size = min_t(u64, ordered->outstanding_isize, i_size);
	BTRFS_I(inode)->disk_i_size = new_i_size;
	ret = 0;
out:
	/*
	 * We need to do this because we can't remove ordered extents until
	 * after the i_disk_size has been updated and then the inode has been
	 * updated to reflect the change, so we need to tell anybody who finds
	 * this ordered extent that we've already done all the real work, we
	 * just haven't completed all the other work.
	 */
	if (ordered)
		set_bit(BTRFS_ORDERED_UPDATED_ISIZE, &ordered->flags);
	spin_unlock_irq(&tree->lock);
	return ret;
}

/*
 * search the ordered extents for one corresponding to 'offset' and
 * try to find a checksum.  This is used because we allow pages to
 * be reclaimed before their checksum is actually put into the btree
 */
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
			   u8 *sum, int len)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ordered_sum *ordered_sum;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	unsigned long num_sectors;
	unsigned long i;
	u32 sectorsize = btrfs_inode_sectorsize(inode);
	const u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	int index = 0;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	if (!ordered)
		return 0;

	spin_lock_irq(&tree->lock);
	list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
		if (disk_bytenr >= ordered_sum->bytenr &&
		    disk_bytenr < ordered_sum->bytenr + ordered_sum->len) {
			i = (disk_bytenr - ordered_sum->bytenr) >>
			    inode->i_sb->s_blocksize_bits;
			num_sectors = ordered_sum->len >>
				      inode->i_sb->s_blocksize_bits;
			num_sectors = min_t(int, len - index, num_sectors - i);
			memcpy(sum + index, ordered_sum->sums + i * csum_size,
			       num_sectors * csum_size);

			index += (int)num_sectors * csum_size;
			if (index == len)
				goto out;
			disk_bytenr += num_sectors * sectorsize;
		}
	}
out:
	spin_unlock_irq(&tree->lock);
	btrfs_put_ordered_extent(ordered);
	return index;
}

/*
 * btrfs_lock_and_flush_ordered_range - Lock the passed range and ensure all
 * pending ordered extents in it are run to completion.
 *
 * @tree:         IO tree used for locking out other users of the range
 * @inode:        Inode whose ordered tree is to be searched
 * @start:        Beginning of range to flush
 * @end:          Last byte of range to lock
 * @cached_state: If passed, will return the extent state responsible for the
 * locked range. It's the caller's responsibility to free the cached state.
 *
 * This function always returns with the given range locked, ensuring after it's
 * called no ordered extent can be pending.
 */
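/*
 * Illustrative usage sketch (hypothetical caller; only the btrfs and extent
 * io tree calls below are real):
 *
 *	struct extent_state *cached = NULL;
 *
 *	btrfs_lock_and_flush_ordered_range(&inode->io_tree, inode, start, end,
 *					   &cached);
 *	... operate on [start, end] with no ordered extent pending ...
 *	unlock_extent_cached(&inode->io_tree, start, end, &cached);
 */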
void btrfs_lock_and_flush_ordered_range(struct extent_io_tree *tree,
					struct btrfs_inode *inode, u64 start,
					u64 end,
					struct extent_state **cached_state)
{
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cache = NULL;
	struct extent_state **cachedp = &cache;

	if (cached_state)
		cachedp = cached_state;

	while (1) {
		lock_extent_bits(tree, start, end, cachedp);
		ordered = btrfs_lookup_ordered_range(inode, start,
						     end - start + 1);
		if (!ordered) {
			/*
			 * If no external cached_state has been passed then
			 * decrement the extra ref taken for cachedp since we
			 * aren't exposing it outside of this function
			 */
			if (!cached_state)
				refcount_dec(&cache->refs);
			break;
		}
		unlock_extent_cached(tree, start, end, cachedp);
		btrfs_start_ordered_extent(&inode->vfs_inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
	}
}

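/*
 * Create the slab cache for ordered extents; called once when the btrfs
 * module is initialized.
 */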
int __init ordered_data_init(void)
{
	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
				     sizeof(struct btrfs_ordered_extent), 0,
				     SLAB_MEM_SPREAD,
				     NULL);
	if (!btrfs_ordered_extent_cache)
		return -ENOMEM;

	return 0;
}

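/* Tear down the ordered extent slab cache on module exit. */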
void __cold ordered_data_exit(void)
{
	kmem_cache_destroy(btrfs_ordered_extent_cache);
}