// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"
#include "compression.h"

static struct kmem_cache *btrfs_ordered_extent_cache;

static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->len < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->len;
}

/* returns NULL if the insertion worked, or it returns the node it did find
 * in the tree
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

static void ordered_data_tree_panic(struct inode *inode, int errno,
				    u64 offset)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);

	btrfs_panic(fs_info, errno,
		    "Inconsistency in ordered tree at offset %llu", offset);
}

/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}

/*
 * helper to check if a given offset is inside a given entry
 */
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
	if (file_offset < entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}

static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}

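/*
 * A small worked example of the interval checks above (illustrative only,
 * not part of the original file): an ordered extent with file_offset 4096
 * and len 4096 covers the half-open byte range [4096, 8192).
 *
 *	offset_in_entry(entry, 4096)      -> 1  (first byte of the extent)
 *	offset_in_entry(entry, 8191)      -> 1  (last byte of the extent)
 *	offset_in_entry(entry, 8192)      -> 0  (one past the end)
 *	range_overlaps(entry, 0, 4096)    -> 0  ([0, 4096) ends where it starts)
 *	range_overlaps(entry, 8000, 512)  -> 1  ([8000, 8512) straddles the end)
 */
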
/*
 * look for the first ordered struct that has this offset, otherwise
 * the first one less than this offset
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
{
	struct rb_root *root = &tree->tree;
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (offset_in_entry(entry, file_offset))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		tree->last = ret;
	return ret;
}

/* allocate and add a new ordered_extent into the per-inode tree.
 * file_offset is the logical offset in the file
 *
 * start is the disk block number of an extent already reserved in the
 * extent allocation tree
 *
 * len is the length of the extent
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int dio, int compress_type)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;

	tree = &BTRFS_I(inode)->ordered_tree;
	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
	if (!entry)
		return -ENOMEM;

	entry->file_offset = file_offset;
	entry->start = start;
	entry->len = len;
	entry->disk_len = disk_len;
	entry->bytes_left = len;
	entry->inode = igrab(inode);
	entry->compress_type = compress_type;
	entry->truncated_len = (u64)-1;
	if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
		set_bit(type, &entry->flags);

	if (dio)
		set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);

	/* one ref for the tree */
	refcount_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->root_extent_list);
	INIT_LIST_HEAD(&entry->work_list);
	init_completion(&entry->completion);
	INIT_LIST_HEAD(&entry->log_list);
	INIT_LIST_HEAD(&entry->trans_list);

	trace_btrfs_ordered_extent_add(inode, entry);

	spin_lock_irq(&tree->lock);
	node = tree_insert(&tree->tree, file_offset,
			   &entry->rb_node);
	if (node)
		ordered_data_tree_panic(inode, -EEXIST, file_offset);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &root->ordered_extents);
	root->nr_ordered_extents++;
	if (root->nr_ordered_extents == 1) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(!list_empty(&root->ordered_root));
		list_add_tail(&root->ordered_root, &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);

	/*
	 * We don't need the count_max_extents here, we can assume that all of
	 * that work has been done at higher layers, so this is truly the
	 * smallest the extent is going to get.
	 */
	spin_lock(&BTRFS_I(inode)->lock);
	btrfs_mod_outstanding_extents(BTRFS_I(inode), 1);
	spin_unlock(&BTRFS_I(inode)->lock);

	return 0;
}

int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
			     u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
				 u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 1,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int compress_type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  compress_type);
}

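/*
 * Illustrative usage sketch (not part of the original file): a buffered
 * write-out path typically records each allocated extent as an ordered
 * extent before submitting the data bios, roughly as below. The helper name
 * and values are hypothetical; the type argument is assumed to be a "plain"
 * type that __btrfs_add_ordered_extent() will not set as a flag bit
 * (historically 0 for ordinary buffered writes).
 *
 *	int example_record_allocation(struct inode *inode, u64 file_offset,
 *				      u64 disk_start, u64 num_bytes)
 *	{
 *		return btrfs_add_ordered_extent(inode, file_offset, disk_start,
 *						num_bytes, num_bytes, 0);
 *	}
 *
 * The matching completion side is btrfs_dec_test_ordered_pending(), called
 * from write endio once the bios covering the range have finished.
 */
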
/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiples.
 */
void btrfs_add_ordered_sum(struct inode *inode,
			   struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	list_add_tail(&sum->list, &entry->list);
	spin_unlock_irq(&tree->lock);
}

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO may span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 *
 * file_offset is updated to one byte past the range that is recorded as
 * complete.  This allows you to walk forward in the file.
 */
int btrfs_dec_test_first_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 *file_offset, u64 io_size, int uptodate)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	int ret;
	unsigned long flags;
	u64 dec_end;
	u64 dec_start;
	u64 to_dec;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	node = tree_search(tree, *file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, *file_offset)) {
		ret = 1;
		goto out;
	}

	dec_start = max(*file_offset, entry->file_offset);
	dec_end = min(*file_offset + io_size, entry->file_offset +
		      entry->len);
	*file_offset = dec_end;
	if (dec_start > dec_end) {
		btrfs_crit(fs_info, "bad ordering dec_start %llu end %llu",
			   dec_start, dec_end);
	}
	to_dec = dec_end - dec_start;
	if (to_dec > entry->bytes_left) {
		btrfs_crit(fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, to_dec);
	}
	entry->bytes_left -= to_dec;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0) {
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/*
		 * Implicit memory barrier after test_and_set_bit
		 */
		if (waitqueue_active(&entry->wait))
			wake_up(&entry->wait);
	} else {
		ret = 1;
	}
out:
	if (!ret && cached && entry) {
		*cached = entry;
		refcount_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}

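/*
 * Illustrative sketch (not part of the original file): because the function
 * above advances *file_offset past the accounted range, an endio path whose
 * IO may span several ordered extents can loop until the whole range has
 * been charged. The helper is hypothetical and assumes every byte of the
 * range is covered by ordered extents (otherwise the loop would not make
 * forward progress); the per-extent completion step is a placeholder.
 *
 *	u64 offset = start;
 *
 *	while (offset < start + len) {
 *		struct btrfs_ordered_extent *cached = NULL;
 *		bool done;
 *
 *		done = btrfs_dec_test_first_ordered_pending(inode, &cached,
 *				&offset, start + len - offset, uptodate);
 *		if (done)
 *			example_finish_ordered(cached);	(hypothetical: finish and drop the ref)
 *	}
 */
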
/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO should not span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */
int btrfs_dec_test_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	int ret;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

	node = tree_search(tree, file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
	if (!offset_in_entry(entry, file_offset)) {
		ret = 1;
		goto out;
	}

	if (io_size > entry->bytes_left) {
		btrfs_crit(BTRFS_I(inode)->root->fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, io_size);
	}
	entry->bytes_left -= io_size;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0) {
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/*
		 * Implicit memory barrier after test_and_set_bit
		 */
		if (waitqueue_active(&entry->wait))
			wake_up(&entry->wait);
	} else {
		ret = 1;
	}
out:
	if (!ret && cached && entry) {
		*cached = entry;
		refcount_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}

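/*
 * Illustrative sketch (not part of the original file): the buffered write
 * endio path accounts each completed page range against its single ordered
 * extent and only queues the heavier completion work (checksum and file
 * extent item insertion) once the whole extent has been written. The queue
 * helper name below is a hypothetical placeholder for that caller.
 *
 *	struct btrfs_ordered_extent *ordered = NULL;
 *
 *	if (!btrfs_dec_test_ordered_pending(inode, &ordered, start,
 *					    end - start + 1, uptodate))
 *		return;		(more IO still outstanding on this extent)
 *
 *	example_queue_finish_ordered(ordered);	(hypothetical; must drop the ref)
 */
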
/* Needs to either be called under a log transaction or the log_mutex */
void btrfs_get_logged_extents(struct btrfs_inode *inode,
			      struct list_head *logged_list,
			      const loff_t start,
			      const loff_t end)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_ordered_extent *ordered;
	struct rb_node *n;
	struct rb_node *prev;

	tree = &inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	n = __tree_search(&tree->tree, end, &prev);
	if (!n)
		n = prev;
	for (; n; n = rb_prev(n)) {
		ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		if (ordered->file_offset > end)
			continue;
		if (entry_end(ordered) <= start)
			break;
		if (test_and_set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
			continue;
		list_add(&ordered->log_list, logged_list);
		refcount_inc(&ordered->refs);
	}
	spin_unlock_irq(&tree->lock);
}

void btrfs_put_logged_extents(struct list_head *logged_list)
{
	struct btrfs_ordered_extent *ordered;

	while (!list_empty(logged_list)) {
		ordered = list_first_entry(logged_list,
					   struct btrfs_ordered_extent,
					   log_list);
		list_del_init(&ordered->log_list);
		btrfs_put_ordered_extent(ordered);
	}
}

void btrfs_submit_logged_extents(struct list_head *logged_list,
				 struct btrfs_root *log)
{
	int index = log->log_transid % 2;

	spin_lock_irq(&log->log_extents_lock[index]);
	list_splice_tail(logged_list, &log->logged_list[index]);
	spin_unlock_irq(&log->log_extents_lock[index]);
}

void btrfs_wait_logged_extents(struct btrfs_trans_handle *trans,
			       struct btrfs_root *log, u64 transid)
{
	struct btrfs_ordered_extent *ordered;
	int index = transid % 2;

	spin_lock_irq(&log->log_extents_lock[index]);
	while (!list_empty(&log->logged_list[index])) {
		struct inode *inode;
		ordered = list_first_entry(&log->logged_list[index],
					   struct btrfs_ordered_extent,
					   log_list);
		list_del_init(&ordered->log_list);
		inode = ordered->inode;
		spin_unlock_irq(&log->log_extents_lock[index]);

		if (!test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) &&
		    !test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
			u64 start = ordered->file_offset;
			u64 end = ordered->file_offset + ordered->len - 1;

			WARN_ON(!inode);
			filemap_fdatawrite_range(inode->i_mapping, start, end);
		}
		wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
						   &ordered->flags));

		/*
		 * In order to keep us from losing our ordered extent
		 * information when committing the transaction we have to make
		 * sure that any logged extents are completed when we go to
		 * commit the transaction.  To do this we simply increase the
		 * current transaction's pending_ordered counter and decrement
		 * it when the ordered extent completes.
		 */
		if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) {
			struct btrfs_ordered_inode_tree *tree;

			tree = &BTRFS_I(inode)->ordered_tree;
			spin_lock_irq(&tree->lock);
			if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) {
				set_bit(BTRFS_ORDERED_PENDING, &ordered->flags);
				atomic_inc(&trans->transaction->pending_ordered);
			}
			spin_unlock_irq(&tree->lock);
		}
		btrfs_put_ordered_extent(ordered);
		spin_lock_irq(&log->log_extents_lock[index]);
	}
	spin_unlock_irq(&log->log_extents_lock[index]);
}

void btrfs_free_logged_extents(struct btrfs_root *log, u64 transid)
{
	struct btrfs_ordered_extent *ordered;
	int index = transid % 2;

	spin_lock_irq(&log->log_extents_lock[index]);
	while (!list_empty(&log->logged_list[index])) {
		ordered = list_first_entry(&log->logged_list[index],
					   struct btrfs_ordered_extent,
					   log_list);
		list_del_init(&ordered->log_list);
		spin_unlock_irq(&log->log_extents_lock[index]);
		btrfs_put_ordered_extent(ordered);
		spin_lock_irq(&log->log_extents_lock[index]);
	}
	spin_unlock_irq(&log->log_extents_lock[index]);
}

/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(entry->inode, entry);

	if (refcount_dec_and_test(&entry->refs)) {
		ASSERT(list_empty(&entry->log_list));
		ASSERT(list_empty(&entry->trans_list));
		ASSERT(list_empty(&entry->root_extent_list));
		ASSERT(RB_EMPTY_NODE(&entry->rb_node));
		if (entry->inode)
			btrfs_add_delayed_iput(entry->inode);
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kfree(sum);
		}
		kmem_cache_free(btrfs_ordered_extent_cache, entry);
	}
}

/*
 * remove an ordered extent from the tree.  No references are dropped
 * and waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct inode *inode,
				 struct btrfs_ordered_extent *entry)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
	struct btrfs_root *root = btrfs_inode->root;
	struct rb_node *node;
	bool dec_pending_ordered = false;

	/* This is paired with btrfs_add_ordered_extent. */
	spin_lock(&btrfs_inode->lock);
	btrfs_mod_outstanding_extents(btrfs_inode, -1);
	spin_unlock(&btrfs_inode->lock);
	if (root != fs_info->tree_root)
		btrfs_delalloc_release_metadata(btrfs_inode, entry->len, false);

	tree = &btrfs_inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = &entry->rb_node;
	rb_erase(node, &tree->tree);
	RB_CLEAR_NODE(node);
	if (tree->last == node)
		tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	if (test_and_clear_bit(BTRFS_ORDERED_PENDING, &entry->flags))
		dec_pending_ordered = true;
	spin_unlock_irq(&tree->lock);

	/*
	 * The current running transaction is waiting on us, we need to let it
	 * know that we're complete and wake it up.
	 */
	if (dec_pending_ordered) {
		struct btrfs_transaction *trans;

		/*
		 * The checks for trans are just a formality, it should be set,
		 * but if it isn't we don't want to deref/assert under the spin
		 * lock, so be nice and check if trans is set, but ASSERT() so
		 * if it isn't set a developer will notice.
		 */
		spin_lock(&fs_info->trans_lock);
		trans = fs_info->running_transaction;
		if (trans)
			refcount_inc(&trans->use_count);
		spin_unlock(&fs_info->trans_lock);

		ASSERT(trans);
		if (trans) {
			if (atomic_dec_and_test(&trans->pending_ordered))
				wake_up(&trans->pending_wait);
			btrfs_put_transaction(trans);
		}
	}

	spin_lock(&root->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);
	root->nr_ordered_extents--;

	trace_btrfs_ordered_extent_remove(inode, entry);

	if (!root->nr_ordered_extents) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(list_empty(&root->ordered_root));
		list_del_init(&root->ordered_root);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
	wake_up(&entry->wait);
}

static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered;

	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
	btrfs_start_ordered_extent(ordered->inode, ordered, 1);
	complete(&ordered->completion);
}

/*
 * wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
 */
u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
			       const u64 range_start, const u64 range_len)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	LIST_HEAD(splice);
	LIST_HEAD(skipped);
	LIST_HEAD(works);
	struct btrfs_ordered_extent *ordered, *next;
	u64 count = 0;
	const u64 range_end = range_start + range_len;

	mutex_lock(&root->ordered_extent_mutex);
	spin_lock(&root->ordered_extent_lock);
	list_splice_init(&root->ordered_extents, &splice);
	while (!list_empty(&splice) && nr) {
		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
					   root_extent_list);

		if (range_end <= ordered->start ||
		    ordered->start + ordered->disk_len <= range_start) {
			list_move_tail(&ordered->root_extent_list, &skipped);
			cond_resched_lock(&root->ordered_extent_lock);
			continue;
		}

		list_move_tail(&ordered->root_extent_list,
			       &root->ordered_extents);
		refcount_inc(&ordered->refs);
		spin_unlock(&root->ordered_extent_lock);

		btrfs_init_work(&ordered->flush_work,
				btrfs_flush_delalloc_helper,
				btrfs_run_ordered_extent_work, NULL, NULL);
		list_add_tail(&ordered->work_list, &works);
		btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);

		cond_resched();
		spin_lock(&root->ordered_extent_lock);
		if (nr != U64_MAX)
			nr--;
		count++;
	}
	list_splice_tail(&skipped, &root->ordered_extents);
	list_splice_tail(&splice, &root->ordered_extents);
	spin_unlock(&root->ordered_extent_lock);

	list_for_each_entry_safe(ordered, next, &works, work_list) {
		list_del_init(&ordered->work_list);
		wait_for_completion(&ordered->completion);
		btrfs_put_ordered_extent(ordered);
		cond_resched();
	}
	mutex_unlock(&root->ordered_extent_mutex);

	return count;
}

u64 btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
			     const u64 range_start, const u64 range_len)
{
	struct btrfs_root *root;
	struct list_head splice;
	u64 total_done = 0;
	u64 done;

	INIT_LIST_HEAD(&splice);

	mutex_lock(&fs_info->ordered_operations_mutex);
	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice) && nr) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		root = btrfs_grab_fs_root(root);
		BUG_ON(!root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);

		done = btrfs_wait_ordered_extents(root, nr,
						  range_start, range_len);
		btrfs_put_fs_root(root);
		total_done += done;

		spin_lock(&fs_info->ordered_root_lock);
		if (nr != U64_MAX) {
			nr -= done;
		}
	}
	list_splice_tail(&splice, &fs_info->ordered_roots);
	spin_unlock(&fs_info->ordered_root_lock);
	mutex_unlock(&fs_info->ordered_operations_mutex);

	return total_done;
}

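/*
 * Illustrative sketch (not part of the original file): callers that need
 * every in-flight ordered extent on the filesystem to finish (for example
 * around a balance-style operation) pass an unlimited count and the full
 * file range:
 *
 *	btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
 *
 * Passing a smaller nr throttles how many extents are flushed and waited on
 * in one call; the return value is how many were actually processed.
 */
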
/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry,
				int wait)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->len - 1;

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * pages in the range can be dirty, clean or writeback.  We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for the flusher thread to find them
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->i_mapping, start, end);
	if (wait) {
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
	}
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	int ret = 0;
	int ret_wb = 0;
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = INT_LIMIT(loff_t);
	} else {
		orig_end = start + len - 1;
		if (orig_end > INT_LIMIT(loff_t))
			orig_end = INT_LIMIT(loff_t);
	}

	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
	ret = btrfs_fdatawrite_range(inode, start, orig_end);
	if (ret)
		return ret;

	/*
	 * If we have a writeback error don't return immediately. Wait first
	 * for any ordered extents that haven't completed yet. This is to make
	 * sure no one can dirty the same page ranges and call writepages()
	 * before the ordered extents complete - to avoid failures (-EEXIST)
	 * when adding the new ordered extents to the ordered tree.
	 */
	ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end);

	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->len <= start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(inode, ordered, 1);
		end = ordered->file_offset;
		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
			ret = -EIO;
		btrfs_put_ordered_extent(ordered);
		if (ret || end == 0 || end == start)
			break;
		end--;
	}
	return ret_wb ? ret_wb : ret;
}

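/*
 * Illustrative sketch (not part of the original file): operations that must
 * observe fully persisted file data for a range (truncate, hole punching,
 * fsync-style paths) typically flush and then wait on that range, e.g.:
 *
 *	ret = btrfs_wait_ordered_range(inode, offset, length);
 *	if (ret)
 *		return ret;		(writeback or ordered IO error)
 *
 * Waiting on the whole file can be done by passing start = 0 and
 * len = (u64)-1, which the function clamps to INT_LIMIT(loff_t).
 */
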
/*
 * find an ordered extent corresponding to file_offset.  return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset))
		entry = NULL;
	if (entry)
		refcount_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

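/*
 * Illustrative sketch (not part of the original file): every successful
 * lookup returns the extent with an extra reference, so the caller pairs it
 * with btrfs_put_ordered_extent() once it is done inspecting the extent:
 *
 *	struct btrfs_ordered_extent *ordered;
 *
 *	ordered = btrfs_lookup_ordered_extent(inode, page_offset(page));
 *	if (ordered) {
 *		... examine ordered->file_offset / ordered->len / flags ...
 *		btrfs_put_ordered_extent(ordered);
 *	}
 */
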
/* Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
		struct btrfs_inode *inode, u64 file_offset, u64 len)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		node = tree_search(tree, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry)
		refcount_inc(&entry->refs);
	spin_unlock_irq(&tree->lock);
	return entry;
}

bool btrfs_have_ordered_extents_in_range(struct inode *inode,
					 u64 file_offset,
					 u64 len)
{
	struct btrfs_ordered_extent *oe;

	oe = btrfs_lookup_ordered_range(BTRFS_I(inode), file_offset, len);
	if (oe) {
		btrfs_put_ordered_extent(oe);
		return true;
	}
	return false;
}

/*
 * lookup and return any extent before 'file_offset'.  NULL is returned
 * if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	refcount_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * After an extent is done, call this to conditionally update the on disk
 * i_size.  i_size is updated to cover any fully written part of the file.
 */
int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
				struct btrfs_ordered_extent *ordered)
{
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	u64 disk_i_size;
	u64 new_i_size;
	u64 i_size = i_size_read(inode);
	struct rb_node *node;
	struct rb_node *prev = NULL;
	struct btrfs_ordered_extent *test;
	int ret = 1;
	u64 orig_offset = offset;

	spin_lock_irq(&tree->lock);
	if (ordered) {
		offset = entry_end(ordered);
		if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags))
			offset = min(offset,
				     ordered->file_offset +
				     ordered->truncated_len);
	} else {
		offset = ALIGN(offset, btrfs_inode_sectorsize(inode));
	}
	disk_i_size = BTRFS_I(inode)->disk_i_size;

	/*
	 * truncate file.
	 * If ordered is not NULL, then this is called from endio and
	 * disk_i_size will be updated by either truncate itself or any
	 * in-flight IOs which are inside the disk_i_size.
	 *
	 * Because btrfs_setsize() may set i_size with disk_i_size if truncate
	 * fails somehow, we need to make sure we have a precise disk_i_size by
	 * updating it as usual.
	 */
	if (!ordered && disk_i_size > i_size) {
		BTRFS_I(inode)->disk_i_size = orig_offset;
		ret = 0;
		goto out;
	}

	/*
	 * if the disk i_size is already at the inode->i_size, or
	 * this ordered extent is inside the disk i_size, we're done
	 */
	if (disk_i_size == i_size)
		goto out;

	/*
	 * We still need to update disk_i_size if outstanding_isize is greater
	 * than disk_i_size.
	 */
	if (offset <= disk_i_size &&
	    (!ordered || ordered->outstanding_isize <= disk_i_size))
		goto out;

	/*
	 * walk backward from this ordered extent to disk_i_size.
	 * if we find an ordered extent then we can't update disk i_size
	 * yet
	 */
	if (ordered) {
		node = rb_prev(&ordered->rb_node);
	} else {
		prev = tree_search(tree, offset);
		/*
		 * we insert file extents without involving ordered struct,
		 * so there should be no ordered struct covering this offset
		 */
		if (prev) {
			test = rb_entry(prev, struct btrfs_ordered_extent,
					rb_node);
			BUG_ON(offset_in_entry(test, offset));
		}
		node = prev;
	}
	for (; node; node = rb_prev(node)) {
		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);

		/* We treat this entry as if it doesn't exist */
		if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags))
			continue;

		if (entry_end(test) <= disk_i_size)
			break;
		if (test->file_offset >= i_size)
			break;

		/*
		 * We don't update disk_i_size now, so record this undealt
		 * i_size. Or we will not know the real i_size.
		 */
		if (test->outstanding_isize < offset)
			test->outstanding_isize = offset;
		if (ordered &&
		    ordered->outstanding_isize > test->outstanding_isize)
			test->outstanding_isize = ordered->outstanding_isize;
		goto out;
	}
	new_i_size = min_t(u64, offset, i_size);

	/*
	 * Some ordered extents may have completed before the current one, and
	 * we hold the real i_size in ->outstanding_isize.
	 */
	if (ordered && ordered->outstanding_isize > new_i_size)
		new_i_size = min_t(u64, ordered->outstanding_isize, i_size);
	BTRFS_I(inode)->disk_i_size = new_i_size;
	ret = 0;
out:
	/*
	 * We need to do this because we can't remove ordered extents until
	 * after the i_disk_size has been updated and then the inode has been
	 * updated to reflect the change, so we need to tell anybody who finds
	 * this ordered extent that we've already done all the real work, we
	 * just haven't completed all the other work.
	 */
	if (ordered)
		set_bit(BTRFS_ORDERED_UPDATED_ISIZE, &ordered->flags);
	spin_unlock_irq(&tree->lock);
	return ret;
}

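/*
 * A worked example of the logic above (illustrative only, not part of the
 * original file), for an inode with i_size 8192, disk_i_size 0 and two
 * in-flight ordered extents A = [0, 4096) and B = [4096, 8192):
 *
 *  - If B completes first, the backward walk from B finds A still pending,
 *    so disk_i_size stays 0 and 8192 is remembered in A->outstanding_isize.
 *  - When A then completes, nothing earlier is pending, so disk_i_size moves
 *    to min(A->outstanding_isize, i_size) = 8192 in one step.
 *  - If A had completed first instead, disk_i_size would move to 4096 then,
 *    and to 8192 when B completed.
 */
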
/*
 * search the ordered extents for one corresponding to 'offset' and
 * try to find a checksum.  This is used because we allow pages to
 * be reclaimed before their checksum is actually put into the btree
 */
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
			   u32 *sum, int len)
{
	struct btrfs_ordered_sum *ordered_sum;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	unsigned long num_sectors;
	unsigned long i;
	u32 sectorsize = btrfs_inode_sectorsize(inode);
	int index = 0;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	if (!ordered)
		return 0;

	spin_lock_irq(&tree->lock);
	list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
		if (disk_bytenr >= ordered_sum->bytenr &&
		    disk_bytenr < ordered_sum->bytenr + ordered_sum->len) {
			i = (disk_bytenr - ordered_sum->bytenr) >>
			    inode->i_sb->s_blocksize_bits;
			num_sectors = ordered_sum->len >>
				      inode->i_sb->s_blocksize_bits;
			num_sectors = min_t(int, len - index, num_sectors - i);
			memcpy(sum + index, ordered_sum->sums + i,
			       num_sectors);

			index += (int)num_sectors;
			if (index == len)
				goto out;
			disk_bytenr += num_sectors * sectorsize;
		}
	}
out:
	spin_unlock_irq(&tree->lock);
	btrfs_put_ordered_extent(ordered);
	return index;
}

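/*
 * Illustrative sketch (not part of the original file): a read-side checksum
 * lookup can consult the in-memory ordered sums before falling back to the
 * checksum tree, roughly as below. The single-sector call shape is a
 * hypothetical simplification.
 *
 *	u32 csum;
 *
 *	if (btrfs_find_ordered_sum(inode, file_offset, disk_bytenr, &csum, 1))
 *		return csum;	(checksum was still queued on an ordered extent)
 *	... otherwise look the checksum up in the checksum tree ...
 */
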
int __init ordered_data_init(void)
{
	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
				     sizeof(struct btrfs_ordered_extent), 0,
				     SLAB_MEM_SPREAD,
				     NULL);
	if (!btrfs_ordered_extent_cache)
		return -ENOMEM;

	return 0;
}

void __cold ordered_data_exit(void)
{
	kmem_cache_destroy(btrfs_ordered_extent_cache);
}