/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"

static struct kmem_cache *btrfs_ordered_extent_cache;

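/*
 * helper to figure out the end byte of an ordered extent, clamping to
 * (u64)-1 if file_offset + len overflows
 */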
static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->len < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->len;
}

/* returns NULL if the insertion worked, or returns the existing node
 * it found in the tree
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

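/*
 * helper to panic on an inconsistency in the ordered extent tree, e.g.
 * when an insertion collides with an existing entry
 */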
static void ordered_data_tree_panic(struct inode *inode, int errno,
				    u64 offset)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	btrfs_panic(fs_info, errno, "Inconsistency in ordered tree at offset "
		    "%llu\n", (unsigned long long)offset);
}

/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}

/*
 * helper to check if a given offset is inside a given entry
 */
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
	if (file_offset < entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}

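/*
 * helper to check if a given range overlaps any part of a given entry
 */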
static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}

/*
 * look for the first ordered struct that has this offset, otherwise
 * the first one less than this offset
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
{
	struct rb_root *root = &tree->tree;
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (offset_in_entry(entry, file_offset))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		tree->last = ret;
	return ret;
}

/* allocate and add a new ordered_extent into the per-inode tree.
 * file_offset is the logical offset in the file
 *
 * start is the disk block number of an extent already reserved in the
 * extent allocation tree
 *
 * len is the length of the extent
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int dio, int compress_type)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;

	tree = &BTRFS_I(inode)->ordered_tree;
	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
	if (!entry)
		return -ENOMEM;

	entry->file_offset = file_offset;
	entry->start = start;
	entry->len = len;
	entry->disk_len = disk_len;
	entry->bytes_left = len;
	entry->inode = igrab(inode);
	entry->compress_type = compress_type;
	if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
		set_bit(type, &entry->flags);

	if (dio)
		set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);

	/* one ref for the tree */
	atomic_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->root_extent_list);

	trace_btrfs_ordered_extent_add(inode, entry);

	spin_lock_irq(&tree->lock);
	node = tree_insert(&tree->tree, file_offset,
			   &entry->rb_node);
	if (node)
		ordered_data_tree_panic(inode, -EEXIST, file_offset);
	spin_unlock_irq(&tree->lock);

	spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &BTRFS_I(inode)->root->fs_info->ordered_extents);
	spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);

	return 0;
}

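/*
 * wrapper around __btrfs_add_ordered_extent for ordinary buffered IO:
 * no direct IO flag and no compression type
 */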
int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
			     u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  BTRFS_COMPRESS_NONE);
}

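/*
 * wrapper around __btrfs_add_ordered_extent for direct IO; the entry is
 * flagged BTRFS_ORDERED_DIRECT
 */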
int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
				 u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 1,
					  BTRFS_COMPRESS_NONE);
}

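/*
 * wrapper around __btrfs_add_ordered_extent for compressed extents; the
 * caller supplies the compression type
 */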
int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int compress_type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  compress_type);
}

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiples.
 */
void btrfs_add_ordered_sum(struct inode *inode,
			   struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	list_add_tail(&sum->list, &entry->list);
	spin_unlock_irq(&tree->lock);
}

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO may span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 *
 * file_offset is updated to one byte past the range that is recorded as
 * complete.  This allows you to walk forward in the file.
 */
int btrfs_dec_test_first_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 *file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	int ret;
	unsigned long flags;
	u64 dec_end;
	u64 dec_start;
	u64 to_dec;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	node = tree_search(tree, *file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, *file_offset)) {
		ret = 1;
		goto out;
	}

	dec_start = max(*file_offset, entry->file_offset);
	dec_end = min(*file_offset + io_size, entry->file_offset +
		      entry->len);
	*file_offset = dec_end;
	if (dec_start > dec_end) {
		printk(KERN_CRIT "bad ordering dec_start %llu end %llu\n",
		       (unsigned long long)dec_start,
		       (unsigned long long)dec_end);
	}
	to_dec = dec_end - dec_start;
	if (to_dec > entry->bytes_left) {
		printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n",
		       (unsigned long long)entry->bytes_left,
		       (unsigned long long)to_dec);
	}
	entry->bytes_left -= to_dec;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0)
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
	else
		ret = 1;
out:
	if (!ret && cached && entry) {
		*cached = entry;
		atomic_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO should not span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */
int btrfs_dec_test_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	int ret;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

	node = tree_search(tree, file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
	if (!offset_in_entry(entry, file_offset)) {
		ret = 1;
		goto out;
	}

	if (io_size > entry->bytes_left) {
		printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n",
		       (unsigned long long)entry->bytes_left,
		       (unsigned long long)io_size);
	}
	entry->bytes_left -= io_size;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0)
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
	else
		ret = 1;
out:
	if (!ret && cached && entry) {
		*cached = entry;
		atomic_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}

/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(entry->inode, entry);

	if (atomic_dec_and_test(&entry->refs)) {
		if (entry->inode)
			btrfs_add_delayed_iput(entry->inode);
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kfree(sum);
		}
		kmem_cache_free(btrfs_ordered_extent_cache, entry);
	}
}

/*
 * remove an ordered extent from the tree.  No references are dropped
 * and waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct inode *inode,
				 struct btrfs_ordered_extent *entry)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct rb_node *node;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = &entry->rb_node;
	rb_erase(node, &tree->tree);
	tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->fs_info->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);

	trace_btrfs_ordered_extent_remove(inode, entry);

	/*
	 * we have no more ordered extents for this inode and
	 * no dirty pages.  We can safely remove it from the
	 * list of ordered extents
	 */
	if (RB_EMPTY_ROOT(&tree->tree) &&
	    !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
		list_del_init(&BTRFS_I(inode)->ordered_operations);
	}
	spin_unlock(&root->fs_info->ordered_extent_lock);
	wake_up(&entry->wait);
}

/*
 * wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
 */
void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput)
{
	struct list_head splice;
	struct list_head *cur;
	struct btrfs_ordered_extent *ordered;
	struct inode *inode;

	INIT_LIST_HEAD(&splice);

	spin_lock(&root->fs_info->ordered_extent_lock);
	list_splice_init(&root->fs_info->ordered_extents, &splice);
	while (!list_empty(&splice)) {
		cur = splice.next;
		ordered = list_entry(cur, struct btrfs_ordered_extent,
				     root_extent_list);
		list_del_init(&ordered->root_extent_list);
		atomic_inc(&ordered->refs);

		/*
		 * the inode may be getting freed (in sys_unlink path).
		 */
		inode = igrab(ordered->inode);

		spin_unlock(&root->fs_info->ordered_extent_lock);

		if (inode) {
			btrfs_start_ordered_extent(inode, ordered, 1);
			btrfs_put_ordered_extent(ordered);
			if (delay_iput)
				btrfs_add_delayed_iput(inode);
			else
				iput(inode);
		} else {
			btrfs_put_ordered_extent(ordered);
		}

		spin_lock(&root->fs_info->ordered_extent_lock);
	}
	spin_unlock(&root->fs_info->ordered_extent_lock);
}

/*
 * this is used during transaction commit to write all the inodes
 * added to the ordered operation list.  These files must be fully on
 * disk before the transaction commits.
 *
 * we have two modes here, one is to just start the IO via filemap_flush
 * and the other is to wait for all the io.  When we wait, we have an
 * extra check to make sure the ordered operation list really is empty
 * before we return
 */
void btrfs_run_ordered_operations(struct btrfs_root *root, int wait)
{
	struct btrfs_inode *btrfs_inode;
	struct inode *inode;
	struct list_head splice;

	INIT_LIST_HEAD(&splice);

	mutex_lock(&root->fs_info->ordered_operations_mutex);
	spin_lock(&root->fs_info->ordered_extent_lock);
again:
	list_splice_init(&root->fs_info->ordered_operations, &splice);

	while (!list_empty(&splice)) {
		btrfs_inode = list_entry(splice.next, struct btrfs_inode,
					 ordered_operations);

		inode = &btrfs_inode->vfs_inode;

		list_del_init(&btrfs_inode->ordered_operations);

		/*
		 * the inode may be getting freed (in sys_unlink path).
		 */
		inode = igrab(inode);

		if (!wait && inode) {
			list_add_tail(&BTRFS_I(inode)->ordered_operations,
				      &root->fs_info->ordered_operations);
		}
		spin_unlock(&root->fs_info->ordered_extent_lock);

		if (inode) {
			if (wait)
				btrfs_wait_ordered_range(inode, 0, (u64)-1);
			else
				filemap_flush(inode->i_mapping);
			btrfs_add_delayed_iput(inode);
		}

		cond_resched();
		spin_lock(&root->fs_info->ordered_extent_lock);
	}
	if (wait && !list_empty(&root->fs_info->ordered_operations))
		goto again;

	spin_unlock(&root->fs_info->ordered_extent_lock);
	mutex_unlock(&root->fs_info->ordered_operations_mutex);
}

/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry,
				int wait)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->len - 1;

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * pages in the range can be dirty, clean or writeback.  We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for the flusher thread to find them
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->i_mapping, start, end);
	if (wait) {
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
	}
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;
	int found;

	if (start + len < start) {
		orig_end = INT_LIMIT(loff_t);
	} else {
		orig_end = start + len - 1;
		if (orig_end > INT_LIMIT(loff_t))
			orig_end = INT_LIMIT(loff_t);
	}

	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
	filemap_fdatawrite_range(inode->i_mapping, start, orig_end);

	/*
	 * So with compression we will find and lock a dirty page and clear the
	 * first one as dirty, setup an async extent, and immediately return
	 * with the entire range locked but with nobody actually marked with
	 * writeback.  So we can't just filemap_write_and_wait_range() and
	 * expect it to work since it will just kick off a thread to do the
	 * actual work.  So we need to call filemap_fdatawrite_range _again_
	 * since it will wait on the page lock, which won't be unlocked until
	 * after the pages have been marked as writeback and so we're good to go
	 * from there.  We have to do this otherwise we'll miss the ordered
	 * extents and that results in badness.  Please Josef, do not think you
	 * know better and pull this out at some point in the future, it is
	 * right and you are wrong.
	 */
	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
		     &BTRFS_I(inode)->runtime_flags))
		filemap_fdatawrite_range(inode->i_mapping, start, orig_end);

	filemap_fdatawait_range(inode->i_mapping, start, orig_end);

	end = orig_end;
	found = 0;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->len < start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		found++;
		btrfs_start_ordered_extent(inode, ordered, 1);
		end = ordered->file_offset;
		btrfs_put_ordered_extent(ordered);
		if (end == 0 || end == start)
			break;
		end--;
	}
}

/*
 * find an ordered extent corresponding to file_offset.  return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset))
		entry = NULL;
	if (entry)
		atomic_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/* Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
							u64 file_offset,
							u64 len)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		node = tree_search(tree, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry)
		atomic_inc(&entry->refs);
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * lookup and return any extent before 'file_offset'.  NULL is returned
 * if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	atomic_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * After an extent is done, call this to conditionally update the on disk
 * i_size.  i_size is updated to cover any fully written part of the file.
 */
int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
				struct btrfs_ordered_extent *ordered)
{
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	u64 disk_i_size;
	u64 new_i_size;
	u64 i_size = i_size_read(inode);
	struct rb_node *node;
	struct rb_node *prev = NULL;
	struct btrfs_ordered_extent *test;
	int ret = 1;

	if (ordered)
		offset = entry_end(ordered);
	else
		offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize);

	spin_lock_irq(&tree->lock);
	disk_i_size = BTRFS_I(inode)->disk_i_size;

	/* truncate file */
	if (disk_i_size > i_size) {
		BTRFS_I(inode)->disk_i_size = i_size;
		ret = 0;
		goto out;
	}

	/*
	 * if the disk i_size is already at the inode->i_size, or
	 * this ordered extent is inside the disk i_size, we're done
	 */
	if (disk_i_size == i_size || offset <= disk_i_size) {
		goto out;
	}

	/*
	 * walk backward from this ordered extent to disk_i_size.
	 * if we find an ordered extent then we can't update disk i_size
	 * yet
	 */
	if (ordered) {
		node = rb_prev(&ordered->rb_node);
	} else {
		prev = tree_search(tree, offset);
		/*
		 * we insert file extents without involving ordered struct,
		 * so there should be no ordered struct covering this offset
		 */
		if (prev) {
			test = rb_entry(prev, struct btrfs_ordered_extent,
					rb_node);
			BUG_ON(offset_in_entry(test, offset));
		}
		node = prev;
	}
	for (; node; node = rb_prev(node)) {
		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);

		/* We treat this entry as if it doesn't exist */
		if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags))
			continue;
		if (test->file_offset + test->len <= disk_i_size)
			break;
		if (test->file_offset >= i_size)
			break;
		if (test->file_offset >= disk_i_size) {
			/*
			 * we don't update disk_i_size now, so record this
			 * undealt i_size.  Or we will not know the real
			 * i_size.
			 */
			if (test->outstanding_isize < offset)
				test->outstanding_isize = offset;
			if (ordered &&
			    ordered->outstanding_isize >
			    test->outstanding_isize)
				test->outstanding_isize =
					ordered->outstanding_isize;
			goto out;
		}
	}
	new_i_size = min_t(u64, offset, i_size);

	/*
	 * Some ordered extents may have completed before the current one, and
	 * we hold the real i_size in ->outstanding_isize.
	 */
	if (ordered && ordered->outstanding_isize > new_i_size)
		new_i_size = min_t(u64, ordered->outstanding_isize, i_size);
	BTRFS_I(inode)->disk_i_size = new_i_size;
	ret = 0;
out:
	/*
	 * We need to do this because we can't remove ordered extents until
	 * after the i_disk_size has been updated and then the inode has been
	 * updated to reflect the change, so we need to tell anybody who finds
	 * this ordered extent that we've already done all the real work, we
	 * just haven't completed all the other work.
	 */
	if (ordered)
		set_bit(BTRFS_ORDERED_UPDATED_ISIZE, &ordered->flags);
	spin_unlock_irq(&tree->lock);
	return ret;
}

/*
 * search the ordered extents for one corresponding to 'offset' and
 * try to find a checksum.  This is used because we allow pages to
 * be reclaimed before their checksum is actually put into the btree
 */
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
			   u32 *sum)
{
	struct btrfs_ordered_sum *ordered_sum;
	struct btrfs_sector_sum *sector_sums;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	unsigned long num_sectors;
	unsigned long i;
	u32 sectorsize = BTRFS_I(inode)->root->sectorsize;
	int ret = 1;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	if (!ordered)
		return 1;

	spin_lock_irq(&tree->lock);
	list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
		if (disk_bytenr >= ordered_sum->bytenr) {
			num_sectors = ordered_sum->len / sectorsize;
			sector_sums = ordered_sum->sums;
			for (i = 0; i < num_sectors; i++) {
				if (sector_sums[i].bytenr == disk_bytenr) {
					*sum = sector_sums[i].sum;
					ret = 0;
					goto out;
				}
			}
		}
	}
out:
	spin_unlock_irq(&tree->lock);
	btrfs_put_ordered_extent(ordered);
	return ret;
}

/*
 * add a given inode to the list of inodes that must be fully on
 * disk before a transaction commit finishes.
 *
 * This basically gives us the ext3 style data=ordered mode, and it is mostly
 * used to make sure renamed files are fully on disk.
 *
 * It is a noop if the inode is already fully on disk.
 *
 * If trans is not null, we'll do a friendly check for a transaction that
 * is already flushing things and force the IO down ourselves.
 */
void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root, struct inode *inode)
{
	u64 last_mod;

	last_mod = max(BTRFS_I(inode)->generation, BTRFS_I(inode)->last_trans);

	/*
	 * if this file hasn't been changed since the last transaction
	 * commit, we can safely return without doing anything
	 */
	if (last_mod < root->fs_info->last_trans_committed)
		return;

	/*
	 * the transaction is already committing.  Just start the IO and
	 * don't bother with all of this list nonsense
	 */
	if (trans && root->fs_info->running_transaction->blocked) {
		btrfs_wait_ordered_range(inode, 0, (u64)-1);
		return;
	}

	spin_lock(&root->fs_info->ordered_extent_lock);
	if (list_empty(&BTRFS_I(inode)->ordered_operations)) {
		list_add_tail(&BTRFS_I(inode)->ordered_operations,
			      &root->fs_info->ordered_operations);
	}
	spin_unlock(&root->fs_info->ordered_extent_lock);
}

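/*
 * called at module init time to create the slab cache backing
 * btrfs_ordered_extent allocations
 */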
int __init ordered_data_init(void)
{
	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
				     sizeof(struct btrfs_ordered_extent), 0,
				     SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
				     NULL);
	if (!btrfs_ordered_extent_cache)
		return -ENOMEM;
	return 0;
}

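/*
 * called at module exit time to destroy the slab cache
 */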
void ordered_data_exit(void)
{
	if (btrfs_ordered_extent_cache)
		kmem_cache_destroy(btrfs_ordered_extent_cache);
}