/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"

static struct kmem_cache *btrfs_ordered_extent_cache;

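/*
 * Return the end byte offset (exclusive) of an ordered extent, clamping to
 * (u64)-1 if file_offset + len would overflow.
 */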
static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->len < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->len;
}

/* returns NULL if the insertion worked, or it returns the node it did find
 * in the tree
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

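/*
 * Panic helper for a corrupted ordered tree: called when inserting an
 * ordered extent collides with an existing node, which must never happen.
 */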
static void ordered_data_tree_panic(struct inode *inode, int errno,
				    u64 offset)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	btrfs_panic(fs_info, errno, "Inconsistency in ordered tree at offset "
		    "%llu", offset);
}

/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}

/*
 * helper to check if a given offset is inside a given entry
 */
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
	if (file_offset < entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}

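/*
 * helper to check if the range [file_offset, file_offset + len) overlaps
 * the entry's range; both ranges are half-open.
 */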
static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}

/*
 * find the first ordered struct that has this offset, otherwise
 * the first one less than this offset
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
{
	struct rb_root *root = &tree->tree;
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (offset_in_entry(entry, file_offset))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		tree->last = ret;
	return ret;
}

/* allocate and add a new ordered_extent into the per-inode tree.
 * file_offset is the logical offset in the file
 *
 * start is the disk block number of an extent already reserved in the
 * extent allocation tree
 *
 * len is the length of the extent
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int dio, int compress_type)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;

	tree = &BTRFS_I(inode)->ordered_tree;
	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
	if (!entry)
		return -ENOMEM;

	entry->file_offset = file_offset;
	entry->start = start;
	entry->len = len;
	entry->disk_len = disk_len;
	entry->bytes_left = len;
	entry->inode = igrab(inode);
	entry->compress_type = compress_type;
	entry->truncated_len = (u64)-1;
	if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
		set_bit(type, &entry->flags);

	if (dio)
		set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);

	/* one ref for the tree */
	atomic_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->root_extent_list);
	INIT_LIST_HEAD(&entry->work_list);
	init_completion(&entry->completion);
	INIT_LIST_HEAD(&entry->log_list);
	INIT_LIST_HEAD(&entry->trans_list);

	trace_btrfs_ordered_extent_add(inode, entry);

	spin_lock_irq(&tree->lock);
	node = tree_insert(&tree->tree, file_offset,
			   &entry->rb_node);
	if (node)
		ordered_data_tree_panic(inode, -EEXIST, file_offset);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &root->ordered_extents);
	root->nr_ordered_extents++;
	if (root->nr_ordered_extents == 1) {
		spin_lock(&root->fs_info->ordered_root_lock);
		BUG_ON(!list_empty(&root->ordered_root));
		list_add_tail(&root->ordered_root,
			      &root->fs_info->ordered_roots);
		spin_unlock(&root->fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);

	return 0;
}

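/*
 * Thin wrappers around __btrfs_add_ordered_extent() for the three kinds of
 * callers: plain buffered writes, direct IO writes, and compressed writes.
 * They differ only in the dio flag and compress_type they pass down.
 */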
int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
			     u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
				 u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 1,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int compress_type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  compress_type);
}

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiples.
 */
void btrfs_add_ordered_sum(struct inode *inode,
			   struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	list_add_tail(&sum->list, &entry->list);
	spin_unlock_irq(&tree->lock);
}

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO may span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 *
 * file_offset is updated to one byte past the range that is recorded as
 * complete.  This allows you to walk forward in the file.
 */
int btrfs_dec_test_first_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 *file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	int ret;
	unsigned long flags;
	u64 dec_end;
	u64 dec_start;
	u64 to_dec;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	node = tree_search(tree, *file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, *file_offset)) {
		ret = 1;
		goto out;
	}

	dec_start = max(*file_offset, entry->file_offset);
	dec_end = min(*file_offset + io_size, entry->file_offset +
		      entry->len);
	*file_offset = dec_end;
	if (dec_start > dec_end) {
		btrfs_crit(BTRFS_I(inode)->root->fs_info,
			"bad ordering dec_start %llu end %llu", dec_start, dec_end);
	}
	to_dec = dec_end - dec_start;
	if (to_dec > entry->bytes_left) {
		btrfs_crit(BTRFS_I(inode)->root->fs_info,
			"bad ordered accounting left %llu size %llu",
			entry->bytes_left, to_dec);
	}
	entry->bytes_left -= to_dec;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0) {
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/*
		 * Implicit memory barrier after test_and_set_bit
		 */
		if (waitqueue_active(&entry->wait))
			wake_up(&entry->wait);
	} else {
		ret = 1;
	}
out:
	if (!ret && cached && entry) {
		*cached = entry;
		atomic_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO should not span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */
int btrfs_dec_test_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	int ret;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

	node = tree_search(tree, file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
	if (!offset_in_entry(entry, file_offset)) {
		ret = 1;
		goto out;
	}

	if (io_size > entry->bytes_left) {
		btrfs_crit(BTRFS_I(inode)->root->fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, io_size);
	}
	entry->bytes_left -= io_size;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0) {
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/*
		 * Implicit memory barrier after test_and_set_bit
		 */
		if (waitqueue_active(&entry->wait))
			wake_up(&entry->wait);
	} else {
		ret = 1;
	}
out:
	if (!ret && cached && entry) {
		*cached = entry;
		atomic_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}

/* Needs to either be called under a log transaction or the log_mutex */
void btrfs_get_logged_extents(struct inode *inode,
			      struct list_head *logged_list,
			      const loff_t start,
			      const loff_t end)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_ordered_extent *ordered;
	struct rb_node *n;
	struct rb_node *prev;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	n = __tree_search(&tree->tree, end, &prev);
	if (!n)
		n = prev;
	for (; n; n = rb_prev(n)) {
		ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		if (ordered->file_offset > end)
			continue;
		if (entry_end(ordered) <= start)
			break;
		if (test_and_set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
			continue;
		list_add(&ordered->log_list, logged_list);
		atomic_inc(&ordered->refs);
	}
	spin_unlock_irq(&tree->lock);
}

462 | void btrfs_put_logged_extents(struct list_head *logged_list) |
463 | { | |
464 | struct btrfs_ordered_extent *ordered; | |
465 | ||
466 | while (!list_empty(logged_list)) { | |
467 | ordered = list_first_entry(logged_list, | |
468 | struct btrfs_ordered_extent, | |
469 | log_list); | |
470 | list_del_init(&ordered->log_list); | |
471 | btrfs_put_ordered_extent(ordered); | |
472 | } | |
473 | } | |
474 | ||
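/*
 * Splice the collected extents onto the logged list of the current log
 * transaction; log_transid % 2 selects the active index.
 */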
void btrfs_submit_logged_extents(struct list_head *logged_list,
				 struct btrfs_root *log)
{
	int index = log->log_transid % 2;

	spin_lock_irq(&log->log_extents_lock[index]);
	list_splice_tail(logged_list, &log->logged_list[index]);
	spin_unlock_irq(&log->log_extents_lock[index]);
}

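/*
 * Wait for the IO of every extent logged in the given log transaction to
 * finish, flushing any that still have dirty pages, and mark incomplete
 * ones as pending so the transaction commit waits for them too.
 */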
void btrfs_wait_logged_extents(struct btrfs_trans_handle *trans,
			       struct btrfs_root *log, u64 transid)
{
	struct btrfs_ordered_extent *ordered;
	int index = transid % 2;

	spin_lock_irq(&log->log_extents_lock[index]);
	while (!list_empty(&log->logged_list[index])) {
		struct inode *inode;
		ordered = list_first_entry(&log->logged_list[index],
					   struct btrfs_ordered_extent,
					   log_list);
		list_del_init(&ordered->log_list);
		inode = ordered->inode;
		spin_unlock_irq(&log->log_extents_lock[index]);

		if (!test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) &&
		    !test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
			u64 start = ordered->file_offset;
			u64 end = ordered->file_offset + ordered->len - 1;

			WARN_ON(!inode);
			filemap_fdatawrite_range(inode->i_mapping, start, end);
		}
		wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
						   &ordered->flags));

		/*
		 * In order to keep us from losing our ordered extent
		 * information when committing the transaction we have to make
		 * sure that any logged extents are completed when we go to
		 * commit the transaction.  To do this we simply increase the
		 * current transaction's pending_ordered counter and decrement
		 * it when the ordered extent completes.
		 */
		if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) {
			struct btrfs_ordered_inode_tree *tree;

			tree = &BTRFS_I(inode)->ordered_tree;
			spin_lock_irq(&tree->lock);
			if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) {
				set_bit(BTRFS_ORDERED_PENDING, &ordered->flags);
				atomic_inc(&trans->transaction->pending_ordered);
			}
			spin_unlock_irq(&tree->lock);
		}
		btrfs_put_ordered_extent(ordered);
		spin_lock_irq(&log->log_extents_lock[index]);
	}
	spin_unlock_irq(&log->log_extents_lock[index]);
}

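/*
 * Release the references on any extents still sitting on the logged list
 * for the given log transaction, without waiting for their IO.
 */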
void btrfs_free_logged_extents(struct btrfs_root *log, u64 transid)
{
	struct btrfs_ordered_extent *ordered;
	int index = transid % 2;

	spin_lock_irq(&log->log_extents_lock[index]);
	while (!list_empty(&log->logged_list[index])) {
		ordered = list_first_entry(&log->logged_list[index],
					   struct btrfs_ordered_extent,
					   log_list);
		list_del_init(&ordered->log_list);
		spin_unlock_irq(&log->log_extents_lock[index]);
		btrfs_put_ordered_extent(ordered);
		spin_lock_irq(&log->log_extents_lock[index]);
	}
	spin_unlock_irq(&log->log_extents_lock[index]);
}

/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(entry->inode, entry);

	if (atomic_dec_and_test(&entry->refs)) {
		ASSERT(list_empty(&entry->log_list));
		ASSERT(list_empty(&entry->trans_list));
		ASSERT(list_empty(&entry->root_extent_list));
		ASSERT(RB_EMPTY_NODE(&entry->rb_node));
		if (entry->inode)
			btrfs_add_delayed_iput(entry->inode);
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kfree(sum);
		}
		kmem_cache_free(btrfs_ordered_extent_cache, entry);
	}
}

/*
 * remove an ordered extent from the tree.  No references are dropped
 * and waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct inode *inode,
				 struct btrfs_ordered_extent *entry)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct rb_node *node;
	bool dec_pending_ordered = false;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = &entry->rb_node;
	rb_erase(node, &tree->tree);
	RB_CLEAR_NODE(node);
	if (tree->last == node)
		tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	if (test_and_clear_bit(BTRFS_ORDERED_PENDING, &entry->flags))
		dec_pending_ordered = true;
	spin_unlock_irq(&tree->lock);

	/*
	 * The current running transaction is waiting on us, we need to let it
	 * know that we're complete and wake it up.
	 */
	if (dec_pending_ordered) {
		struct btrfs_transaction *trans;

		/*
		 * The checks for trans are just a formality, it should be set,
		 * but if it isn't we don't want to deref/assert under the spin
		 * lock, so be nice and check if trans is set, but ASSERT() so
		 * if it isn't set a developer will notice.
		 */
		spin_lock(&root->fs_info->trans_lock);
		trans = root->fs_info->running_transaction;
		if (trans)
			atomic_inc(&trans->use_count);
		spin_unlock(&root->fs_info->trans_lock);

		ASSERT(trans);
		if (trans) {
			if (atomic_dec_and_test(&trans->pending_ordered))
				wake_up(&trans->pending_wait);
			btrfs_put_transaction(trans);
		}
	}

	spin_lock(&root->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);
	root->nr_ordered_extents--;

	trace_btrfs_ordered_extent_remove(inode, entry);

	if (!root->nr_ordered_extents) {
		spin_lock(&root->fs_info->ordered_root_lock);
		BUG_ON(list_empty(&root->ordered_root));
		list_del_init(&root->ordered_root);
		spin_unlock(&root->fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
	wake_up(&entry->wait);
}

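/*
 * Work item callback: flush and wait for one ordered extent, then signal
 * its completion so btrfs_wait_ordered_extents() can reap it.
 */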
static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered;

	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
	btrfs_start_ordered_extent(ordered->inode, ordered, 1);
	complete(&ordered->completion);
}

/*
 * wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
 */
int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr)
{
	struct list_head splice, works;
	struct btrfs_ordered_extent *ordered, *next;
	int count = 0;

	INIT_LIST_HEAD(&splice);
	INIT_LIST_HEAD(&works);

	mutex_lock(&root->ordered_extent_mutex);
	spin_lock(&root->ordered_extent_lock);
	list_splice_init(&root->ordered_extents, &splice);
	while (!list_empty(&splice) && nr) {
		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
					   root_extent_list);
		list_move_tail(&ordered->root_extent_list,
			       &root->ordered_extents);
		atomic_inc(&ordered->refs);
		spin_unlock(&root->ordered_extent_lock);

		btrfs_init_work(&ordered->flush_work,
				btrfs_flush_delalloc_helper,
				btrfs_run_ordered_extent_work, NULL, NULL);
		list_add_tail(&ordered->work_list, &works);
		btrfs_queue_work(root->fs_info->flush_workers,
				 &ordered->flush_work);

		cond_resched();
		spin_lock(&root->ordered_extent_lock);
		if (nr != -1)
			nr--;
		count++;
	}
	list_splice_tail(&splice, &root->ordered_extents);
	spin_unlock(&root->ordered_extent_lock);

	list_for_each_entry_safe(ordered, next, &works, work_list) {
		list_del_init(&ordered->work_list);
		wait_for_completion(&ordered->completion);
		btrfs_put_ordered_extent(ordered);
		cond_resched();
	}
	mutex_unlock(&root->ordered_extent_mutex);

	return count;
}

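/*
 * Wait for ordered extents across all roots in the filesystem, on at most
 * nr extents in total; nr == -1 means no limit.
 */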
void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_root *root;
	struct list_head splice;
	int done;

	INIT_LIST_HEAD(&splice);

	mutex_lock(&fs_info->ordered_operations_mutex);
	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice) && nr) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		root = btrfs_grab_fs_root(root);
		BUG_ON(!root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);

		done = btrfs_wait_ordered_extents(root, nr);
		btrfs_put_fs_root(root);

		spin_lock(&fs_info->ordered_root_lock);
		if (nr != -1) {
			nr -= done;
			WARN_ON(nr < 0);
		}
	}
	list_splice_tail(&splice, &fs_info->ordered_roots);
	spin_unlock(&fs_info->ordered_root_lock);
	mutex_unlock(&fs_info->ordered_operations_mutex);
}

/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry,
				int wait)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->len - 1;

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * pages in the range can be dirty, clean or writeback.  We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for the flusher thread to find them
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->i_mapping, start, end);
	if (wait) {
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
	}
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	int ret = 0;
	int ret_wb = 0;
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = INT_LIMIT(loff_t);
	} else {
		orig_end = start + len - 1;
		if (orig_end > INT_LIMIT(loff_t))
			orig_end = INT_LIMIT(loff_t);
	}

	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
	ret = btrfs_fdatawrite_range(inode, start, orig_end);
	if (ret)
		return ret;

	/*
	 * If we have a writeback error don't return immediately.  Wait first
	 * for any ordered extents that haven't completed yet.  This is to make
	 * sure no one can dirty the same page ranges and call writepages()
	 * before the ordered extents complete - to avoid failures (-EEXIST)
	 * when adding the new ordered extents to the ordered tree.
	 */
	ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end);

	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->len <= start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(inode, ordered, 1);
		end = ordered->file_offset;
		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
			ret = -EIO;
		btrfs_put_ordered_extent(ordered);
		if (ret || end == 0 || end == start)
			break;
		end--;
	}
	return ret_wb ? ret_wb : ret;
}

/*
 * find an ordered extent corresponding to file_offset.  return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset))
		entry = NULL;
	if (entry)
		atomic_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/* Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
							u64 file_offset,
							u64 len)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		node = tree_search(tree, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry)
		atomic_inc(&entry->refs);
	spin_unlock_irq(&tree->lock);
	return entry;
}

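/*
 * Convenience wrapper: return true if any ordered extent overlaps the given
 * range, dropping the reference taken by the lookup.
 */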
bool btrfs_have_ordered_extents_in_range(struct inode *inode,
					 u64 file_offset,
					 u64 len)
{
	struct btrfs_ordered_extent *oe;

	oe = btrfs_lookup_ordered_range(inode, file_offset, len);
	if (oe) {
		btrfs_put_ordered_extent(oe);
		return true;
	}
	return false;
}

/*
 * lookup and return any extent before 'file_offset'.  NULL is returned
 * if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	atomic_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * After an extent is done, call this to conditionally update the on disk
 * i_size.  i_size is updated to cover any fully written part of the file.
 */
int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
				struct btrfs_ordered_extent *ordered)
{
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	u64 disk_i_size;
	u64 new_i_size;
	u64 i_size = i_size_read(inode);
	struct rb_node *node;
	struct rb_node *prev = NULL;
	struct btrfs_ordered_extent *test;
	int ret = 1;

	spin_lock_irq(&tree->lock);
	if (ordered) {
		offset = entry_end(ordered);
		if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags))
			offset = min(offset,
				     ordered->file_offset +
				     ordered->truncated_len);
	} else {
		offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize);
	}
	disk_i_size = BTRFS_I(inode)->disk_i_size;

	/* truncate file */
	if (disk_i_size > i_size) {
		BTRFS_I(inode)->disk_i_size = i_size;
		ret = 0;
		goto out;
	}

	/*
	 * if the disk i_size is already at the inode->i_size, or
	 * this ordered extent is inside the disk i_size, we're done
	 */
	if (disk_i_size == i_size)
		goto out;

	/*
	 * We still need to update disk_i_size if outstanding_isize is greater
	 * than disk_i_size.
	 */
	if (offset <= disk_i_size &&
	    (!ordered || ordered->outstanding_isize <= disk_i_size))
		goto out;

	/*
	 * walk backward from this ordered extent to disk_i_size.
	 * if we find an ordered extent then we can't update disk i_size
	 * yet
	 */
	if (ordered) {
		node = rb_prev(&ordered->rb_node);
	} else {
		prev = tree_search(tree, offset);
		/*
		 * we insert file extents without involving ordered struct,
		 * so there should be no ordered struct covering this offset
		 */
		if (prev) {
			test = rb_entry(prev, struct btrfs_ordered_extent,
					rb_node);
			BUG_ON(offset_in_entry(test, offset));
		}
		node = prev;
	}
	for (; node; node = rb_prev(node)) {
		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);

		/* We treat this entry as if it doesn't exist */
		if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags))
			continue;
		if (test->file_offset + test->len <= disk_i_size)
			break;
		if (test->file_offset >= i_size)
			break;
		if (entry_end(test) > disk_i_size) {
			/*
			 * we don't update disk_i_size now, so record this
			 * undealt i_size.  Or we will not know the real
			 * i_size.
			 */
			if (test->outstanding_isize < offset)
				test->outstanding_isize = offset;
			if (ordered &&
			    ordered->outstanding_isize >
			    test->outstanding_isize)
				test->outstanding_isize =
					ordered->outstanding_isize;
			goto out;
		}
	}
	new_i_size = min_t(u64, offset, i_size);

	/*
	 * Some ordered extents may have completed before the current one, and
	 * we hold the real i_size in ->outstanding_isize.
	 */
	if (ordered && ordered->outstanding_isize > new_i_size)
		new_i_size = min_t(u64, ordered->outstanding_isize, i_size);
	BTRFS_I(inode)->disk_i_size = new_i_size;
	ret = 0;
out:
	/*
	 * We need to do this because we can't remove ordered extents until
	 * after the i_disk_size has been updated and then the inode has been
	 * updated to reflect the change, so we need to tell anybody who finds
	 * this ordered extent that we've already done all the real work, we
	 * just haven't completed all the other work.
	 */
	if (ordered)
		set_bit(BTRFS_ORDERED_UPDATED_ISIZE, &ordered->flags);
	spin_unlock_irq(&tree->lock);
	return ret;
}

/*
 * search the ordered extents for one corresponding to 'offset' and
 * try to find a checksum.  This is used because we allow pages to
 * be reclaimed before their checksum is actually put into the btree
 */
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
			   u32 *sum, int len)
{
	struct btrfs_ordered_sum *ordered_sum;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	unsigned long num_sectors;
	unsigned long i;
	u32 sectorsize = BTRFS_I(inode)->root->sectorsize;
	int index = 0;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	if (!ordered)
		return 0;

	spin_lock_irq(&tree->lock);
	list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
		if (disk_bytenr >= ordered_sum->bytenr &&
		    disk_bytenr < ordered_sum->bytenr + ordered_sum->len) {
			i = (disk_bytenr - ordered_sum->bytenr) >>
			    inode->i_sb->s_blocksize_bits;
			num_sectors = ordered_sum->len >>
				      inode->i_sb->s_blocksize_bits;
			num_sectors = min_t(int, len - index, num_sectors - i);
			memcpy(sum + index, ordered_sum->sums + i,
			       num_sectors);

			index += (int)num_sectors;
			if (index == len)
				goto out;
			disk_bytenr += num_sectors * sectorsize;
		}
	}
out:
	spin_unlock_irq(&tree->lock);
	btrfs_put_ordered_extent(ordered);
	return index;
}

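/*
 * Module init/exit: create and destroy the slab cache backing all
 * btrfs_ordered_extent allocations.
 */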
int __init ordered_data_init(void)
{
	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
				     sizeof(struct btrfs_ordered_extent), 0,
				     SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
				     NULL);
	if (!btrfs_ordered_extent_cache)
		return -ENOMEM;

	return 0;
}

void ordered_data_exit(void)
{
	if (btrfs_ordered_extent_cache)
		kmem_cache_destroy(btrfs_ordered_extent_cache);
}