/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"

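/*
 * return the end of the range covered by an ordered extent; the sum is
 * clamped to (u64)-1 so an overflowing file_offset + len can't wrap around
 */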
static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->len < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->len;
}

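/*
 * insert a node into the per-inode rb-tree of ordered extents, keyed by
 * file_offset.  If an existing entry already covers file_offset, nothing
 * is inserted and the overlapping node is returned instead of NULL.
 */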
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

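/*
 * search the rb-tree for an ordered extent containing file_offset.  When no
 * entry contains the offset, NULL is returned and *prev_ret is pointed at a
 * neighboring entry, which tree_search() below falls back on.
 */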
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}

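/*
 * helper to check if a given file offset falls inside an ordered extent
 */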
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
	if (file_offset < entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}

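/*
 * look up the rb_node for the ordered extent covering file_offset.  The
 * result of the last lookup is cached in tree->last to short-circuit
 * repeated searches for the same extent.
 */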
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
{
	struct rb_root *root = &tree->tree;
	struct rb_node *prev;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (offset_in_entry(entry, file_offset))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		tree->last = ret;
	return ret;
}

/* allocate and add a new ordered_extent into the per-inode tree.
 * file_offset is the logical offset in the file
 *
 * start is the disk block number of an extent already reserved in the
 * extent allocation tree
 *
 * len is the length of the extent
 *
 * This also sets the EXTENT_ORDERED bit on the range in the inode.
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
			     u64 start, u64 len, int nocow)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;

	tree = &BTRFS_I(inode)->ordered_tree;
	entry = kzalloc(sizeof(*entry), GFP_NOFS);
	if (!entry)
		return -ENOMEM;

	mutex_lock(&tree->mutex);
	entry->file_offset = file_offset;
	entry->start = start;
	entry->len = len;
	entry->inode = inode;
	if (nocow)
		set_bit(BTRFS_ORDERED_NOCOW, &entry->flags);

	/* one ref for the tree */
	atomic_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->root_extent_list);

	node = tree_insert(&tree->tree, file_offset,
			   &entry->rb_node);
	if (node) {
		printk("warning dup entry from add_ordered_extent\n");
		BUG();
	}
	set_extent_ordered(&BTRFS_I(inode)->io_tree, file_offset,
			   entry_end(entry) - 1, GFP_NOFS);

	spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &BTRFS_I(inode)->root->fs_info->ordered_extents);
	spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);

	mutex_unlock(&tree->mutex);
	BUG_ON(node);
	return 0;
}

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiples.
 */
int btrfs_add_ordered_sum(struct inode *inode,
			  struct btrfs_ordered_extent *entry,
			  struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;

	tree = &BTRFS_I(inode)->ordered_tree;
	mutex_lock(&tree->mutex);
	list_add_tail(&sum->list, &entry->list);
	mutex_unlock(&tree->mutex);
	return 0;
}

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO should not span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */
int btrfs_dec_test_ordered_pending(struct inode *inode,
				   u64 file_offset, u64 io_size)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	int ret;

	tree = &BTRFS_I(inode)->ordered_tree;
	mutex_lock(&tree->mutex);
	clear_extent_ordered(io_tree, file_offset, file_offset + io_size - 1,
			     GFP_NOFS);
	node = tree_search(tree, file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset)) {
		ret = 1;
		goto out;
	}

	ret = test_range_bit(io_tree, entry->file_offset,
			     entry->file_offset + entry->len - 1,
			     EXTENT_ORDERED, 0);
	if (ret == 0)
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
out:
	mutex_unlock(&tree->mutex);
	return ret == 0;
}

/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	if (atomic_dec_and_test(&entry->refs)) {
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kfree(sum);
		}
		kfree(entry);
	}
	return 0;
}

/*
 * remove an ordered extent from the tree.  No references are dropped
 * but, anyone waiting on this extent is woken up.
 */
int btrfs_remove_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;

	tree = &BTRFS_I(inode)->ordered_tree;
	mutex_lock(&tree->mutex);
	node = &entry->rb_node;
	rb_erase(node, &tree->tree);
	tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);

	spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);
	spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);

	mutex_unlock(&tree->mutex);
	wake_up(&entry->wait);
	return 0;
}

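/*
 * wait for all the ordered extents currently on the per-fs list to finish.
 * When nocow_only is set, extents without BTRFS_ORDERED_NOCOW are skipped
 * and put back on the list.
 */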
int btrfs_wait_ordered_extents(struct btrfs_root *root, int nocow_only)
{
	struct list_head splice;
	struct list_head *cur;
	struct btrfs_ordered_extent *ordered;
	struct inode *inode;

	INIT_LIST_HEAD(&splice);

	spin_lock(&root->fs_info->ordered_extent_lock);
	list_splice_init(&root->fs_info->ordered_extents, &splice);
	while (!list_empty(&splice)) {
		cur = splice.next;
		ordered = list_entry(cur, struct btrfs_ordered_extent,
				     root_extent_list);
		if (nocow_only &&
		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) {
			list_move(&ordered->root_extent_list,
				  &root->fs_info->ordered_extents);
			cond_resched_lock(&root->fs_info->ordered_extent_lock);
			continue;
		}

		list_del_init(&ordered->root_extent_list);
		atomic_inc(&ordered->refs);

		/*
		 * the inode may be getting freed (in sys_unlink path).
		 */
		inode = igrab(ordered->inode);

		spin_unlock(&root->fs_info->ordered_extent_lock);

		if (inode) {
			btrfs_start_ordered_extent(inode, ordered, 1);
			btrfs_put_ordered_extent(ordered);
			iput(inode);
		} else {
			btrfs_put_ordered_extent(ordered);
		}

		spin_lock(&root->fs_info->ordered_extent_lock);
	}
	spin_unlock(&root->fs_info->ordered_extent_lock);
	return 0;
}

/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry,
				int wait)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->len - 1;

	/*
	 * pages in the range can be dirty, clean or writeback.  We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for pdflush to find them
	 */
	btrfs_fdatawrite_range(inode->i_mapping, start, end, WB_SYNC_NONE);
	if (wait)
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	u64 end;
	u64 orig_end;
	u64 wait_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = INT_LIMIT(loff_t);
	} else {
		orig_end = start + len - 1;
		if (orig_end > INT_LIMIT(loff_t))
			orig_end = INT_LIMIT(loff_t);
	}
	wait_end = orig_end;
again:
	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
	btrfs_fdatawrite_range(inode->i_mapping, start, orig_end, WB_SYNC_NONE);

	btrfs_wait_on_page_writeback_range(inode->i_mapping,
					   start >> PAGE_CACHE_SHIFT,
					   orig_end >> PAGE_CACHE_SHIFT);

	end = orig_end;
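	/*
	 * walk backwards from the end of the range, starting and waiting on
	 * each ordered extent that overlaps it until none are left
	 */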
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, end);
		if (!ordered) {
			break;
		}
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->len < start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(inode, ordered, 1);
		end = ordered->file_offset;
		btrfs_put_ordered_extent(ordered);
		if (end == 0 || end == start)
			break;
		end--;
	}
	if (test_range_bit(&BTRFS_I(inode)->io_tree, start, orig_end,
			   EXTENT_ORDERED | EXTENT_DELALLOC, 0)) {
		printk("inode %lu still ordered or delalloc after wait "
		       "%llu %llu\n", inode->i_ino,
		       (unsigned long long)start,
		       (unsigned long long)orig_end);
		goto again;
	}
}

/*
 * find an ordered extent corresponding to file_offset.  return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	mutex_lock(&tree->mutex);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset))
		entry = NULL;
	if (entry)
		atomic_inc(&entry->refs);
out:
	mutex_unlock(&tree->mutex);
	return entry;
}

/*
 * lookup and return any extent before 'file_offset'.  NULL is returned
 * if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	mutex_lock(&tree->mutex);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	atomic_inc(&entry->refs);
out:
	mutex_unlock(&tree->mutex);
	return entry;
}

/*
 * After an extent is done, call this to conditionally update the on disk
 * i_size.  i_size is updated to cover any fully written part of the file.
 */
int btrfs_ordered_update_i_size(struct inode *inode,
				struct btrfs_ordered_extent *ordered)
{
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	u64 disk_i_size;
	u64 new_i_size;
	u64 i_size_test;
	struct rb_node *node;
	struct btrfs_ordered_extent *test;

	mutex_lock(&tree->mutex);
	disk_i_size = BTRFS_I(inode)->disk_i_size;

	/*
	 * if the disk i_size is already at the inode->i_size, or
	 * this ordered extent is inside the disk i_size, we're done
	 */
	if (disk_i_size >= inode->i_size ||
	    ordered->file_offset + ordered->len <= disk_i_size) {
		goto out;
	}

	/*
	 * we can't update the disk_isize if there are delalloc bytes
	 * between disk_i_size and this ordered extent
	 */
	if (test_range_bit(io_tree, disk_i_size,
			   ordered->file_offset + ordered->len - 1,
			   EXTENT_DELALLOC, 0)) {
		goto out;
	}
	/*
	 * walk backward from this ordered extent to disk_i_size.
	 * if we find an ordered extent then we can't update disk i_size
	 * yet
	 */
	node = &ordered->rb_node;
	while (1) {
		node = rb_prev(node);
		if (!node)
			break;
		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (test->file_offset + test->len <= disk_i_size)
			break;
		if (test->file_offset >= inode->i_size)
			break;
		if (test->file_offset >= disk_i_size)
			goto out;
	}
	new_i_size = min_t(u64, entry_end(ordered), i_size_read(inode));

	/*
	 * at this point, we know we can safely update i_size to at least
	 * the offset from this ordered extent.  But, we need to
	 * walk forward and see if ios from higher up in the file have
	 * finished.
	 */
	node = rb_next(&ordered->rb_node);
	i_size_test = 0;
	if (node) {
		/*
		 * do we have an area where IO might have finished
		 * between our ordered extent and the next one.
		 */
		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (test->file_offset > entry_end(ordered)) {
			i_size_test = test->file_offset;
		}
	} else {
		i_size_test = i_size_read(inode);
	}

	/*
	 * i_size_test is the end of a region after this ordered
	 * extent where there are no ordered extents.  As long as there
	 * are no delalloc bytes in this area, it is safe to update
	 * disk_i_size to the end of the region.
	 */
	if (i_size_test > entry_end(ordered) &&
	    !test_range_bit(io_tree, entry_end(ordered), i_size_test - 1,
			    EXTENT_DELALLOC, 0)) {
		new_i_size = min_t(u64, i_size_test, i_size_read(inode));
	}
	BTRFS_I(inode)->disk_i_size = new_i_size;
out:
	mutex_unlock(&tree->mutex);
	return 0;
}

/*
 * search the ordered extents for one corresponding to 'offset' and
 * try to find a checksum.  This is used because we allow pages to
 * be reclaimed before their checksum is actually put into the btree
 */
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u32 *sum)
{
	struct btrfs_ordered_sum *ordered_sum;
	struct btrfs_sector_sum *sector_sums;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	struct list_head *cur;
	unsigned long num_sectors;
	unsigned long i;
	u32 sectorsize = BTRFS_I(inode)->root->sectorsize;
	int ret = 1;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	if (!ordered)
		return 1;

	mutex_lock(&tree->mutex);
	list_for_each_prev(cur, &ordered->list) {
		ordered_sum = list_entry(cur, struct btrfs_ordered_sum, list);
		if (offset >= ordered_sum->file_offset) {
			num_sectors = ordered_sum->len / sectorsize;
			sector_sums = ordered_sum->sums;
			for (i = 0; i < num_sectors; i++) {
				if (sector_sums[i].offset == offset) {
					*sum = sector_sums[i].sum;
					ret = 0;
					goto out;
				}
			}
		}
	}
out:
	mutex_unlock(&tree->mutex);
	btrfs_put_ordered_extent(ordered);
	return ret;
}


/**
 * taken from mm/filemap.c because it isn't exported
 *
 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
 * @mapping:	address space structure to write
 * @start:	offset in bytes where the range starts
 * @end:	offset in bytes where the range ends (inclusive)
 * @sync_mode:	enable synchronous operation
 *
 * Start writeback against all of a mapping's dirty pages that lie
 * within the byte offsets <start, end> inclusive.
 *
 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
 * opposed to a regular memory cleansing writeback.  The difference between
 * these two operations is that if a dirty page/buffer is encountered, it must
 * be waited upon, and not just skipped over.
 */
int btrfs_fdatawrite_range(struct address_space *mapping, loff_t start,
			   loff_t end, int sync_mode)
{
	struct writeback_control wbc = {
		.sync_mode = sync_mode,
		.nr_to_write = mapping->nrpages * 2,
		.range_start = start,
		.range_end = end,
		.for_writepages = 1,
	};
	return btrfs_writepages(mapping, &wbc);
}

/**
 * taken from mm/filemap.c because it isn't exported
 *
 * wait_on_page_writeback_range - wait for writeback to complete
 * @mapping:	target address_space
 * @start:	beginning page index
 * @end:	ending page index
 *
 * Wait for writeback to complete against pages indexed by start->end
 * inclusive
 */
int btrfs_wait_on_page_writeback_range(struct address_space *mapping,
				       pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	int nr_pages;
	int ret = 0;
	pgoff_t index;

	if (end < start)
		return 0;

	pagevec_init(&pvec, 0);
	index = start;
	while ((index <= end) &&
	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			PAGECACHE_TAG_WRITEBACK,
			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
		unsigned i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (page->index > end)
				continue;

			wait_on_page_writeback(page);
			if (PageError(page))
				ret = -EIO;
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	/* Check for outstanding write errors */
	if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		ret = -ENOSPC;
	if (test_and_clear_bit(AS_EIO, &mapping->flags))
		ret = -EIO;

	return ret;
}