Btrfs: use a cached state for extent state operations during delalloc
[linux-2.6-block.git] / fs / btrfs / ordered-data.c
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"

static u64 entry_end(struct btrfs_ordered_extent *entry)
{
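	/*
	 * file_offset + len can wrap for an extent that runs to the end of
	 * the u64 address space; clamp to (u64)-1 so callers never see a
	 * wrapped-around end offset.
	 */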
	if (entry->file_offset + entry->len < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->len;
}

/* returns NULL if the insertion worked, or it returns the node it did find
 * in the tree
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

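	/*
	 * the tree is indexed by file_offset, and ordered extents never
	 * overlap, so any offset inside [file_offset, entry_end(entry))
	 * lands on exactly one existing node.
	 */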
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

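	/*
	 * no exact match: prev is the last node visited on the way down.
	 * Walk forward past entries that end at or before file_offset, then
	 * back past entries that lie entirely beyond it, so *prev_ret ends
	 * up at the nearest entry before file_offset (or NULL).
	 */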
	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}

/*
 * helper to check if a given offset is inside a given entry
 */
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
	if (file_offset < entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}

/*
 * look for the first ordered struct that has this offset, otherwise
 * the first one less than this offset
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
{
	struct rb_root *root = &tree->tree;
	struct rb_node *prev;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

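	/*
	 * tree->last caches the node returned by the previous search; a hit
	 * here lets repeated lookups in the same extent skip the rbtree walk.
	 */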
	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (offset_in_entry(entry, file_offset))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		tree->last = ret;
	return ret;
}

/* allocate and add a new ordered_extent into the per-inode tree.
 * file_offset is the logical offset in the file
 *
 * start is the disk block number of an extent already reserved in the
 * extent allocation tree
 *
 * len is the length of the extent
 *
 * This also sets the EXTENT_ORDERED bit on the range in the inode.
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
			     u64 start, u64 len, u64 disk_len, int type)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;

	tree = &BTRFS_I(inode)->ordered_tree;
	entry = kzalloc(sizeof(*entry), GFP_NOFS);
	if (!entry)
		return -ENOMEM;

	mutex_lock(&tree->mutex);
	entry->file_offset = file_offset;
	entry->start = start;
	entry->len = len;
	entry->disk_len = disk_len;
	entry->inode = inode;
	if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
		set_bit(type, &entry->flags);

	/* one ref for the tree */
	atomic_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->root_extent_list);

	node = tree_insert(&tree->tree, file_offset,
			   &entry->rb_node);
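	/*
	 * a non-NULL return means an ordered extent already covered part of
	 * this range; that should never happen, so treat it as a hard bug.
	 */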
	BUG_ON(node);

	set_extent_ordered(&BTRFS_I(inode)->io_tree, file_offset,
			   entry_end(entry) - 1, GFP_NOFS);

	spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &BTRFS_I(inode)->root->fs_info->ordered_extents);
	spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);

	mutex_unlock(&tree->mutex);
	BUG_ON(node);
	return 0;
}

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished. If the list covers more than one
 * ordered extent, it is split across multiples.
 */
int btrfs_add_ordered_sum(struct inode *inode,
			  struct btrfs_ordered_extent *entry,
			  struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;

	tree = &BTRFS_I(inode)->ordered_tree;
	mutex_lock(&tree->mutex);
	list_add_tail(&sum->list, &entry->list);
	mutex_unlock(&tree->mutex);
	return 0;
}

/*
 * this is used to account for finished IO across a given range
 * of the file. The IO should not span ordered extents. If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */
int btrfs_dec_test_ordered_pending(struct inode *inode,
				   u64 file_offset, u64 io_size)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	int ret;

	tree = &BTRFS_I(inode)->ordered_tree;
	mutex_lock(&tree->mutex);
	clear_extent_ordered(io_tree, file_offset, file_offset + io_size - 1,
			     GFP_NOFS);
	node = tree_search(tree, file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset)) {
		ret = 1;
		goto out;
	}

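	/*
	 * the EXTENT_ORDERED bits for this IO were cleared above; if none
	 * are left anywhere in the extent, all of its IO has completed and
	 * exactly one caller sees the IO_DONE transition and returns 1.
	 */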
	ret = test_range_bit(io_tree, entry->file_offset,
			     entry->file_offset + entry->len - 1,
			     EXTENT_ORDERED, 0, NULL);
	if (ret == 0)
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
out:
	mutex_unlock(&tree->mutex);
	return ret == 0;
}

/*
 * used to drop a reference on an ordered extent. This will free
 * the extent if the last reference is dropped
 */
int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	if (atomic_dec_and_test(&entry->refs)) {
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kfree(sum);
		}
		kfree(entry);
	}
	return 0;
}

/*
 * remove an ordered extent from the tree. No references are dropped,
 * but anyone waiting on this extent is woken up.
 */
int btrfs_remove_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;

	tree = &BTRFS_I(inode)->ordered_tree;
	mutex_lock(&tree->mutex);
	node = &entry->rb_node;
	rb_erase(node, &tree->tree);
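	/* invalidate the cached lookup hint; it may point at the erased node */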
	tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);

	spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);

	/*
	 * we have no more ordered extents for this inode and
	 * no dirty pages. We can safely remove it from the
	 * list of ordered extents
	 */
	if (RB_EMPTY_ROOT(&tree->tree) &&
	    !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
		list_del_init(&BTRFS_I(inode)->ordered_operations);
	}
	spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);

	mutex_unlock(&tree->mutex);
	wake_up(&entry->wait);
	return 0;
}

/*
 * wait for all the ordered extents in a root. This is done when balancing
 * space between drives.
 */
int btrfs_wait_ordered_extents(struct btrfs_root *root, int nocow_only)
{
	struct list_head splice;
	struct list_head *cur;
	struct btrfs_ordered_extent *ordered;
	struct inode *inode;

	INIT_LIST_HEAD(&splice);

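	/*
	 * splice the whole ordered_extents list onto a private head so the
	 * lock can be dropped while each extent is waited on; entries we
	 * decide to skip are moved back onto the fs_info list.
	 */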
	spin_lock(&root->fs_info->ordered_extent_lock);
	list_splice_init(&root->fs_info->ordered_extents, &splice);
	while (!list_empty(&splice)) {
		cur = splice.next;
		ordered = list_entry(cur, struct btrfs_ordered_extent,
				     root_extent_list);
		if (nocow_only &&
		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags) &&
		    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) {
			list_move(&ordered->root_extent_list,
				  &root->fs_info->ordered_extents);
			cond_resched_lock(&root->fs_info->ordered_extent_lock);
			continue;
		}

		list_del_init(&ordered->root_extent_list);
		atomic_inc(&ordered->refs);

		/*
		 * the inode may be getting freed (in sys_unlink path).
		 */
		inode = igrab(ordered->inode);

		spin_unlock(&root->fs_info->ordered_extent_lock);

		if (inode) {
			btrfs_start_ordered_extent(inode, ordered, 1);
			btrfs_put_ordered_extent(ordered);
			iput(inode);
		} else {
			btrfs_put_ordered_extent(ordered);
		}

		spin_lock(&root->fs_info->ordered_extent_lock);
	}
	spin_unlock(&root->fs_info->ordered_extent_lock);
	return 0;
}

/*
 * this is used during transaction commit to write all the inodes
 * added to the ordered operation list. These files must be fully on
 * disk before the transaction commits.
 *
 * we have two modes here. One is to just start the IO via filemap_flush
 * and the other is to wait for all the io. When we wait, we have an
 * extra check to make sure the ordered operation list really is empty
 * before we return
 */
int btrfs_run_ordered_operations(struct btrfs_root *root, int wait)
{
	struct btrfs_inode *btrfs_inode;
	struct inode *inode;
	struct list_head splice;

	INIT_LIST_HEAD(&splice);

	mutex_lock(&root->fs_info->ordered_operations_mutex);
	spin_lock(&root->fs_info->ordered_extent_lock);
again:
	list_splice_init(&root->fs_info->ordered_operations, &splice);

	while (!list_empty(&splice)) {
		btrfs_inode = list_entry(splice.next, struct btrfs_inode,
					 ordered_operations);

		inode = &btrfs_inode->vfs_inode;

		list_del_init(&btrfs_inode->ordered_operations);

		/*
		 * the inode may be getting freed (in sys_unlink path).
		 */
		inode = igrab(inode);

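		/*
		 * in flush-only mode the inode goes back on the list so a
		 * later waiting pass still sees it.
		 */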
		if (!wait && inode) {
			list_add_tail(&BTRFS_I(inode)->ordered_operations,
				      &root->fs_info->ordered_operations);
		}
		spin_unlock(&root->fs_info->ordered_extent_lock);

		if (inode) {
			if (wait)
				btrfs_wait_ordered_range(inode, 0, (u64)-1);
			else
				filemap_flush(inode->i_mapping);
			iput(inode);
		}

		cond_resched();
		spin_lock(&root->fs_info->ordered_extent_lock);
	}
	if (wait && !list_empty(&root->fs_info->ordered_operations))
		goto again;

	spin_unlock(&root->fs_info->ordered_extent_lock);
	mutex_unlock(&root->fs_info->ordered_operations_mutex);

	return 0;
}

/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry,
				int wait)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->len - 1;

	/*
	 * pages in the range can be dirty, clean or writeback. We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for pdflush to find them
	 */
	btrfs_fdatawrite_range(inode->i_mapping, start, end, WB_SYNC_ALL);
	if (wait) {
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
	}
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	u64 end;
	u64 orig_end;
	u64 wait_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = INT_LIMIT(loff_t);
	} else {
		orig_end = start + len - 1;
		if (orig_end > INT_LIMIT(loff_t))
			orig_end = INT_LIMIT(loff_t);
	}
	wait_end = orig_end;
again:
	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
	btrfs_fdatawrite_range(inode->i_mapping, start, orig_end, WB_SYNC_ALL);

	/* The compression code will leave pages locked but return from
	 * writepage without setting the page writeback. Starting again
	 * with WB_SYNC_ALL will end up waiting for the IO to actually start.
	 */
	btrfs_fdatawrite_range(inode->i_mapping, start, orig_end, WB_SYNC_ALL);

	btrfs_wait_on_page_writeback_range(inode->i_mapping,
					   start >> PAGE_CACHE_SHIFT,
					   orig_end >> PAGE_CACHE_SHIFT);

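	/*
	 * walk the ordered extents backwards from orig_end toward start,
	 * waiting on each one in turn; the test_range_bit check below
	 * retries the whole pass if delalloc or ordered data shows up in
	 * the range while we slept.
	 */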
	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->len < start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(inode, ordered, 1);
		end = ordered->file_offset;
		btrfs_put_ordered_extent(ordered);
		if (end == 0 || end == start)
			break;
		end--;
	}
	if (test_range_bit(&BTRFS_I(inode)->io_tree, start, orig_end,
			   EXTENT_ORDERED | EXTENT_DELALLOC, 0, NULL)) {
		schedule_timeout(1);
		goto again;
	}
	return 0;
}

/*
 * find an ordered extent corresponding to file_offset. return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	mutex_lock(&tree->mutex);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset))
		entry = NULL;
	if (entry)
		atomic_inc(&entry->refs);
out:
	mutex_unlock(&tree->mutex);
	return entry;
}

/*
 * lookup and return any extent before 'file_offset'. NULL is returned
 * if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	mutex_lock(&tree->mutex);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	atomic_inc(&entry->refs);
out:
	mutex_unlock(&tree->mutex);
	return entry;
}

/*
 * After an extent is done, call this to conditionally update the on disk
 * i_size. i_size is updated to cover any fully written part of the file.
 */
int btrfs_ordered_update_i_size(struct inode *inode,
				struct btrfs_ordered_extent *ordered)
{
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	u64 disk_i_size;
	u64 new_i_size;
	u64 i_size_test;
	struct rb_node *node;
	struct btrfs_ordered_extent *test;

	mutex_lock(&tree->mutex);
	disk_i_size = BTRFS_I(inode)->disk_i_size;

	/*
	 * if the disk i_size is already at the inode->i_size, or
	 * this ordered extent is inside the disk i_size, we're done
	 */
	if (disk_i_size >= inode->i_size ||
	    ordered->file_offset + ordered->len <= disk_i_size) {
		goto out;
	}

	/*
	 * we can't update the disk_i_size if there are delalloc bytes
	 * between disk_i_size and this ordered extent
	 */
	if (test_range_bit(io_tree, disk_i_size,
			   ordered->file_offset + ordered->len - 1,
			   EXTENT_DELALLOC, 0, NULL)) {
		goto out;
	}
	/*
	 * walk backward from this ordered extent to disk_i_size.
	 * if we find an ordered extent then we can't update disk i_size
	 * yet
	 */
	node = &ordered->rb_node;
	while (1) {
		node = rb_prev(node);
		if (!node)
			break;
		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (test->file_offset + test->len <= disk_i_size)
			break;
		if (test->file_offset >= inode->i_size)
			break;
		if (test->file_offset >= disk_i_size)
			goto out;
	}
	new_i_size = min_t(u64, entry_end(ordered), i_size_read(inode));

	/*
	 * at this point, we know we can safely update i_size to at least
	 * the offset from this ordered extent. But, we need to
	 * walk forward and see if ios from higher up in the file have
	 * finished.
	 */
	node = rb_next(&ordered->rb_node);
	i_size_test = 0;
	if (node) {
		/*
		 * do we have an area where IO might have finished
		 * between our ordered extent and the next one.
		 */
		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (test->file_offset > entry_end(ordered))
			i_size_test = test->file_offset;
	} else {
		i_size_test = i_size_read(inode);
	}

	/*
	 * i_size_test is the end of a region after this ordered
	 * extent where there are no ordered extents. As long as there
	 * are no delalloc bytes in this area, it is safe to update
	 * disk_i_size to the end of the region.
	 */
	if (i_size_test > entry_end(ordered) &&
	    !test_range_bit(io_tree, entry_end(ordered), i_size_test - 1,
			    EXTENT_DELALLOC, 0, NULL)) {
		new_i_size = min_t(u64, i_size_test, i_size_read(inode));
	}
	BTRFS_I(inode)->disk_i_size = new_i_size;
out:
	mutex_unlock(&tree->mutex);
	return 0;
}

/*
 * search the ordered extents for one corresponding to 'offset' and
 * try to find a checksum. This is used because we allow pages to
 * be reclaimed before their checksum is actually put into the btree
 */
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
			   u32 *sum)
{
	struct btrfs_ordered_sum *ordered_sum;
	struct btrfs_sector_sum *sector_sums;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	unsigned long num_sectors;
	unsigned long i;
	u32 sectorsize = BTRFS_I(inode)->root->sectorsize;
	int ret = 1;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	if (!ordered)
		return 1;

	mutex_lock(&tree->mutex);
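	/*
	 * each btrfs_ordered_sum on the list covers a run of sectors starting
	 * at ordered_sum->bytenr; scan the per-sector sums in that run for an
	 * exact disk_bytenr match.
	 */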
	list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
		if (disk_bytenr >= ordered_sum->bytenr) {
			num_sectors = ordered_sum->len / sectorsize;
			sector_sums = ordered_sum->sums;
			for (i = 0; i < num_sectors; i++) {
				if (sector_sums[i].bytenr == disk_bytenr) {
					*sum = sector_sums[i].sum;
					ret = 0;
					goto out;
				}
			}
		}
	}
out:
	mutex_unlock(&tree->mutex);
	btrfs_put_ordered_extent(ordered);
	return ret;
}


/**
 * taken from mm/filemap.c because it isn't exported
 *
 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
 * @mapping:	address space structure to write
 * @start:	offset in bytes where the range starts
 * @end:	offset in bytes where the range ends (inclusive)
 * @sync_mode:	enable synchronous operation
 *
 * Start writeback against all of a mapping's dirty pages that lie
 * within the byte offsets <start, end> inclusive.
 *
 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
 * opposed to a regular memory cleansing writeback. The difference between
 * these two operations is that if a dirty page/buffer is encountered, it must
 * be waited upon, and not just skipped over.
 */
int btrfs_fdatawrite_range(struct address_space *mapping, loff_t start,
			   loff_t end, int sync_mode)
{
	struct writeback_control wbc = {
		.sync_mode = sync_mode,
		.nr_to_write = mapping->nrpages * 2,
		.range_start = start,
		.range_end = end,
		.for_writepages = 1,
	};
	return btrfs_writepages(mapping, &wbc);
}

/**
 * taken from mm/filemap.c because it isn't exported
 *
 * wait_on_page_writeback_range - wait for writeback to complete
 * @mapping:	target address_space
 * @start:	beginning page index
 * @end:	ending page index
 *
 * Wait for writeback to complete against pages indexed by start->end
 * inclusive
 */
int btrfs_wait_on_page_writeback_range(struct address_space *mapping,
				       pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	int nr_pages;
	int ret = 0;
	pgoff_t index;

	if (end < start)
		return 0;

	pagevec_init(&pvec, 0);
	index = start;
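	/*
	 * walk the range in PAGEVEC_SIZE batches, looking up only pages
	 * tagged writeback and waiting on each one; the tag lookup advances
	 * index for us.
	 */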
	while ((index <= end) &&
	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			PAGECACHE_TAG_WRITEBACK,
			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
		unsigned i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (page->index > end)
				continue;

			wait_on_page_writeback(page);
			if (PageError(page))
				ret = -EIO;
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	/* Check for outstanding write errors */
	if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		ret = -ENOSPC;
	if (test_and_clear_bit(AS_EIO, &mapping->flags))
		ret = -EIO;

	return ret;
}

/*
 * add a given inode to the list of inodes that must be fully on
 * disk before a transaction commit finishes.
 *
 * This basically gives us the ext3 style data=ordered mode, and it is mostly
 * used to make sure renamed files are fully on disk.
 *
 * It is a noop if the inode is already fully on disk.
 *
 * If trans is not null, we'll do a friendly check for a transaction that
 * is already flushing things and force the IO down ourselves.
 */
int btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct inode *inode)
{
	u64 last_mod;

	last_mod = max(BTRFS_I(inode)->generation, BTRFS_I(inode)->last_trans);

	/*
	 * if this file hasn't been changed since the last transaction
	 * commit, we can safely return without doing anything
	 */
	if (last_mod < root->fs_info->last_trans_committed)
		return 0;

	/*
	 * the transaction is already committing. Just start the IO and
	 * don't bother with all of this list nonsense
	 */
	if (trans && root->fs_info->running_transaction->blocked) {
		btrfs_wait_ordered_range(inode, 0, (u64)-1);
		return 0;
	}

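	/*
	 * only add the inode once; an empty ordered_operations entry means
	 * it isn't on the per-fs list yet.
	 */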
	spin_lock(&root->fs_info->ordered_extent_lock);
	if (list_empty(&BTRFS_I(inode)->ordered_operations)) {
		list_add_tail(&BTRFS_I(inode)->ordered_operations,
			      &root->fs_info->ordered_operations);
	}
	spin_unlock(&root->fs_info->ordered_extent_lock);

	return 0;
}