/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
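
/*
 * helper to find the end (file_offset + len) of an ordered extent,
 * saturating at the largest u64 value if the addition overflows
 */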
static u64 entry_end(struct btrfs_ordered_extent *entry)
{
        if (entry->file_offset + entry->len < entry->file_offset)
                return (u64)-1;
        return entry->file_offset + entry->len;
}
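
/*
 * insert an ordered extent into the rb tree.  Returns the node that
 * already covers file_offset, if any, or NULL once the new node has
 * been linked and rebalanced.
 */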
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
                                   struct rb_node *node)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct btrfs_ordered_extent *entry;

        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

                if (file_offset < entry->file_offset)
                        p = &(*p)->rb_left;
                else if (file_offset >= entry_end(entry))
                        p = &(*p)->rb_right;
                else
                        return parent;
        }

        rb_link_node(node, parent, p);
        rb_insert_color(node, root);
        return NULL;
}
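
/*
 * returns the node that contains file_offset, or NULL if none is found.
 * When NULL is returned, *prev_ret is set to the closest extent that
 * ends at or before file_offset (or NULL if there is none).
 */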
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
                                     struct rb_node **prev_ret)
{
        struct rb_node *n = root->rb_node;
        struct rb_node *prev = NULL;
        struct rb_node *test;
        struct btrfs_ordered_extent *entry;
        struct btrfs_ordered_extent *prev_entry = NULL;

        while (n) {
                entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
                prev = n;
                prev_entry = entry;

                if (file_offset < entry->file_offset)
                        n = n->rb_left;
                else if (file_offset >= entry_end(entry))
                        n = n->rb_right;
                else
                        return n;
        }
        if (!prev_ret)
                return NULL;

        /* walk forward to the last entry that ends at or before file_offset */
        while (prev && file_offset >= entry_end(prev_entry)) {
                test = rb_next(prev);
                if (!test)
                        break;
                prev_entry = rb_entry(test, struct btrfs_ordered_extent,
                                      rb_node);
                if (file_offset < entry_end(prev_entry))
                        break;
                prev = test;
        }
        if (prev)
                prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
                                      rb_node);
        /* walk backward while the current entry still ends past file_offset */
        while (prev && file_offset < entry_end(prev_entry)) {
                test = rb_prev(prev);
                if (!test)
                        break;
                prev_entry = rb_entry(test, struct btrfs_ordered_extent,
                                      rb_node);
                prev = test;
        }
        *prev_ret = prev;
        return NULL;
}
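
/*
 * helper to check if a given offset is inside a given entry
 */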
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
        if (file_offset < entry->file_offset ||
            entry->file_offset + entry->len <= file_offset)
                return 0;
        return 1;
}
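
/*
 * find the ordered extent that contains this offset; if none does, fall
 * back to the closest extent that ends before it.  The result is cached
 * in tree->last to speed up repeated lookups.
 */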
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
                                          u64 file_offset)
{
        struct rb_root *root = &tree->tree;
        struct rb_node *prev = NULL;
        struct rb_node *ret;
        struct btrfs_ordered_extent *entry;

        if (tree->last) {
                entry = rb_entry(tree->last, struct btrfs_ordered_extent,
                                 rb_node);
                if (offset_in_entry(entry, file_offset))
                        return tree->last;
        }
        ret = __tree_search(root, file_offset, &prev);
        if (!ret)
                ret = prev;
        if (ret)
                tree->last = ret;
        return ret;
}

/* allocate and add a new ordered_extent into the per-inode tree.
 * file_offset is the logical offset in the file
 *
 * start is the disk block number of an extent already reserved in the
 * extent allocation tree
 *
 * len is the length of the extent
 *
 * This also sets the EXTENT_ORDERED bit on the range in the inode.
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
                             u64 start, u64 len)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry;

        tree = &BTRFS_I(inode)->ordered_tree;
        entry = kzalloc(sizeof(*entry), GFP_NOFS);
        if (!entry)
                return -ENOMEM;

        mutex_lock(&tree->mutex);
        entry->file_offset = file_offset;
        entry->start = start;
        entry->len = len;
        /* one ref for the tree */
        atomic_set(&entry->refs, 1);
        init_waitqueue_head(&entry->wait);
        INIT_LIST_HEAD(&entry->list);

        node = tree_insert(&tree->tree, file_offset,
                           &entry->rb_node);
        if (node) {
                entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
                atomic_inc(&entry->refs);
        }
        set_extent_ordered(&BTRFS_I(inode)->io_tree, file_offset,
                           entry_end(entry) - 1, GFP_NOFS);

        mutex_unlock(&tree->mutex);
        BUG_ON(node);
        return 0;
}

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished. If the list covers more than one
 * ordered extent, it is split across multiples.
 */
int btrfs_add_ordered_sum(struct inode *inode,
                          struct btrfs_ordered_extent *entry,
                          struct btrfs_ordered_sum *sum)
{
        struct btrfs_ordered_inode_tree *tree;

        tree = &BTRFS_I(inode)->ordered_tree;
        mutex_lock(&tree->mutex);
        list_add_tail(&sum->list, &entry->list);
        mutex_unlock(&tree->mutex);
        return 0;
}

/*
 * this is used to account for finished IO across a given range
 * of the file. The IO should not span ordered extents. If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0 is returned.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */
int btrfs_dec_test_ordered_pending(struct inode *inode,
                                   u64 file_offset, u64 io_size)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry;
        struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
        int ret;

        tree = &BTRFS_I(inode)->ordered_tree;
        mutex_lock(&tree->mutex);
        clear_extent_ordered(io_tree, file_offset, file_offset + io_size - 1,
                             GFP_NOFS);
        node = tree_search(tree, file_offset);
        if (!node) {
                ret = 1;
                goto out;
        }

        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
        if (!offset_in_entry(entry, file_offset)) {
                ret = 1;
                goto out;
        }

        ret = test_range_bit(io_tree, entry->file_offset,
                             entry->file_offset + entry->len - 1,
                             EXTENT_ORDERED, 0);
        if (ret == 0)
                ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
out:
        mutex_unlock(&tree->mutex);
        return ret == 0;
}

/*
 * used to drop a reference on an ordered extent. This will free
 * the extent if the last reference is dropped
 */
int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
        struct list_head *cur;
        struct btrfs_ordered_sum *sum;

        if (atomic_dec_and_test(&entry->refs)) {
                while (!list_empty(&entry->list)) {
                        cur = entry->list.next;
                        sum = list_entry(cur, struct btrfs_ordered_sum, list);
                        list_del(&sum->list);
                        kfree(sum);
                }
                kfree(entry);
        }
        return 0;
}

/*
 * remove an ordered extent from the tree. No references are dropped
 * but, anyone waiting on this extent is woken up.
 */
int btrfs_remove_ordered_extent(struct inode *inode,
                                struct btrfs_ordered_extent *entry)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;

        tree = &BTRFS_I(inode)->ordered_tree;
        mutex_lock(&tree->mutex);
        node = &entry->rb_node;
        rb_erase(node, &tree->tree);
        tree->last = NULL;
        set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
        mutex_unlock(&tree->mutex);
        wake_up(&entry->wait);
        return 0;
}

/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct inode *inode,
                                struct btrfs_ordered_extent *entry,
                                int wait)
{
        u64 start = entry->file_offset;
        u64 end = start + entry->len - 1;

        /*
         * pages in the range can be dirty, clean or writeback. We
         * start IO on any dirty ones so the wait doesn't stall waiting
         * for pdflush to find them
         */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
        do_sync_file_range(file, start, end, SYNC_FILE_RANGE_WRITE);
#else
        do_sync_mapping_range(inode->i_mapping, start, end,
                              SYNC_FILE_RANGE_WRITE);
#endif
        if (wait)
                wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
                                                 &entry->flags));
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
        u64 end;
        u64 orig_end;
        u64 wait_end;
        struct btrfs_ordered_extent *ordered;
        u64 mask = BTRFS_I(inode)->root->sectorsize - 1;

        /* if start + len wraps, wait on everything up to the end of the file */
        if (start + len < start) {
                wait_end = (inode->i_size + mask) & ~mask;
                orig_end = (u64)-1;
        } else {
                orig_end = start + len - 1;
                wait_end = orig_end;
        }

        /* start IO across the range first to instantiate any delalloc
         * extents
         */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
        do_sync_file_range(file, start, wait_end, SYNC_FILE_RANGE_WRITE);
#else
        do_sync_mapping_range(inode->i_mapping, start, wait_end,
                              SYNC_FILE_RANGE_WRITE);
#endif
        end = orig_end;
        wait_on_extent_writeback(&BTRFS_I(inode)->io_tree, start, orig_end);

        while (1) {
                ordered = btrfs_lookup_first_ordered_extent(inode, end);
                if (!ordered)
                        break;
                if (ordered->file_offset > orig_end) {
                        btrfs_put_ordered_extent(ordered);
                        break;
                }
                if (ordered->file_offset + ordered->len < start) {
                        btrfs_put_ordered_extent(ordered);
                        break;
                }
                btrfs_start_ordered_extent(inode, ordered, 1);
                end = ordered->file_offset;
                btrfs_put_ordered_extent(ordered);
                if (end == 0 || end == start)
                        break;
                end--;
        }
}

/*
 * find an ordered extent corresponding to file_offset. return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
                                                         u64 file_offset)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;

        tree = &BTRFS_I(inode)->ordered_tree;
        mutex_lock(&tree->mutex);
        node = tree_search(tree, file_offset);
        if (!node)
                goto out;

        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
        if (!offset_in_entry(entry, file_offset))
                entry = NULL;
        if (entry)
                atomic_inc(&entry->refs);
out:
        mutex_unlock(&tree->mutex);
        return entry;
}

/*
 * lookup and return any extent before 'file_offset'. NULL is returned
 * if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;

        tree = &BTRFS_I(inode)->ordered_tree;
        mutex_lock(&tree->mutex);
        node = tree_search(tree, file_offset);
        if (!node)
                goto out;

        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
        atomic_inc(&entry->refs);
out:
        mutex_unlock(&tree->mutex);
        return entry;
}

/*
 * After an extent is done, call this to conditionally update the on disk
 * i_size. i_size is updated to cover any fully written part of the file.
 */
int btrfs_ordered_update_i_size(struct inode *inode,
                                struct btrfs_ordered_extent *ordered)
{
        struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
        struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
        u64 disk_i_size;
        u64 new_i_size;
        u64 i_size_test = 0;
        struct rb_node *node;
        struct btrfs_ordered_extent *test;

        mutex_lock(&tree->mutex);
        disk_i_size = BTRFS_I(inode)->disk_i_size;

        /*
         * if the disk i_size is already at the inode->i_size, or
         * this ordered extent is inside the disk i_size, we're done
         */
        if (disk_i_size >= inode->i_size ||
            ordered->file_offset + ordered->len <= disk_i_size) {
                goto out;
        }

        /*
         * we can't update the disk_i_size if there are delalloc bytes
         * between disk_i_size and this ordered extent
         */
        if (test_range_bit(io_tree, disk_i_size,
                           ordered->file_offset + ordered->len - 1,
                           EXTENT_DELALLOC, 0)) {
                goto out;
        }

        /*
         * walk backward from this ordered extent to disk_i_size.
         * if we find an ordered extent then we can't update disk i_size
         * yet
         */
        node = &ordered->rb_node;
        while (1) {
                node = rb_prev(node);
                if (!node)
                        break;
                test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
                if (test->file_offset + test->len <= disk_i_size)
                        break;
                if (test->file_offset >= inode->i_size)
                        break;
                if (test->file_offset >= disk_i_size)
                        goto out;
        }
        new_i_size = min_t(u64, entry_end(ordered), i_size_read(inode));

        /*
         * at this point, we know we can safely update i_size to at least
         * the offset from this ordered extent. But, we need to
         * walk forward and see if ios from higher up in the file have
         * finished.
         */
        node = rb_next(&ordered->rb_node);
        if (node) {
                /*
                 * do we have an area where IO might have finished
                 * between our ordered extent and the next one.
                 */
                test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
                if (test->file_offset > entry_end(ordered)) {
                        i_size_test = test->file_offset - 1;
                }
        } else {
                i_size_test = i_size_read(inode);
        }

        /*
         * i_size_test is the end of a region after this ordered
         * extent where there are no ordered extents. As long as there
         * are no delalloc bytes in this area, it is safe to update
         * disk_i_size to the end of the region.
         */
        if (i_size_test > entry_end(ordered) &&
            !test_range_bit(io_tree, entry_end(ordered), i_size_test,
                            EXTENT_DELALLOC, 0)) {
                new_i_size = min_t(u64, i_size_test, i_size_read(inode));
        }
        BTRFS_I(inode)->disk_i_size = new_i_size;
out:
        mutex_unlock(&tree->mutex);
        return 0;
}

/*
 * search the ordered extents for one corresponding to 'offset' and
 * try to find a checksum. This is used because we allow pages to
 * be reclaimed before their checksum is actually put into the btree
 */
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u32 *sum)
{
        struct btrfs_ordered_sum *ordered_sum;
        struct btrfs_sector_sum *sector_sums;
        struct btrfs_ordered_extent *ordered;
        struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
        struct list_head *cur;
        unsigned long num_sectors;
        unsigned long i;
        u32 sectorsize = BTRFS_I(inode)->root->sectorsize;
        int ret = 1;

        ordered = btrfs_lookup_ordered_extent(inode, offset);
        if (!ordered)
                return 1;

        mutex_lock(&tree->mutex);
        list_for_each_prev(cur, &ordered->list) {
                ordered_sum = list_entry(cur, struct btrfs_ordered_sum, list);
                if (offset >= ordered_sum->file_offset) {
                        num_sectors = ordered_sum->len / sectorsize;
                        sector_sums = &ordered_sum->sums;
                        for (i = 0; i < num_sectors; i++) {
                                if (sector_sums[i].offset == offset) {
                                        printk("find ordered sum inode %lu offset %Lu\n", inode->i_ino, offset);
                                        *sum = sector_sums[i].sum;
                                        ret = 0;
                                        goto out;
                                }
                        }
                }
        }
out:
        mutex_unlock(&tree->mutex);
        btrfs_put_ordered_extent(ordered);
        return ret;
}