1 #include <linux/bitops.h>
2 #include <linux/slab.h>
6 #include <linux/pagemap.h>
7 #include <linux/page-flags.h>
8 #include <linux/module.h>
9 #include <linux/spinlock.h>
10 #include <linux/blkdev.h>
11 #include <linux/swap.h>
12 #include <linux/version.h>
13 #include <linux/writeback.h>
14 #include <linux/pagevec.h>
15 #include "extent_io.h"
16 #include "extent_map.h"
19 #include "btrfs_inode.h"
21 /* temporary define until extent_map moves out of btrfs */
22 struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
23 unsigned long extra_flags,
24 void (*ctor)(void *, struct kmem_cache *,
27 static struct kmem_cache *extent_state_cache;
28 static struct kmem_cache *extent_buffer_cache;
30 static LIST_HEAD(buffers);
31 static LIST_HEAD(states);
34 static DEFINE_SPINLOCK(leak_lock);
37 #define BUFFER_LRU_MAX 64
42 struct rb_node rb_node;
45 struct extent_page_data {
47 struct extent_io_tree *tree;
48 get_extent_t *get_extent;
51 int __init extent_io_init(void)
53 extent_state_cache = btrfs_cache_create("extent_state",
54 sizeof(struct extent_state), 0,
56 if (!extent_state_cache)
59 extent_buffer_cache = btrfs_cache_create("extent_buffers",
60 sizeof(struct extent_buffer), 0,
62 if (!extent_buffer_cache)
63 goto free_state_cache;
67 kmem_cache_destroy(extent_state_cache);
71 void extent_io_exit(void)
73 struct extent_state *state;
74 struct extent_buffer *eb;
76 while (!list_empty(&states)) {
77 state = list_entry(states.next, struct extent_state, leak_list);
78 printk("state leak: start %Lu end %Lu state %lu in tree %p refs %d\n", state->start, state->end, state->state, state->tree, atomic_read(&state->refs));
79 list_del(&state->leak_list);
80 kmem_cache_free(extent_state_cache, state);
84 while (!list_empty(&buffers)) {
85 eb = list_entry(buffers.next, struct extent_buffer, leak_list);
86 printk("buffer leak start %Lu len %lu refs %d\n", eb->start, eb->len, atomic_read(&eb->refs));
87 list_del(&eb->leak_list);
88 kmem_cache_free(extent_buffer_cache, eb);
90 if (extent_state_cache)
91 kmem_cache_destroy(extent_state_cache);
92 if (extent_buffer_cache)
93 kmem_cache_destroy(extent_buffer_cache);
96 void extent_io_tree_init(struct extent_io_tree *tree,
97 struct address_space *mapping, gfp_t mask)
99 tree->state.rb_node = NULL;
100 tree->buffer.rb_node = NULL;
102 tree->dirty_bytes = 0;
103 spin_lock_init(&tree->lock);
104 spin_lock_init(&tree->buffer_lock);
105 tree->mapping = mapping;
107 EXPORT_SYMBOL(extent_io_tree_init);
109 struct extent_state *alloc_extent_state(gfp_t mask)
111 struct extent_state *state;
116 state = kmem_cache_alloc(extent_state_cache, mask);
123 spin_lock_irqsave(&leak_lock, flags);
124 list_add(&state->leak_list, &states);
125 spin_unlock_irqrestore(&leak_lock, flags);
127 atomic_set(&state->refs, 1);
128 init_waitqueue_head(&state->wq);
131 EXPORT_SYMBOL(alloc_extent_state);
133 void free_extent_state(struct extent_state *state)
137 if (atomic_dec_and_test(&state->refs)) {
141 WARN_ON(state->tree);
143 spin_lock_irqsave(&leak_lock, flags);
144 list_del(&state->leak_list);
145 spin_unlock_irqrestore(&leak_lock, flags);
147 kmem_cache_free(extent_state_cache, state);
150 EXPORT_SYMBOL(free_extent_state);
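/*
 * insert 'node' into the rb-tree rooted at 'root', keyed by 'offset'
 * (the end of the range being inserted).  Returns NULL on success, or
 * the existing node if 'offset' already falls inside a range in the tree.
 */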
152 static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
153 struct rb_node *node)
155 struct rb_node ** p = &root->rb_node;
156 struct rb_node * parent = NULL;
157 struct tree_entry *entry;
161 entry = rb_entry(parent, struct tree_entry, rb_node);
163 if (offset < entry->start)
165 else if (offset > entry->end)
171 entry = rb_entry(node, struct tree_entry, rb_node);
172 rb_link_node(node, parent, p);
173 rb_insert_color(node, root);
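/*
 * search the state tree for the entry containing 'offset'.  If no entry
 * contains it, NULL is returned and, when the pointers are supplied,
 * *prev_ret / *next_ret are set to the nearest entries after and before
 * 'offset' so callers still get a useful starting point.
 */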
177 static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
178 struct rb_node **prev_ret,
179 struct rb_node **next_ret)
181 struct rb_root *root = &tree->state;
182 struct rb_node * n = root->rb_node;
183 struct rb_node *prev = NULL;
184 struct rb_node *orig_prev = NULL;
185 struct tree_entry *entry;
186 struct tree_entry *prev_entry = NULL;
189 entry = rb_entry(n, struct tree_entry, rb_node);
193 if (offset < entry->start)
195 else if (offset > entry->end)
204 while(prev && offset > prev_entry->end) {
205 prev = rb_next(prev);
206 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
213 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
214 while(prev && offset < prev_entry->start) {
215 prev = rb_prev(prev);
216 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
223 static inline struct rb_node *tree_search(struct extent_io_tree *tree,
226 struct rb_node *prev = NULL;
229 ret = __etree_search(tree, offset, &prev, NULL);
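/*
 * the buffer rb-tree tracks extent_buffers by their start offset.
 * buffer_tree_insert returns an existing buffer if one is already present
 * at 'offset', and buffer_search does the read-only lookup.
 */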
236 static struct extent_buffer *buffer_tree_insert(struct extent_io_tree *tree,
237 u64 offset, struct rb_node *node)
239 struct rb_root *root = &tree->buffer;
240 struct rb_node ** p = &root->rb_node;
241 struct rb_node * parent = NULL;
242 struct extent_buffer *eb;
246 eb = rb_entry(parent, struct extent_buffer, rb_node);
248 if (offset < eb->start)
250 else if (offset > eb->start)
256 rb_link_node(node, parent, p);
257 rb_insert_color(node, root);
261 static struct extent_buffer *buffer_search(struct extent_io_tree *tree,
264 struct rb_root *root = &tree->buffer;
265 struct rb_node * n = root->rb_node;
266 struct extent_buffer *eb;
269 eb = rb_entry(n, struct extent_buffer, rb_node);
270 if (offset < eb->start)
272 else if (offset > eb->start)
281 * utility function to look for merge candidates inside a given range.
282 * Any extents with matching state are merged together into a single
283 * extent in the tree. Extents with EXTENT_IOBITS (locked or writeback) in their state field
284 * are not merged because the end_io handlers need to be able to do
285 * operations on them without sleeping (or doing allocations/splits).
287 * This should be called with the tree lock held.
289 static int merge_state(struct extent_io_tree *tree,
290 struct extent_state *state)
292 struct extent_state *other;
293 struct rb_node *other_node;
295 if (state->state & EXTENT_IOBITS)
298 other_node = rb_prev(&state->rb_node);
300 other = rb_entry(other_node, struct extent_state, rb_node);
301 if (other->end == state->start - 1 &&
302 other->state == state->state) {
303 state->start = other->start;
305 rb_erase(&other->rb_node, &tree->state);
306 free_extent_state(other);
309 other_node = rb_next(&state->rb_node);
311 other = rb_entry(other_node, struct extent_state, rb_node);
312 if (other->start == state->end + 1 &&
313 other->state == state->state) {
314 other->start = state->start;
316 rb_erase(&state->rb_node, &tree->state);
317 free_extent_state(state);
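/*
 * set_state_cb/clear_state_cb forward bit changes to the tree's
 * set_bit_hook/clear_bit_hook so the filesystem can do per-inode
 * accounting (eg delalloc byte counts) as ranges change state.
 */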
323 static void set_state_cb(struct extent_io_tree *tree,
324 struct extent_state *state,
327 if (tree->ops && tree->ops->set_bit_hook) {
328 tree->ops->set_bit_hook(tree->mapping->host, state->start,
329 state->end, state->state, bits);
333 static void clear_state_cb(struct extent_io_tree *tree,
334 struct extent_state *state,
337 if (tree->ops && tree->ops->clear_bit_hook) {
338 tree->ops->clear_bit_hook(tree->mapping->host, state->start,
339 state->end, state->state, bits);
344 * insert an extent_state struct into the tree. 'bits' are set on the
345 * struct before it is inserted.
347 * This may return -EEXIST if the extent is already there, in which case the
348 * state struct is freed.
350 * The tree lock is not taken internally. This is a utility function and
351 * probably isn't what you want to call (see set/clear_extent_bit).
353 static int insert_state(struct extent_io_tree *tree,
354 struct extent_state *state, u64 start, u64 end,
357 struct rb_node *node;
360 printk("end < start %Lu %Lu\n", end, start);
363 if (bits & EXTENT_DIRTY)
364 tree->dirty_bytes += end - start + 1;
365 set_state_cb(tree, state, bits);
366 state->state |= bits;
367 state->start = start;
369 node = tree_insert(&tree->state, end, &state->rb_node);
371 struct extent_state *found;
372 found = rb_entry(node, struct extent_state, rb_node);
373 printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, start, end);
374 free_extent_state(state);
378 merge_state(tree, state);
383 * split a given extent state struct in two, inserting the preallocated
384 * struct 'prealloc' as the newly created second half. 'split' indicates an
385 * offset inside 'orig' where it should be split.
388 * the tree has 'orig' at [orig->start, orig->end]. After calling, there
389 * are two extent state structs in the tree:
390 * prealloc: [orig->start, split - 1]
391 * orig: [ split, orig->end ]
393 The tree locks are not taken by this function. They need to be held by the caller.
396 static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
397 struct extent_state *prealloc, u64 split)
399 struct rb_node *node;
400 prealloc->start = orig->start;
401 prealloc->end = split - 1;
402 prealloc->state = orig->state;
405 node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
407 struct extent_state *found;
408 found = rb_entry(node, struct extent_state, rb_node);
409 printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, prealloc->start, prealloc->end);
410 free_extent_state(prealloc);
413 prealloc->tree = tree;
418 * utility function to clear some bits in an extent state struct.
419 * it will optionally wake up any one waiting on this state (wake == 1), or
420 * forcibly remove the state from the tree (delete == 1).
422 * If no bits are set on the state struct after clearing things, the
423 * struct is freed and removed from the tree
425 static int clear_state_bit(struct extent_io_tree *tree,
426 struct extent_state *state, int bits, int wake,
429 int ret = state->state & bits;
431 if ((bits & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
432 u64 range = state->end - state->start + 1;
433 WARN_ON(range > tree->dirty_bytes);
434 tree->dirty_bytes -= range;
436 clear_state_cb(tree, state, bits);
437 state->state &= ~bits;
440 if (delete || state->state == 0) {
442 clear_state_cb(tree, state, state->state);
443 rb_erase(&state->rb_node, &tree->state);
445 free_extent_state(state);
450 merge_state(tree, state);
456 * clear some bits on a range in the tree. This may require splitting
457 * or inserting elements in the tree, so the gfp mask is used to
458 * indicate which allocations or sleeping are allowed.
460 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
461 * the given range from the tree regardless of state (ie for truncate).
463 * the range [start, end] is inclusive.
465 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
466 * bits were already set, or zero if none of the bits were already set.
468 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
469 int bits, int wake, int delete, gfp_t mask)
471 struct extent_state *state;
472 struct extent_state *prealloc = NULL;
473 struct rb_node *node;
479 if (!prealloc && (mask & __GFP_WAIT)) {
480 prealloc = alloc_extent_state(mask);
485 spin_lock_irqsave(&tree->lock, flags);
487 * this search will find the extents that end after
490 node = tree_search(tree, start);
493 state = rb_entry(node, struct extent_state, rb_node);
494 if (state->start > end)
496 WARN_ON(state->end < start);
499 * | ---- desired range ---- |
501 * | ------------- state -------------- |
503 * We need to split the extent we found, and may flip
504 * bits on second half.
506 * If the extent we found extends past our range, we
507 * just split and search again. It'll get split again
508 * the next time though.
510 * If the extent we found is inside our range, we clear
511 * the desired bit on it.
514 if (state->start < start) {
516 prealloc = alloc_extent_state(GFP_ATOMIC);
517 err = split_state(tree, state, prealloc, start);
518 BUG_ON(err == -EEXIST);
522 if (state->end <= end) {
523 start = state->end + 1;
524 set |= clear_state_bit(tree, state, bits,
527 start = state->start;
532 * | ---- desired range ---- |
534 * We need to split the extent, and clear the bit
537 if (state->start <= end && state->end > end) {
539 prealloc = alloc_extent_state(GFP_ATOMIC);
540 err = split_state(tree, state, prealloc, end + 1);
541 BUG_ON(err == -EEXIST);
545 set |= clear_state_bit(tree, prealloc, bits,
551 start = state->end + 1;
552 set |= clear_state_bit(tree, state, bits, wake, delete);
556 spin_unlock_irqrestore(&tree->lock, flags);
558 free_extent_state(prealloc);
565 spin_unlock_irqrestore(&tree->lock, flags);
566 if (mask & __GFP_WAIT)
570 EXPORT_SYMBOL(clear_extent_bit);
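/*
 * helper for wait_extent_bit: drop tree->lock, sleep until state->wq is
 * woken (the bits being waited on were cleared), then retake the lock.
 * Must be called with tree->lock held.
 */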
572 static int wait_on_state(struct extent_io_tree *tree,
573 struct extent_state *state)
576 prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
577 spin_unlock_irq(&tree->lock);
579 spin_lock_irq(&tree->lock);
580 finish_wait(&state->wq, &wait);
585 * waits for one or more bits to clear on a range in the state tree.
586 * The range [start, end] is inclusive.
587 * The tree lock is taken by this function
589 int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
591 struct extent_state *state;
592 struct rb_node *node;
594 spin_lock_irq(&tree->lock);
598 * this search will find all the extents that end after
601 node = tree_search(tree, start);
605 state = rb_entry(node, struct extent_state, rb_node);
607 if (state->start > end)
610 if (state->state & bits) {
611 start = state->start;
612 atomic_inc(&state->refs);
613 wait_on_state(tree, state);
614 free_extent_state(state);
617 start = state->end + 1;
622 if (need_resched()) {
623 spin_unlock_irq(&tree->lock);
625 spin_lock_irq(&tree->lock);
629 spin_unlock_irq(&tree->lock);
632 EXPORT_SYMBOL(wait_extent_bit);
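/*
 * set some bits on a single extent_state, updating the tree's
 * dirty_bytes accounting and calling the set_bit_hook as needed.
 */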
634 static void set_state_bits(struct extent_io_tree *tree,
635 struct extent_state *state,
638 if ((bits & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
639 u64 range = state->end - state->start + 1;
640 tree->dirty_bytes += range;
642 set_state_cb(tree, state, bits);
643 state->state |= bits;
647 * set some bits on a range in the tree. This may require allocations
648 * or sleeping, so the gfp mask is used to indicate what is allowed.
650 * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
651 * range already has the desired bits set. The start of the existing
652 * range is returned in failed_start in this case.
654 * [start, end] is inclusive
655 * This takes the tree lock.
657 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits,
658 int exclusive, u64 *failed_start, gfp_t mask)
660 struct extent_state *state;
661 struct extent_state *prealloc = NULL;
662 struct rb_node *node;
669 if (!prealloc && (mask & __GFP_WAIT)) {
670 prealloc = alloc_extent_state(mask);
675 spin_lock_irqsave(&tree->lock, flags);
677 * this search will find all the extents that end after
680 node = tree_search(tree, start);
682 err = insert_state(tree, prealloc, start, end, bits);
684 BUG_ON(err == -EEXIST);
688 state = rb_entry(node, struct extent_state, rb_node);
689 last_start = state->start;
690 last_end = state->end;
693 * | ---- desired range ---- |
696 * Just lock what we found and keep going
698 if (state->start == start && state->end <= end) {
699 set = state->state & bits;
700 if (set && exclusive) {
701 *failed_start = state->start;
705 set_state_bits(tree, state, bits);
706 start = state->end + 1;
707 merge_state(tree, state);
712 * | ---- desired range ---- |
715 * | ------------- state -------------- |
717 * We need to split the extent we found, and may flip bits on
720 * If the extent we found extends past our
721 * range, we just split and search again. It'll get split
722 * again the next time though.
724 * If the extent we found is inside our range, we set the
727 if (state->start < start) {
728 set = state->state & bits;
729 if (exclusive && set) {
730 *failed_start = start;
734 err = split_state(tree, state, prealloc, start);
735 BUG_ON(err == -EEXIST);
739 if (state->end <= end) {
740 set_state_bits(tree, state, bits);
741 start = state->end + 1;
742 merge_state(tree, state);
744 start = state->start;
749 * | ---- desired range ---- |
750 * | state | or | state |
752 * There's a hole, we need to insert something in it and
753 * ignore the extent we found.
755 if (state->start > start) {
757 if (end < last_start)
760 this_end = last_start - 1;
761 err = insert_state(tree, prealloc, start, this_end,
764 BUG_ON(err == -EEXIST);
767 start = this_end + 1;
771 * | ---- desired range ---- |
773 * We need to split the extent, and set the bit
776 if (state->start <= end && state->end > end) {
777 set = state->state & bits;
778 if (exclusive && set) {
779 *failed_start = start;
783 err = split_state(tree, state, prealloc, end + 1);
784 BUG_ON(err == -EEXIST);
786 set_state_bits(tree, prealloc, bits);
787 merge_state(tree, prealloc);
795 spin_unlock_irqrestore(&tree->lock, flags);
797 free_extent_state(prealloc);
804 spin_unlock_irqrestore(&tree->lock, flags);
805 if (mask & __GFP_WAIT)
809 EXPORT_SYMBOL(set_extent_bit);
811 /* wrappers around set/clear extent bit */
812 int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
815 return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
818 EXPORT_SYMBOL(set_extent_dirty);
820 int set_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
823 return set_extent_bit(tree, start, end, EXTENT_ORDERED, 0, NULL, mask);
825 EXPORT_SYMBOL(set_extent_ordered);
827 int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
828 int bits, gfp_t mask)
830 return set_extent_bit(tree, start, end, bits, 0, NULL,
833 EXPORT_SYMBOL(set_extent_bits);
835 int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
836 int bits, gfp_t mask)
838 return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
840 EXPORT_SYMBOL(clear_extent_bits);
842 int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
845 return set_extent_bit(tree, start, end,
846 EXTENT_DELALLOC | EXTENT_DIRTY,
849 EXPORT_SYMBOL(set_extent_delalloc);
851 int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
854 return clear_extent_bit(tree, start, end,
855 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
857 EXPORT_SYMBOL(clear_extent_dirty);
859 int clear_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
862 return clear_extent_bit(tree, start, end, EXTENT_ORDERED, 1, 0, mask);
864 EXPORT_SYMBOL(clear_extent_ordered);
866 int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
869 return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
872 EXPORT_SYMBOL(set_extent_new);
874 int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
877 return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
879 EXPORT_SYMBOL(clear_extent_new);
881 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
884 return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
887 EXPORT_SYMBOL(set_extent_uptodate);
889 int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
892 return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
894 EXPORT_SYMBOL(clear_extent_uptodate);
896 int set_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
899 return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
902 EXPORT_SYMBOL(set_extent_writeback);
904 int clear_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
907 return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
909 EXPORT_SYMBOL(clear_extent_writeback);
911 int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end)
913 return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
915 EXPORT_SYMBOL(wait_on_extent_writeback);
917 int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
922 err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
923 &failed_start, mask);
924 if (err == -EEXIST && (mask & __GFP_WAIT)) {
925 wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
926 start = failed_start;
930 WARN_ON(start > end);
934 EXPORT_SYMBOL(lock_extent);
936 int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
939 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
941 EXPORT_SYMBOL(unlock_extent);
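/*
 * Example only, not part of the original file: a minimal sketch of how
 * callers typically use the lock/unlock wrappers above, assuming the
 * io_tree embedded in struct btrfs_inode (as used elsewhere in this file):
 *
 *	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
 *
 *	lock_extent(tree, start, end, GFP_NOFS);
 *	... operate on the inclusive byte range [start, end] ...
 *	unlock_extent(tree, start, end, GFP_NOFS);
 *
 * lock_extent() retries until it owns EXTENT_LOCKED on the whole range,
 * so the range stays exclusively held until unlock_extent() clears it.
 */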
944 * helper function to set pages and extents in the tree dirty
946 int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end)
948 unsigned long index = start >> PAGE_CACHE_SHIFT;
949 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
952 while (index <= end_index) {
953 page = find_get_page(tree->mapping, index);
955 __set_page_dirty_nobuffers(page);
956 page_cache_release(page);
959 set_extent_dirty(tree, start, end, GFP_NOFS);
962 EXPORT_SYMBOL(set_range_dirty);
965 * helper function to set both pages and extents in the tree writeback
967 int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
969 unsigned long index = start >> PAGE_CACHE_SHIFT;
970 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
973 while (index <= end_index) {
974 page = find_get_page(tree->mapping, index);
976 set_page_writeback(page);
977 page_cache_release(page);
980 set_extent_writeback(tree, start, end, GFP_NOFS);
983 EXPORT_SYMBOL(set_range_writeback);
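/*
 * find the first range at or after 'start' with any of 'bits' set.
 * Fills *start_ret/*end_ret and returns 0 on success, non-zero when
 * nothing matching is found.
 */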
985 int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
986 u64 *start_ret, u64 *end_ret, int bits)
988 struct rb_node *node;
989 struct extent_state *state;
992 spin_lock_irq(&tree->lock);
994 * this search will find all the extents that end after
997 node = tree_search(tree, start);
1003 state = rb_entry(node, struct extent_state, rb_node);
1004 if (state->end >= start && (state->state & bits)) {
1005 *start_ret = state->start;
1006 *end_ret = state->end;
1010 node = rb_next(node);
1015 spin_unlock_irq(&tree->lock);
1018 EXPORT_SYMBOL(find_first_extent_bit);
1020 struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
1021 u64 start, int bits)
1023 struct rb_node *node;
1024 struct extent_state *state;
1027 * this search will find all the extents that end after
1030 node = tree_search(tree, start);
1036 state = rb_entry(node, struct extent_state, rb_node);
1037 if (state->end >= start && (state->state & bits)) {
1040 node = rb_next(node);
1047 EXPORT_SYMBOL(find_first_extent_bit_state);
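/*
 * find a contiguous run of EXTENT_DELALLOC bytes around *start, lock it
 * with EXTENT_LOCKED and set *start/*end to describe the locked range.
 * Returns 0 when no delalloc range is found; the range stops growing
 * once roughly max_bytes have been accumulated.
 */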
1049 u64 find_lock_delalloc_range(struct extent_io_tree *tree,
1050 u64 *start, u64 *end, u64 max_bytes)
1052 struct rb_node *node;
1053 struct extent_state *state;
1054 u64 cur_start = *start;
1056 u64 total_bytes = 0;
1058 spin_lock_irq(&tree->lock);
1060 * this search will find all the extents that end after
1064 node = tree_search(tree, cur_start);
1072 state = rb_entry(node, struct extent_state, rb_node);
1073 if (found && state->start != cur_start) {
1076 if (!(state->state & EXTENT_DELALLOC)) {
1082 struct extent_state *prev_state;
1083 struct rb_node *prev_node = node;
1085 prev_node = rb_prev(prev_node);
1088 prev_state = rb_entry(prev_node,
1089 struct extent_state,
1091 if (!(prev_state->state & EXTENT_DELALLOC))
1097 if (state->state & EXTENT_LOCKED) {
1099 atomic_inc(&state->refs);
1100 prepare_to_wait(&state->wq, &wait,
1101 TASK_UNINTERRUPTIBLE);
1102 spin_unlock_irq(&tree->lock);
1104 spin_lock_irq(&tree->lock);
1105 finish_wait(&state->wq, &wait);
1106 free_extent_state(state);
1109 set_state_cb(tree, state, EXTENT_LOCKED);
1110 state->state |= EXTENT_LOCKED;
1112 *start = state->start;
1115 cur_start = state->end + 1;
1116 node = rb_next(node);
1119 total_bytes += state->end - state->start + 1;
1120 if (total_bytes >= max_bytes)
1124 spin_unlock_irq(&tree->lock);
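/*
 * count how many bytes in [*start, search_end] have 'bits' set, stopping
 * once max_bytes is reached.  *start is moved to the first matching range.
 * Counting EXTENT_DIRTY from offset zero is special cased to return the
 * cached tree->dirty_bytes total.
 */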
1128 u64 count_range_bits(struct extent_io_tree *tree,
1129 u64 *start, u64 search_end, u64 max_bytes,
1132 struct rb_node *node;
1133 struct extent_state *state;
1134 u64 cur_start = *start;
1135 u64 total_bytes = 0;
1138 if (search_end <= cur_start) {
1139 printk("search_end %Lu start %Lu\n", search_end, cur_start);
1144 spin_lock_irq(&tree->lock);
1145 if (cur_start == 0 && bits == EXTENT_DIRTY) {
1146 total_bytes = tree->dirty_bytes;
1150 * this search will find all the extents that end after
1153 node = tree_search(tree, cur_start);
1159 state = rb_entry(node, struct extent_state, rb_node);
1160 if (state->start > search_end)
1162 if (state->end >= cur_start && (state->state & bits)) {
1163 total_bytes += min(search_end, state->end) + 1 -
1164 max(cur_start, state->start);
1165 if (total_bytes >= max_bytes)
1168 *start = state->start;
1172 node = rb_next(node);
1177 spin_unlock_irq(&tree->lock);
1181 * helper function to lock both pages and extents in the tree.
1182 * pages must be locked first.
1184 int lock_range(struct extent_io_tree *tree, u64 start, u64 end)
1186 unsigned long index = start >> PAGE_CACHE_SHIFT;
1187 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1191 while (index <= end_index) {
1192 page = grab_cache_page(tree->mapping, index);
1198 err = PTR_ERR(page);
1203 lock_extent(tree, start, end, GFP_NOFS);
1208 * we failed above in getting the page at 'index', so we undo here
1209 * up to but not including the page at 'index'
1212 index = start >> PAGE_CACHE_SHIFT;
1213 while (index < end_index) {
1214 page = find_get_page(tree->mapping, index);
1216 page_cache_release(page);
1221 EXPORT_SYMBOL(lock_range);
1224 * helper function to unlock both pages and extents in the tree.
1226 int unlock_range(struct extent_io_tree *tree, u64 start, u64 end)
1228 unsigned long index = start >> PAGE_CACHE_SHIFT;
1229 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1232 while (index <= end_index) {
1233 page = find_get_page(tree->mapping, index);
1235 page_cache_release(page);
1238 unlock_extent(tree, start, end, GFP_NOFS);
1241 EXPORT_SYMBOL(unlock_range);
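/*
 * set_state_private/get_state_private attach and fetch a u64 of caller
 * private data (eg a checksum) on the extent_state that starts exactly
 * at 'start'.  They fail if no state begins at that offset.
 */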
1243 int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
1245 struct rb_node *node;
1246 struct extent_state *state;
1249 spin_lock_irq(&tree->lock);
1251 * this search will find all the extents that end after
1254 node = tree_search(tree, start);
1259 state = rb_entry(node, struct extent_state, rb_node);
1260 if (state->start != start) {
1264 state->private = private;
1266 spin_unlock_irq(&tree->lock);
1270 int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
1272 struct rb_node *node;
1273 struct extent_state *state;
1276 spin_lock_irq(&tree->lock);
1278 * this search will find all the extents that end after
1281 node = tree_search(tree, start);
1286 state = rb_entry(node, struct extent_state, rb_node);
1287 if (state->start != start) {
1291 *private = state->private;
1293 spin_unlock_irq(&tree->lock);
1298 * searches a range in the state tree for a given mask.
1299 * If 'filled' == 1, this returns 1 only if every extent in the tree
1300 * has the bits set. Otherwise, 1 is returned if any bit in the
1301 * range is found set.
1303 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
1304 int bits, int filled)
1306 struct extent_state *state = NULL;
1307 struct rb_node *node;
1309 unsigned long flags;
1311 spin_lock_irqsave(&tree->lock, flags);
1312 node = tree_search(tree, start);
1313 while (node && start <= end) {
1314 state = rb_entry(node, struct extent_state, rb_node);
1316 if (filled && state->start > start) {
1321 if (state->start > end)
1324 if (state->state & bits) {
1328 } else if (filled) {
1332 start = state->end + 1;
1335 node = rb_next(node);
1342 spin_unlock_irqrestore(&tree->lock, flags);
1345 EXPORT_SYMBOL(test_range_bit);
1348 * helper function to set a given page up to date if all the
1349 * extents in the tree for that page are up to date
1351 static int check_page_uptodate(struct extent_io_tree *tree,
1354 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1355 u64 end = start + PAGE_CACHE_SIZE - 1;
1356 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
1357 SetPageUptodate(page);
1362 * helper function to unlock a page if all the extents in the tree
1363 * for that page are unlocked
1365 static int check_page_locked(struct extent_io_tree *tree,
1368 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1369 u64 end = start + PAGE_CACHE_SIZE - 1;
1370 if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
1376 * helper function to end page writeback if all the extents
1377 * in the tree for that page are done with writeback
1379 static int check_page_writeback(struct extent_io_tree *tree,
1382 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1383 u64 end = start + PAGE_CACHE_SIZE - 1;
1384 if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
1385 end_page_writeback(page);
1389 /* lots and lots of room for performance fixes in the end_bio funcs */
1392 * after a writepage IO is done, we need to:
1393 * clear the uptodate bits on error
1394 * clear the writeback bits in the extent tree for this IO
1395 * end_page_writeback if the page has no more pending IO
1397 * Scheduling is not allowed, so the extent state tree is expected
1398 * to have one and only one object corresponding to this IO.
1400 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1401 static void end_bio_extent_writepage(struct bio *bio, int err)
1403 static int end_bio_extent_writepage(struct bio *bio,
1404 unsigned int bytes_done, int err)
1407 int uptodate = err == 0;
1408 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1409 struct extent_io_tree *tree;
1415 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1420 struct page *page = bvec->bv_page;
1421 tree = &BTRFS_I(page->mapping->host)->io_tree;
1423 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1425 end = start + bvec->bv_len - 1;
1427 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1432 if (--bvec >= bio->bi_io_vec)
1433 prefetchw(&bvec->bv_page->flags);
1434 if (tree->ops && tree->ops->writepage_end_io_hook) {
1435 ret = tree->ops->writepage_end_io_hook(page, start,
1436 end, NULL, uptodate);
1441 if (!uptodate && tree->ops &&
1442 tree->ops->writepage_io_failed_hook) {
1443 ret = tree->ops->writepage_io_failed_hook(bio, page,
1446 uptodate = (err == 0);
1452 clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
1453 ClearPageUptodate(page);
1457 clear_extent_writeback(tree, start, end, GFP_ATOMIC);
1460 end_page_writeback(page);
1462 check_page_writeback(tree, page);
1463 } while (bvec >= bio->bi_io_vec);
1465 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1471 * after a readpage IO is done, we need to:
1472 * clear the uptodate bits on error
1473 * set the uptodate bits if things worked
1474 * set the page up to date if all extents in the tree are uptodate
1475 * clear the lock bit in the extent tree
1476 * unlock the page if there are no other extents locked for it
1478 * Scheduling is not allowed, so the extent state tree is expected
1479 * to have one and only one object corresponding to this IO.
1481 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1482 static void end_bio_extent_readpage(struct bio *bio, int err)
1484 static int end_bio_extent_readpage(struct bio *bio,
1485 unsigned int bytes_done, int err)
1488 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1489 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1490 struct extent_io_tree *tree;
1496 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1502 struct page *page = bvec->bv_page;
1503 tree = &BTRFS_I(page->mapping->host)->io_tree;
1505 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1507 end = start + bvec->bv_len - 1;
1509 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1514 if (--bvec >= bio->bi_io_vec)
1515 prefetchw(&bvec->bv_page->flags);
1517 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
1518 ret = tree->ops->readpage_end_io_hook(page, start, end,
1523 if (!uptodate && tree->ops &&
1524 tree->ops->readpage_io_failed_hook) {
1525 ret = tree->ops->readpage_io_failed_hook(bio, page,
1529 test_bit(BIO_UPTODATE, &bio->bi_flags);
1535 set_extent_uptodate(tree, start, end,
1537 unlock_extent(tree, start, end, GFP_ATOMIC);
1541 SetPageUptodate(page);
1543 ClearPageUptodate(page);
1549 check_page_uptodate(tree, page);
1551 ClearPageUptodate(page);
1554 check_page_locked(tree, page);
1556 } while (bvec >= bio->bi_io_vec);
1559 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1565 * IO done from prepare_write is pretty simple, we just unlock
1566 * the structs in the extent tree when done, and set the uptodate bits
1569 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1570 static void end_bio_extent_preparewrite(struct bio *bio, int err)
1572 static int end_bio_extent_preparewrite(struct bio *bio,
1573 unsigned int bytes_done, int err)
1576 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1577 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1578 struct extent_io_tree *tree;
1582 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1588 struct page *page = bvec->bv_page;
1589 tree = &BTRFS_I(page->mapping->host)->io_tree;
1591 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1593 end = start + bvec->bv_len - 1;
1595 if (--bvec >= bio->bi_io_vec)
1596 prefetchw(&bvec->bv_page->flags);
1599 set_extent_uptodate(tree, start, end, GFP_ATOMIC);
1601 ClearPageUptodate(page);
1605 unlock_extent(tree, start, end, GFP_ATOMIC);
1607 } while (bvec >= bio->bi_io_vec);
1610 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1616 extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
1621 bio = bio_alloc(gfp_flags, nr_vecs);
1623 if (bio == NULL && (current->flags & PF_MEMALLOC)) {
1624 while (!bio && (nr_vecs /= 2))
1625 bio = bio_alloc(gfp_flags, nr_vecs);
1630 bio->bi_bdev = bdev;
1631 bio->bi_sector = first_sector;
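/*
 * hand a fully built bio to the block layer.  If the tree has a
 * submit_bio_hook (btrfs uses it for checksumming and multi-device
 * mapping) the bio is routed through it instead of plain submit_bio().
 */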
1636 static int submit_one_bio(int rw, struct bio *bio, int mirror_num)
1639 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1640 struct page *page = bvec->bv_page;
1641 struct extent_io_tree *tree = bio->bi_private;
1642 struct rb_node *node;
1643 struct extent_state *state;
1647 start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
1648 end = start + bvec->bv_len - 1;
1650 spin_lock_irq(&tree->lock);
1651 node = __etree_search(tree, start, NULL, NULL);
1653 state = rb_entry(node, struct extent_state, rb_node);
1654 while(state->end < end) {
1655 node = rb_next(node);
1656 state = rb_entry(node, struct extent_state, rb_node);
1658 BUG_ON(state->end != end);
1659 spin_unlock_irq(&tree->lock);
1661 bio->bi_private = NULL;
1665 if (tree->ops && tree->ops->submit_bio_hook)
1666 tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
1669 submit_bio(rw, bio);
1670 if (bio_flagged(bio, BIO_EOPNOTSUPP))
1676 static int submit_extent_page(int rw, struct extent_io_tree *tree,
1677 struct page *page, sector_t sector,
1678 size_t size, unsigned long offset,
1679 struct block_device *bdev,
1680 struct bio **bio_ret,
1681 unsigned long max_pages,
1682 bio_end_io_t end_io_func,
1689 if (bio_ret && *bio_ret) {
1691 if (bio->bi_sector + (bio->bi_size >> 9) != sector ||
1692 (tree->ops && tree->ops->merge_bio_hook &&
1693 tree->ops->merge_bio_hook(page, offset, size, bio)) ||
1694 bio_add_page(bio, page, size, offset) < size) {
1695 ret = submit_one_bio(rw, bio, mirror_num);
1701 nr = bio_get_nr_vecs(bdev);
1702 bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
1704 printk("failed to allocate bio nr %d\n", nr);
1708 bio_add_page(bio, page, size, offset);
1709 bio->bi_end_io = end_io_func;
1710 bio->bi_private = tree;
1715 ret = submit_one_bio(rw, bio, mirror_num);
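/*
 * set_page_extent_mapped marks a page as managed by the extent code by
 * taking a reference and storing a magic value in page->private.
 * set_page_extent_head stores the extent_buffer length in the first page
 * of a buffer so the head page can be recognised later.
 */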
1721 void set_page_extent_mapped(struct page *page)
1723 if (!PagePrivate(page)) {
1724 SetPagePrivate(page);
1725 page_cache_get(page);
1726 set_page_private(page, EXTENT_PAGE_PRIVATE);
1730 void set_page_extent_head(struct page *page, unsigned long len)
1732 set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
1736 * basic readpage implementation. Locked extent state structs are inserted
1737 * into the tree that are removed when the IO is done (by the end_io
1740 static int __extent_read_full_page(struct extent_io_tree *tree,
1742 get_extent_t *get_extent,
1743 struct bio **bio, int mirror_num)
1745 struct inode *inode = page->mapping->host;
1746 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1747 u64 page_end = start + PAGE_CACHE_SIZE - 1;
1751 u64 last_byte = i_size_read(inode);
1755 struct extent_map *em;
1756 struct block_device *bdev;
1759 size_t page_offset = 0;
1761 size_t blocksize = inode->i_sb->s_blocksize;
1763 set_page_extent_mapped(page);
1766 lock_extent(tree, start, end, GFP_NOFS);
1768 while (cur <= end) {
1769 if (cur >= last_byte) {
1771 iosize = PAGE_CACHE_SIZE - page_offset;
1772 userpage = kmap_atomic(page, KM_USER0);
1773 memset(userpage + page_offset, 0, iosize);
1774 flush_dcache_page(page);
1775 kunmap_atomic(userpage, KM_USER0);
1776 set_extent_uptodate(tree, cur, cur + iosize - 1,
1778 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1781 em = get_extent(inode, page, page_offset, cur,
1783 if (IS_ERR(em) || !em) {
1785 unlock_extent(tree, cur, end, GFP_NOFS);
1788 extent_offset = cur - em->start;
1789 if (extent_map_end(em) <= cur) {
1790 printk("bad mapping em [%Lu %Lu] cur %Lu\n", em->start, extent_map_end(em), cur);
1792 BUG_ON(extent_map_end(em) <= cur);
1794 printk("2bad mapping end %Lu cur %Lu\n", end, cur);
1798 iosize = min(extent_map_end(em) - cur, end - cur + 1);
1799 cur_end = min(extent_map_end(em) - 1, end);
1800 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
1801 sector = (em->block_start + extent_offset) >> 9;
1803 block_start = em->block_start;
1804 free_extent_map(em);
1807 /* we've found a hole, just zero and go on */
1808 if (block_start == EXTENT_MAP_HOLE) {
1810 userpage = kmap_atomic(page, KM_USER0);
1811 memset(userpage + page_offset, 0, iosize);
1812 flush_dcache_page(page);
1813 kunmap_atomic(userpage, KM_USER0);
1815 set_extent_uptodate(tree, cur, cur + iosize - 1,
1817 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1819 page_offset += iosize;
1822 /* the get_extent function already copied into the page */
1823 if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
1824 check_page_uptodate(tree, page);
1825 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1827 page_offset += iosize;
1830 /* we have an inline extent but it didn't get marked up
1831 * to date. Error out
1833 if (block_start == EXTENT_MAP_INLINE) {
1835 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1837 page_offset += iosize;
1842 if (tree->ops && tree->ops->readpage_io_hook) {
1843 ret = tree->ops->readpage_io_hook(page, cur,
1847 unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
1849 ret = submit_extent_page(READ, tree, page,
1850 sector, iosize, page_offset,
1852 end_bio_extent_readpage, mirror_num);
1858 page_offset += iosize;
1861 if (!PageError(page))
1862 SetPageUptodate(page);
1868 int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
1869 get_extent_t *get_extent)
1871 struct bio *bio = NULL;
1874 ret = __extent_read_full_page(tree, page, get_extent, &bio, 0);
1876 submit_one_bio(READ, bio, 0);
1879 EXPORT_SYMBOL(extent_read_full_page);
1882 * the writepage semantics are similar to regular writepage. extent
1883 * records are inserted to lock ranges in the tree, and as dirty areas
1884 * are found, they are marked writeback. Then the lock bits are removed
1885 * and the end_io handler clears the writeback ranges
1887 static int __extent_writepage(struct page *page, struct writeback_control *wbc,
1890 struct inode *inode = page->mapping->host;
1891 struct extent_page_data *epd = data;
1892 struct extent_io_tree *tree = epd->tree;
1893 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1895 u64 page_end = start + PAGE_CACHE_SIZE - 1;
1899 u64 last_byte = i_size_read(inode);
1904 struct extent_map *em;
1905 struct block_device *bdev;
1908 size_t pg_offset = 0;
1910 loff_t i_size = i_size_read(inode);
1911 unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
1915 WARN_ON(!PageLocked(page));
1916 pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
1917 if (page->index > end_index ||
1918 (page->index == end_index && !pg_offset)) {
1919 page->mapping->a_ops->invalidatepage(page, 0);
1924 if (page->index == end_index) {
1927 userpage = kmap_atomic(page, KM_USER0);
1928 memset(userpage + pg_offset, 0,
1929 PAGE_CACHE_SIZE - pg_offset);
1930 kunmap_atomic(userpage, KM_USER0);
1931 flush_dcache_page(page);
1935 set_page_extent_mapped(page);
1937 delalloc_start = start;
1939 while(delalloc_end < page_end) {
1940 nr_delalloc = find_lock_delalloc_range(tree, &delalloc_start,
1943 if (nr_delalloc == 0) {
1944 delalloc_start = delalloc_end + 1;
1947 tree->ops->fill_delalloc(inode, delalloc_start,
1949 clear_extent_bit(tree, delalloc_start,
1951 EXTENT_LOCKED | EXTENT_DELALLOC,
1953 delalloc_start = delalloc_end + 1;
1955 lock_extent(tree, start, page_end, GFP_NOFS);
1956 unlock_start = start;
1958 if (tree->ops && tree->ops->writepage_start_hook) {
1959 ret = tree->ops->writepage_start_hook(page, start, page_end);
1960 if (ret == -EAGAIN) {
1961 unlock_extent(tree, start, page_end, GFP_NOFS);
1962 redirty_page_for_writepage(wbc, page);
1969 if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
1970 printk("found delalloc bits after lock_extent\n");
1973 if (last_byte <= start) {
1974 clear_extent_dirty(tree, start, page_end, GFP_NOFS);
1975 unlock_extent(tree, start, page_end, GFP_NOFS);
1976 if (tree->ops && tree->ops->writepage_end_io_hook)
1977 tree->ops->writepage_end_io_hook(page, start,
1979 unlock_start = page_end + 1;
1983 set_extent_uptodate(tree, start, page_end, GFP_NOFS);
1984 blocksize = inode->i_sb->s_blocksize;
1986 while (cur <= end) {
1987 if (cur >= last_byte) {
1988 clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
1989 unlock_extent(tree, unlock_start, page_end, GFP_NOFS);
1990 if (tree->ops && tree->ops->writepage_end_io_hook)
1991 tree->ops->writepage_end_io_hook(page, cur,
1993 unlock_start = page_end + 1;
1996 em = epd->get_extent(inode, page, pg_offset, cur,
1998 if (IS_ERR(em) || !em) {
2003 extent_offset = cur - em->start;
2004 BUG_ON(extent_map_end(em) <= cur);
2006 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2007 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
2008 sector = (em->block_start + extent_offset) >> 9;
2010 block_start = em->block_start;
2011 free_extent_map(em);
2014 if (block_start == EXTENT_MAP_HOLE ||
2015 block_start == EXTENT_MAP_INLINE) {
2016 clear_extent_dirty(tree, cur,
2017 cur + iosize - 1, GFP_NOFS);
2019 unlock_extent(tree, unlock_start, cur + iosize -1,
2022 if (tree->ops && tree->ops->writepage_end_io_hook)
2023 tree->ops->writepage_end_io_hook(page, cur,
2027 pg_offset += iosize;
2032 /* leave this out until we have a page_mkwrite call */
2033 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
2036 pg_offset += iosize;
2039 clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
2040 if (tree->ops && tree->ops->writepage_io_hook) {
2041 ret = tree->ops->writepage_io_hook(page, cur,
2049 unsigned long max_nr = end_index + 1;
2051 set_range_writeback(tree, cur, cur + iosize - 1);
2052 if (!PageWriteback(page)) {
2053 printk("warning page %lu not writeback, "
2054 "cur %llu end %llu\n", page->index,
2055 (unsigned long long)cur,
2056 (unsigned long long)end);
2059 ret = submit_extent_page(WRITE, tree, page, sector,
2060 iosize, pg_offset, bdev,
2062 end_bio_extent_writepage, 0);
2067 pg_offset += iosize;
2072 /* make sure the mapping tag for page dirty gets cleared */
2073 set_page_writeback(page);
2074 end_page_writeback(page);
2076 if (unlock_start <= page_end)
2077 unlock_extent(tree, unlock_start, page_end, GFP_NOFS);
2082 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
2083 /* Taken directly from 2.6.23 with a mod for a lockpage hook */
2084 typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
2089 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
2090 * @mapping: address space structure to write
2091 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2092 * @writepage: function called for each page
2093 * @data: data passed to writepage function
2095 * If a page is already under I/O, write_cache_pages() skips it, even
2096 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
2097 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
2098 * and msync() need to guarantee that all the data which was dirty at the time
2099 * the call was made get new I/O started against them. If wbc->sync_mode is
2100 * WB_SYNC_ALL then we were called for data integrity and we must wait for
2101 * existing IO to complete.
2103 int extent_write_cache_pages(struct extent_io_tree *tree,
2104 struct address_space *mapping,
2105 struct writeback_control *wbc,
2106 writepage_t writepage, void *data)
2108 struct backing_dev_info *bdi = mapping->backing_dev_info;
2111 struct pagevec pvec;
2114 pgoff_t end; /* Inclusive */
2116 int range_whole = 0;
2118 if (wbc->nonblocking && bdi_write_congested(bdi)) {
2119 wbc->encountered_congestion = 1;
2123 pagevec_init(&pvec, 0);
2124 if (wbc->range_cyclic) {
2125 index = mapping->writeback_index; /* Start from prev offset */
2128 index = wbc->range_start >> PAGE_CACHE_SHIFT;
2129 end = wbc->range_end >> PAGE_CACHE_SHIFT;
2130 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2135 while (!done && (index <= end) &&
2136 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
2137 PAGECACHE_TAG_DIRTY,
2138 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
2142 for (i = 0; i < nr_pages; i++) {
2143 struct page *page = pvec.pages[i];
2146 * At this point we hold neither mapping->tree_lock nor
2147 * lock on the page itself: the page may be truncated or
2148 * invalidated (changing page->mapping to NULL), or even
2149 * swizzled back from swapper_space to tmpfs file
2152 if (tree->ops && tree->ops->write_cache_pages_lock_hook)
2153 tree->ops->write_cache_pages_lock_hook(page);
2157 if (unlikely(page->mapping != mapping)) {
2162 if (!wbc->range_cyclic && page->index > end) {
2168 if (wbc->sync_mode != WB_SYNC_NONE)
2169 wait_on_page_writeback(page);
2171 if (PageWriteback(page) ||
2172 !clear_page_dirty_for_io(page)) {
2177 ret = (*writepage)(page, wbc, data);
2179 if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
2183 if (ret || (--(wbc->nr_to_write) <= 0))
2185 if (wbc->nonblocking && bdi_write_congested(bdi)) {
2186 wbc->encountered_congestion = 1;
2190 pagevec_release(&pvec);
2193 if (!scanned && !done) {
2195 * We hit the last page and there is more work to be done: wrap
2196 * back to the start of the file
2202 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2203 mapping->writeback_index = index;
2204 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
2205 if (wbc->range_cont)
2206 wbc->range_start = (loff_t)index << PAGE_CACHE_SHIFT;
2210 EXPORT_SYMBOL(extent_write_cache_pages);
2212 int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
2213 get_extent_t *get_extent,
2214 struct writeback_control *wbc)
2217 struct address_space *mapping = page->mapping;
2218 struct extent_page_data epd = {
2221 .get_extent = get_extent,
2223 struct writeback_control wbc_writepages = {
2225 .sync_mode = WB_SYNC_NONE,
2226 .older_than_this = NULL,
2228 .range_start = page_offset(page) + PAGE_CACHE_SIZE,
2229 .range_end = (loff_t)-1,
2233 ret = __extent_writepage(page, wbc, &epd);
2235 extent_write_cache_pages(tree, mapping, &wbc_writepages,
2236 __extent_writepage, &epd);
2238 submit_one_bio(WRITE, epd.bio, 0);
2242 EXPORT_SYMBOL(extent_write_full_page);
2245 int extent_writepages(struct extent_io_tree *tree,
2246 struct address_space *mapping,
2247 get_extent_t *get_extent,
2248 struct writeback_control *wbc)
2251 struct extent_page_data epd = {
2254 .get_extent = get_extent,
2257 ret = extent_write_cache_pages(tree, mapping, wbc,
2258 __extent_writepage, &epd);
2260 submit_one_bio(WRITE, epd.bio, 0);
2264 EXPORT_SYMBOL(extent_writepages);
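/*
 * readpages helper: add each page to the page cache and LRU, then issue
 * reads through __extent_read_full_page, batching them into a single bio
 * where possible before submitting at the end.
 */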
2266 int extent_readpages(struct extent_io_tree *tree,
2267 struct address_space *mapping,
2268 struct list_head *pages, unsigned nr_pages,
2269 get_extent_t get_extent)
2271 struct bio *bio = NULL;
2273 struct pagevec pvec;
2275 pagevec_init(&pvec, 0);
2276 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
2277 struct page *page = list_entry(pages->prev, struct page, lru);
2279 prefetchw(&page->flags);
2280 list_del(&page->lru);
2282 * what we want to do here is call add_to_page_cache_lru,
2283 * but that isn't exported, so we reproduce it here
2285 if (!add_to_page_cache(page, mapping,
2286 page->index, GFP_KERNEL)) {
2288 /* open coding of lru_cache_add, also not exported */
2289 page_cache_get(page);
2290 if (!pagevec_add(&pvec, page))
2291 __pagevec_lru_add(&pvec);
2292 __extent_read_full_page(tree, page, get_extent,
2295 page_cache_release(page);
2297 if (pagevec_count(&pvec))
2298 __pagevec_lru_add(&pvec);
2299 BUG_ON(!list_empty(pages));
2301 submit_one_bio(READ, bio, 0);
2304 EXPORT_SYMBOL(extent_readpages);
2307 * basic invalidatepage code, this waits on any locked or writeback
2308 * ranges corresponding to the page, and then deletes any extent state
2309 * records from the tree
2311 int extent_invalidatepage(struct extent_io_tree *tree,
2312 struct page *page, unsigned long offset)
2314 u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
2315 u64 end = start + PAGE_CACHE_SIZE - 1;
2316 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
2318 start += (offset + blocksize - 1) & ~(blocksize - 1);
2322 lock_extent(tree, start, end, GFP_NOFS);
2323 wait_on_extent_writeback(tree, start, end);
2324 clear_extent_bit(tree, start, end,
2325 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
2329 EXPORT_SYMBOL(extent_invalidatepage);
2332 * simple commit_write call, set_range_dirty is used to mark both
2333 * the pages and the extent records as dirty
2335 int extent_commit_write(struct extent_io_tree *tree,
2336 struct inode *inode, struct page *page,
2337 unsigned from, unsigned to)
2339 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2341 set_page_extent_mapped(page);
2342 set_page_dirty(page);
2344 if (pos > inode->i_size) {
2345 i_size_write(inode, pos);
2346 mark_inode_dirty(inode);
2350 EXPORT_SYMBOL(extent_commit_write);
2352 int extent_prepare_write(struct extent_io_tree *tree,
2353 struct inode *inode, struct page *page,
2354 unsigned from, unsigned to, get_extent_t *get_extent)
2356 u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
2357 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
2359 u64 orig_block_start;
2362 struct extent_map *em;
2363 unsigned blocksize = 1 << inode->i_blkbits;
2364 size_t page_offset = 0;
2365 size_t block_off_start;
2366 size_t block_off_end;
2372 set_page_extent_mapped(page);
2374 block_start = (page_start + from) & ~((u64)blocksize - 1);
2375 block_end = (page_start + to - 1) | (blocksize - 1);
2376 orig_block_start = block_start;
2378 lock_extent(tree, page_start, page_end, GFP_NOFS);
2379 while(block_start <= block_end) {
2380 em = get_extent(inode, page, page_offset, block_start,
2381 block_end - block_start + 1, 1);
2382 if (IS_ERR(em) || !em) {
2385 cur_end = min(block_end, extent_map_end(em) - 1);
2386 block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
2387 block_off_end = block_off_start + blocksize;
2388 isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
2390 if (!PageUptodate(page) && isnew &&
2391 (block_off_end > to || block_off_start < from)) {
2394 kaddr = kmap_atomic(page, KM_USER0);
2395 if (block_off_end > to)
2396 memset(kaddr + to, 0, block_off_end - to);
2397 if (block_off_start < from)
2398 memset(kaddr + block_off_start, 0,
2399 from - block_off_start);
2400 flush_dcache_page(page);
2401 kunmap_atomic(kaddr, KM_USER0);
2403 if ((em->block_start != EXTENT_MAP_HOLE &&
2404 em->block_start != EXTENT_MAP_INLINE) &&
2405 !isnew && !PageUptodate(page) &&
2406 (block_off_end > to || block_off_start < from) &&
2407 !test_range_bit(tree, block_start, cur_end,
2408 EXTENT_UPTODATE, 1)) {
2410 u64 extent_offset = block_start - em->start;
2412 sector = (em->block_start + extent_offset) >> 9;
2413 iosize = (cur_end - block_start + blocksize) &
2414 ~((u64)blocksize - 1);
2416 * we've already got the extent locked, but we
2417 * need to split the state such that our end_bio
2418 * handler can clear the lock.
2420 set_extent_bit(tree, block_start,
2421 block_start + iosize - 1,
2422 EXTENT_LOCKED, 0, NULL, GFP_NOFS);
2423 ret = submit_extent_page(READ, tree, page,
2424 sector, iosize, page_offset, em->bdev,
2426 end_bio_extent_preparewrite, 0);
2428 block_start = block_start + iosize;
2430 set_extent_uptodate(tree, block_start, cur_end,
2432 unlock_extent(tree, block_start, cur_end, GFP_NOFS);
2433 block_start = cur_end + 1;
2435 page_offset = block_start & (PAGE_CACHE_SIZE - 1);
2436 free_extent_map(em);
2439 wait_extent_bit(tree, orig_block_start,
2440 block_end, EXTENT_LOCKED);
2442 check_page_uptodate(tree, page);
2444 /* FIXME, zero out newly allocated blocks on error */
2447 EXPORT_SYMBOL(extent_prepare_write);
2450 * a helper for releasepage, this tests for areas of the page that
2451 * are locked or under IO and drops the related state bits if it is safe
2454 int try_release_extent_state(struct extent_map_tree *map,
2455 struct extent_io_tree *tree, struct page *page,
2458 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2459 u64 end = start + PAGE_CACHE_SIZE - 1;
2462 if (test_range_bit(tree, start, end,
2463 EXTENT_IOBITS | EXTENT_ORDERED, 0))
2466 if ((mask & GFP_NOFS) == GFP_NOFS)
2468 clear_extent_bit(tree, start, end, EXTENT_UPTODATE,
2473 EXPORT_SYMBOL(try_release_extent_state);
2476 * a helper for releasepage. As long as there are no locked extents
2477 * in the range corresponding to the page, both state records and extent
2478 * map records are removed
2480 int try_release_extent_mapping(struct extent_map_tree *map,
2481 struct extent_io_tree *tree, struct page *page,
2484 struct extent_map *em;
2485 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2486 u64 end = start + PAGE_CACHE_SIZE - 1;
2488 if ((mask & __GFP_WAIT) &&
2489 page->mapping->host->i_size > 16 * 1024 * 1024) {
2491 while (start <= end) {
2492 len = end - start + 1;
2493 spin_lock(&map->lock);
2494 em = lookup_extent_mapping(map, start, len);
2495 if (!em || IS_ERR(em)) {
2496 spin_unlock(&map->lock);
2499 if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
2500 em->start != start) {
2501 spin_unlock(&map->lock);
2502 free_extent_map(em);
2505 if (!test_range_bit(tree, em->start,
2506 extent_map_end(em) - 1,
2507 EXTENT_LOCKED, 0)) {
2508 remove_extent_mapping(map, em);
2509 /* once for the rb tree */
2510 free_extent_map(em);
2512 start = extent_map_end(em);
2513 spin_unlock(&map->lock);
2516 free_extent_map(em);
2519 return try_release_extent_state(map, tree, page, mask);
2521 EXPORT_SYMBOL(try_release_extent_mapping);
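/*
 * bmap helper: map a logical block of the file to a sector on disk using
 * get_extent.  Holes and inline extents map to sector 0.
 */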
2523 sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
2524 get_extent_t *get_extent)
2526 struct inode *inode = mapping->host;
2527 u64 start = (u64)iblock << inode->i_blkbits;
2528 sector_t sector = 0;
2529 struct extent_map *em;
2531 em = get_extent(inode, NULL, 0, start, (1 << inode->i_blkbits), 0);
2532 if (!em || IS_ERR(em))
2535 if (em->block_start == EXTENT_MAP_INLINE ||
2536 em->block_start == EXTENT_MAP_HOLE)
2539 sector = (em->block_start + start - em->start) >> inode->i_blkbits;
2541 free_extent_map(em);
2545 static inline struct page *extent_buffer_page(struct extent_buffer *eb,
2549 struct address_space *mapping;
2552 return eb->first_page;
2553 i += eb->start >> PAGE_CACHE_SHIFT;
2554 mapping = eb->first_page->mapping;
2559 * extent_buffer_page is only called after pinning the page
2560 * by increasing the reference count. So we know the page must
2561 * be in the radix tree.
2563 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
2566 read_lock_irq(&mapping->tree_lock);
2568 p = radix_tree_lookup(&mapping->page_tree, i);
2570 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
2573 read_unlock_irq(&mapping->tree_lock);
2578 static inline unsigned long num_extent_pages(u64 start, u64 len)
2580 return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
2581 (start >> PAGE_CACHE_SHIFT);
2584 static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
2589 struct extent_buffer *eb = NULL;
2591 unsigned long flags;
2594 eb = kmem_cache_zalloc(extent_buffer_cache, mask);
2597 mutex_init(&eb->mutex);
2599 spin_lock_irqsave(&leak_lock, flags);
2600 list_add(&eb->leak_list, &buffers);
2601 spin_unlock_irqrestore(&leak_lock, flags);
2603 atomic_set(&eb->refs, 1);
2608 static void __free_extent_buffer(struct extent_buffer *eb)
2611 unsigned long flags;
2612 spin_lock_irqsave(&leak_lock, flags);
2613 list_del(&eb->leak_list);
2614 spin_unlock_irqrestore(&leak_lock, flags);
2616 kmem_cache_free(extent_buffer_cache, eb);
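/*
 * find or create the extent_buffer for [start, start + len).  An existing
 * buffer is returned with an extra reference; otherwise a new one is
 * allocated, its pages are pinned in the page cache, and it is inserted
 * into the buffer tree (backing out if another thread raced us in).
 */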
2619 struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
2620 u64 start, unsigned long len,
2624 unsigned long num_pages = num_extent_pages(start, len);
2626 unsigned long index = start >> PAGE_CACHE_SHIFT;
2627 struct extent_buffer *eb;
2628 struct extent_buffer *exists = NULL;
2630 struct address_space *mapping = tree->mapping;
2633 spin_lock(&tree->buffer_lock);
2634 eb = buffer_search(tree, start);
2636 atomic_inc(&eb->refs);
2637 spin_unlock(&tree->buffer_lock);
2638 mark_page_accessed(eb->first_page);
2641 spin_unlock(&tree->buffer_lock);
2643 eb = __alloc_extent_buffer(tree, start, len, mask);
2648 eb->first_page = page0;
2651 page_cache_get(page0);
2652 mark_page_accessed(page0);
2653 set_page_extent_mapped(page0);
2654 set_page_extent_head(page0, len);
2655 uptodate = PageUptodate(page0);
2659 for (; i < num_pages; i++, index++) {
2660 p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
2665 set_page_extent_mapped(p);
2666 mark_page_accessed(p);
2669 set_page_extent_head(p, len);
2671 set_page_private(p, EXTENT_PAGE_PRIVATE);
2673 if (!PageUptodate(p))
2678 eb->flags |= EXTENT_UPTODATE;
2679 eb->flags |= EXTENT_BUFFER_FILLED;
2681 spin_lock(&tree->buffer_lock);
2682 exists = buffer_tree_insert(tree, start, &eb->rb_node);
2684 /* add one reference for the caller */
2685 atomic_inc(&exists->refs);
2686 spin_unlock(&tree->buffer_lock);
2689 spin_unlock(&tree->buffer_lock);
2691 /* add one reference for the tree */
2692 atomic_inc(&eb->refs);
2696 if (!atomic_dec_and_test(&eb->refs))
2698 for (index = 1; index < i; index++)
2699 page_cache_release(extent_buffer_page(eb, index));
2700 page_cache_release(extent_buffer_page(eb, 0));
2701 __free_extent_buffer(eb);
2704 EXPORT_SYMBOL(alloc_extent_buffer);
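/*
 * find_extent_buffer is the lookup-only variant: it returns the cached
 * buffer starting at @start with an extra reference taken, and does not
 * create one when nothing is cached.
 */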
2706 struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
2707 u64 start, unsigned long len,
2710 struct extent_buffer *eb;
2712 spin_lock(&tree->buffer_lock);
2713 eb = buffer_search(tree, start);
2715 atomic_inc(&eb->refs);
2716 spin_unlock(&tree->buffer_lock);
2719 mark_page_accessed(eb->first_page);
2723 EXPORT_SYMBOL(find_extent_buffer);
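/*
 * free_extent_buffer drops the caller's reference.  The buffer itself is
 * not destroyed here; the final teardown happens in
 * try_release_extent_buffer() once the page cache releases its pages.
 */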
2725 void free_extent_buffer(struct extent_buffer *eb)
2730 if (!atomic_dec_and_test(&eb->refs))
2735 EXPORT_SYMBOL(free_extent_buffer);
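/*
 * clear_extent_buffer_dirty clears EXTENT_DIRTY over the buffer's byte
 * range and then clears the dirty state on each backing page, taking
 * care not to clean a page that is only partially covered by the buffer
 * and still dirty from another user.
 */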
2737 int clear_extent_buffer_dirty(struct extent_io_tree *tree,
2738 struct extent_buffer *eb)
2742 unsigned long num_pages;
2745 u64 start = eb->start;
2746 u64 end = start + eb->len - 1;
2748 set = clear_extent_dirty(tree, start, end, GFP_NOFS);
2749 num_pages = num_extent_pages(eb->start, eb->len);
2751 for (i = 0; i < num_pages; i++) {
2752 page = extent_buffer_page(eb, i);
2753 lock_page(page);
2754 if (i == 0)
2755 set_page_extent_head(page, eb->len);
2756 else
2757 set_page_private(page, EXTENT_PAGE_PRIVATE);
2759 /*
2760 * if we're on the last page or the first page and the
2761 * block isn't aligned on a page boundary, do extra checks
2762 * to make sure we don't clean a page that is partially dirty
2763 */
2764 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2765 ((i == num_pages - 1) &&
2766 ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
2767 start = (u64)page->index << PAGE_CACHE_SHIFT;
2768 end = start + PAGE_CACHE_SIZE - 1;
2769 if (test_range_bit(tree, start, end,
2770 EXTENT_DIRTY, 0)) {
2771 unlock_page(page);
2772 continue;
2773 }
2774 }
2775 clear_page_dirty_for_io(page);
2776 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
2777 spin_lock_irq(&page->mapping->tree_lock);
2778 #else
2779 read_lock_irq(&page->mapping->tree_lock);
2780 #endif
2781 if (!PageDirty(page)) {
2782 radix_tree_tag_clear(&page->mapping->page_tree,
2783 page_index(page),
2784 PAGECACHE_TAG_DIRTY);
2785 }
2786 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
2787 spin_unlock_irq(&page->mapping->tree_lock);
2788 #else
2789 read_unlock_irq(&page->mapping->tree_lock);
2790 #endif
2791 unlock_page(page);
2792 }
2793 return 0;
2794 }
2795 EXPORT_SYMBOL(clear_extent_buffer_dirty);
2797 int wait_on_extent_buffer_writeback(struct extent_io_tree *tree,
2798 struct extent_buffer *eb)
2800 return wait_on_extent_writeback(tree, eb->start,
2801 eb->start + eb->len - 1);
2803 EXPORT_SYMBOL(wait_on_extent_buffer_writeback);
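/*
 * set_extent_buffer_dirty dirties every page backing the buffer and marks
 * the corresponding byte range dirty in the io tree.
 */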
2805 int set_extent_buffer_dirty(struct extent_io_tree *tree,
2806 struct extent_buffer *eb)
2809 unsigned long num_pages;
2811 num_pages = num_extent_pages(eb->start, eb->len);
2812 for (i = 0; i < num_pages; i++) {
2813 struct page *page = extent_buffer_page(eb, i);
2814 /* writepage may need to do something special for the
2815 * first page, so we have to make sure page->private is
2816 * properly set.  releasepage may drop page->private
2817 * on us if the page isn't already dirty.
2818 */
2819 lock_page(page);
2820 if (i == 0) {
2821 set_page_extent_head(page, eb->len);
2822 } else if (PagePrivate(page) &&
2823 page->private != EXTENT_PAGE_PRIVATE) {
2824 set_page_extent_mapped(page);
2825 }
2826 __set_page_dirty_nobuffers(extent_buffer_page(eb, i));
2827 set_extent_dirty(tree, page_offset(page),
2828 page_offset(page) + PAGE_CACHE_SIZE -1,
2834 EXPORT_SYMBOL(set_extent_buffer_dirty);
2836 int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
2837 struct extent_buffer *eb)
2841 unsigned long num_pages;
2843 num_pages = num_extent_pages(eb->start, eb->len);
2844 eb->flags &= ~EXTENT_UPTODATE;
2846 clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
2848 for (i = 0; i < num_pages; i++) {
2849 page = extent_buffer_page(eb, i);
2851 ClearPageUptodate(page);
2856 int set_extent_buffer_uptodate(struct extent_io_tree *tree,
2857 struct extent_buffer *eb)
2861 unsigned long num_pages;
2863 num_pages = num_extent_pages(eb->start, eb->len);
2865 set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
2867 for (i = 0; i < num_pages; i++) {
2868 page = extent_buffer_page(eb, i);
2869 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2870 ((i == num_pages - 1) &&
2871 ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
2872 check_page_uptodate(tree, page);
2875 SetPageUptodate(page);
2879 EXPORT_SYMBOL(set_extent_buffer_uptodate);
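/*
 * extent_range_uptodate and extent_buffer_uptodate test whether a byte
 * range (or a whole buffer) can be considered uptodate, first via the
 * EXTENT_UPTODATE bits in the tree and then by checking the individual
 * page flags.
 */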
2881 int extent_range_uptodate(struct extent_io_tree *tree,
2886 int pg_uptodate = 1;
2888 unsigned long index;
2890 ret = test_range_bit(tree, start, end, EXTENT_UPTODATE, 1);
2893 while(start <= end) {
2894 index = start >> PAGE_CACHE_SHIFT;
2895 page = find_get_page(tree->mapping, index);
2896 uptodate = PageUptodate(page);
2897 page_cache_release(page);
2902 start += PAGE_CACHE_SIZE;
2907 int extent_buffer_uptodate(struct extent_io_tree *tree,
2908 struct extent_buffer *eb)
2911 unsigned long num_pages;
2914 int pg_uptodate = 1;
2916 if (eb->flags & EXTENT_UPTODATE)
2919 ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
2920 EXTENT_UPTODATE, 1);
2924 num_pages = num_extent_pages(eb->start, eb->len);
2925 for (i = 0; i < num_pages; i++) {
2926 page = extent_buffer_page(eb, i);
2927 if (!PageUptodate(page)) {
2934 EXPORT_SYMBOL(extent_buffer_uptodate);
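/*
 * read_extent_buffer_pages reads the buffer's backing pages from disk if
 * they aren't uptodate yet.  It locks each page (only try-locking when
 * the caller asked not to wait), submits reads for the ones that aren't
 * uptodate, batching them into a single bio where possible, and, when
 * waiting, returns an error if any page still isn't uptodate afterwards.
 */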
2936 int read_extent_buffer_pages(struct extent_io_tree *tree,
2937 struct extent_buffer *eb,
2938 u64 start, int wait,
2939 get_extent_t *get_extent, int mirror_num)
2942 unsigned long start_i;
2946 int locked_pages = 0;
2947 int all_uptodate = 1;
2948 int inc_all_pages = 0;
2949 unsigned long num_pages;
2950 struct bio *bio = NULL;
2952 if (eb->flags & EXTENT_UPTODATE)
2955 if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
2956 EXTENT_UPTODATE, 1)) {
2961 WARN_ON(start < eb->start);
2962 start_i = (start >> PAGE_CACHE_SHIFT) -
2963 (eb->start >> PAGE_CACHE_SHIFT);
2968 num_pages = num_extent_pages(eb->start, eb->len);
2969 for (i = start_i; i < num_pages; i++) {
2970 page = extent_buffer_page(eb, i);
2972 if (!trylock_page(page))
2978 if (!PageUptodate(page)) {
2984 eb->flags |= EXTENT_UPTODATE;
2986 printk("all up to date but ret is %d\n", ret);
2991 for (i = start_i; i < num_pages; i++) {
2992 page = extent_buffer_page(eb, i);
2994 page_cache_get(page);
2995 if (!PageUptodate(page)) {
2998 ClearPageError(page);
2999 err = __extent_read_full_page(tree, page,
3004 printk("err %d from __extent_read_full_page\n", ret);
3012 submit_one_bio(READ, bio, mirror_num);
3016 printk("ret %d wait %d returning\n", ret, wait);
3019 for (i = start_i; i < num_pages; i++) {
3020 page = extent_buffer_page(eb, i);
3021 wait_on_page_locked(page);
3022 if (!PageUptodate(page)) {
3023 printk("page not uptodate after wait_on_page_locked\n");
3028 eb->flags |= EXTENT_UPTODATE;
3033 while(locked_pages > 0) {
3034 page = extent_buffer_page(eb, i);
3041 EXPORT_SYMBOL(read_extent_buffer_pages);
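/*
 * read_extent_buffer copies @len bytes out of the buffer into a plain
 * kernel buffer, starting at byte offset @start relative to eb->start,
 * kmapping one backing page at a time.  A minimal illustrative use
 * (hypothetical sizes; the caller already holds a reference on eb and
 * eb->len is at least as large as the copy):
 *
 *	char header[32];
 *
 *	read_extent_buffer(eb, header, 0, sizeof(header));
 */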
3043 void read_extent_buffer(struct extent_buffer *eb, void *dstv,
3044 unsigned long start,
3051 char *dst = (char *)dstv;
3052 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3053 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3055 WARN_ON(start > eb->len);
3056 WARN_ON(start + len > eb->start + eb->len);
3058 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3061 page = extent_buffer_page(eb, i);
3063 cur = min(len, (PAGE_CACHE_SIZE - offset));
3064 kaddr = kmap_atomic(page, KM_USER1);
3065 memcpy(dst, kaddr + offset, cur);
3066 kunmap_atomic(kaddr, KM_USER1);
3074 EXPORT_SYMBOL(read_extent_buffer);
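/*
 * map_private_extent_buffer and map_extent_buffer give the caller a
 * direct kmap_atomic mapping of the page that holds
 * [start, start + min_len).  The mapping never crosses a page boundary:
 * *map points at the start of the mapped chunk, *map_start is that
 * chunk's byte offset within the buffer, *map_len is its length, and
 * *token is what unmap_extent_buffer() needs.  A sketch of the usual
 * pattern, illustrative only, where start is the caller's (hypothetical)
 * byte offset into the buffer:
 *
 *	char *kaddr, *token;
 *	unsigned long map_start, map_len;
 *	u64 val;
 *
 *	if (!map_extent_buffer(eb, start, sizeof(u64), &token, &kaddr,
 *			       &map_start, &map_len, KM_USER0)) {
 *		memcpy(&val, kaddr + (start - map_start), sizeof(val));
 *		unmap_extent_buffer(eb, token, KM_USER0);
 *	}
 */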
3076 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
3077 unsigned long min_len, char **token, char **map,
3078 unsigned long *map_start,
3079 unsigned long *map_len, int km)
3081 size_t offset = start & (PAGE_CACHE_SIZE - 1);
3084 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3085 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3086 unsigned long end_i = (start_offset + start + min_len - 1) >>
3093 offset = start_offset;
3097 *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
3099 if (start + min_len > eb->len) {
3100 printk("bad mapping eb start %Lu len %lu, wanted %lu %lu\n", eb->start, eb->len, start, min_len);
3104 p = extent_buffer_page(eb, i);
3105 kaddr = kmap_atomic(p, km);
3107 *map = kaddr + offset;
3108 *map_len = PAGE_CACHE_SIZE - offset;
3111 EXPORT_SYMBOL(map_private_extent_buffer);
3113 int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
3114 unsigned long min_len,
3115 char **token, char **map,
3116 unsigned long *map_start,
3117 unsigned long *map_len, int km)
3121 if (eb->map_token) {
3122 unmap_extent_buffer(eb, eb->map_token, km);
3123 eb->map_token = NULL;
3126 err = map_private_extent_buffer(eb, start, min_len, token, map,
3127 map_start, map_len, km);
3129 eb->map_token = *token;
3131 eb->map_start = *map_start;
3132 eb->map_len = *map_len;
3136 EXPORT_SYMBOL(map_extent_buffer);
3138 void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
3140 kunmap_atomic(token, km);
3142 EXPORT_SYMBOL(unmap_extent_buffer);
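/*
 * memcmp_extent_buffer compares @len bytes of the buffer, starting at
 * byte offset @start, against a plain kernel buffer and returns the
 * usual memcmp() style result.
 */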
3144 int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
3145 unsigned long start,
3152 char *ptr = (char *)ptrv;
3153 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3154 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3157 WARN_ON(start > eb->len);
3158 WARN_ON(start + len > eb->start + eb->len);
3160 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3163 page = extent_buffer_page(eb, i);
3165 cur = min(len, (PAGE_CACHE_SIZE - offset));
3167 kaddr = kmap_atomic(page, KM_USER0);
3168 ret = memcmp(ptr, kaddr + offset, cur);
3169 kunmap_atomic(kaddr, KM_USER0);
3180 EXPORT_SYMBOL(memcmp_extent_buffer);
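/*
 * write_extent_buffer and memset_extent_buffer are the store-side
 * counterparts of read_extent_buffer: they copy from a kernel buffer
 * (or a constant byte) into the extent buffer, page by page, and expect
 * every touched page to already be uptodate.
 */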
3182 void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
3183 unsigned long start, unsigned long len)
3189 char *src = (char *)srcv;
3190 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3191 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3193 WARN_ON(start > eb->len);
3194 WARN_ON(start + len > eb->start + eb->len);
3196 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3199 page = extent_buffer_page(eb, i);
3200 WARN_ON(!PageUptodate(page));
3202 cur = min(len, PAGE_CACHE_SIZE - offset);
3203 kaddr = kmap_atomic(page, KM_USER1);
3204 memcpy(kaddr + offset, src, cur);
3205 kunmap_atomic(kaddr, KM_USER1);
3213 EXPORT_SYMBOL(write_extent_buffer);
3215 void memset_extent_buffer(struct extent_buffer *eb, char c,
3216 unsigned long start, unsigned long len)
3222 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3223 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3225 WARN_ON(start > eb->len);
3226 WARN_ON(start + len > eb->start + eb->len);
3228 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3231 page = extent_buffer_page(eb, i);
3232 WARN_ON(!PageUptodate(page));
3234 cur = min(len, PAGE_CACHE_SIZE - offset);
3235 kaddr = kmap_atomic(page, KM_USER0);
3236 memset(kaddr + offset, c, cur);
3237 kunmap_atomic(kaddr, KM_USER0);
3244 EXPORT_SYMBOL(memset_extent_buffer);
3246 void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
3247 unsigned long dst_offset, unsigned long src_offset,
3250 u64 dst_len = dst->len;
3255 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3256 unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3258 WARN_ON(src->len != dst_len);
3260 offset = (start_offset + dst_offset) &
3261 ((unsigned long)PAGE_CACHE_SIZE - 1);
3264 page = extent_buffer_page(dst, i);
3265 WARN_ON(!PageUptodate(page));
3267 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
3269 kaddr = kmap_atomic(page, KM_USER0);
3270 read_extent_buffer(src, kaddr + offset, src_offset, cur);
3271 kunmap_atomic(kaddr, KM_USER0);
3279 EXPORT_SYMBOL(copy_extent_buffer);
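/*
 * move_pages and copy_pages are the per-page helpers used by
 * memmove_extent_buffer and memcpy_extent_buffer below.  copy_pages does
 * a plain forward memcpy; move_pages copies backwards (and falls back to
 * memmove within a single page) so overlapping ranges are handled safely.
 */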
3281 static void move_pages(struct page *dst_page, struct page *src_page,
3282 unsigned long dst_off, unsigned long src_off,
3285 char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3286 if (dst_page == src_page) {
3287 memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
3289 char *src_kaddr = kmap_atomic(src_page, KM_USER1);
3290 char *p = dst_kaddr + dst_off + len;
3291 char *s = src_kaddr + src_off + len;
3293 while (len--)
3294 *--p = *--s;
3296 kunmap_atomic(src_kaddr, KM_USER1);
3298 kunmap_atomic(dst_kaddr, KM_USER0);
3301 static void copy_pages(struct page *dst_page, struct page *src_page,
3302 unsigned long dst_off, unsigned long src_off,
3305 char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3306 char *src_kaddr;
3308 if (dst_page != src_page)
3309 src_kaddr = kmap_atomic(src_page, KM_USER1);
3310 else
3311 src_kaddr = dst_kaddr;
3313 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
3314 kunmap_atomic(dst_kaddr, KM_USER0);
3315 if (dst_page != src_page)
3316 kunmap_atomic(src_kaddr, KM_USER1);
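/*
 * memcpy_extent_buffer and memmove_extent_buffer copy a range within one
 * extent buffer.  memcpy_extent_buffer walks forward and must not be used
 * when the destination overlaps a later source; memmove_extent_buffer
 * checks the offsets and copies from the end backwards when the ranges
 * overlap that way.
 */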
3319 void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3320 unsigned long src_offset, unsigned long len)
3323 size_t dst_off_in_page;
3324 size_t src_off_in_page;
3325 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3326 unsigned long dst_i;
3327 unsigned long src_i;
3329 if (src_offset + len > dst->len) {
3330 printk("memmove bogus src_offset %lu move len %lu len %lu\n",
3331 src_offset, len, dst->len);
3334 if (dst_offset + len > dst->len) {
3335 printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
3336 dst_offset, len, dst->len);
3341 dst_off_in_page = (start_offset + dst_offset) &
3342 ((unsigned long)PAGE_CACHE_SIZE - 1);
3343 src_off_in_page = (start_offset + src_offset) &
3344 ((unsigned long)PAGE_CACHE_SIZE - 1);
3346 dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3347 src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
3349 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
3351 cur = min_t(unsigned long, cur,
3352 (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
3354 copy_pages(extent_buffer_page(dst, dst_i),
3355 extent_buffer_page(dst, src_i),
3356 dst_off_in_page, src_off_in_page, cur);
3363 EXPORT_SYMBOL(memcpy_extent_buffer);
3365 void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3366 unsigned long src_offset, unsigned long len)
3369 size_t dst_off_in_page;
3370 size_t src_off_in_page;
3371 unsigned long dst_end = dst_offset + len - 1;
3372 unsigned long src_end = src_offset + len - 1;
3373 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3374 unsigned long dst_i;
3375 unsigned long src_i;
3377 if (src_offset + len > dst->len) {
3378 printk("memmove bogus src_offset %lu move len %lu len %lu\n",
3379 src_offset, len, dst->len);
3382 if (dst_offset + len > dst->len) {
3383 printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
3384 dst_offset, len, dst->len);
3387 if (dst_offset < src_offset) {
3388 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
3392 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
3393 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
3395 dst_off_in_page = (start_offset + dst_end) &
3396 ((unsigned long)PAGE_CACHE_SIZE - 1);
3397 src_off_in_page = (start_offset + src_end) &
3398 ((unsigned long)PAGE_CACHE_SIZE - 1);
3400 cur = min_t(unsigned long, len, src_off_in_page + 1);
3401 cur = min(cur, dst_off_in_page + 1);
3402 move_pages(extent_buffer_page(dst, dst_i),
3403 extent_buffer_page(dst, src_i),
3404 dst_off_in_page - cur + 1,
3405 src_off_in_page - cur + 1, cur);
3412 EXPORT_SYMBOL(memmove_extent_buffer);
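/*
 * try_release_extent_buffer is called when the page cache wants to drop
 * a metadata page.  If the buffer covering this page is only referenced
 * by the tree itself, its pages are released and the buffer is erased
 * from the rb-tree and freed; otherwise the release is refused.
 */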
3414 int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
3416 u64 start = page_offset(page);
3417 struct extent_buffer *eb;
3420 unsigned long num_pages;
3422 spin_lock(&tree->buffer_lock);
3423 eb = buffer_search(tree, start);
3427 if (atomic_read(&eb->refs) > 1) {
3431 /* at this point we can safely release the extent buffer */
3432 num_pages = num_extent_pages(eb->start, eb->len);
3433 for (i = 0; i < num_pages; i++)
3434 page_cache_release(extent_buffer_page(eb, i));
3435 rb_erase(&eb->rb_node, &tree->buffer);
3436 __free_extent_buffer(eb);
3438 spin_unlock(&tree->buffer_lock);
3441 EXPORT_SYMBOL(try_release_extent_buffer);