1 #include <linux/bitops.h>
2 #include <linux/slab.h>
6 #include <linux/pagemap.h>
7 #include <linux/page-flags.h>
8 #include <linux/module.h>
9 #include <linux/spinlock.h>
10 #include <linux/blkdev.h>
11 #include <linux/swap.h>
12 #include <linux/version.h>
13 #include <linux/writeback.h>
14 #include <linux/pagevec.h>
15 #include "extent_io.h"
16 #include "extent_map.h"
18 /* temporary define until extent_map moves out of btrfs */
19 struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
20 unsigned long extra_flags,
21 void (*ctor)(void *, struct kmem_cache *,
24 static struct kmem_cache *extent_state_cache;
25 static struct kmem_cache *extent_buffer_cache;
27 static LIST_HEAD(buffers);
28 static LIST_HEAD(states);
29 static DEFINE_SPINLOCK(leak_lock);
31 #define BUFFER_LRU_MAX 64
36 struct rb_node rb_node;
39 struct extent_page_data {
41 struct extent_io_tree *tree;
42 get_extent_t *get_extent;
45 int __init extent_io_init(void)
47 extent_state_cache = btrfs_cache_create("extent_state",
48 sizeof(struct extent_state), 0,
50 if (!extent_state_cache)
53 extent_buffer_cache = btrfs_cache_create("extent_buffers",
54 sizeof(struct extent_buffer), 0,
56 if (!extent_buffer_cache)
57 goto free_state_cache;
61 kmem_cache_destroy(extent_state_cache);
65 void extent_io_exit(void)
67 struct extent_state *state;
68 struct extent_buffer *eb;
70 while (!list_empty(&states)) {
71 state = list_entry(states.next, struct extent_state, leak_list);
72 printk("state leak: start %Lu end %Lu state %lu in tree %p refs %d\n", state->start, state->end, state->state, state->tree, atomic_read(&state->refs));
73 list_del(&state->leak_list);
74 kmem_cache_free(extent_state_cache, state);
78 while (!list_empty(&buffers)) {
79 eb = list_entry(buffers.next, struct extent_buffer, leak_list);
80 printk("buffer leak start %Lu len %lu refs %d\n", eb->start, eb->len, atomic_read(&eb->refs));
81 list_del(&eb->leak_list);
82 kmem_cache_free(extent_buffer_cache, eb);
84 if (extent_state_cache)
85 kmem_cache_destroy(extent_state_cache);
86 if (extent_buffer_cache)
87 kmem_cache_destroy(extent_buffer_cache);
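/*
 * initialize a new extent_io_tree: an empty state rbtree, zeroed dirty_bytes
 * accounting, and an empty buffer lru for the given address_space
 */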
90 void extent_io_tree_init(struct extent_io_tree *tree,
91 struct address_space *mapping, gfp_t mask)
93 tree->state.rb_node = NULL;
95 tree->dirty_bytes = 0;
96 spin_lock_init(&tree->lock);
97 spin_lock_init(&tree->lru_lock);
98 tree->mapping = mapping;
99 INIT_LIST_HEAD(&tree->buffer_lru);
103 EXPORT_SYMBOL(extent_io_tree_init);
105 void extent_io_tree_empty_lru(struct extent_io_tree *tree)
107 struct extent_buffer *eb;
108 while (!list_empty(&tree->buffer_lru)) {
109 eb = list_entry(tree->buffer_lru.next, struct extent_buffer,
111 list_del_init(&eb->lru);
112 free_extent_buffer(eb);
115 EXPORT_SYMBOL(extent_io_tree_empty_lru);
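/*
 * allocate an extent_state struct, add it to the leak-tracking list and
 * start it off with a single reference
 */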
117 struct extent_state *alloc_extent_state(gfp_t mask)
119 struct extent_state *state;
122 state = kmem_cache_alloc(extent_state_cache, mask);
128 spin_lock_irqsave(&leak_lock, flags);
129 list_add(&state->leak_list, &states);
130 spin_unlock_irqrestore(&leak_lock, flags);
132 atomic_set(&state->refs, 1);
133 init_waitqueue_head(&state->wq);
136 EXPORT_SYMBOL(alloc_extent_state);
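/*
 * drop a reference on an extent_state; it is freed (and removed from the
 * leak-tracking list) once the last reference goes away
 */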
138 void free_extent_state(struct extent_state *state)
142 if (atomic_dec_and_test(&state->refs)) {
144 WARN_ON(state->tree);
145 spin_lock_irqsave(&leak_lock, flags);
146 list_del(&state->leak_list);
147 spin_unlock_irqrestore(&leak_lock, flags);
148 kmem_cache_free(extent_state_cache, state);
151 EXPORT_SYMBOL(free_extent_state);
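/*
 * insert 'node' into the rbtree keyed by its range end. If 'offset' already
 * falls inside an existing entry, that entry is returned instead of linking
 * the new node.
 */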
153 static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
154 struct rb_node *node)
156 struct rb_node **p = &root->rb_node;
157 struct rb_node *parent = NULL;
158 struct tree_entry *entry;
162 entry = rb_entry(parent, struct tree_entry, rb_node);
164 if (offset < entry->start)
166 else if (offset > entry->end)
172 entry = rb_entry(node, struct tree_entry, rb_node);
173 rb_link_node(node, parent, p);
174 rb_insert_color(node, root);
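/*
 * find the tree entry that contains 'offset'. The tree->last hit cache is
 * checked first; on a miss, prev_ret and next_ret (when non-NULL) are set
 * to the nearest entries on either side of 'offset'.
 */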
178 static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
179 struct rb_node **prev_ret,
180 struct rb_node **next_ret)
182 struct rb_root *root = &tree->state;
183 struct rb_node *n = root->rb_node;
184 struct rb_node *prev = NULL;
185 struct rb_node *orig_prev = NULL;
186 struct tree_entry *entry;
187 struct tree_entry *prev_entry = NULL;
190 struct extent_state *state;
192 if (state->start <= offset && offset <= state->end)
193 return &tree->last->rb_node;
196 entry = rb_entry(n, struct tree_entry, rb_node);
200 if (offset < entry->start)
202 else if (offset > entry->end)
205 tree->last = rb_entry(n, struct extent_state, rb_node);
212 while (prev && offset > prev_entry->end) {
213 prev = rb_next(prev);
214 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
221 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
222 while (prev && offset < prev_entry->start) {
223 prev = rb_prev(prev);
224 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
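/*
 * wrapper around __etree_search: when there is no exact match it returns
 * the first entry that ends after 'offset', updating the tree->last cache
 */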
231 static inline struct rb_node *tree_search(struct extent_io_tree *tree,
234 struct rb_node *prev = NULL;
237 ret = __etree_search(tree, offset, &prev, NULL);
240 tree->last = rb_entry(prev, struct extent_state,
249 * utility function to look for merge candidates inside a given range.
250 * Any extents with matching state are merged together into a single
251 * extent in the tree. Extents with EXTENT_IOBITS in their state field
252 * are not merged because the end_io handlers need to be able to do
253 * operations on them without sleeping (or doing allocations/splits).
255 * This should be called with the tree lock held.
257 static int merge_state(struct extent_io_tree *tree,
258 struct extent_state *state)
260 struct extent_state *other;
261 struct rb_node *other_node;
263 if (state->state & EXTENT_IOBITS)
266 other_node = rb_prev(&state->rb_node);
268 other = rb_entry(other_node, struct extent_state, rb_node);
269 if (other->end == state->start - 1 &&
270 other->state == state->state) {
271 state->start = other->start;
273 if (tree->last == other)
275 rb_erase(&other->rb_node, &tree->state);
276 free_extent_state(other);
279 other_node = rb_next(&state->rb_node);
281 other = rb_entry(other_node, struct extent_state, rb_node);
282 if (other->start == state->end + 1 &&
283 other->state == state->state) {
284 other->start = state->start;
286 if (tree->last == state)
288 rb_erase(&state->rb_node, &tree->state);
289 free_extent_state(state);
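/*
 * give the tree owner a chance to account for bits being set on this state
 * via the set_bit_hook (clear_state_cb does the same for bits being cleared)
 */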
295 static void set_state_cb(struct extent_io_tree *tree,
296 struct extent_state *state,
299 if (tree->ops && tree->ops->set_bit_hook) {
300 tree->ops->set_bit_hook(tree->mapping->host, state->start,
301 state->end, state->state, bits);
305 static void clear_state_cb(struct extent_io_tree *tree,
306 struct extent_state *state,
309 if (tree->ops && tree->ops->clear_bit_hook) {
310 tree->ops->clear_bit_hook(tree->mapping->host, state->start,
311 state->end, state->state, bits);
316 * insert an extent_state struct into the tree. 'bits' are set on the
317 * struct before it is inserted.
319 * This may return -EEXIST if the extent is already there, in which case the
320 * state struct is freed.
322 * The tree lock is not taken internally. This is a utility function and
323 * probably isn't what you want to call (see set/clear_extent_bit).
325 static int insert_state(struct extent_io_tree *tree,
326 struct extent_state *state, u64 start, u64 end,
329 struct rb_node *node;
332 printk("end < start %Lu %Lu\n", end, start);
335 if (bits & EXTENT_DIRTY)
336 tree->dirty_bytes += end - start + 1;
337 set_state_cb(tree, state, bits);
338 state->state |= bits;
339 state->start = start;
341 node = tree_insert(&tree->state, end, &state->rb_node);
343 struct extent_state *found;
344 found = rb_entry(node, struct extent_state, rb_node);
345 printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, start, end);
346 free_extent_state(state);
351 merge_state(tree, state);
356 * split a given extent state struct in two, inserting the preallocated
357 * struct 'prealloc' as the newly created second half. 'split' indicates an
358 * offset inside 'orig' where it should be split.
361 * the tree has 'orig' at [orig->start, orig->end]. After calling, there
362 * are two extent state structs in the tree:
363 * prealloc: [orig->start, split - 1]
364 * orig: [ split, orig->end ]
366 * The tree locks are not taken by this function. They need to be held
369 static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
370 struct extent_state *prealloc, u64 split)
372 struct rb_node *node;
373 prealloc->start = orig->start;
374 prealloc->end = split - 1;
375 prealloc->state = orig->state;
378 node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
380 struct extent_state *found;
381 found = rb_entry(node, struct extent_state, rb_node);
382 printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, prealloc->start, prealloc->end);
383 free_extent_state(prealloc);
386 prealloc->tree = tree;
391 * utility function to clear some bits in an extent state struct.
392 * it will optionally wake up anyone waiting on this state (wake == 1), or
393 * forcibly remove the state from the tree (delete == 1).
395 * If no bits are set on the state struct after clearing things, the
396 * struct is freed and removed from the tree
398 static int clear_state_bit(struct extent_io_tree *tree,
399 struct extent_state *state, int bits, int wake,
402 int ret = state->state & bits;
404 if ((bits & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
405 u64 range = state->end - state->start + 1;
406 WARN_ON(range > tree->dirty_bytes);
407 tree->dirty_bytes -= range;
409 clear_state_cb(tree, state, bits);
410 state->state &= ~bits;
413 if (delete || state->state == 0) {
415 clear_state_cb(tree, state, state->state);
416 if (tree->last == state) {
417 tree->last = extent_state_next(state);
419 rb_erase(&state->rb_node, &tree->state);
421 free_extent_state(state);
426 merge_state(tree, state);
432 * clear some bits on a range in the tree. This may require splitting
433 * or inserting elements in the tree, so the gfp mask is used to
434 * indicate which allocations or sleeping are allowed.
436 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
437 * the given range from the tree regardless of state (ie for truncate).
439 * the range [start, end] is inclusive.
441 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
442 * bits were already set, or zero if none of the bits were already set.
444 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
445 int bits, int wake, int delete, gfp_t mask)
447 struct extent_state *state;
448 struct extent_state *prealloc = NULL;
449 struct rb_node *node;
455 if (!prealloc && (mask & __GFP_WAIT)) {
456 prealloc = alloc_extent_state(mask);
461 spin_lock_irqsave(&tree->lock, flags);
463 * this search will find the extents that end after
466 node = tree_search(tree, start);
469 state = rb_entry(node, struct extent_state, rb_node);
470 if (state->start > end)
472 WARN_ON(state->end < start);
475 * | ---- desired range ---- |
477 * | ------------- state -------------- |
479 * We need to split the extent we found, and may flip
480 * bits on second half.
482 * If the extent we found extends past our range, we
483 * just split and search again. It'll get split again
484 * the next time though.
486 * If the extent we found is inside our range, we clear
487 * the desired bit on it.
490 if (state->start < start) {
492 prealloc = alloc_extent_state(GFP_ATOMIC);
493 err = split_state(tree, state, prealloc, start);
494 BUG_ON(err == -EEXIST);
498 if (state->end <= end) {
499 start = state->end + 1;
500 set |= clear_state_bit(tree, state, bits,
503 start = state->start;
508 * | ---- desired range ---- |
510 * We need to split the extent, and clear the bit
513 if (state->start <= end && state->end > end) {
515 prealloc = alloc_extent_state(GFP_ATOMIC);
516 err = split_state(tree, state, prealloc, end + 1);
517 BUG_ON(err == -EEXIST);
521 set |= clear_state_bit(tree, prealloc, bits,
527 start = state->end + 1;
528 set |= clear_state_bit(tree, state, bits, wake, delete);
532 spin_unlock_irqrestore(&tree->lock, flags);
534 free_extent_state(prealloc);
541 spin_unlock_irqrestore(&tree->lock, flags);
542 if (mask & __GFP_WAIT)
546 EXPORT_SYMBOL(clear_extent_bit);
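/*
 * sleep until this extent_state is woken (its bits changed); the tree lock
 * is dropped across the schedule and retaken afterwards
 */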
548 static int wait_on_state(struct extent_io_tree *tree,
549 struct extent_state *state)
552 prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
553 spin_unlock_irq(&tree->lock);
555 spin_lock_irq(&tree->lock);
556 finish_wait(&state->wq, &wait);
561 * waits for one or more bits to clear on a range in the state tree.
562 * The range [start, end] is inclusive.
563 * The tree lock is taken by this function
565 int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
567 struct extent_state *state;
568 struct rb_node *node;
570 spin_lock_irq(&tree->lock);
574 * this search will find all the extents that end after
577 node = tree_search(tree, start);
581 state = rb_entry(node, struct extent_state, rb_node);
583 if (state->start > end)
586 if (state->state & bits) {
587 start = state->start;
588 atomic_inc(&state->refs);
589 wait_on_state(tree, state);
590 free_extent_state(state);
593 start = state->end + 1;
598 if (need_resched()) {
599 spin_unlock_irq(&tree->lock);
601 spin_lock_irq(&tree->lock);
605 spin_unlock_irq(&tree->lock);
608 EXPORT_SYMBOL(wait_extent_bit);
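/*
 * set bits on a single extent_state, updating the dirty_bytes accounting
 * and calling the set_bit hook
 */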
610 static void set_state_bits(struct extent_io_tree *tree,
611 struct extent_state *state,
614 if ((bits & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
615 u64 range = state->end - state->start + 1;
616 tree->dirty_bytes += range;
618 set_state_cb(tree, state, bits);
619 state->state |= bits;
623 * set some bits on a range in the tree. This may require allocations
624 * or sleeping, so the gfp mask is used to indicate what is allowed.
626 * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
627 * range already has the desired bits set. The start of the existing
628 * range is returned in failed_start in this case.
630 * [start, end] is inclusive
631 * This takes the tree lock.
633 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits,
634 int exclusive, u64 *failed_start, gfp_t mask)
636 struct extent_state *state;
637 struct extent_state *prealloc = NULL;
638 struct rb_node *node;
645 if (!prealloc && (mask & __GFP_WAIT)) {
646 prealloc = alloc_extent_state(mask);
651 spin_lock_irqsave(&tree->lock, flags);
653 * this search will find all the extents that end after
656 node = tree_search(tree, start);
658 err = insert_state(tree, prealloc, start, end, bits);
660 BUG_ON(err == -EEXIST);
664 state = rb_entry(node, struct extent_state, rb_node);
665 last_start = state->start;
666 last_end = state->end;
669 * | ---- desired range ---- |
672 * Just lock what we found and keep going
674 if (state->start == start && state->end <= end) {
675 set = state->state & bits;
676 if (set && exclusive) {
677 *failed_start = state->start;
681 set_state_bits(tree, state, bits);
682 start = state->end + 1;
683 merge_state(tree, state);
688 * | ---- desired range ---- |
691 * | ------------- state -------------- |
693 * We need to split the extent we found, and may flip bits on
696 * If the extent we found extends past our
697 * range, we just split and search again. It'll get split
698 * again the next time though.
700 * If the extent we found is inside our range, we set the
703 if (state->start < start) {
704 set = state->state & bits;
705 if (exclusive && set) {
706 *failed_start = start;
710 err = split_state(tree, state, prealloc, start);
711 BUG_ON(err == -EEXIST);
715 if (state->end <= end) {
716 set_state_bits(tree, state, bits);
717 start = state->end + 1;
718 merge_state(tree, state);
720 start = state->start;
725 * | ---- desired range ---- |
726 * | state | or | state |
728 * There's a hole, we need to insert something in it and
729 * ignore the extent we found.
731 if (state->start > start) {
733 if (end < last_start)
736 this_end = last_start - 1;
737 err = insert_state(tree, prealloc, start, this_end,
740 BUG_ON(err == -EEXIST);
743 start = this_end + 1;
747 * | ---- desired range ---- |
749 * We need to split the extent, and set the bit
752 if (state->start <= end && state->end > end) {
753 set = state->state & bits;
754 if (exclusive && set) {
755 *failed_start = start;
759 err = split_state(tree, state, prealloc, end + 1);
760 BUG_ON(err == -EEXIST);
762 set_state_bits(tree, prealloc, bits);
763 merge_state(tree, prealloc);
771 spin_unlock_irqrestore(&tree->lock, flags);
773 free_extent_state(prealloc);
780 spin_unlock_irqrestore(&tree->lock, flags);
781 if (mask & __GFP_WAIT)
785 EXPORT_SYMBOL(set_extent_bit);
787 /* wrappers around set/clear extent bit */
788 int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
791 return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
794 EXPORT_SYMBOL(set_extent_dirty);
796 int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
797 int bits, gfp_t mask)
799 return set_extent_bit(tree, start, end, bits, 0, NULL,
802 EXPORT_SYMBOL(set_extent_bits);
804 int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
805 int bits, gfp_t mask)
807 return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
809 EXPORT_SYMBOL(clear_extent_bits);
811 int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
814 return set_extent_bit(tree, start, end,
815 EXTENT_DELALLOC | EXTENT_DIRTY, 0, NULL,
818 EXPORT_SYMBOL(set_extent_delalloc);
820 int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
823 return clear_extent_bit(tree, start, end,
824 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
826 EXPORT_SYMBOL(clear_extent_dirty);
828 int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
831 return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
834 EXPORT_SYMBOL(set_extent_new);
836 int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
839 return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
841 EXPORT_SYMBOL(clear_extent_new);
843 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
846 return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
849 EXPORT_SYMBOL(set_extent_uptodate);
851 int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
854 return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
856 EXPORT_SYMBOL(clear_extent_uptodate);
858 int set_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
861 return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
864 EXPORT_SYMBOL(set_extent_writeback);
866 int clear_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
869 return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
871 EXPORT_SYMBOL(clear_extent_writeback);
873 int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end)
875 return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
877 EXPORT_SYMBOL(wait_on_extent_writeback);
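/*
 * set EXTENT_LOCKED on the range, sleeping until any conflicting lock is
 * released and retrying until the whole range is locked
 */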
879 int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
884 err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
885 &failed_start, mask);
886 if (err == -EEXIST && (mask & __GFP_WAIT)) {
887 wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
888 start = failed_start;
892 WARN_ON(start > end);
896 EXPORT_SYMBOL(lock_extent);
898 int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
901 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
903 EXPORT_SYMBOL(unlock_extent);
906 * helper function to set pages and extents in the tree dirty
908 int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end)
910 unsigned long index = start >> PAGE_CACHE_SHIFT;
911 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
914 while (index <= end_index) {
915 page = find_get_page(tree->mapping, index);
917 __set_page_dirty_nobuffers(page);
918 page_cache_release(page);
921 set_extent_dirty(tree, start, end, GFP_NOFS);
924 EXPORT_SYMBOL(set_range_dirty);
927 * helper function to set both pages and extents in the tree writeback
929 int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
931 unsigned long index = start >> PAGE_CACHE_SHIFT;
932 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
935 while (index <= end_index) {
936 page = find_get_page(tree->mapping, index);
938 set_page_writeback(page);
939 page_cache_release(page);
942 set_extent_writeback(tree, start, end, GFP_NOFS);
945 EXPORT_SYMBOL(set_range_writeback);
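/*
 * find the first extent at or after 'start' with any of the given bits set
 * and return its range in start_ret/end_ret
 */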
947 int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
948 u64 *start_ret, u64 *end_ret, int bits)
950 struct rb_node *node;
951 struct extent_state *state;
954 spin_lock_irq(&tree->lock);
956 * this search will find all the extents that end after
959 node = tree_search(tree, start);
965 state = rb_entry(node, struct extent_state, rb_node);
966 if (state->end >= start && (state->state & bits)) {
967 *start_ret = state->start;
968 *end_ret = state->end;
972 node = rb_next(node);
977 spin_unlock_irq(&tree->lock);
980 EXPORT_SYMBOL(find_first_extent_bit);
982 struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
985 struct rb_node *node;
986 struct extent_state *state;
989 * this search will find all the extents that end after
992 node = tree_search(tree, start);
998 state = rb_entry(node, struct extent_state, rb_node);
999 if (state->end >= start && (state->state & bits)) {
1002 node = rb_next(node);
1009 EXPORT_SYMBOL(find_first_extent_bit_state);
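/*
 * starting at *start, walk forward locking contiguous EXTENT_DELALLOC
 * extents until max_bytes is reached; *start and *end are set to the
 * locked range
 */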
1011 u64 find_lock_delalloc_range(struct extent_io_tree *tree,
1012 u64 *start, u64 *end, u64 max_bytes)
1014 struct rb_node *node;
1015 struct extent_state *state;
1016 u64 cur_start = *start;
1018 u64 total_bytes = 0;
1020 spin_lock_irq(&tree->lock);
1022 * this search will find all the extents that end after
1026 node = tree_search(tree, cur_start);
1034 state = rb_entry(node, struct extent_state, rb_node);
1035 if (found && state->start != cur_start) {
1038 if (!(state->state & EXTENT_DELALLOC)) {
1044 struct extent_state *prev_state;
1045 struct rb_node *prev_node = node;
1047 prev_node = rb_prev(prev_node);
1050 prev_state = rb_entry(prev_node,
1051 struct extent_state,
1053 if (!(prev_state->state & EXTENT_DELALLOC))
1059 if (state->state & EXTENT_LOCKED) {
1061 atomic_inc(&state->refs);
1062 prepare_to_wait(&state->wq, &wait,
1063 TASK_UNINTERRUPTIBLE);
1064 spin_unlock_irq(&tree->lock);
1066 spin_lock_irq(&tree->lock);
1067 finish_wait(&state->wq, &wait);
1068 free_extent_state(state);
1071 set_state_cb(tree, state, EXTENT_LOCKED);
1072 state->state |= EXTENT_LOCKED;
1074 *start = state->start;
1077 cur_start = state->end + 1;
1078 node = rb_next(node);
1081 total_bytes += state->end - state->start + 1;
1082 if (total_bytes >= max_bytes)
1086 spin_unlock_irq(&tree->lock);
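/*
 * count the bytes in [*start, search_end] covered by extents with the given
 * bits set, stopping once max_bytes is reached. A whole-tree EXTENT_DIRTY
 * count from offset 0 uses the cached dirty_bytes total.
 */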
1090 u64 count_range_bits(struct extent_io_tree *tree,
1091 u64 *start, u64 search_end, u64 max_bytes,
1094 struct rb_node *node;
1095 struct extent_state *state;
1096 u64 cur_start = *start;
1097 u64 total_bytes = 0;
1100 if (search_end <= cur_start) {
1101 printk("search_end %Lu start %Lu\n", search_end, cur_start);
1106 spin_lock_irq(&tree->lock);
1107 if (cur_start == 0 && bits == EXTENT_DIRTY) {
1108 total_bytes = tree->dirty_bytes;
1112 * this search will find all the extents that end after
1115 node = tree_search(tree, cur_start);
1121 state = rb_entry(node, struct extent_state, rb_node);
1122 if (state->start > search_end)
1124 if (state->end >= cur_start && (state->state & bits)) {
1125 total_bytes += min(search_end, state->end) + 1 -
1126 max(cur_start, state->start);
1127 if (total_bytes >= max_bytes)
1130 *start = state->start;
1134 node = rb_next(node);
1139 spin_unlock_irq(&tree->lock);
1143 * helper function to lock both pages and extents in the tree.
1144 * pages must be locked first.
1146 int lock_range(struct extent_io_tree *tree, u64 start, u64 end)
1148 unsigned long index = start >> PAGE_CACHE_SHIFT;
1149 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1153 while (index <= end_index) {
1154 page = grab_cache_page(tree->mapping, index);
1160 err = PTR_ERR(page);
1165 lock_extent(tree, start, end, GFP_NOFS);
1170 * we failed above in getting the page at 'index', so we undo here
1171 * up to but not including the page at 'index'
1174 index = start >> PAGE_CACHE_SHIFT;
1175 while (index < end_index) {
1176 page = find_get_page(tree->mapping, index);
1178 page_cache_release(page);
1183 EXPORT_SYMBOL(lock_range);
1186 * helper function to unlock both pages and extents in the tree.
1188 int unlock_range(struct extent_io_tree *tree, u64 start, u64 end)
1190 unsigned long index = start >> PAGE_CACHE_SHIFT;
1191 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1194 while (index <= end_index) {
1195 page = find_get_page(tree->mapping, index);
1197 page_cache_release(page);
1200 unlock_extent(tree, start, end, GFP_NOFS);
1203 EXPORT_SYMBOL(unlock_range);
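/*
 * stash a private value in the extent_state that starts exactly at 'start';
 * fails if no such state exists (get_state_private reads it back)
 */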
1205 int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
1207 struct rb_node *node;
1208 struct extent_state *state;
1211 spin_lock_irq(&tree->lock);
1213 * this search will find all the extents that end after
1216 node = tree_search(tree, start);
1221 state = rb_entry(node, struct extent_state, rb_node);
1222 if (state->start != start) {
1226 state->private = private;
1228 spin_unlock_irq(&tree->lock);
1232 int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
1234 struct rb_node *node;
1235 struct extent_state *state;
1238 spin_lock_irq(&tree->lock);
1240 * this search will find all the extents that end after
1243 node = tree_search(tree, start);
1248 state = rb_entry(node, struct extent_state, rb_node);
1249 if (state->start != start) {
1253 *private = state->private;
1255 spin_unlock_irq(&tree->lock);
1260 * searches a range in the state tree for a given mask.
1261 * If 'filled' == 1, this returns 1 only if every extent in the range
1262 * has the bits set. Otherwise, 1 is returned if any bit in the
1263 * range is found set.
1265 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
1266 int bits, int filled)
1268 struct extent_state *state = NULL;
1269 struct rb_node *node;
1271 unsigned long flags;
1273 spin_lock_irqsave(&tree->lock, flags);
1274 node = tree_search(tree, start);
1275 while (node && start <= end) {
1276 state = rb_entry(node, struct extent_state, rb_node);
1278 if (filled && state->start > start) {
1283 if (state->start > end)
1286 if (state->state & bits) {
1290 } else if (filled) {
1294 start = state->end + 1;
1297 node = rb_next(node);
1304 spin_unlock_irqrestore(&tree->lock, flags);
1307 EXPORT_SYMBOL(test_range_bit);
1310 * helper function to set a given page up to date if all the
1311 * extents in the tree for that page are up to date
1313 static int check_page_uptodate(struct extent_io_tree *tree,
1316 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1317 u64 end = start + PAGE_CACHE_SIZE - 1;
1318 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
1319 SetPageUptodate(page);
1324 * helper function to unlock a page if all the extents in the tree
1325 * for that page are unlocked
1327 static int check_page_locked(struct extent_io_tree *tree,
1330 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1331 u64 end = start + PAGE_CACHE_SIZE - 1;
1332 if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
1338 * helper function to end page writeback if all the extents
1339 * in the tree for that page are done with writeback
1341 static int check_page_writeback(struct extent_io_tree *tree,
1344 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1345 u64 end = start + PAGE_CACHE_SIZE - 1;
1346 if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
1347 end_page_writeback(page);
1351 /* lots and lots of room for performance fixes in the end_bio funcs */
1354 * after a writepage IO is done, we need to:
1355 * clear the uptodate bits on error
1356 * clear the writeback bits in the extent tree for this IO
1357 * end_page_writeback if the page has no more pending IO
1359 * Scheduling is not allowed, so the extent state tree is expected
1360 * to have one and only one object corresponding to this IO.
1362 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1363 static void end_bio_extent_writepage(struct bio *bio, int err)
1365 static int end_bio_extent_writepage(struct bio *bio,
1366 unsigned int bytes_done, int err)
1369 int uptodate = err == 0;
1370 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1371 struct extent_state *state = bio->bi_private;
1372 struct extent_io_tree *tree = state->tree;
1373 struct rb_node *node;
1379 unsigned long flags;
1381 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1386 struct page *page = bvec->bv_page;
1387 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1389 end = start + bvec->bv_len - 1;
1391 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1396 if (--bvec >= bio->bi_io_vec)
1397 prefetchw(&bvec->bv_page->flags);
1399 if (tree->ops && tree->ops->writepage_end_io_hook) {
1400 ret = tree->ops->writepage_end_io_hook(page, start,
1406 if (!uptodate && tree->ops &&
1407 tree->ops->writepage_io_failed_hook) {
1408 ret = tree->ops->writepage_io_failed_hook(bio, page,
1412 uptodate = (err == 0);
1418 clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
1419 ClearPageUptodate(page);
1424 * bios can get merged in funny ways, and so we need to
1425 * be careful with the state variable. We know the
1426 * state won't be merged with others because it has
1427 * WRITEBACK set, but we can't be sure each biovec is
1428 * sequential in the file. So, if our cached state
1429 * doesn't match the expected end, search the tree
1430 * for the correct one.
1433 spin_lock_irqsave(&tree->lock, flags);
1434 if (!state || state->end != end) {
1436 node = __etree_search(tree, start, NULL, NULL);
1438 state = rb_entry(node, struct extent_state,
1440 if (state->end != end ||
1441 !(state->state & EXTENT_WRITEBACK))
1445 spin_unlock_irqrestore(&tree->lock, flags);
1446 clear_extent_writeback(tree, start,
1453 struct extent_state *clear = state;
1455 node = rb_prev(&state->rb_node);
1457 state = rb_entry(node,
1458 struct extent_state,
1464 clear_state_bit(tree, clear, EXTENT_WRITEBACK,
1475 /* before releasing the lock, make sure the next state
1476 * variable has the expected bits set and corresponds
1477 * to the correct offsets in the file
1479 if (state && (state->end + 1 != start ||
1480 !(state->state & EXTENT_WRITEBACK))) {
1483 spin_unlock_irqrestore(&tree->lock, flags);
1487 end_page_writeback(page);
1489 check_page_writeback(tree, page);
1490 } while (bvec >= bio->bi_io_vec);
1492 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1498 * after a readpage IO is done, we need to:
1499 * clear the uptodate bits on error
1500 * set the uptodate bits if things worked
1501 * set the page up to date if all extents in the tree are uptodate
1502 * clear the lock bit in the extent tree
1503 * unlock the page if there are no other extents locked for it
1505 * Scheduling is not allowed, so the extent state tree is expected
1506 * to have one and only one object corresponding to this IO.
1508 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1509 static void end_bio_extent_readpage(struct bio *bio, int err)
1511 static int end_bio_extent_readpage(struct bio *bio,
1512 unsigned int bytes_done, int err)
1515 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1516 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1517 struct extent_state *state = bio->bi_private;
1518 struct extent_io_tree *tree = state->tree;
1519 struct rb_node *node;
1523 unsigned long flags;
1527 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1533 struct page *page = bvec->bv_page;
1534 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1536 end = start + bvec->bv_len - 1;
1538 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1543 if (--bvec >= bio->bi_io_vec)
1544 prefetchw(&bvec->bv_page->flags);
1546 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
1547 ret = tree->ops->readpage_end_io_hook(page, start, end,
1552 if (!uptodate && tree->ops &&
1553 tree->ops->readpage_io_failed_hook) {
1554 ret = tree->ops->readpage_io_failed_hook(bio, page,
1559 test_bit(BIO_UPTODATE, &bio->bi_flags);
1564 spin_lock_irqsave(&tree->lock, flags);
1565 if (!state || state->end != end) {
1567 node = __etree_search(tree, start, NULL, NULL);
1569 state = rb_entry(node, struct extent_state,
1571 if (state->end != end ||
1572 !(state->state & EXTENT_LOCKED))
1576 spin_unlock_irqrestore(&tree->lock, flags);
1578 set_extent_uptodate(tree, start, end,
1580 unlock_extent(tree, start, end, GFP_ATOMIC);
1587 struct extent_state *clear = state;
1589 node = rb_prev(&state->rb_node);
1591 state = rb_entry(node,
1592 struct extent_state,
1598 set_state_cb(tree, clear, EXTENT_UPTODATE);
1599 clear->state |= EXTENT_UPTODATE;
1601 clear_state_bit(tree, clear, EXTENT_LOCKED,
1612 /* before releasing the lock, make sure the next state
1613 * variable has the expected bits set and corresponds
1614 * to the correct offsets in the file
1616 if (state && (state->end + 1 != start ||
1617 !(state->state & EXTENT_LOCKED))) {
1620 spin_unlock_irqrestore(&tree->lock, flags);
1624 SetPageUptodate(page);
1626 ClearPageUptodate(page);
1632 check_page_uptodate(tree, page);
1634 ClearPageUptodate(page);
1637 check_page_locked(tree, page);
1639 } while (bvec >= bio->bi_io_vec);
1642 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1648 * IO done from prepare_write is pretty simple, we just unlock
1649 * the structs in the extent tree when done, and set the uptodate bits
1652 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1653 static void end_bio_extent_preparewrite(struct bio *bio, int err)
1655 static int end_bio_extent_preparewrite(struct bio *bio,
1656 unsigned int bytes_done, int err)
1659 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1660 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1661 struct extent_state *state = bio->bi_private;
1662 struct extent_io_tree *tree = state->tree;
1666 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1672 struct page *page = bvec->bv_page;
1673 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1675 end = start + bvec->bv_len - 1;
1677 if (--bvec >= bio->bi_io_vec)
1678 prefetchw(&bvec->bv_page->flags);
1681 set_extent_uptodate(tree, start, end, GFP_ATOMIC);
1683 ClearPageUptodate(page);
1687 unlock_extent(tree, start, end, GFP_ATOMIC);
1689 } while (bvec >= bio->bi_io_vec);
1692 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1698 extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
1703 bio = bio_alloc(gfp_flags, nr_vecs);
1705 if (bio == NULL && (current->flags & PF_MEMALLOC)) {
1706 while (!bio && (nr_vecs /= 2))
1707 bio = bio_alloc(gfp_flags, nr_vecs);
1712 bio->bi_bdev = bdev;
1713 bio->bi_sector = first_sector;
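/*
 * send a filled bio down to the block layer (or the tree's submit_bio_hook),
 * after pointing bi_private at the extent_state that covers the last page
 * in the bio
 */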
1718 static int submit_one_bio(int rw, struct bio *bio, int mirror_num)
1721 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1722 struct page *page = bvec->bv_page;
1723 struct extent_io_tree *tree = bio->bi_private;
1724 struct rb_node *node;
1725 struct extent_state *state;
1729 start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
1730 end = start + bvec->bv_len - 1;
1732 spin_lock_irq(&tree->lock);
1733 node = __etree_search(tree, start, NULL, NULL);
1735 state = rb_entry(node, struct extent_state, rb_node);
1736 while (state->end < end) {
1737 node = rb_next(node);
1738 state = rb_entry(node, struct extent_state, rb_node);
1740 BUG_ON(state->end != end);
1741 spin_unlock_irq(&tree->lock);
1743 bio->bi_private = state;
1747 if (tree->ops && tree->ops->submit_bio_hook)
1748 tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
1751 submit_bio(rw, bio);
1752 if (bio_flagged(bio, BIO_EOPNOTSUPP))
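/*
 * add a page to the current bio if it is contiguous, otherwise submit the
 * old bio and start a new one. The in-flight bio is passed back through
 * bio_ret so callers can keep batching pages into it.
 */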
1758 static int submit_extent_page(int rw, struct extent_io_tree *tree,
1759 struct page *page, sector_t sector,
1760 size_t size, unsigned long offset,
1761 struct block_device *bdev,
1762 struct bio **bio_ret,
1763 unsigned long max_pages,
1764 bio_end_io_t end_io_func,
1771 if (bio_ret && *bio_ret) {
1773 if (bio->bi_sector + (bio->bi_size >> 9) != sector ||
1774 (tree->ops && tree->ops->merge_bio_hook &&
1775 tree->ops->merge_bio_hook(page, offset, size, bio)) ||
1776 bio_add_page(bio, page, size, offset) < size) {
1777 ret = submit_one_bio(rw, bio, mirror_num);
1783 nr = bio_get_nr_vecs(bdev);
1784 bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
1786 printk("failed to allocate bio nr %d\n", nr);
1790 bio_add_page(bio, page, size, offset);
1791 bio->bi_end_io = end_io_func;
1792 bio->bi_private = tree;
1797 ret = submit_one_bio(rw, bio, mirror_num);
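/*
 * mark a page as belonging to the extent_io code by setting page->private,
 * taking an extra reference on the page
 */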
1803 void set_page_extent_mapped(struct page *page)
1805 if (!PagePrivate(page)) {
1806 SetPagePrivate(page);
1807 WARN_ON(!page->mapping->a_ops->invalidatepage);
1808 set_page_private(page, EXTENT_PAGE_PRIVATE);
1809 page_cache_get(page);
1813 void set_page_extent_head(struct page *page, unsigned long len)
1815 set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
1819 * basic readpage implementation. Locked extent state structs are inserted
1820 * into the tree; they are removed when the IO is done (by the end_io
1823 static int __extent_read_full_page(struct extent_io_tree *tree,
1825 get_extent_t *get_extent,
1826 struct bio **bio, int mirror_num)
1828 struct inode *inode = page->mapping->host;
1829 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1830 u64 page_end = start + PAGE_CACHE_SIZE - 1;
1834 u64 last_byte = i_size_read(inode);
1838 struct extent_map *em;
1839 struct block_device *bdev;
1842 size_t page_offset = 0;
1844 size_t blocksize = inode->i_sb->s_blocksize;
1846 set_page_extent_mapped(page);
1849 lock_extent(tree, start, end, GFP_NOFS);
1851 while (cur <= end) {
1852 if (cur >= last_byte) {
1854 iosize = PAGE_CACHE_SIZE - page_offset;
1855 userpage = kmap_atomic(page, KM_USER0);
1856 memset(userpage + page_offset, 0, iosize);
1857 flush_dcache_page(page);
1858 kunmap_atomic(userpage, KM_USER0);
1859 set_extent_uptodate(tree, cur, cur + iosize - 1,
1861 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1864 em = get_extent(inode, page, page_offset, cur,
1866 if (IS_ERR(em) || !em) {
1868 unlock_extent(tree, cur, end, GFP_NOFS);
1872 extent_offset = cur - em->start;
1873 BUG_ON(extent_map_end(em) <= cur);
1876 iosize = min(extent_map_end(em) - cur, end - cur + 1);
1877 cur_end = min(extent_map_end(em) - 1, end);
1878 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
1879 sector = (em->block_start + extent_offset) >> 9;
1881 block_start = em->block_start;
1882 free_extent_map(em);
1885 /* we've found a hole, just zero and go on */
1886 if (block_start == EXTENT_MAP_HOLE) {
1888 userpage = kmap_atomic(page, KM_USER0);
1889 memset(userpage + page_offset, 0, iosize);
1890 flush_dcache_page(page);
1891 kunmap_atomic(userpage, KM_USER0);
1893 set_extent_uptodate(tree, cur, cur + iosize - 1,
1895 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1897 page_offset += iosize;
1900 /* the get_extent function already copied into the page */
1901 if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
1902 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1904 page_offset += iosize;
1907 /* we have an inline extent but it didn't get marked up
1908 * to date. Error out
1910 if (block_start == EXTENT_MAP_INLINE) {
1912 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1914 page_offset += iosize;
1919 if (tree->ops && tree->ops->readpage_io_hook) {
1920 ret = tree->ops->readpage_io_hook(page, cur,
1924 unsigned long nr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
1926 ret = submit_extent_page(READ, tree, page,
1927 sector, iosize, page_offset,
1929 end_bio_extent_readpage, mirror_num);
1934 page_offset += iosize;
1938 if (!PageError(page))
1939 SetPageUptodate(page);
1945 int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
1946 get_extent_t *get_extent)
1948 struct bio *bio = NULL;
1951 ret = __extent_read_full_page(tree, page, get_extent, &bio, 0);
1953 submit_one_bio(READ, bio, 0);
1956 EXPORT_SYMBOL(extent_read_full_page);
1959 * the writepage semantics are similar to regular writepage. extent
1960 * records are inserted to lock ranges in the tree, and as dirty areas
1961 * are found, they are marked writeback. Then the lock bits are removed
1962 * and the end_io handler clears the writeback ranges
1964 static int __extent_writepage(struct page *page, struct writeback_control *wbc,
1967 struct inode *inode = page->mapping->host;
1968 struct extent_page_data *epd = data;
1969 struct extent_io_tree *tree = epd->tree;
1970 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1972 u64 page_end = start + PAGE_CACHE_SIZE - 1;
1976 u64 last_byte = i_size_read(inode);
1980 struct extent_map *em;
1981 struct block_device *bdev;
1984 size_t page_offset = 0;
1986 loff_t i_size = i_size_read(inode);
1987 unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
1991 WARN_ON(!PageLocked(page));
1992 if (page->index > end_index) {
1993 clear_extent_dirty(tree, start, page_end, GFP_NOFS);
1998 if (page->index == end_index) {
2001 size_t offset = i_size & (PAGE_CACHE_SIZE - 1);
2003 userpage = kmap_atomic(page, KM_USER0);
2004 memset(userpage + offset, 0, PAGE_CACHE_SIZE - offset);
2005 flush_dcache_page(page);
2006 kunmap_atomic(userpage, KM_USER0);
2009 set_page_extent_mapped(page);
2011 delalloc_start = start;
2013 while (delalloc_end < page_end) {
2014 nr_delalloc = find_lock_delalloc_range(tree, &delalloc_start,
2017 if (nr_delalloc == 0) {
2018 delalloc_start = delalloc_end + 1;
2021 tree->ops->fill_delalloc(inode, delalloc_start,
2023 clear_extent_bit(tree, delalloc_start,
2025 EXTENT_LOCKED | EXTENT_DELALLOC,
2027 delalloc_start = delalloc_end + 1;
2029 lock_extent(tree, start, page_end, GFP_NOFS);
2032 if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
2033 printk("found delalloc bits after lock_extent\n");
2036 if (last_byte <= start) {
2037 clear_extent_dirty(tree, start, page_end, GFP_NOFS);
2041 set_extent_uptodate(tree, start, page_end, GFP_NOFS);
2042 blocksize = inode->i_sb->s_blocksize;
2044 while (cur <= end) {
2045 if (cur >= last_byte) {
2046 clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
2049 em = epd->get_extent(inode, page, page_offset, cur,
2051 if (IS_ERR(em) || !em) {
2056 extent_offset = cur - em->start;
2057 BUG_ON(extent_map_end(em) <= cur);
2059 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2060 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
2061 sector = (em->block_start + extent_offset) >> 9;
2063 block_start = em->block_start;
2064 free_extent_map(em);
2067 if (block_start == EXTENT_MAP_HOLE ||
2068 block_start == EXTENT_MAP_INLINE) {
2069 clear_extent_dirty(tree, cur,
2070 cur + iosize - 1, GFP_NOFS);
2072 page_offset += iosize;
2076 /* leave this out until we have a page_mkwrite call */
2077 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
2080 page_offset += iosize;
2083 clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
2084 if (tree->ops && tree->ops->writepage_io_hook) {
2085 ret = tree->ops->writepage_io_hook(page, cur,
2093 unsigned long max_nr = end_index + 1;
2094 set_range_writeback(tree, cur, cur + iosize - 1);
2095 if (!PageWriteback(page)) {
2096 printk("warning page %lu not writeback, "
2097 "cur %llu end %llu\n", page->index,
2098 (unsigned long long)cur,
2099 (unsigned long long)end);
2102 ret = submit_extent_page(WRITE, tree, page, sector,
2103 iosize, page_offset, bdev,
2105 end_bio_extent_writepage, 0);
2110 page_offset += iosize;
2115 /* make sure the mapping tag for page dirty gets cleared */
2116 set_page_writeback(page);
2117 end_page_writeback(page);
2119 unlock_extent(tree, start, page_end, GFP_NOFS);
2124 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
2125 /* Taken directly from 2.6.23 for the 2.6.18 backport */
2126 typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
2130 * write_cache_pages - walk the list of dirty pages of the given address space
2131 * and write all of them.
2132 * @mapping: address space structure to write
2133 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2134 * @writepage: function called for each page
2135 * @data: data passed to writepage function
2137 * If a page is already under I/O, write_cache_pages() skips it, even
2138 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
2139 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
2140 * and msync() need to guarantee that all the data which was dirty at the time
2141 * the call was made get new I/O started against them. If wbc->sync_mode is
2142 * WB_SYNC_ALL then we were called for data integrity and we must wait for
2143 * existing IO to complete.
2145 static int write_cache_pages(struct address_space *mapping,
2146 struct writeback_control *wbc, writepage_t writepage,
2149 struct backing_dev_info *bdi = mapping->backing_dev_info;
2152 struct pagevec pvec;
2155 pgoff_t end; /* Inclusive */
2157 int range_whole = 0;
2159 if (wbc->nonblocking && bdi_write_congested(bdi)) {
2160 wbc->encountered_congestion = 1;
2164 pagevec_init(&pvec, 0);
2165 if (wbc->range_cyclic) {
2166 index = mapping->writeback_index; /* Start from prev offset */
2169 index = wbc->range_start >> PAGE_CACHE_SHIFT;
2170 end = wbc->range_end >> PAGE_CACHE_SHIFT;
2171 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2176 while (!done && (index <= end) &&
2177 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
2178 PAGECACHE_TAG_DIRTY,
2179 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
2183 for (i = 0; i < nr_pages; i++) {
2184 struct page *page = pvec.pages[i];
2187 * At this point we hold neither mapping->tree_lock nor
2188 * lock on the page itself: the page may be truncated or
2189 * invalidated (changing page->mapping to NULL), or even
2190 * swizzled back from swapper_space to tmpfs file
2195 if (unlikely(page->mapping != mapping)) {
2200 if (!wbc->range_cyclic && page->index > end) {
2206 if (wbc->sync_mode != WB_SYNC_NONE)
2207 wait_on_page_writeback(page);
2209 if (PageWriteback(page) ||
2210 !clear_page_dirty_for_io(page)) {
2215 ret = (*writepage)(page, wbc, data);
2217 if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
2221 if (ret || (--(wbc->nr_to_write) <= 0))
2223 if (wbc->nonblocking && bdi_write_congested(bdi)) {
2224 wbc->encountered_congestion = 1;
2228 pagevec_release(&pvec);
2231 if (!scanned && !done) {
2233 * We hit the last page and there is more work to be done: wrap
2234 * back to the start of the file
2240 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2241 mapping->writeback_index = index;
2246 int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
2247 get_extent_t *get_extent,
2248 struct writeback_control *wbc)
2251 struct address_space *mapping = page->mapping;
2252 struct extent_page_data epd = {
2255 .get_extent = get_extent,
2257 struct writeback_control wbc_writepages = {
2259 .sync_mode = WB_SYNC_NONE,
2260 .older_than_this = NULL,
2262 .range_start = page_offset(page) + PAGE_CACHE_SIZE,
2263 .range_end = (loff_t)-1,
2267 ret = __extent_writepage(page, wbc, &epd);
2269 write_cache_pages(mapping, &wbc_writepages, __extent_writepage, &epd);
2271 submit_one_bio(WRITE, epd.bio, 0);
2275 EXPORT_SYMBOL(extent_write_full_page);
2278 int extent_writepages(struct extent_io_tree *tree,
2279 struct address_space *mapping,
2280 get_extent_t *get_extent,
2281 struct writeback_control *wbc)
2284 struct extent_page_data epd = {
2287 .get_extent = get_extent,
2290 ret = write_cache_pages(mapping, wbc, __extent_writepage, &epd);
2292 submit_one_bio(WRITE, epd.bio, 0);
2296 EXPORT_SYMBOL(extent_writepages);
2298 int extent_readpages(struct extent_io_tree *tree,
2299 struct address_space *mapping,
2300 struct list_head *pages, unsigned nr_pages,
2301 get_extent_t get_extent)
2303 struct bio *bio = NULL;
2305 struct pagevec pvec;
2307 pagevec_init(&pvec, 0);
2308 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
2309 struct page *page = list_entry(pages->prev, struct page, lru);
2311 prefetchw(&page->flags);
2312 list_del(&page->lru);
2314 * what we want to do here is call add_to_page_cache_lru,
2315 * but that isn't exported, so we reproduce it here
2317 if (!add_to_page_cache(page, mapping,
2318 page->index, GFP_KERNEL)) {
2320 /* open coding of lru_cache_add, also not exported */
2321 page_cache_get(page);
2322 if (!pagevec_add(&pvec, page))
2323 __pagevec_lru_add(&pvec);
2324 __extent_read_full_page(tree, page, get_extent,
2327 page_cache_release(page);
2329 if (pagevec_count(&pvec))
2330 __pagevec_lru_add(&pvec);
2331 BUG_ON(!list_empty(pages));
2333 submit_one_bio(READ, bio, 0);
2336 EXPORT_SYMBOL(extent_readpages);
2339 * basic invalidatepage code, this waits on any locked or writeback
2340 * ranges corresponding to the page, and then deletes any extent state
2341 * records from the tree
2343 int extent_invalidatepage(struct extent_io_tree *tree,
2344 struct page *page, unsigned long offset)
2346 u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
2347 u64 end = start + PAGE_CACHE_SIZE - 1;
2348 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
2350 start += (offset + blocksize - 1) & ~(blocksize - 1);
2354 lock_extent(tree, start, end, GFP_NOFS);
2355 wait_on_extent_writeback(tree, start, end);
2356 clear_extent_bit(tree, start, end,
2357 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
2361 EXPORT_SYMBOL(extent_invalidatepage);
2364 * simple commit_write call, set_range_dirty is used to mark both
2365 * the pages and the extent records as dirty
2367 int extent_commit_write(struct extent_io_tree *tree,
2368 struct inode *inode, struct page *page,
2369 unsigned from, unsigned to)
2371 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2373 set_page_extent_mapped(page);
2374 set_page_dirty(page);
2376 if (pos > inode->i_size) {
2377 i_size_write(inode, pos);
2378 mark_inode_dirty(inode);
2382 EXPORT_SYMBOL(extent_commit_write);
2384 int extent_prepare_write(struct extent_io_tree *tree,
2385 struct inode *inode, struct page *page,
2386 unsigned from, unsigned to, get_extent_t *get_extent)
2388 u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
2389 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
2391 u64 orig_block_start;
2394 struct extent_map *em;
2395 unsigned blocksize = 1 << inode->i_blkbits;
2396 size_t page_offset = 0;
2397 size_t block_off_start;
2398 size_t block_off_end;
2404 set_page_extent_mapped(page);
2406 block_start = (page_start + from) & ~((u64)blocksize - 1);
2407 block_end = (page_start + to - 1) | (blocksize - 1);
2408 orig_block_start = block_start;
2410 lock_extent(tree, page_start, page_end, GFP_NOFS);
2411 while (block_start <= block_end) {
2412 em = get_extent(inode, page, page_offset, block_start,
2413 block_end - block_start + 1, 1);
2414 if (IS_ERR(em) || !em) {
2417 cur_end = min(block_end, extent_map_end(em) - 1);
2418 block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
2419 block_off_end = block_off_start + blocksize;
2420 isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
2422 if (!PageUptodate(page) && isnew &&
2423 (block_off_end > to || block_off_start < from)) {
2426 kaddr = kmap_atomic(page, KM_USER0);
2427 if (block_off_end > to)
2428 memset(kaddr + to, 0, block_off_end - to);
2429 if (block_off_start < from)
2430 memset(kaddr + block_off_start, 0,
2431 from - block_off_start);
2432 flush_dcache_page(page);
2433 kunmap_atomic(kaddr, KM_USER0);
2435 if ((em->block_start != EXTENT_MAP_HOLE &&
2436 em->block_start != EXTENT_MAP_INLINE) &&
2437 !isnew && !PageUptodate(page) &&
2438 (block_off_end > to || block_off_start < from) &&
2439 !test_range_bit(tree, block_start, cur_end,
2440 EXTENT_UPTODATE, 1)) {
2442 u64 extent_offset = block_start - em->start;
2444 sector = (em->block_start + extent_offset) >> 9;
2445 iosize = (cur_end - block_start + blocksize) &
2446 ~((u64)blocksize - 1);
2448 * we've already got the extent locked, but we
2449 * need to split the state such that our end_bio
2450 * handler can clear the lock.
2452 set_extent_bit(tree, block_start,
2453 block_start + iosize - 1,
2454 EXTENT_LOCKED, 0, NULL, GFP_NOFS);
2455 ret = submit_extent_page(READ, tree, page,
2456 sector, iosize, page_offset, em->bdev,
2458 end_bio_extent_preparewrite, 0);
2460 block_start = block_start + iosize;
2462 set_extent_uptodate(tree, block_start, cur_end,
2464 unlock_extent(tree, block_start, cur_end, GFP_NOFS);
2465 block_start = cur_end + 1;
2467 page_offset = block_start & (PAGE_CACHE_SIZE - 1);
2468 free_extent_map(em);
2471 wait_extent_bit(tree, orig_block_start,
2472 block_end, EXTENT_LOCKED);
2474 check_page_uptodate(tree, page);
2476 /* FIXME, zero out newly allocated blocks on error */
2479 EXPORT_SYMBOL(extent_prepare_write);
2482 * a helper for releasepage, this tests for areas of the page that
2483 * are locked or under IO and drops the related state bits if it is safe
2486 int try_release_extent_state(struct extent_map_tree *map,
2487 struct extent_io_tree *tree, struct page *page,
2490 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2491 u64 end = start + PAGE_CACHE_SIZE - 1;
2494 if (test_range_bit(tree, start, end, EXTENT_IOBITS, 0))
2497 if ((mask & GFP_NOFS) == GFP_NOFS)
2499 clear_extent_bit(tree, start, end, EXTENT_UPTODATE,
2504 EXPORT_SYMBOL(try_release_extent_state);
2507 * a helper for releasepage. As long as there are no locked extents
2508 * in the range corresponding to the page, both state records and extent
2509 * map records are removed
2511 int try_release_extent_mapping(struct extent_map_tree *map,
2512 struct extent_io_tree *tree, struct page *page,
2515 struct extent_map *em;
2516 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2517 u64 end = start + PAGE_CACHE_SIZE - 1;
2519 if ((mask & __GFP_WAIT) &&
2520 page->mapping->host->i_size > 16 * 1024 * 1024) {
2522 while (start <= end) {
2523 len = end - start + 1;
2524 spin_lock(&map->lock);
2525 em = lookup_extent_mapping(map, start, len);
2526 if (!em || IS_ERR(em)) {
2527 spin_unlock(&map->lock);
2530 if (em->start != start) {
2531 spin_unlock(&map->lock);
2532 free_extent_map(em);
2535 if (!test_range_bit(tree, em->start,
2536 extent_map_end(em) - 1,
2537 EXTENT_LOCKED, 0)) {
2538 remove_extent_mapping(map, em);
2539 /* once for the rb tree */
2540 free_extent_map(em);
2542 start = extent_map_end(em);
2543 spin_unlock(&map->lock);
2546 free_extent_map(em);
2549 return try_release_extent_state(map, tree, page, mask);
2551 EXPORT_SYMBOL(try_release_extent_mapping);
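/*
 * bmap helper: map a logical block to a physical sector; holes and inline
 * extents map to sector 0
 */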
2553 sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
2554 get_extent_t *get_extent)
2556 struct inode *inode = mapping->host;
2557 u64 start = iblock << inode->i_blkbits;
2558 sector_t sector = 0;
2559 struct extent_map *em;
2561 em = get_extent(inode, NULL, 0, start, (1 << inode->i_blkbits), 0);
2562 if (!em || IS_ERR(em))
2565 if (em->block_start == EXTENT_MAP_INLINE ||
2566 em->block_start == EXTENT_MAP_HOLE)
2569 sector = (em->block_start + start - em->start) >> inode->i_blkbits;
2571 free_extent_map(em);
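/*
 * add an extent_buffer to the per-tree lru, dropping the oldest buffer once
 * BUFFER_LRU_MAX entries are cached
 */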
2575 static int add_lru(struct extent_io_tree *tree, struct extent_buffer *eb)
2577 if (list_empty(&eb->lru)) {
2578 extent_buffer_get(eb);
2579 list_add(&eb->lru, &tree->buffer_lru);
2581 if (tree->lru_size >= BUFFER_LRU_MAX) {
2582 struct extent_buffer *rm;
2583 rm = list_entry(tree->buffer_lru.prev,
2584 struct extent_buffer, lru);
2586 list_del_init(&rm->lru);
2587 free_extent_buffer(rm);
2590 list_move(&eb->lru, &tree->buffer_lru);
2593 static struct extent_buffer *find_lru(struct extent_io_tree *tree,
2594 u64 start, unsigned long len)
2596 struct list_head *lru = &tree->buffer_lru;
2597 struct list_head *cur = lru->next;
2598 struct extent_buffer *eb;
2600 if (list_empty(lru))
2604 eb = list_entry(cur, struct extent_buffer, lru);
2605 if (eb->start == start && eb->len == len) {
2606 extent_buffer_get(eb);
2610 } while (cur != lru);
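/* number of pages needed to hold an extent buffer of 'len' bytes at 'start' */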
2614 static inline unsigned long num_extent_pages(u64 start, u64 len)
2616 return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
2617 (start >> PAGE_CACHE_SHIFT);
2620 static inline struct page *extent_buffer_page(struct extent_buffer *eb,
2624 struct address_space *mapping;
2627 return eb->first_page;
2628 i += eb->start >> PAGE_CACHE_SHIFT;
2629 mapping = eb->first_page->mapping;
2630 read_lock_irq(&mapping->tree_lock);
2631 p = radix_tree_lookup(&mapping->page_tree, i);
2632 read_unlock_irq(&mapping->tree_lock);
2636 int release_extent_buffer_tail_pages(struct extent_buffer *eb)
2638 unsigned long num_pages = num_extent_pages(eb->start, eb->len);
2644 for (i = 1; i < num_pages; i++) {
2645 page = extent_buffer_page(eb, i);
2646 page_cache_release(page);
int invalidate_extent_lru(struct extent_io_tree *tree, u64 start,
			  unsigned long len)
{
	struct list_head *lru = &tree->buffer_lru;
	struct list_head *cur = lru->next;
	struct extent_buffer *eb;

	spin_lock(&tree->lru_lock);
	if (list_empty(lru))
		goto out;
	do {
		eb = list_entry(cur, struct extent_buffer, lru);
		if (eb->start <= start && eb->start + eb->len > start) {
			eb->flags &= ~EXTENT_UPTODATE;
		}
		cur = cur->next;
	} while (cur != lru);
out:
	spin_unlock(&tree->lru_lock);
	return 0;
}
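
/*
 * get an extent_buffer struct for [start, start + len): reuse a matching
 * buffer from the LRU when possible, otherwise allocate a fresh one and
 * put it on the leak-tracking list.
 */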
static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
						   u64 start, unsigned long len,
						   gfp_t mask)
{
	struct extent_buffer *eb = NULL;
	unsigned long flags;

	spin_lock(&tree->lru_lock);
	eb = find_lru(tree, start, len);
	spin_unlock(&tree->lru_lock);
	if (eb)
		return eb;

	eb = kmem_cache_zalloc(extent_buffer_cache, mask);
	if (!eb)
		return NULL;
	INIT_LIST_HEAD(&eb->lru);
	eb->start = start;
	eb->len = len;
	spin_lock_irqsave(&leak_lock, flags);
	list_add(&eb->leak_list, &buffers);
	spin_unlock_irqrestore(&leak_lock, flags);
	atomic_set(&eb->refs, 1);
	return eb;
}
static void __free_extent_buffer(struct extent_buffer *eb)
{
	unsigned long flags;

	spin_lock_irqsave(&leak_lock, flags);
	list_del(&eb->leak_list);
	spin_unlock_irqrestore(&leak_lock, flags);
	kmem_cache_free(extent_buffer_cache, eb);
}
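
/*
 * allocate an extent buffer for [start, start + len) and pin the pages that
 * back it.  page0, when supplied, is used as the first page without another
 * find_or_create_page() lookup.  Returns a referenced buffer, or NULL.
 */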
struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
					  u64 start, unsigned long len,
					  struct page *page0, gfp_t mask)
{
	unsigned long num_pages = num_extent_pages(start, len);
	unsigned long i;
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	struct extent_buffer *eb;
	struct page *p;
	struct address_space *mapping = tree->mapping;
	int uptodate = 1;

	eb = __alloc_extent_buffer(tree, start, len, mask);
	if (!eb)
		return NULL;
	if (eb->flags & EXTENT_BUFFER_FILLED)
		goto lru_add;

	if (page0) {
		eb->first_page = page0;
		i = 1;
		index++;
		page_cache_get(page0);
		mark_page_accessed(page0);
		set_page_extent_mapped(page0);
		set_page_extent_head(page0, len);
		uptodate = PageUptodate(page0);
	} else {
		i = 0;
	}
	for (; i < num_pages; i++, index++) {
		p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
		if (!p) {
			WARN_ON(1);
			goto fail;
		}
		set_page_extent_mapped(p);
		mark_page_accessed(p);
		if (i == 0) {
			eb->first_page = p;
			set_page_extent_head(p, len);
		} else {
			set_page_private(p, EXTENT_PAGE_PRIVATE);
		}
		if (!PageUptodate(p))
			uptodate = 0;
		unlock_page(p);
	}
	if (uptodate)
		eb->flags |= EXTENT_UPTODATE;
	eb->flags |= EXTENT_BUFFER_FILLED;

lru_add:
	spin_lock(&tree->lru_lock);
	add_lru(tree, eb);
	spin_unlock(&tree->lru_lock);
	return eb;

fail:
	spin_lock(&tree->lru_lock);
	list_del_init(&eb->lru);
	spin_unlock(&tree->lru_lock);
	if (!atomic_dec_and_test(&eb->refs))
		return NULL;
	for (index = 1; index < i; index++) {
		page_cache_release(extent_buffer_page(eb, index));
	}
	if (i > 0)
		page_cache_release(extent_buffer_page(eb, 0));
	__free_extent_buffer(eb);
	return NULL;
}
EXPORT_SYMBOL(alloc_extent_buffer);
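
/*
 * like alloc_extent_buffer(), but only uses pages that are already present
 * in the page cache (find_lock_page); it never allocates new pages.
 */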
struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
					 u64 start, unsigned long len,
					 gfp_t mask)
{
	unsigned long num_pages = num_extent_pages(start, len);
	unsigned long i;
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	struct extent_buffer *eb;
	struct page *p;
	struct address_space *mapping = tree->mapping;
	int uptodate = 1;

	eb = __alloc_extent_buffer(tree, start, len, mask);
	if (!eb)
		return NULL;
	if (eb->flags & EXTENT_BUFFER_FILLED)
		goto lru_add;

	for (i = 0; i < num_pages; i++, index++) {
		p = find_lock_page(mapping, index);
		if (!p)
			goto fail;
		set_page_extent_mapped(p);
		mark_page_accessed(p);
		if (i == 0) {
			eb->first_page = p;
			set_page_extent_head(p, len);
		} else {
			set_page_private(p, EXTENT_PAGE_PRIVATE);
		}
		if (!PageUptodate(p))
			uptodate = 0;
		unlock_page(p);
	}
	if (uptodate)
		eb->flags |= EXTENT_UPTODATE;
	eb->flags |= EXTENT_BUFFER_FILLED;

lru_add:
	spin_lock(&tree->lru_lock);
	add_lru(tree, eb);
	spin_unlock(&tree->lru_lock);
	return eb;

fail:
	spin_lock(&tree->lru_lock);
	list_del_init(&eb->lru);
	spin_unlock(&tree->lru_lock);
	if (!atomic_dec_and_test(&eb->refs))
		return NULL;
	for (index = 1; index < i; index++) {
		page_cache_release(extent_buffer_page(eb, index));
	}
	if (i > 0)
		page_cache_release(extent_buffer_page(eb, 0));
	__free_extent_buffer(eb);
	return NULL;
}
EXPORT_SYMBOL(find_extent_buffer);
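
/*
 * drop a reference on the buffer; the last reference releases the page
 * references and frees the extent_buffer itself.
 */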
void free_extent_buffer(struct extent_buffer *eb)
{
	unsigned long i;
	unsigned long num_pages;

	if (!eb)
		return;
	if (!atomic_dec_and_test(&eb->refs))
		return;

	WARN_ON(!list_empty(&eb->lru));
	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = 1; i < num_pages; i++) {
		page_cache_release(extent_buffer_page(eb, i));
	}
	page_cache_release(extent_buffer_page(eb, 0));
	__free_extent_buffer(eb);
}
EXPORT_SYMBOL(free_extent_buffer);
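
/*
 * clear the dirty state for the buffer's byte range and for each backing
 * page, taking care not to clean a first or last page that is still
 * partially dirty from another extent buffer.
 */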
int clear_extent_buffer_dirty(struct extent_io_tree *tree,
			      struct extent_buffer *eb)
{
	int set;
	unsigned long i;
	unsigned long num_pages;
	struct page *page;
	u64 start = eb->start;
	u64 end = start + eb->len - 1;

	set = clear_extent_dirty(tree, start, end, GFP_NOFS);
	num_pages = num_extent_pages(eb->start, eb->len);

	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		lock_page(page);
		if (i == 0)
			set_page_extent_head(page, eb->len);
		else
			set_page_private(page, EXTENT_PAGE_PRIVATE);

		/*
		 * if we're on the last page or the first page and the
		 * block isn't aligned on a page boundary, do extra checks
		 * to make sure we don't clean page that is partially dirty
		 */
		if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
		    ((i == num_pages - 1) &&
		     ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
			start = (u64)page->index << PAGE_CACHE_SHIFT;
			end = start + PAGE_CACHE_SIZE - 1;
			if (test_range_bit(tree, start, end,
					   EXTENT_DIRTY, 0)) {
				unlock_page(page);
				continue;
			}
		}
		clear_page_dirty_for_io(page);
		read_lock_irq(&page->mapping->tree_lock);
		if (!PageDirty(page)) {
			radix_tree_tag_clear(&page->mapping->page_tree,
					     page_index(page),
					     PAGECACHE_TAG_DIRTY);
		}
		read_unlock_irq(&page->mapping->tree_lock);
		unlock_page(page);
	}
	return 0;
}
EXPORT_SYMBOL(clear_extent_buffer_dirty);
int wait_on_extent_buffer_writeback(struct extent_io_tree *tree,
				    struct extent_buffer *eb)
{
	return wait_on_extent_writeback(tree, eb->start,
					eb->start + eb->len - 1);
}
EXPORT_SYMBOL(wait_on_extent_buffer_writeback);
int set_extent_buffer_dirty(struct extent_io_tree *tree,
			    struct extent_buffer *eb)
{
	unsigned long i;
	unsigned long num_pages;

	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = 0; i < num_pages; i++) {
		struct page *page = extent_buffer_page(eb, i);
		/* writepage may need to do something special for the
		 * first page, we have to make sure page->private is
		 * properly set.  releasepage may drop page->private
		 * on us if the page isn't already dirty.
		 */
		if (i == 0) {
			lock_page(page);
			set_page_extent_head(page, eb->len);
		} else if (PagePrivate(page) &&
			   page->private != EXTENT_PAGE_PRIVATE) {
			lock_page(page);
			set_page_extent_mapped(page);
			unlock_page(page);
		}
		__set_page_dirty_nobuffers(extent_buffer_page(eb, i));
		if (i == 0)
			unlock_page(page);
	}
	return set_extent_dirty(tree, eb->start,
				eb->start + eb->len - 1, GFP_NOFS);
}
EXPORT_SYMBOL(set_extent_buffer_dirty);
int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
				 struct extent_buffer *eb)
{
	unsigned long i;
	struct page *page;
	unsigned long num_pages;

	num_pages = num_extent_pages(eb->start, eb->len);
	eb->flags &= ~EXTENT_UPTODATE;

	clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
			      GFP_NOFS);
	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		ClearPageUptodate(page);
	}
	return 0;
}
int set_extent_buffer_uptodate(struct extent_io_tree *tree,
			       struct extent_buffer *eb)
{
	unsigned long i;
	struct page *page;
	unsigned long num_pages;

	num_pages = num_extent_pages(eb->start, eb->len);

	set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
			    GFP_NOFS);
	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
		    ((i == num_pages - 1) &&
		     ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
			check_page_uptodate(tree, page);
			continue;
		}
		SetPageUptodate(page);
	}
	return 0;
}
EXPORT_SYMBOL(set_extent_buffer_uptodate);
int extent_range_uptodate(struct extent_io_tree *tree,
			  u64 start, u64 end)
{
	struct page *page;
	unsigned long index;
	int ret, uptodate;
	int pg_uptodate = 1;

	ret = test_range_bit(tree, start, end, EXTENT_UPTODATE, 1);
	if (ret)
		return 1;
	while (start <= end) {
		index = start >> PAGE_CACHE_SHIFT;
		page = find_get_page(tree->mapping, index);
		uptodate = PageUptodate(page);
		page_cache_release(page);
		if (!uptodate) {
			pg_uptodate = 0;
			break;
		}
		start += PAGE_CACHE_SIZE;
	}
	return pg_uptodate;
}
int extent_buffer_uptodate(struct extent_io_tree *tree,
			   struct extent_buffer *eb)
{
	int ret = 0;
	unsigned long i, num_pages;
	struct page *page;
	int pg_uptodate = 1;

	if (eb->flags & EXTENT_UPTODATE)
		return 1;

	ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
			     EXTENT_UPTODATE, 1);
	if (ret)
		return ret;

	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if (!PageUptodate(page)) {
			pg_uptodate = 0;
			break;
		}
	}
	return pg_uptodate;
}
EXPORT_SYMBOL(extent_buffer_uptodate);
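
/*
 * read whichever pages of the buffer are not uptodate.  With wait == 0 the
 * reads are only submitted; with wait != 0 we block on the page locks,
 * return -EIO if any page failed, and mark the whole buffer uptodate when
 * everything succeeded.
 */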
int read_extent_buffer_pages(struct extent_io_tree *tree,
			     struct extent_buffer *eb,
			     u64 start, int wait,
			     get_extent_t *get_extent, int mirror_num)
{
	unsigned long i;
	unsigned long start_i;
	struct page *page;
	int err;
	int ret = 0;
	int locked_pages = 0;
	int all_uptodate = 1;
	int inc_all_pages = 0;
	unsigned long num_pages;
	struct bio *bio = NULL;

	if (eb->flags & EXTENT_UPTODATE)
		return 0;
	if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
			   EXTENT_UPTODATE, 1)) {
		return 0;
	}

	if (start) {
		WARN_ON(start < eb->start);
		start_i = (start >> PAGE_CACHE_SHIFT) -
			(eb->start >> PAGE_CACHE_SHIFT);
	} else {
		start_i = 0;
	}

	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = start_i; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if (!wait) {
			if (TestSetPageLocked(page))
				goto unlock_exit;
		} else {
			lock_page(page);
		}
		locked_pages++;
		if (!PageUptodate(page))
			all_uptodate = 0;
	}
	if (all_uptodate) {
		if (start_i == 0)
			eb->flags |= EXTENT_UPTODATE;
		goto unlock_exit;
	}

	for (i = start_i; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if (inc_all_pages)
			page_cache_get(page);
		if (!PageUptodate(page)) {
			if (start_i == 0)
				inc_all_pages = 1;
			ClearPageError(page);
			err = __extent_read_full_page(tree, page,
						      get_extent, &bio,
						      mirror_num);
			if (err)
				ret = err;
		} else {
			unlock_page(page);
		}
	}

	if (bio)
		submit_one_bio(READ, bio, mirror_num);
	if (ret || !wait)
		return ret;

	for (i = start_i; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		wait_on_page_locked(page);
		if (!PageUptodate(page))
			ret = -EIO;
	}
	if (!ret)
		eb->flags |= EXTENT_UPTODATE;
	return ret;

unlock_exit:
	i = start_i;
	while (locked_pages > 0) {
		page = extent_buffer_page(eb, i);
		i++;
		unlock_page(page);
		locked_pages--;
	}
	return ret;
}
EXPORT_SYMBOL(read_extent_buffer_pages);
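
/*
 * copy 'len' bytes starting at 'start' within the buffer into dstv,
 * walking the backing pages one kmap at a time.
 */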
void read_extent_buffer(struct extent_buffer *eb, void *dstv,
			unsigned long start,
			unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *dst = (char *)dstv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
	while (len > 0) {
		page = extent_buffer_page(eb, i);
		cur = min(len, (PAGE_CACHE_SIZE - offset));
		kaddr = kmap_atomic(page, KM_USER1);
		memcpy(dst, kaddr + offset, cur);
		kunmap_atomic(kaddr, KM_USER1);

		dst += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(read_extent_buffer);
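
/*
 * kmap a chunk of the buffer for direct access.  map_private_extent_buffer()
 * returns -EINVAL if the requested range would span a page boundary.
 * map_extent_buffer() also updates the mapping cached in the extent_buffer
 * when it has to drop an existing one.
 */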
int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
			      unsigned long min_len, char **token, char **map,
			      unsigned long *map_start,
			      unsigned long *map_len, int km)
{
	size_t offset = start & (PAGE_CACHE_SIZE - 1);
	char *kaddr;
	struct page *p;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
	unsigned long end_i = (start_offset + start + min_len - 1) >>
		PAGE_CACHE_SHIFT;

	if (i != end_i)
		return -EINVAL;

	if (i == 0) {
		offset = start_offset;
		*map_start = 0;
	} else {
		offset = 0;
		*map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
	}
	if (start + min_len > eb->len) {
		printk(KERN_ERR "bad mapping eb start %Lu len %lu, wanted %lu %lu\n",
		       eb->start, eb->len, start, min_len);
		WARN_ON(1);
	}

	p = extent_buffer_page(eb, i);
	kaddr = kmap_atomic(p, km);
	*token = kaddr;
	*map = kaddr + offset;
	*map_len = PAGE_CACHE_SIZE - offset;
	return 0;
}
EXPORT_SYMBOL(map_private_extent_buffer);
int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
		      unsigned long min_len,
		      char **token, char **map,
		      unsigned long *map_start,
		      unsigned long *map_len, int km)
{
	int err;
	int save = 0;

	if (eb->map_token) {
		unmap_extent_buffer(eb, eb->map_token, km);
		eb->map_token = NULL;
		save = 1;
	}
	err = map_private_extent_buffer(eb, start, min_len, token, map,
					map_start, map_len, km);
	if (!err && save) {
		eb->map_token = *token;
		eb->kaddr = *map;
		eb->map_start = *map_start;
		eb->map_len = *map_len;
	}
	return err;
}
EXPORT_SYMBOL(map_extent_buffer);
void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
{
	kunmap_atomic(token, km);
}
EXPORT_SYMBOL(unmap_extent_buffer);
int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
			 unsigned long start,
			 unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *ptr = (char *)ptrv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
	int ret = 0;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
	while (len > 0) {
		page = extent_buffer_page(eb, i);
		cur = min(len, (PAGE_CACHE_SIZE - offset));
		kaddr = kmap_atomic(page, KM_USER0);
		ret = memcmp(ptr, kaddr + offset, cur);
		kunmap_atomic(kaddr, KM_USER0);
		if (ret)
			break;

		ptr += cur;
		len -= cur;
		offset = 0;
		i++;
	}
	return ret;
}
EXPORT_SYMBOL(memcmp_extent_buffer);
void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
			 unsigned long start, unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *src = (char *)srcv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
	while (len > 0) {
		page = extent_buffer_page(eb, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, PAGE_CACHE_SIZE - offset);
		kaddr = kmap_atomic(page, KM_USER1);
		memcpy(kaddr + offset, src, cur);
		kunmap_atomic(kaddr, KM_USER1);

		src += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(write_extent_buffer);
void memset_extent_buffer(struct extent_buffer *eb, char c,
			  unsigned long start, unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
	while (len > 0) {
		page = extent_buffer_page(eb, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, PAGE_CACHE_SIZE - offset);
		kaddr = kmap_atomic(page, KM_USER0);
		memset(kaddr + offset, c, cur);
		kunmap_atomic(kaddr, KM_USER0);

		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(memset_extent_buffer);
void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
			unsigned long dst_offset, unsigned long src_offset,
			unsigned long len)
{
	u64 dst_len = dst->len;
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;

	WARN_ON(src->len != dst_len);

	offset = (start_offset + dst_offset) &
		((unsigned long)PAGE_CACHE_SIZE - 1);
	while (len > 0) {
		page = extent_buffer_page(dst, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));

		kaddr = kmap_atomic(page, KM_USER0);
		read_extent_buffer(src, kaddr + offset, src_offset, cur);
		kunmap_atomic(kaddr, KM_USER0);

		src_offset += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(copy_extent_buffer);
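
/*
 * page-level helpers for memcpy_extent_buffer()/memmove_extent_buffer():
 * copy_pages() does a plain forward copy between two (possibly identical)
 * pages, while move_pages() copies from the tail so it is safe for the
 * overlapping, backwards walk done by memmove_extent_buffer().
 */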
static void move_pages(struct page *dst_page, struct page *src_page,
		       unsigned long dst_off, unsigned long src_off,
		       unsigned long len)
{
	char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);

	if (dst_page == src_page) {
		memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
	} else {
		char *src_kaddr = kmap_atomic(src_page, KM_USER1);
		char *p = dst_kaddr + dst_off + len;
		char *s = src_kaddr + src_off + len;

		while (len--)
			*--p = *--s;

		kunmap_atomic(src_kaddr, KM_USER1);
	}
	kunmap_atomic(dst_kaddr, KM_USER0);
}
static void copy_pages(struct page *dst_page, struct page *src_page,
		       unsigned long dst_off, unsigned long src_off,
		       unsigned long len)
{
	char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
	char *src_kaddr;

	if (dst_page != src_page)
		src_kaddr = kmap_atomic(src_page, KM_USER1);
	else
		src_kaddr = dst_kaddr;

	memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
	kunmap_atomic(dst_kaddr, KM_USER0);
	if (dst_page != src_page)
		kunmap_atomic(src_kaddr, KM_USER1);
}
void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			  unsigned long src_offset, unsigned long len)
{
	size_t cur;
	size_t dst_off_in_page;
	size_t src_off_in_page;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long dst_i;
	unsigned long src_i;

	if (src_offset + len > dst->len) {
		printk(KERN_ERR "memcpy bogus src_offset %lu move len %lu len %lu\n",
		       src_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset + len > dst->len) {
		printk(KERN_ERR "memcpy bogus dst_offset %lu move len %lu len %lu\n",
		       dst_offset, len, dst->len);
		BUG_ON(1);
	}

	while (len > 0) {
		dst_off_in_page = (start_offset + dst_offset) &
			((unsigned long)PAGE_CACHE_SIZE - 1);
		src_off_in_page = (start_offset + src_offset) &
			((unsigned long)PAGE_CACHE_SIZE - 1);

		dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
		src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;

		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
					       src_off_in_page));
		cur = min_t(unsigned long, cur,
			    (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));

		copy_pages(extent_buffer_page(dst, dst_i),
			   extent_buffer_page(dst, src_i),
			   dst_off_in_page, src_off_in_page, cur);

		src_offset += cur;
		dst_offset += cur;
		len -= cur;
	}
}
EXPORT_SYMBOL(memcpy_extent_buffer);
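
/*
 * memmove within a single extent buffer.  When the destination is below the
 * source a forward copy is safe and memcpy_extent_buffer() is used;
 * otherwise the range is walked backwards, one page-sized chunk at a time.
 */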
void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len)
{
	size_t cur;
	size_t dst_off_in_page;
	size_t src_off_in_page;
	unsigned long dst_end = dst_offset + len - 1;
	unsigned long src_end = src_offset + len - 1;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long dst_i;
	unsigned long src_i;

	if (src_offset + len > dst->len) {
		printk(KERN_ERR "memmove bogus src_offset %lu move len %lu len %lu\n",
		       src_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset + len > dst->len) {
		printk(KERN_ERR "memmove bogus dst_offset %lu move len %lu len %lu\n",
		       dst_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset < src_offset) {
		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
		return;
	}
	while (len > 0) {
		dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
		src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;

		dst_off_in_page = (start_offset + dst_end) &
			((unsigned long)PAGE_CACHE_SIZE - 1);
		src_off_in_page = (start_offset + src_end) &
			((unsigned long)PAGE_CACHE_SIZE - 1);

		cur = min_t(unsigned long, len, src_off_in_page + 1);
		cur = min(cur, dst_off_in_page + 1);
		move_pages(extent_buffer_page(dst, dst_i),
			   extent_buffer_page(dst, src_i),
			   dst_off_in_page - cur + 1,
			   src_off_in_page - cur + 1, cur);

		dst_end -= cur;
		src_end -= cur;
		len -= cur;
	}
}
EXPORT_SYMBOL(memmove_extent_buffer);