1 #include <linux/bitops.h>
2 #include <linux/slab.h>
6 #include <linux/pagemap.h>
7 #include <linux/page-flags.h>
8 #include <linux/module.h>
9 #include <linux/spinlock.h>
10 #include <linux/blkdev.h>
11 #include <linux/swap.h>
12 #include <linux/version.h>
13 #include <linux/writeback.h>
14 #include <linux/pagevec.h>
15 #include "extent_io.h"
16 #include "extent_map.h"
19 #include "btrfs_inode.h"
21 /* temporary define until extent_map moves out of btrfs */
22 struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
23 unsigned long extra_flags,
24 void (*ctor)(void *, struct kmem_cache *,
27 static struct kmem_cache *extent_state_cache;
28 static struct kmem_cache *extent_buffer_cache;
30 static LIST_HEAD(buffers);
31 static LIST_HEAD(states);
35 static DEFINE_SPINLOCK(leak_lock);
38 #define BUFFER_LRU_MAX 64
43 struct rb_node rb_node;
46 struct extent_page_data {
48 struct extent_io_tree *tree;
49 get_extent_t *get_extent;
52 int __init extent_io_init(void)
54 extent_state_cache = btrfs_cache_create("extent_state",
55 sizeof(struct extent_state), 0,
57 if (!extent_state_cache)
60 extent_buffer_cache = btrfs_cache_create("extent_buffers",
61 sizeof(struct extent_buffer), 0,
63 if (!extent_buffer_cache)
64 goto free_state_cache;
68 kmem_cache_destroy(extent_state_cache);
72 void extent_io_exit(void)
74 struct extent_state *state;
75 struct extent_buffer *eb;
77 while (!list_empty(&states)) {
78 state = list_entry(states.next, struct extent_state, leak_list);
79 printk(KERN_ERR "state leak: start %llu end %llu state %lu in tree %p refs %d\n", (unsigned long long)state->start, (unsigned long long)state->end, state->state, state->tree, atomic_read(&state->refs));
80 list_del(&state->leak_list);
81 kmem_cache_free(extent_state_cache, state);
85 while (!list_empty(&buffers)) {
86 eb = list_entry(buffers.next, struct extent_buffer, leak_list);
87 printk(KERN_ERR "buffer leak start %llu len %lu refs %d\n", (unsigned long long)eb->start, eb->len, atomic_read(&eb->refs));
88 list_del(&eb->leak_list);
89 kmem_cache_free(extent_buffer_cache, eb);
91 if (extent_state_cache)
92 kmem_cache_destroy(extent_state_cache);
93 if (extent_buffer_cache)
94 kmem_cache_destroy(extent_buffer_cache);
97 void extent_io_tree_init(struct extent_io_tree *tree,
98 struct address_space *mapping, gfp_t mask)
100 tree->state.rb_node = NULL;
101 tree->buffer.rb_node = NULL;
103 tree->dirty_bytes = 0;
104 spin_lock_init(&tree->lock);
105 spin_lock_init(&tree->buffer_lock);
106 tree->mapping = mapping;
108 EXPORT_SYMBOL(extent_io_tree_init);
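/*
 * allocate a new extent_state struct.  The state starts out with a single
 * reference, an empty state word, and a spot on the global 'states' list
 * that extent_io_exit() walks to report leaked states.
 */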
110 struct extent_state *alloc_extent_state(gfp_t mask)
112 struct extent_state *state;
117 state = kmem_cache_alloc(extent_state_cache, mask);
124 spin_lock_irqsave(&leak_lock, flags);
125 list_add(&state->leak_list, &states);
126 spin_unlock_irqrestore(&leak_lock, flags);
128 atomic_set(&state->refs, 1);
129 init_waitqueue_head(&state->wq);
132 EXPORT_SYMBOL(alloc_extent_state);
134 void free_extent_state(struct extent_state *state)
138 if (atomic_dec_and_test(&state->refs)) {
142 WARN_ON(state->tree);
144 spin_lock_irqsave(&leak_lock, flags);
145 list_del(&state->leak_list);
146 spin_unlock_irqrestore(&leak_lock, flags);
148 kmem_cache_free(extent_state_cache, state);
151 EXPORT_SYMBOL(free_extent_state);
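/*
 * insert 'node' into the rb-tree of extent_states, keyed by end offset.
 * If an existing node already covers 'offset', that node is returned and
 * nothing is inserted; otherwise the new node is linked, recolored and
 * NULL is returned.
 */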
153 static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
154 struct rb_node *node)
156 struct rb_node **p = &root->rb_node;
157 struct rb_node *parent = NULL;
158 struct tree_entry *entry;
162 entry = rb_entry(parent, struct tree_entry, rb_node);
164 if (offset < entry->start)
166 else if (offset > entry->end)
172 entry = rb_entry(node, struct tree_entry, rb_node);
173 rb_link_node(node, parent, p);
174 rb_insert_color(node, root);
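/*
 * search the state tree for the first extent_state that ends at or after
 * 'offset'.  When there is no exact hit, *prev_ret and *next_ret (if
 * non-NULL) are filled with the closest neighbors so callers can decide
 * what to do next.
 */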
178 static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
179 struct rb_node **prev_ret,
180 struct rb_node **next_ret)
182 struct rb_root *root = &tree->state;
183 struct rb_node *n = root->rb_node;
184 struct rb_node *prev = NULL;
185 struct rb_node *orig_prev = NULL;
186 struct tree_entry *entry;
187 struct tree_entry *prev_entry = NULL;
190 entry = rb_entry(n, struct tree_entry, rb_node);
194 if (offset < entry->start)
196 else if (offset > entry->end)
205 while (prev && offset > prev_entry->end) {
206 prev = rb_next(prev);
207 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
214 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
215 while (prev && offset < prev_entry->start) {
216 prev = rb_prev(prev);
217 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
224 static inline struct rb_node *tree_search(struct extent_io_tree *tree,
227 struct rb_node *prev = NULL;
230 ret = __etree_search(tree, offset, &prev, NULL);
237 static struct extent_buffer *buffer_tree_insert(struct extent_io_tree *tree,
238 u64 offset, struct rb_node *node)
240 struct rb_root *root = &tree->buffer;
241 struct rb_node **p = &root->rb_node;
242 struct rb_node *parent = NULL;
243 struct extent_buffer *eb;
247 eb = rb_entry(parent, struct extent_buffer, rb_node);
249 if (offset < eb->start)
251 else if (offset > eb->start)
257 rb_link_node(node, parent, p);
258 rb_insert_color(node, root);
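/*
 * find the extent_buffer in the buffer tree that starts exactly at
 * 'offset', or NULL if nothing is cached there.
 */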
262 static struct extent_buffer *buffer_search(struct extent_io_tree *tree,
265 struct rb_root *root = &tree->buffer;
266 struct rb_node *n = root->rb_node;
267 struct extent_buffer *eb;
270 eb = rb_entry(n, struct extent_buffer, rb_node);
271 if (offset < eb->start)
273 else if (offset > eb->start)
282 * utility function to look for merge candidates inside a given range.
283 * Any extents with matching state are merged together into a single
284 extent in the tree. Extents with EXTENT_IOBITS set in their state field
285 * are not merged because the end_io handlers need to be able to do
286 * operations on them without sleeping (or doing allocations/splits).
288 * This should be called with the tree lock held.
290 static int merge_state(struct extent_io_tree *tree,
291 struct extent_state *state)
293 struct extent_state *other;
294 struct rb_node *other_node;
296 if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
299 other_node = rb_prev(&state->rb_node);
301 other = rb_entry(other_node, struct extent_state, rb_node);
302 if (other->end == state->start - 1 &&
303 other->state == state->state) {
304 state->start = other->start;
306 rb_erase(&other->rb_node, &tree->state);
307 free_extent_state(other);
310 other_node = rb_next(&state->rb_node);
312 other = rb_entry(other_node, struct extent_state, rb_node);
313 if (other->start == state->end + 1 &&
314 other->state == state->state) {
315 other->start = state->start;
317 rb_erase(&state->rb_node, &tree->state);
318 free_extent_state(state);
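/*
 * the set/clear bit hooks below give the owning inode a chance to update
 * its own accounting (delalloc byte counts, for example) as bits change
 * on an extent_state.
 */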
324 static void set_state_cb(struct extent_io_tree *tree,
325 struct extent_state *state,
328 if (tree->ops && tree->ops->set_bit_hook) {
329 tree->ops->set_bit_hook(tree->mapping->host, state->start,
330 state->end, state->state, bits);
334 static void clear_state_cb(struct extent_io_tree *tree,
335 struct extent_state *state,
338 if (tree->ops && tree->ops->clear_bit_hook) {
339 tree->ops->clear_bit_hook(tree->mapping->host, state->start,
340 state->end, state->state, bits);
345 * insert an extent_state struct into the tree. 'bits' are set on the
346 * struct before it is inserted.
348 * This may return -EEXIST if the extent is already there, in which case the
349 * state struct is freed.
351 * The tree lock is not taken internally. This is a utility function and
352 * probably isn't what you want to call (see set/clear_extent_bit).
354 static int insert_state(struct extent_io_tree *tree,
355 struct extent_state *state, u64 start, u64 end,
358 struct rb_node *node;
361 printk(KERN_ERR "end < start %llu %llu\n", (unsigned long long)end, (unsigned long long)start);
364 if (bits & EXTENT_DIRTY)
365 tree->dirty_bytes += end - start + 1;
366 set_state_cb(tree, state, bits);
367 state->state |= bits;
368 state->start = start;
370 node = tree_insert(&tree->state, end, &state->rb_node);
372 struct extent_state *found;
373 found = rb_entry(node, struct extent_state, rb_node);
374 printk(KERN_ERR "found node %llu %llu on insert of %llu %llu\n", (unsigned long long)found->start, (unsigned long long)found->end, (unsigned long long)start, (unsigned long long)end);
375 free_extent_state(state);
379 merge_state(tree, state);
384 * split a given extent state struct in two, inserting the preallocated
385 * struct 'prealloc' as the newly created second half. 'split' indicates an
386 * offset inside 'orig' where it should be split.
389 * the tree has 'orig' at [orig->start, orig->end]. After calling, there
390 * are two extent state structs in the tree:
391 * prealloc: [orig->start, split - 1]
392 * orig: [ split, orig->end ]
394 * The tree locks are not taken by this function. They need to be held
397 static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
398 struct extent_state *prealloc, u64 split)
400 struct rb_node *node;
401 prealloc->start = orig->start;
402 prealloc->end = split - 1;
403 prealloc->state = orig->state;
406 node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
408 struct extent_state *found;
409 found = rb_entry(node, struct extent_state, rb_node);
410 printk(KERN_ERR "found node %llu %llu on insert of %llu %llu\n", (unsigned long long)found->start, (unsigned long long)found->end, (unsigned long long)prealloc->start, (unsigned long long)prealloc->end);
411 free_extent_state(prealloc);
414 prealloc->tree = tree;
419 * utility function to clear some bits in an extent state struct.
420 it will optionally wake up anyone waiting on this state (wake == 1), or
421 forcibly remove the state from the tree (delete == 1).
423 If no bits are set on the state struct after clearing things, the
424 struct is freed and removed from the tree.
426 static int clear_state_bit(struct extent_io_tree *tree,
427 struct extent_state *state, int bits, int wake,
430 int ret = state->state & bits;
432 if ((bits & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
433 u64 range = state->end - state->start + 1;
434 WARN_ON(range > tree->dirty_bytes);
435 tree->dirty_bytes -= range;
437 clear_state_cb(tree, state, bits);
438 state->state &= ~bits;
441 if (delete || state->state == 0) {
443 clear_state_cb(tree, state, state->state);
444 rb_erase(&state->rb_node, &tree->state);
446 free_extent_state(state);
451 merge_state(tree, state);
457 * clear some bits on a range in the tree. This may require splitting
458 * or inserting elements in the tree, so the gfp mask is used to
459 * indicate which allocations or sleeping are allowed.
461 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
462 the given range from the tree regardless of state (i.e. for truncate).
464 * the range [start, end] is inclusive.
466 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
467 * bits were already set, or zero if none of the bits were already set.
469 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
470 int bits, int wake, int delete, gfp_t mask)
472 struct extent_state *state;
473 struct extent_state *prealloc = NULL;
474 struct rb_node *node;
480 if (!prealloc && (mask & __GFP_WAIT)) {
481 prealloc = alloc_extent_state(mask);
486 spin_lock_irqsave(&tree->lock, flags);
488 * this search will find the extents that end after
491 node = tree_search(tree, start);
494 state = rb_entry(node, struct extent_state, rb_node);
495 if (state->start > end)
497 WARN_ON(state->end < start);
500 * | ---- desired range ---- |
502 * | ------------- state -------------- |
504 * We need to split the extent we found, and may flip
505 * bits on second half.
507 * If the extent we found extends past our range, we
508 * just split and search again. It'll get split again
509 * the next time though.
511 * If the extent we found is inside our range, we clear
512 * the desired bit on it.
515 if (state->start < start) {
517 prealloc = alloc_extent_state(GFP_ATOMIC);
518 err = split_state(tree, state, prealloc, start);
519 BUG_ON(err == -EEXIST);
523 if (state->end <= end) {
524 start = state->end + 1;
525 set |= clear_state_bit(tree, state, bits,
528 start = state->start;
533 * | ---- desired range ---- |
535 * We need to split the extent, and clear the bit
538 if (state->start <= end && state->end > end) {
540 prealloc = alloc_extent_state(GFP_ATOMIC);
541 err = split_state(tree, state, prealloc, end + 1);
542 BUG_ON(err == -EEXIST);
546 set |= clear_state_bit(tree, prealloc, bits,
552 start = state->end + 1;
553 set |= clear_state_bit(tree, state, bits, wake, delete);
557 spin_unlock_irqrestore(&tree->lock, flags);
559 free_extent_state(prealloc);
566 spin_unlock_irqrestore(&tree->lock, flags);
567 if (mask & __GFP_WAIT)
571 EXPORT_SYMBOL(clear_extent_bit);
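/*
 * sleep until someone wakes up the given extent_state.  The tree lock is
 * dropped while we sleep, so the caller must hold its own reference on
 * 'state' to keep it from being freed underneath us.
 */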
573 static int wait_on_state(struct extent_io_tree *tree,
574 struct extent_state *state)
577 prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
578 spin_unlock_irq(&tree->lock);
580 spin_lock_irq(&tree->lock);
581 finish_wait(&state->wq, &wait);
586 * waits for one or more bits to clear on a range in the state tree.
587 * The range [start, end] is inclusive.
588 * The tree lock is taken by this function
590 int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
592 struct extent_state *state;
593 struct rb_node *node;
595 spin_lock_irq(&tree->lock);
599 * this search will find all the extents that end after
602 node = tree_search(tree, start);
606 state = rb_entry(node, struct extent_state, rb_node);
608 if (state->start > end)
611 if (state->state & bits) {
612 start = state->start;
613 atomic_inc(&state->refs);
614 wait_on_state(tree, state);
615 free_extent_state(state);
618 start = state->end + 1;
623 if (need_resched()) {
624 spin_unlock_irq(&tree->lock);
626 spin_lock_irq(&tree->lock);
630 spin_unlock_irq(&tree->lock);
633 EXPORT_SYMBOL(wait_extent_bit);
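/*
 * helper to set bits on a single extent_state, keeping the tree-wide
 * dirty_bytes count in sync and calling the set_bit hook.
 */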
635 static void set_state_bits(struct extent_io_tree *tree,
636 struct extent_state *state,
639 if ((bits & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
640 u64 range = state->end - state->start + 1;
641 tree->dirty_bytes += range;
643 set_state_cb(tree, state, bits);
644 state->state |= bits;
648 * set some bits on a range in the tree. This may require allocations
649 * or sleeping, so the gfp mask is used to indicate what is allowed.
651 * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
652 * range already has the desired bits set. The start of the existing
653 * range is returned in failed_start in this case.
655 * [start, end] is inclusive
656 * This takes the tree lock.
658 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits,
659 int exclusive, u64 *failed_start, gfp_t mask)
661 struct extent_state *state;
662 struct extent_state *prealloc = NULL;
663 struct rb_node *node;
670 if (!prealloc && (mask & __GFP_WAIT)) {
671 prealloc = alloc_extent_state(mask);
676 spin_lock_irqsave(&tree->lock, flags);
678 * this search will find all the extents that end after
681 node = tree_search(tree, start);
683 err = insert_state(tree, prealloc, start, end, bits);
685 BUG_ON(err == -EEXIST);
689 state = rb_entry(node, struct extent_state, rb_node);
690 last_start = state->start;
691 last_end = state->end;
694 * | ---- desired range ---- |
697 * Just lock what we found and keep going
699 if (state->start == start && state->end <= end) {
700 set = state->state & bits;
701 if (set && exclusive) {
702 *failed_start = state->start;
706 set_state_bits(tree, state, bits);
707 start = state->end + 1;
708 merge_state(tree, state);
713 * | ---- desired range ---- |
716 * | ------------- state -------------- |
718 * We need to split the extent we found, and may flip bits on
721 * If the extent we found extends past our
722 * range, we just split and search again. It'll get split
723 * again the next time though.
725 * If the extent we found is inside our range, we set the
728 if (state->start < start) {
729 set = state->state & bits;
730 if (exclusive && set) {
731 *failed_start = start;
735 err = split_state(tree, state, prealloc, start);
736 BUG_ON(err == -EEXIST);
740 if (state->end <= end) {
741 set_state_bits(tree, state, bits);
742 start = state->end + 1;
743 merge_state(tree, state);
745 start = state->start;
750 * | ---- desired range ---- |
751 * | state | or | state |
753 * There's a hole, we need to insert something in it and
754 * ignore the extent we found.
756 if (state->start > start) {
758 if (end < last_start)
761 this_end = last_start - 1;
762 err = insert_state(tree, prealloc, start, this_end,
765 BUG_ON(err == -EEXIST);
768 start = this_end + 1;
772 * | ---- desired range ---- |
774 * We need to split the extent, and set the bit
777 if (state->start <= end && state->end > end) {
778 set = state->state & bits;
779 if (exclusive && set) {
780 *failed_start = start;
784 err = split_state(tree, state, prealloc, end + 1);
785 BUG_ON(err == -EEXIST);
787 set_state_bits(tree, prealloc, bits);
788 merge_state(tree, prealloc);
796 spin_unlock_irqrestore(&tree->lock, flags);
798 free_extent_state(prealloc);
805 spin_unlock_irqrestore(&tree->lock, flags);
806 if (mask & __GFP_WAIT)
810 EXPORT_SYMBOL(set_extent_bit);
812 /* wrappers around set/clear extent bit */
813 int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
816 return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
819 EXPORT_SYMBOL(set_extent_dirty);
821 int set_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
824 return set_extent_bit(tree, start, end, EXTENT_ORDERED, 0, NULL, mask);
826 EXPORT_SYMBOL(set_extent_ordered);
828 int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
829 int bits, gfp_t mask)
831 return set_extent_bit(tree, start, end, bits, 0, NULL,
834 EXPORT_SYMBOL(set_extent_bits);
836 int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
837 int bits, gfp_t mask)
839 return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
841 EXPORT_SYMBOL(clear_extent_bits);
843 int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
846 return set_extent_bit(tree, start, end,
847 EXTENT_DELALLOC | EXTENT_DIRTY,
850 EXPORT_SYMBOL(set_extent_delalloc);
852 int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
855 return clear_extent_bit(tree, start, end,
856 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
858 EXPORT_SYMBOL(clear_extent_dirty);
860 int clear_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
863 return clear_extent_bit(tree, start, end, EXTENT_ORDERED, 1, 0, mask);
865 EXPORT_SYMBOL(clear_extent_ordered);
867 int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
870 return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
873 EXPORT_SYMBOL(set_extent_new);
875 int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
878 return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
880 EXPORT_SYMBOL(clear_extent_new);
882 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
885 return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
888 EXPORT_SYMBOL(set_extent_uptodate);
890 int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
893 return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
895 EXPORT_SYMBOL(clear_extent_uptodate);
897 int set_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
900 return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
903 EXPORT_SYMBOL(set_extent_writeback);
905 int clear_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
908 return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
910 EXPORT_SYMBOL(clear_extent_writeback);
912 int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end)
914 return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
916 EXPORT_SYMBOL(wait_on_extent_writeback);
919 either insert or lock state struct between start and end. Use mask to tell
920 us whether waiting is desired.
922 int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
927 err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
928 &failed_start, mask);
929 if (err == -EEXIST && (mask & __GFP_WAIT)) {
930 wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
931 start = failed_start;
935 WARN_ON(start > end);
939 EXPORT_SYMBOL(lock_extent);
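/*
 * nonblocking flavor of lock_extent: returns 1 if the whole range was
 * locked, 0 if part of it was already locked, in which case anything we
 * did manage to lock is unlocked again before returning.
 */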
941 int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
947 err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
948 &failed_start, mask);
953 EXPORT_SYMBOL(try_lock_extent);
955 int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
958 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
960 EXPORT_SYMBOL(unlock_extent);
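/*
 * a minimal usage sketch: most callers lock a byte range in an inode's
 * io_tree, operate on it, and then unlock it, e.g.
 *
 *	lock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS);
 *	... read or modify the range [start, end] ...
 *	unlock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS);
 */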
963 * helper function to set pages and extents in the tree dirty
965 int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end)
967 unsigned long index = start >> PAGE_CACHE_SHIFT;
968 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
971 while (index <= end_index) {
972 page = find_get_page(tree->mapping, index);
974 __set_page_dirty_nobuffers(page);
975 page_cache_release(page);
978 set_extent_dirty(tree, start, end, GFP_NOFS);
981 EXPORT_SYMBOL(set_range_dirty);
984 * helper function to set both pages and extents in the tree writeback
986 int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
988 unsigned long index = start >> PAGE_CACHE_SHIFT;
989 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
992 while (index <= end_index) {
993 page = find_get_page(tree->mapping, index);
995 set_page_writeback(page);
996 page_cache_release(page);
999 set_extent_writeback(tree, start, end, GFP_NOFS);
1002 EXPORT_SYMBOL(set_range_writeback);
1005 * find the first offset in the io tree with 'bits' set. zero is
1006 * returned if we find something, and *start_ret and *end_ret are
1007 * set to reflect the state struct that was found.
1009 * If nothing was found, 1 is returned, < 0 on error
1011 int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
1012 u64 *start_ret, u64 *end_ret, int bits)
1014 struct rb_node *node;
1015 struct extent_state *state;
1018 spin_lock_irq(&tree->lock);
1020 * this search will find all the extents that end after
1023 node = tree_search(tree, start);
1029 state = rb_entry(node, struct extent_state, rb_node);
1030 if (state->end >= start && (state->state & bits)) {
1031 *start_ret = state->start;
1032 *end_ret = state->end;
1036 node = rb_next(node);
1041 spin_unlock_irq(&tree->lock);
1044 EXPORT_SYMBOL(find_first_extent_bit);
1046 /* find the first state struct with 'bits' set after 'start', and
1047 * return it. tree->lock must be held. NULL will be returned if
1048 * nothing was found after 'start'
1050 struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
1051 u64 start, int bits)
1053 struct rb_node *node;
1054 struct extent_state *state;
1057 * this search will find all the extents that end after
1060 node = tree_search(tree, start);
1066 state = rb_entry(node, struct extent_state, rb_node);
1067 if (state->end >= start && (state->state & bits)) {
1070 node = rb_next(node);
1077 EXPORT_SYMBOL(find_first_extent_bit_state);
1080 * find a contiguous range of bytes in the file marked as delalloc, not
1081 * more than 'max_bytes'. start and end are used to return the range,
1083 * 1 is returned if we find something, 0 if nothing was in the tree
1085 static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
1086 u64 *start, u64 *end, u64 max_bytes)
1088 struct rb_node *node;
1089 struct extent_state *state;
1090 u64 cur_start = *start;
1092 u64 total_bytes = 0;
1094 spin_lock_irq(&tree->lock);
1097 * this search will find all the extents that end after
1100 node = tree_search(tree, cur_start);
1108 state = rb_entry(node, struct extent_state, rb_node);
1109 if (found && (state->start != cur_start ||
1110 (state->state & EXTENT_BOUNDARY))) {
1113 if (!(state->state & EXTENT_DELALLOC)) {
1119 *start = state->start;
1122 cur_start = state->end + 1;
1123 node = rb_next(node);
1126 total_bytes += state->end - state->start + 1;
1127 if (total_bytes >= max_bytes)
1131 spin_unlock_irq(&tree->lock);
1135 static noinline int __unlock_for_delalloc(struct inode *inode,
1136 struct page *locked_page,
1140 struct page *pages[16];
1141 unsigned long index = start >> PAGE_CACHE_SHIFT;
1142 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1143 unsigned long nr_pages = end_index - index + 1;
1146 if (index == locked_page->index && end_index == index)
1149 while (nr_pages > 0) {
1150 ret = find_get_pages_contig(inode->i_mapping, index,
1151 min(nr_pages, ARRAY_SIZE(pages)), pages);
1152 for (i = 0; i < ret; i++) {
1153 if (pages[i] != locked_page)
1154 unlock_page(pages[i]);
1155 page_cache_release(pages[i]);
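/*
 * lock every page in the range except locked_page, which the caller has
 * already locked.  If a page has gone missing from the mapping, any pages
 * we did lock are unlocked again and -EAGAIN is returned so the caller
 * can retry with a smaller range.
 */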
1164 static noinline int lock_delalloc_pages(struct inode *inode,
1165 struct page *locked_page,
1169 unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT;
1170 unsigned long start_index = index;
1171 unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT;
1172 unsigned long pages_locked = 0;
1173 struct page *pages[16];
1174 unsigned long nrpages;
1178 /* the caller is responsible for locking the start index */
1179 if (index == locked_page->index && index == end_index)
1182 /* skip the page at the start index */
1183 nrpages = end_index - index + 1;
1184 while (nrpages > 0) {
1185 ret = find_get_pages_contig(inode->i_mapping, index,
1186 min(nrpages, ARRAY_SIZE(pages)), pages);
1191 /* now we have an array of pages, lock them all */
1192 for (i = 0; i < ret; i++) {
1194 * the caller is taking responsibility for
1197 if (pages[i] != locked_page)
1198 lock_page(pages[i]);
1199 page_cache_release(pages[i]);
1201 pages_locked += ret;
1208 if (ret && pages_locked) {
1209 __unlock_for_delalloc(inode, locked_page,
1211 ((u64)(start_index + pages_locked - 1)) <<
1218 * find a contiguous range of bytes in the file marked as delalloc, not
1219 * more than 'max_bytes'. start and end are used to return the range,
1221 * 1 is returned if we find something, 0 if nothing was in the tree
1223 static noinline u64 find_lock_delalloc_range(struct inode *inode,
1224 struct extent_io_tree *tree,
1225 struct page *locked_page,
1226 u64 *start, u64 *end,
1236 /* step one, find a bunch of delalloc bytes starting at start */
1237 delalloc_start = *start;
1239 found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
1242 *start = delalloc_start;
1243 *end = delalloc_end;
1248 * make sure to limit the number of pages we try to lock down
1251 if (delalloc_end + 1 - delalloc_start > max_bytes && loops) {
1252 delalloc_end = (delalloc_start + PAGE_CACHE_SIZE - 1) &
1253 ~((u64)PAGE_CACHE_SIZE - 1);
1255 /* step two, lock all the pages after the page that has start */
1256 ret = lock_delalloc_pages(inode, locked_page,
1257 delalloc_start, delalloc_end);
1258 if (ret == -EAGAIN) {
1259 /* some of the pages are gone, let's avoid looping by
1260 * shortening the size of the delalloc range we're searching
1263 unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1);
1264 max_bytes = PAGE_CACHE_SIZE - offset;
1274 /* step three, lock the state bits for the whole range */
1275 lock_extent(tree, delalloc_start, delalloc_end, GFP_NOFS);
1277 /* then test to make sure it is all still delalloc */
1278 ret = test_range_bit(tree, delalloc_start, delalloc_end,
1279 EXTENT_DELALLOC, 1);
1281 unlock_extent(tree, delalloc_start, delalloc_end, GFP_NOFS);
1282 __unlock_for_delalloc(inode, locked_page,
1283 delalloc_start, delalloc_end);
1287 *start = delalloc_start;
1288 *end = delalloc_end;
1293 int extent_clear_unlock_delalloc(struct inode *inode,
1294 struct extent_io_tree *tree,
1295 u64 start, u64 end, struct page *locked_page,
1296 int clear_dirty, int set_writeback,
1300 struct page *pages[16];
1301 unsigned long index = start >> PAGE_CACHE_SHIFT;
1302 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1303 unsigned long nr_pages = end_index - index + 1;
1305 int clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC;
1308 clear_bits |= EXTENT_DIRTY;
1310 clear_extent_bit(tree, start, end, clear_bits, 1, 0, GFP_NOFS);
1312 while (nr_pages > 0) {
1313 ret = find_get_pages_contig(inode->i_mapping, index,
1314 min(nr_pages, ARRAY_SIZE(pages)), pages);
1315 for (i = 0; i < ret; i++) {
1316 if (pages[i] == locked_page) {
1317 page_cache_release(pages[i]);
1321 clear_page_dirty_for_io(pages[i]);
1323 set_page_writeback(pages[i]);
1325 end_page_writeback(pages[i]);
1326 unlock_page(pages[i]);
1327 page_cache_release(pages[i]);
1335 EXPORT_SYMBOL(extent_clear_unlock_delalloc);
1338 * count the number of bytes in the tree that have a given bit(s)
1339 * set. This can be fairly slow, except for EXTENT_DIRTY which is
1340 * cached. The total number found is returned.
1342 u64 count_range_bits(struct extent_io_tree *tree,
1343 u64 *start, u64 search_end, u64 max_bytes,
1346 struct rb_node *node;
1347 struct extent_state *state;
1348 u64 cur_start = *start;
1349 u64 total_bytes = 0;
1352 if (search_end <= cur_start) {
1353 printk(KERN_ERR "search_end %llu start %llu\n", (unsigned long long)search_end, (unsigned long long)cur_start);
1358 spin_lock_irq(&tree->lock);
1359 if (cur_start == 0 && bits == EXTENT_DIRTY) {
1360 total_bytes = tree->dirty_bytes;
1364 * this search will find all the extents that end after
1367 node = tree_search(tree, cur_start);
1373 state = rb_entry(node, struct extent_state, rb_node);
1374 if (state->start > search_end)
1376 if (state->end >= cur_start && (state->state & bits)) {
1377 total_bytes += min(search_end, state->end) + 1 -
1378 max(cur_start, state->start);
1379 if (total_bytes >= max_bytes)
1382 *start = state->start;
1386 node = rb_next(node);
1391 spin_unlock_irq(&tree->lock);
1395 * helper function to lock both pages and extents in the tree.
1396 * pages must be locked first.
1398 int lock_range(struct extent_io_tree *tree, u64 start, u64 end)
1400 unsigned long index = start >> PAGE_CACHE_SHIFT;
1401 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1405 while (index <= end_index) {
1406 page = grab_cache_page(tree->mapping, index);
1412 err = PTR_ERR(page);
1417 lock_extent(tree, start, end, GFP_NOFS);
1422 * we failed above in getting the page at 'index', so we undo here
1423 * up to but not including the page at 'index'
1426 index = start >> PAGE_CACHE_SHIFT;
1427 while (index < end_index) {
1428 page = find_get_page(tree->mapping, index);
1430 page_cache_release(page);
1435 EXPORT_SYMBOL(lock_range);
1438 * helper function to unlock both pages and extents in the tree.
1440 int unlock_range(struct extent_io_tree *tree, u64 start, u64 end)
1442 unsigned long index = start >> PAGE_CACHE_SHIFT;
1443 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1446 while (index <= end_index) {
1447 page = find_get_page(tree->mapping, index);
1449 page_cache_release(page);
1452 unlock_extent(tree, start, end, GFP_NOFS);
1455 EXPORT_SYMBOL(unlock_range);
1458 * set the private field for a given byte offset in the tree. If there isn't
1459 * an extent_state there already, this does nothing.
1461 int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
1463 struct rb_node *node;
1464 struct extent_state *state;
1467 spin_lock_irq(&tree->lock);
1469 * this search will find all the extents that end after
1472 node = tree_search(tree, start);
1477 state = rb_entry(node, struct extent_state, rb_node);
1478 if (state->start != start) {
1482 state->private = private;
1484 spin_unlock_irq(&tree->lock);
1488 int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
1490 struct rb_node *node;
1491 struct extent_state *state;
1494 spin_lock_irq(&tree->lock);
1496 * this search will find all the extents that end after
1499 node = tree_search(tree, start);
1504 state = rb_entry(node, struct extent_state, rb_node);
1505 if (state->start != start) {
1509 *private = state->private;
1511 spin_unlock_irq(&tree->lock);
1516 * searches a range in the state tree for a given mask.
1517 * If 'filled' == 1, this returns 1 only if every extent in the range
1518 * has the bits set. Otherwise, 1 is returned if any bit in the
1519 * range is found set.
1521 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
1522 int bits, int filled)
1524 struct extent_state *state = NULL;
1525 struct rb_node *node;
1527 unsigned long flags;
1529 spin_lock_irqsave(&tree->lock, flags);
1530 node = tree_search(tree, start);
1531 while (node && start <= end) {
1532 state = rb_entry(node, struct extent_state, rb_node);
1534 if (filled && state->start > start) {
1539 if (state->start > end)
1542 if (state->state & bits) {
1546 } else if (filled) {
1550 start = state->end + 1;
1553 node = rb_next(node);
1560 spin_unlock_irqrestore(&tree->lock, flags);
1563 EXPORT_SYMBOL(test_range_bit);
1566 * helper function to set a given page up to date if all the
1567 * extents in the tree for that page are up to date
1569 static int check_page_uptodate(struct extent_io_tree *tree,
1572 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1573 u64 end = start + PAGE_CACHE_SIZE - 1;
1574 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
1575 SetPageUptodate(page);
1580 * helper function to unlock a page if all the extents in the tree
1581 * for that page are unlocked
1583 static int check_page_locked(struct extent_io_tree *tree,
1586 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1587 u64 end = start + PAGE_CACHE_SIZE - 1;
1588 if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
1594 * helper function to end page writeback if all the extents
1595 * in the tree for that page are done with writeback
1597 static int check_page_writeback(struct extent_io_tree *tree,
1600 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1601 u64 end = start + PAGE_CACHE_SIZE - 1;
1602 if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
1603 end_page_writeback(page);
1607 /* lots and lots of room for performance fixes in the end_bio funcs */
1610 * after a writepage IO is done, we need to:
1611 * clear the uptodate bits on error
1612 * clear the writeback bits in the extent tree for this IO
1613 * end_page_writeback if the page has no more pending IO
1615 * Scheduling is not allowed, so the extent state tree is expected
1616 * to have one and only one object corresponding to this IO.
1618 static void end_bio_extent_writepage(struct bio *bio, int err)
1620 int uptodate = err == 0;
1621 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1622 struct extent_io_tree *tree;
1629 struct page *page = bvec->bv_page;
1630 tree = &BTRFS_I(page->mapping->host)->io_tree;
1632 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1634 end = start + bvec->bv_len - 1;
1636 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1641 if (--bvec >= bio->bi_io_vec)
1642 prefetchw(&bvec->bv_page->flags);
1643 if (tree->ops && tree->ops->writepage_end_io_hook) {
1644 ret = tree->ops->writepage_end_io_hook(page, start,
1645 end, NULL, uptodate);
1650 if (!uptodate && tree->ops &&
1651 tree->ops->writepage_io_failed_hook) {
1652 ret = tree->ops->writepage_io_failed_hook(bio, page,
1655 uptodate = (err == 0);
1661 clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
1662 ClearPageUptodate(page);
1666 clear_extent_writeback(tree, start, end, GFP_ATOMIC);
1669 end_page_writeback(page);
1671 check_page_writeback(tree, page);
1672 } while (bvec >= bio->bi_io_vec);
1678 * after a readpage IO is done, we need to:
1679 * clear the uptodate bits on error
1680 * set the uptodate bits if things worked
1681 * set the page up to date if all extents in the tree are uptodate
1682 * clear the lock bit in the extent tree
1683 * unlock the page if there are no other extents locked for it
1685 * Scheduling is not allowed, so the extent state tree is expected
1686 * to have one and only one object corresponding to this IO.
1688 static void end_bio_extent_readpage(struct bio *bio, int err)
1690 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1691 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1692 struct extent_io_tree *tree;
1699 struct page *page = bvec->bv_page;
1700 tree = &BTRFS_I(page->mapping->host)->io_tree;
1702 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1704 end = start + bvec->bv_len - 1;
1706 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1711 if (--bvec >= bio->bi_io_vec)
1712 prefetchw(&bvec->bv_page->flags);
1714 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
1715 ret = tree->ops->readpage_end_io_hook(page, start, end,
1720 if (!uptodate && tree->ops &&
1721 tree->ops->readpage_io_failed_hook) {
1722 ret = tree->ops->readpage_io_failed_hook(bio, page,
1726 test_bit(BIO_UPTODATE, &bio->bi_flags);
1732 set_extent_uptodate(tree, start, end,
1734 unlock_extent(tree, start, end, GFP_ATOMIC);
1738 SetPageUptodate(page);
1740 ClearPageUptodate(page);
1746 check_page_uptodate(tree, page);
1748 ClearPageUptodate(page);
1751 check_page_locked(tree, page);
1753 } while (bvec >= bio->bi_io_vec);
1759 * IO done from prepare_write is pretty simple, we just unlock
1760 * the structs in the extent tree when done, and set the uptodate bits
1763 static void end_bio_extent_preparewrite(struct bio *bio, int err)
1765 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1766 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1767 struct extent_io_tree *tree;
1772 struct page *page = bvec->bv_page;
1773 tree = &BTRFS_I(page->mapping->host)->io_tree;
1775 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1777 end = start + bvec->bv_len - 1;
1779 if (--bvec >= bio->bi_io_vec)
1780 prefetchw(&bvec->bv_page->flags);
1783 set_extent_uptodate(tree, start, end, GFP_ATOMIC);
1785 ClearPageUptodate(page);
1789 unlock_extent(tree, start, end, GFP_ATOMIC);
1791 } while (bvec >= bio->bi_io_vec);
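/*
 * allocate a bio for 'nr_vecs' pages.  If the allocation fails and we are
 * in a memalloc context, keep retrying with half as many vecs so that
 * writeback can still make forward progress under memory pressure.
 */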
1797 extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
1802 bio = bio_alloc(gfp_flags, nr_vecs);
1804 if (bio == NULL && (current->flags & PF_MEMALLOC)) {
1805 while (!bio && (nr_vecs /= 2))
1806 bio = bio_alloc(gfp_flags, nr_vecs);
1811 bio->bi_bdev = bdev;
1812 bio->bi_sector = first_sector;
1817 static int submit_one_bio(int rw, struct bio *bio, int mirror_num,
1818 unsigned long bio_flags)
1821 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1822 struct page *page = bvec->bv_page;
1823 struct extent_io_tree *tree = bio->bi_private;
1827 start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
1828 end = start + bvec->bv_len - 1;
1830 bio->bi_private = NULL;
1834 if (tree->ops && tree->ops->submit_bio_hook)
1835 tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
1836 mirror_num, bio_flags);
1838 submit_bio(rw, bio);
1839 if (bio_flagged(bio, BIO_EOPNOTSUPP))
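/*
 * try to add 'page' to the bio the caller is building in *bio_ret.  If the
 * page is not contiguous with the current bio, the bio flags differ, or
 * the merge hook rejects it, the old bio is submitted and a fresh one is
 * started for this page.
 */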
1845 static int submit_extent_page(int rw, struct extent_io_tree *tree,
1846 struct page *page, sector_t sector,
1847 size_t size, unsigned long offset,
1848 struct block_device *bdev,
1849 struct bio **bio_ret,
1850 unsigned long max_pages,
1851 bio_end_io_t end_io_func,
1853 unsigned long prev_bio_flags,
1854 unsigned long bio_flags)
1860 int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED;
1861 int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
1862 size_t page_size = min(size, PAGE_CACHE_SIZE);
1864 if (bio_ret && *bio_ret) {
1867 contig = bio->bi_sector == sector;
1869 contig = bio->bi_sector + (bio->bi_size >> 9) ==
1872 if (prev_bio_flags != bio_flags || !contig ||
1873 (tree->ops && tree->ops->merge_bio_hook &&
1874 tree->ops->merge_bio_hook(page, offset, page_size, bio,
1876 bio_add_page(bio, page, page_size, offset) < page_size) {
1877 ret = submit_one_bio(rw, bio, mirror_num,
1884 if (this_compressed)
1887 nr = bio_get_nr_vecs(bdev);
1889 bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
1891 printk(KERN_ERR "failed to allocate bio nr %d\n", nr);
1894 bio_add_page(bio, page, page_size, offset);
1895 bio->bi_end_io = end_io_func;
1896 bio->bi_private = tree;
1901 ret = submit_one_bio(rw, bio, mirror_num, bio_flags);
1907 void set_page_extent_mapped(struct page *page)
1909 if (!PagePrivate(page)) {
1910 SetPagePrivate(page);
1911 page_cache_get(page);
1912 set_page_private(page, EXTENT_PAGE_PRIVATE);
1916 void set_page_extent_head(struct page *page, unsigned long len)
1918 set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
1922 * basic readpage implementation. Locked extent state structs are inserted
1923 * into the tree that are removed when the IO is done (by the end_io
1926 static int __extent_read_full_page(struct extent_io_tree *tree,
1928 get_extent_t *get_extent,
1929 struct bio **bio, int mirror_num,
1930 unsigned long *bio_flags)
1932 struct inode *inode = page->mapping->host;
1933 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1934 u64 page_end = start + PAGE_CACHE_SIZE - 1;
1938 u64 last_byte = i_size_read(inode);
1942 struct extent_map *em;
1943 struct block_device *bdev;
1946 size_t page_offset = 0;
1948 size_t disk_io_size;
1949 size_t blocksize = inode->i_sb->s_blocksize;
1950 unsigned long this_bio_flag = 0;
1952 set_page_extent_mapped(page);
1955 lock_extent(tree, start, end, GFP_NOFS);
1957 if (page->index == last_byte >> PAGE_CACHE_SHIFT) {
1959 size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1);
1962 iosize = PAGE_CACHE_SIZE - zero_offset;
1963 userpage = kmap_atomic(page, KM_USER0);
1964 memset(userpage + zero_offset, 0, iosize);
1965 flush_dcache_page(page);
1966 kunmap_atomic(userpage, KM_USER0);
1969 while (cur <= end) {
1970 if (cur >= last_byte) {
1972 iosize = PAGE_CACHE_SIZE - page_offset;
1973 userpage = kmap_atomic(page, KM_USER0);
1974 memset(userpage + page_offset, 0, iosize);
1975 flush_dcache_page(page);
1976 kunmap_atomic(userpage, KM_USER0);
1977 set_extent_uptodate(tree, cur, cur + iosize - 1,
1979 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1982 em = get_extent(inode, page, page_offset, cur,
1984 if (IS_ERR(em) || !em) {
1986 unlock_extent(tree, cur, end, GFP_NOFS);
1989 extent_offset = cur - em->start;
1990 if (extent_map_end(em) <= cur) {
1991 printk(KERN_ERR "bad mapping em [%llu %llu] cur %llu\n", (unsigned long long)em->start, (unsigned long long)extent_map_end(em), (unsigned long long)cur);
1993 BUG_ON(extent_map_end(em) <= cur);
1995 printk(KERN_ERR "2bad mapping end %llu cur %llu\n", (unsigned long long)end, (unsigned long long)cur);
1999 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
2000 this_bio_flag = EXTENT_BIO_COMPRESSED;
2002 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2003 cur_end = min(extent_map_end(em) - 1, end);
2004 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
2005 if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
2006 disk_io_size = em->block_len;
2007 sector = em->block_start >> 9;
2009 sector = (em->block_start + extent_offset) >> 9;
2010 disk_io_size = iosize;
2013 block_start = em->block_start;
2014 free_extent_map(em);
2017 /* we've found a hole, just zero and go on */
2018 if (block_start == EXTENT_MAP_HOLE) {
2020 userpage = kmap_atomic(page, KM_USER0);
2021 memset(userpage + page_offset, 0, iosize);
2022 flush_dcache_page(page);
2023 kunmap_atomic(userpage, KM_USER0);
2025 set_extent_uptodate(tree, cur, cur + iosize - 1,
2027 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2029 page_offset += iosize;
2032 /* the get_extent function already copied into the page */
2033 if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
2034 check_page_uptodate(tree, page);
2035 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2037 page_offset += iosize;
2040 /* we have an inline extent but it didn't get marked up
2041 * to date. Error out
2043 if (block_start == EXTENT_MAP_INLINE) {
2045 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2047 page_offset += iosize;
2052 if (tree->ops && tree->ops->readpage_io_hook) {
2053 ret = tree->ops->readpage_io_hook(page, cur,
2057 unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
2059 ret = submit_extent_page(READ, tree, page,
2060 sector, disk_io_size, page_offset,
2062 end_bio_extent_readpage, mirror_num,
2066 *bio_flags = this_bio_flag;
2071 page_offset += iosize;
2074 if (!PageError(page))
2075 SetPageUptodate(page);
2081 int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
2082 get_extent_t *get_extent)
2084 struct bio *bio = NULL;
2085 unsigned long bio_flags = 0;
2088 ret = __extent_read_full_page(tree, page, get_extent, &bio, 0,
2091 submit_one_bio(READ, bio, 0, bio_flags);
2094 EXPORT_SYMBOL(extent_read_full_page);
2097 * the writepage semantics are similar to regular writepage. extent
2098 * records are inserted to lock ranges in the tree, and as dirty areas
2099 * are found, they are marked writeback. Then the lock bits are removed
2100 * and the end_io handler clears the writeback ranges
2102 static int __extent_writepage(struct page *page, struct writeback_control *wbc,
2105 struct inode *inode = page->mapping->host;
2106 struct extent_page_data *epd = data;
2107 struct extent_io_tree *tree = epd->tree;
2108 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2110 u64 page_end = start + PAGE_CACHE_SIZE - 1;
2114 u64 last_byte = i_size_read(inode);
2119 struct extent_map *em;
2120 struct block_device *bdev;
2123 size_t pg_offset = 0;
2125 loff_t i_size = i_size_read(inode);
2126 unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
2132 WARN_ON(!PageLocked(page));
2133 pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
2134 if (page->index > end_index ||
2135 (page->index == end_index && !pg_offset)) {
2136 page->mapping->a_ops->invalidatepage(page, 0);
2141 if (page->index == end_index) {
2144 userpage = kmap_atomic(page, KM_USER0);
2145 memset(userpage + pg_offset, 0,
2146 PAGE_CACHE_SIZE - pg_offset);
2147 kunmap_atomic(userpage, KM_USER0);
2148 flush_dcache_page(page);
2152 set_page_extent_mapped(page);
2154 delalloc_start = start;
2157 while (delalloc_end < page_end) {
2158 nr_delalloc = find_lock_delalloc_range(inode, tree,
2163 if (nr_delalloc == 0) {
2164 delalloc_start = delalloc_end + 1;
2167 tree->ops->fill_delalloc(inode, page, delalloc_start,
2168 delalloc_end, &page_started);
2169 delalloc_start = delalloc_end + 1;
2172 /* did the fill delalloc function already unlock and start the IO? */
2177 lock_extent(tree, start, page_end, GFP_NOFS);
2178 unlock_start = start;
2180 if (tree->ops && tree->ops->writepage_start_hook) {
2181 ret = tree->ops->writepage_start_hook(page, start,
2183 if (ret == -EAGAIN) {
2184 unlock_extent(tree, start, page_end, GFP_NOFS);
2185 redirty_page_for_writepage(wbc, page);
2192 if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
2193 printk(KERN_ERR "found delalloc bits after lock_extent\n");
2196 if (last_byte <= start) {
2197 clear_extent_dirty(tree, start, page_end, GFP_NOFS);
2198 unlock_extent(tree, start, page_end, GFP_NOFS);
2199 if (tree->ops && tree->ops->writepage_end_io_hook)
2200 tree->ops->writepage_end_io_hook(page, start,
2202 unlock_start = page_end + 1;
2206 set_extent_uptodate(tree, start, page_end, GFP_NOFS);
2207 blocksize = inode->i_sb->s_blocksize;
2209 while (cur <= end) {
2210 if (cur >= last_byte) {
2211 clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
2212 unlock_extent(tree, unlock_start, page_end, GFP_NOFS);
2213 if (tree->ops && tree->ops->writepage_end_io_hook)
2214 tree->ops->writepage_end_io_hook(page, cur,
2216 unlock_start = page_end + 1;
2219 em = epd->get_extent(inode, page, pg_offset, cur,
2221 if (IS_ERR(em) || !em) {
2226 extent_offset = cur - em->start;
2227 BUG_ON(extent_map_end(em) <= cur);
2229 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2230 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
2231 sector = (em->block_start + extent_offset) >> 9;
2233 block_start = em->block_start;
2234 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
2235 free_extent_map(em);
2239 * compressed and inline extents are written through other
2242 if (compressed || block_start == EXTENT_MAP_HOLE ||
2243 block_start == EXTENT_MAP_INLINE) {
2244 clear_extent_dirty(tree, cur,
2245 cur + iosize - 1, GFP_NOFS);
2247 unlock_extent(tree, unlock_start, cur + iosize -1,
2251 * end_io notification does not happen here for
2252 * compressed extents
2254 if (!compressed && tree->ops &&
2255 tree->ops->writepage_end_io_hook)
2256 tree->ops->writepage_end_io_hook(page, cur,
2259 else if (compressed) {
2260 /* we don't want to end_page_writeback on
2261 * a compressed extent. this happens
2268 pg_offset += iosize;
2272 /* leave this out until we have a page_mkwrite call */
2273 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
2276 pg_offset += iosize;
2280 clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
2281 if (tree->ops && tree->ops->writepage_io_hook) {
2282 ret = tree->ops->writepage_io_hook(page, cur,
2290 unsigned long max_nr = end_index + 1;
2292 set_range_writeback(tree, cur, cur + iosize - 1);
2293 if (!PageWriteback(page)) {
2294 printk(KERN_ERR "warning page %lu not writeback, "
2295 "cur %llu end %llu\n", page->index,
2296 (unsigned long long)cur,
2297 (unsigned long long)end);
2300 ret = submit_extent_page(WRITE, tree, page, sector,
2301 iosize, pg_offset, bdev,
2303 end_bio_extent_writepage,
2309 pg_offset += iosize;
2314 /* make sure the mapping tag for page dirty gets cleared */
2315 set_page_writeback(page);
2316 end_page_writeback(page);
2318 if (unlock_start <= page_end)
2319 unlock_extent(tree, unlock_start, page_end, GFP_NOFS);
2325 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
2326 * @mapping: address space structure to write
2327 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2328 * @writepage: function called for each page
2329 * @data: data passed to writepage function
2331 * If a page is already under I/O, write_cache_pages() skips it, even
2332 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
2333 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
2334 * and msync() need to guarantee that all the data which was dirty at the time
2335 * the call was made get new I/O started against them. If wbc->sync_mode is
2336 * WB_SYNC_ALL then we were called for data integrity and we must wait for
2337 * existing IO to complete.
2339 int extent_write_cache_pages(struct extent_io_tree *tree,
2340 struct address_space *mapping,
2341 struct writeback_control *wbc,
2342 writepage_t writepage, void *data)
2344 struct backing_dev_info *bdi = mapping->backing_dev_info;
2347 struct pagevec pvec;
2350 pgoff_t end; /* Inclusive */
2352 int range_whole = 0;
2354 if (wbc->nonblocking && bdi_write_congested(bdi)) {
2355 wbc->encountered_congestion = 1;
2359 pagevec_init(&pvec, 0);
2360 if (wbc->range_cyclic) {
2361 index = mapping->writeback_index; /* Start from prev offset */
2364 index = wbc->range_start >> PAGE_CACHE_SHIFT;
2365 end = wbc->range_end >> PAGE_CACHE_SHIFT;
2366 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2371 while (!done && (index <= end) &&
2372 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
2373 PAGECACHE_TAG_DIRTY,
2374 min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1))) {
2378 for (i = 0; i < nr_pages; i++) {
2379 struct page *page = pvec.pages[i];
2382 * At this point we hold neither mapping->tree_lock nor
2383 * lock on the page itself: the page may be truncated or
2384 * invalidated (changing page->mapping to NULL), or even
2385 * swizzled back from swapper_space to tmpfs file
2388 if (tree->ops && tree->ops->write_cache_pages_lock_hook)
2389 tree->ops->write_cache_pages_lock_hook(page);
2393 if (unlikely(page->mapping != mapping)) {
2398 if (!wbc->range_cyclic && page->index > end) {
2404 if (wbc->sync_mode != WB_SYNC_NONE)
2405 wait_on_page_writeback(page);
2407 if (PageWriteback(page) ||
2408 !clear_page_dirty_for_io(page)) {
2413 ret = (*writepage)(page, wbc, data);
2415 if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
2419 if (ret || (--(wbc->nr_to_write) <= 0))
2421 if (wbc->nonblocking && bdi_write_congested(bdi)) {
2422 wbc->encountered_congestion = 1;
2426 pagevec_release(&pvec);
2429 if (!scanned && !done) {
2431 * We hit the last page and there is more work to be done: wrap
2432 * back to the start of the file
2438 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2439 mapping->writeback_index = index;
2441 if (wbc->range_cont)
2442 wbc->range_start = (loff_t)index << PAGE_CACHE_SHIFT;
2445 EXPORT_SYMBOL(extent_write_cache_pages);
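/*
 * write out the page that was passed in, then opportunistically write any
 * other dirty pages that come after it in the file through a private
 * writeback_control so their bios can be batched together.
 */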
2447 int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
2448 get_extent_t *get_extent,
2449 struct writeback_control *wbc)
2452 struct address_space *mapping = page->mapping;
2453 struct extent_page_data epd = {
2456 .get_extent = get_extent,
2458 struct writeback_control wbc_writepages = {
2460 .sync_mode = WB_SYNC_NONE,
2461 .older_than_this = NULL,
2463 .range_start = page_offset(page) + PAGE_CACHE_SIZE,
2464 .range_end = (loff_t)-1,
2468 ret = __extent_writepage(page, wbc, &epd);
2470 extent_write_cache_pages(tree, mapping, &wbc_writepages,
2471 __extent_writepage, &epd);
2473 submit_one_bio(WRITE, epd.bio, 0, 0);
2477 EXPORT_SYMBOL(extent_write_full_page);
2480 int extent_writepages(struct extent_io_tree *tree,
2481 struct address_space *mapping,
2482 get_extent_t *get_extent,
2483 struct writeback_control *wbc)
2486 struct extent_page_data epd = {
2489 .get_extent = get_extent,
2492 ret = extent_write_cache_pages(tree, mapping, wbc,
2493 __extent_writepage, &epd);
2495 submit_one_bio(WRITE, epd.bio, 0, 0);
2499 EXPORT_SYMBOL(extent_writepages);
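/*
 * readpages implementation: pull each page off the list, add it to the
 * page cache and the lru, and feed it to __extent_read_full_page so the
 * reads can share a single bio where possible.
 */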
2501 int extent_readpages(struct extent_io_tree *tree,
2502 struct address_space *mapping,
2503 struct list_head *pages, unsigned nr_pages,
2504 get_extent_t get_extent)
2506 struct bio *bio = NULL;
2508 struct pagevec pvec;
2509 unsigned long bio_flags = 0;
2511 pagevec_init(&pvec, 0);
2512 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
2513 struct page *page = list_entry(pages->prev, struct page, lru);
2515 prefetchw(&page->flags);
2516 list_del(&page->lru);
2518 * what we want to do here is call add_to_page_cache_lru,
2519 * but that isn't exported, so we reproduce it here
2521 if (!add_to_page_cache(page, mapping,
2522 page->index, GFP_KERNEL)) {
2524 /* open coding of lru_cache_add, also not exported */
2525 page_cache_get(page);
2526 if (!pagevec_add(&pvec, page))
2527 __pagevec_lru_add(&pvec);
2528 __extent_read_full_page(tree, page, get_extent,
2529 &bio, 0, &bio_flags);
2531 page_cache_release(page);
2533 if (pagevec_count(&pvec))
2534 __pagevec_lru_add(&pvec);
2535 BUG_ON(!list_empty(pages));
2537 submit_one_bio(READ, bio, 0, bio_flags);
2540 EXPORT_SYMBOL(extent_readpages);
2543 * basic invalidatepage code, this waits on any locked or writeback
2544 * ranges corresponding to the page, and then deletes any extent state
2545 * records from the tree
2547 int extent_invalidatepage(struct extent_io_tree *tree,
2548 struct page *page, unsigned long offset)
2550 u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
2551 u64 end = start + PAGE_CACHE_SIZE - 1;
2552 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
2554 start += (offset + blocksize - 1) & ~(blocksize - 1);
2558 lock_extent(tree, start, end, GFP_NOFS);
2559 wait_on_extent_writeback(tree, start, end);
2560 clear_extent_bit(tree, start, end,
2561 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
2565 EXPORT_SYMBOL(extent_invalidatepage);
2568 * simple commit_write call, set_range_dirty is used to mark both
2569 * the pages and the extent records as dirty
2571 int extent_commit_write(struct extent_io_tree *tree,
2572 struct inode *inode, struct page *page,
2573 unsigned from, unsigned to)
2575 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2577 set_page_extent_mapped(page);
2578 set_page_dirty(page);
2580 if (pos > inode->i_size) {
2581 i_size_write(inode, pos);
2582 mark_inode_dirty(inode);
2586 EXPORT_SYMBOL(extent_commit_write);
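/*
 * prepare_write implementation.  Any blocks touching the [from, to) window
 * that are not already up to date are either zeroed (holes, freshly
 * allocated blocks) or read in, and the page range stays locked in the io
 * tree while that happens.
 */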
2588 int extent_prepare_write(struct extent_io_tree *tree,
2589 struct inode *inode, struct page *page,
2590 unsigned from, unsigned to, get_extent_t *get_extent)
2592 u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
2593 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
2595 u64 orig_block_start;
2598 struct extent_map *em;
2599 unsigned blocksize = 1 << inode->i_blkbits;
2600 size_t page_offset = 0;
2601 size_t block_off_start;
2602 size_t block_off_end;
2608 set_page_extent_mapped(page);
2610 block_start = (page_start + from) & ~((u64)blocksize - 1);
2611 block_end = (page_start + to - 1) | (blocksize - 1);
2612 orig_block_start = block_start;
2614 lock_extent(tree, page_start, page_end, GFP_NOFS);
2615 while (block_start <= block_end) {
2616 em = get_extent(inode, page, page_offset, block_start,
2617 block_end - block_start + 1, 1);
2618 if (IS_ERR(em) || !em) {
2621 cur_end = min(block_end, extent_map_end(em) - 1);
2622 block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
2623 block_off_end = block_off_start + blocksize;
2624 isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
2626 if (!PageUptodate(page) && isnew &&
2627 (block_off_end > to || block_off_start < from)) {
2630 kaddr = kmap_atomic(page, KM_USER0);
2631 if (block_off_end > to)
2632 memset(kaddr + to, 0, block_off_end - to);
2633 if (block_off_start < from)
2634 memset(kaddr + block_off_start, 0,
2635 from - block_off_start);
2636 flush_dcache_page(page);
2637 kunmap_atomic(kaddr, KM_USER0);
2639 if ((em->block_start != EXTENT_MAP_HOLE &&
2640 em->block_start != EXTENT_MAP_INLINE) &&
2641 !isnew && !PageUptodate(page) &&
2642 (block_off_end > to || block_off_start < from) &&
2643 !test_range_bit(tree, block_start, cur_end,
2644 EXTENT_UPTODATE, 1)) {
2646 u64 extent_offset = block_start - em->start;
2648 sector = (em->block_start + extent_offset) >> 9;
2649 iosize = (cur_end - block_start + blocksize) &
2650 ~((u64)blocksize - 1);
2651 /*
2652  * we've already got the extent locked, but we
2653  * need to split the state such that our end_bio
2654  * handler can clear the lock.
2655  */
2656 set_extent_bit(tree, block_start,
2657 block_start + iosize - 1,
2658 EXTENT_LOCKED, 0, NULL, GFP_NOFS);
2659 ret = submit_extent_page(READ, tree, page,
2660 sector, iosize, page_offset, em->bdev,
2662 end_bio_extent_preparewrite, 0,
2665 block_start = block_start + iosize;
2667 set_extent_uptodate(tree, block_start, cur_end,
2669 unlock_extent(tree, block_start, cur_end, GFP_NOFS);
2670 block_start = cur_end + 1;
2672 page_offset = block_start & (PAGE_CACHE_SIZE - 1);
2673 free_extent_map(em);
2676 wait_extent_bit(tree, orig_block_start,
2677 block_end, EXTENT_LOCKED);
2679 check_page_uptodate(tree, page);
2681 /* FIXME, zero out newly allocated blocks on error */
2684 EXPORT_SYMBOL(extent_prepare_write);
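/*
 * Usage sketch (hypothetical caller): with the older prepare_write/
 * commit_write address_space operations, these two helpers bracket a
 * buffered write.  EXAMPLE_I() and example_get_extent() are stand-ins
 * for the caller's own inode container and extent callback.
 *
 *	static int example_prepare_write(struct file *file, struct page *page,
 *					 unsigned from, unsigned to)
 *	{
 *		struct inode *inode = page->mapping->host;
 *
 *		return extent_prepare_write(&EXAMPLE_I(inode)->io_tree, inode,
 *					    page, from, to, example_get_extent);
 *	}
 *
 *	static int example_commit_write(struct file *file, struct page *page,
 *					 unsigned from, unsigned to)
 *	{
 *		struct inode *inode = page->mapping->host;
 *
 *		return extent_commit_write(&EXAMPLE_I(inode)->io_tree, inode,
 *					   page, from, to);
 *	}
 */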
2686 /*
2687  * a helper for releasepage; this tests for areas of the page that
2688  * are locked or under IO and drops the related state bits if it is safe
2689  * to unlock
2690  */
2691 int try_release_extent_state(struct extent_map_tree *map,
2692 struct extent_io_tree *tree, struct page *page,
2695 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2696 u64 end = start + PAGE_CACHE_SIZE - 1;
2699 if (test_range_bit(tree, start, end,
2700 EXTENT_IOBITS | EXTENT_ORDERED, 0))
2703 if ((mask & GFP_NOFS) == GFP_NOFS)
2705 clear_extent_bit(tree, start, end, EXTENT_UPTODATE,
2710 EXPORT_SYMBOL(try_release_extent_state);
2712 /*
2713  * a helper for releasepage. As long as there are no locked extents
2714  * in the range corresponding to the page, both state records and extent
2715  * map records are removed
2716  */
2717 int try_release_extent_mapping(struct extent_map_tree *map,
2718 struct extent_io_tree *tree, struct page *page,
2721 struct extent_map *em;
2722 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2723 u64 end = start + PAGE_CACHE_SIZE - 1;
2725 if ((mask & __GFP_WAIT) &&
2726 page->mapping->host->i_size > 16 * 1024 * 1024) {
2728 while (start <= end) {
2729 len = end - start + 1;
2730 spin_lock(&map->lock);
2731 em = lookup_extent_mapping(map, start, len);
2732 if (!em || IS_ERR(em)) {
2733 spin_unlock(&map->lock);
2736 if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
2737 em->start != start) {
2738 spin_unlock(&map->lock);
2739 free_extent_map(em);
2742 if (!test_range_bit(tree, em->start,
2743 extent_map_end(em) - 1,
2744 EXTENT_LOCKED | EXTENT_WRITEBACK |
2747 remove_extent_mapping(map, em);
2748 /* once for the rb tree */
2749 free_extent_map(em);
2751 start = extent_map_end(em);
2752 spin_unlock(&map->lock);
2755 free_extent_map(em);
2758 return try_release_extent_state(map, tree, page, mask);
2760 EXPORT_SYMBOL(try_release_extent_mapping);
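/*
 * Usage sketch (hypothetical caller): ->releasepage can simply forward to
 * try_release_extent_mapping(), which returns non-zero only when every
 * extent_map and extent state record covering the page could be dropped.
 *
 *	static int example_releasepage(struct page *page, gfp_t gfp)
 *	{
 *		struct inode *inode = page->mapping->host;
 *
 *		return try_release_extent_mapping(&EXAMPLE_I(inode)->extent_tree,
 *						  &EXAMPLE_I(inode)->io_tree,
 *						  page, gfp);
 *	}
 */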
2762 sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
2763 get_extent_t *get_extent)
2765 struct inode *inode = mapping->host;
2766 u64 start = iblock << inode->i_blkbits;
2767 sector_t sector = 0;
2768 struct extent_map *em;
2770 em = get_extent(inode, NULL, 0, start, (1 << inode->i_blkbits), 0);
2771 if (!em || IS_ERR(em))
2774 if (em->block_start == EXTENT_MAP_INLINE ||
2775 em->block_start == EXTENT_MAP_HOLE)
2778 sector = (em->block_start + start - em->start) >> inode->i_blkbits;
2780 free_extent_map(em);
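/*
 * Usage sketch (hypothetical caller): extent_bmap() implements the ->bmap
 * address_space operation (FIBMAP); the get_extent callback belongs to the
 * caller.
 *
 *	static sector_t example_bmap(struct address_space *mapping,
 *				     sector_t block)
 *	{
 *		return extent_bmap(mapping, block, example_get_extent);
 *	}
 */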
2784 static inline struct page *extent_buffer_page(struct extent_buffer *eb,
2788 struct address_space *mapping;
2791 return eb->first_page;
2792 i += eb->start >> PAGE_CACHE_SHIFT;
2793 mapping = eb->first_page->mapping;
2797 /*
2798  * extent_buffer_page is only called after pinning the page
2799  * by increasing the reference count. So we know the page must
2800  * be in the radix tree.
2801  */
2803 p = radix_tree_lookup(&mapping->page_tree, i);
2809 static inline unsigned long num_extent_pages(u64 start, u64 len)
2811 return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
2812 (start >> PAGE_CACHE_SHIFT);
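/*
 * Worked example, assuming 4K pages: an aligned buffer with start 8192 and
 * len 16384 covers ((8192 + 16384 + 4095) >> 12) - (8192 >> 12) = 6 - 2 = 4
 * pages, while an unaligned one with start 6144 and len 4096 straddles a
 * page boundary and covers 3 - 1 = 2 pages.
 */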
2815 static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
2820 struct extent_buffer *eb = NULL;
2822 unsigned long flags;
2825 eb = kmem_cache_zalloc(extent_buffer_cache, mask);
2828 mutex_init(&eb->mutex);
2830 spin_lock_irqsave(&leak_lock, flags);
2831 list_add(&eb->leak_list, &buffers);
2832 spin_unlock_irqrestore(&leak_lock, flags);
2834 atomic_set(&eb->refs, 1);
2839 static void __free_extent_buffer(struct extent_buffer *eb)
2842 unsigned long flags;
2843 spin_lock_irqsave(&leak_lock, flags);
2844 list_del(&eb->leak_list);
2845 spin_unlock_irqrestore(&leak_lock, flags);
2847 kmem_cache_free(extent_buffer_cache, eb);
2850 struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
2851 u64 start, unsigned long len,
2855 unsigned long num_pages = num_extent_pages(start, len);
2857 unsigned long index = start >> PAGE_CACHE_SHIFT;
2858 struct extent_buffer *eb;
2859 struct extent_buffer *exists = NULL;
2861 struct address_space *mapping = tree->mapping;
2864 spin_lock(&tree->buffer_lock);
2865 eb = buffer_search(tree, start);
2867 atomic_inc(&eb->refs);
2868 spin_unlock(&tree->buffer_lock);
2869 mark_page_accessed(eb->first_page);
2872 spin_unlock(&tree->buffer_lock);
2874 eb = __alloc_extent_buffer(tree, start, len, mask);
2879 eb->first_page = page0;
2882 page_cache_get(page0);
2883 mark_page_accessed(page0);
2884 set_page_extent_mapped(page0);
2885 set_page_extent_head(page0, len);
2886 uptodate = PageUptodate(page0);
2890 for (; i < num_pages; i++, index++) {
2891 p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
2896 set_page_extent_mapped(p);
2897 mark_page_accessed(p);
2900 set_page_extent_head(p, len);
2902 set_page_private(p, EXTENT_PAGE_PRIVATE);
2904 if (!PageUptodate(p))
2909 eb->flags |= EXTENT_UPTODATE;
2910 eb->flags |= EXTENT_BUFFER_FILLED;
2912 spin_lock(&tree->buffer_lock);
2913 exists = buffer_tree_insert(tree, start, &eb->rb_node);
2915 /* add one reference for the caller */
2916 atomic_inc(&exists->refs);
2917 spin_unlock(&tree->buffer_lock);
2920 spin_unlock(&tree->buffer_lock);
2922 /* add one reference for the tree */
2923 atomic_inc(&eb->refs);
2927 if (!atomic_dec_and_test(&eb->refs))
2929 for (index = 1; index < i; index++)
2930 page_cache_release(extent_buffer_page(eb, index));
2931 page_cache_release(extent_buffer_page(eb, 0));
2932 __free_extent_buffer(eb);
2935 EXPORT_SYMBOL(alloc_extent_buffer);
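/*
 * Lifecycle sketch (assumptions flagged): metadata blocks are typically
 * pinned with alloc_extent_buffer(), read in with
 * read_extent_buffer_pages(), and released with free_extent_buffer().
 * example_get_extent() and blocksize are placeholders for the caller's own;
 * page0 is passed as NULL when no page is already held.
 *
 *	struct extent_buffer *eb;
 *	int ret;
 *
 *	eb = alloc_extent_buffer(tree, start, blocksize, NULL, GFP_NOFS);
 *	if (!eb)
 *		return -ENOMEM;
 *	ret = read_extent_buffer_pages(tree, eb, 0, 1, example_get_extent, 0);
 *	...
 *	free_extent_buffer(eb);
 */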
2937 struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
2938 u64 start, unsigned long len,
2941 struct extent_buffer *eb;
2943 spin_lock(&tree->buffer_lock);
2944 eb = buffer_search(tree, start);
2946 atomic_inc(&eb->refs);
2947 spin_unlock(&tree->buffer_lock);
2950 mark_page_accessed(eb->first_page);
2954 EXPORT_SYMBOL(find_extent_buffer);
2956 void free_extent_buffer(struct extent_buffer *eb)
2961 if (!atomic_dec_and_test(&eb->refs))
2966 EXPORT_SYMBOL(free_extent_buffer);
2968 int clear_extent_buffer_dirty(struct extent_io_tree *tree,
2969 struct extent_buffer *eb)
2973 unsigned long num_pages;
2976 u64 start = eb->start;
2977 u64 end = start + eb->len - 1;
2979 set = clear_extent_dirty(tree, start, end, GFP_NOFS);
2980 num_pages = num_extent_pages(eb->start, eb->len);
2982 for (i = 0; i < num_pages; i++) {
2983 page = extent_buffer_page(eb, i);
2986 set_page_extent_head(page, eb->len);
2988 set_page_private(page, EXTENT_PAGE_PRIVATE);
2990 /*
2991  * if we're on the last page or the first page and the
2992  * block isn't aligned on a page boundary, do extra checks
2993  * to make sure we don't clean a page that is partially dirty
2994  */
2995 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2996 ((i == num_pages - 1) &&
2997 ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
2998 start = (u64)page->index << PAGE_CACHE_SHIFT;
2999 end = start + PAGE_CACHE_SIZE - 1;
3000 if (test_range_bit(tree, start, end,
3006 clear_page_dirty_for_io(page);
3007 spin_lock_irq(&page->mapping->tree_lock);
3008 if (!PageDirty(page)) {
3009 radix_tree_tag_clear(&page->mapping->page_tree,
3011 PAGECACHE_TAG_DIRTY);
3013 spin_unlock_irq(&page->mapping->tree_lock);
3018 EXPORT_SYMBOL(clear_extent_buffer_dirty);
3020 int wait_on_extent_buffer_writeback(struct extent_io_tree *tree,
3021 struct extent_buffer *eb)
3023 return wait_on_extent_writeback(tree, eb->start,
3024 eb->start + eb->len - 1);
3026 EXPORT_SYMBOL(wait_on_extent_buffer_writeback);
3028 int set_extent_buffer_dirty(struct extent_io_tree *tree,
3029 struct extent_buffer *eb)
3032 unsigned long num_pages;
3034 num_pages = num_extent_pages(eb->start, eb->len);
3035 for (i = 0; i < num_pages; i++) {
3036 struct page *page = extent_buffer_page(eb, i);
3037 /* writepage may need to do something special for the
3038  * first page, so we have to make sure page->private is
3039  * properly set. releasepage may drop page->private
3040  * on us if the page isn't already dirty.
3041  */
3044 set_page_extent_head(page, eb->len);
3045 } else if (PagePrivate(page) &&
3046 page->private != EXTENT_PAGE_PRIVATE) {
3047 set_page_extent_mapped(page);
3049 __set_page_dirty_nobuffers(extent_buffer_page(eb, i));
3050 set_extent_dirty(tree, page_offset(page),
3051 page_offset(page) + PAGE_CACHE_SIZE -1,
3057 EXPORT_SYMBOL(set_extent_buffer_dirty);
3059 int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
3060 struct extent_buffer *eb)
3064 unsigned long num_pages;
3066 num_pages = num_extent_pages(eb->start, eb->len);
3067 eb->flags &= ~EXTENT_UPTODATE;
3069 clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
3071 for (i = 0; i < num_pages; i++) {
3072 page = extent_buffer_page(eb, i);
3074 ClearPageUptodate(page);
3079 int set_extent_buffer_uptodate(struct extent_io_tree *tree,
3080 struct extent_buffer *eb)
3084 unsigned long num_pages;
3086 num_pages = num_extent_pages(eb->start, eb->len);
3088 set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
3090 for (i = 0; i < num_pages; i++) {
3091 page = extent_buffer_page(eb, i);
3092 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
3093 ((i == num_pages - 1) &&
3094 ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
3095 check_page_uptodate(tree, page);
3098 SetPageUptodate(page);
3102 EXPORT_SYMBOL(set_extent_buffer_uptodate);
3104 int extent_range_uptodate(struct extent_io_tree *tree,
3109 int pg_uptodate = 1;
3111 unsigned long index;
3113 ret = test_range_bit(tree, start, end, EXTENT_UPTODATE, 1);
3116 while (start <= end) {
3117 index = start >> PAGE_CACHE_SHIFT;
3118 page = find_get_page(tree->mapping, index);
3119 uptodate = PageUptodate(page);
3120 page_cache_release(page);
3125 start += PAGE_CACHE_SIZE;
3130 int extent_buffer_uptodate(struct extent_io_tree *tree,
3131 struct extent_buffer *eb)
3134 unsigned long num_pages;
3137 int pg_uptodate = 1;
3139 if (eb->flags & EXTENT_UPTODATE)
3142 ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
3143 EXTENT_UPTODATE, 1);
3147 num_pages = num_extent_pages(eb->start, eb->len);
3148 for (i = 0; i < num_pages; i++) {
3149 page = extent_buffer_page(eb, i);
3150 if (!PageUptodate(page)) {
3157 EXPORT_SYMBOL(extent_buffer_uptodate);
3159 int read_extent_buffer_pages(struct extent_io_tree *tree,
3160 struct extent_buffer *eb,
3161 u64 start, int wait,
3162 get_extent_t *get_extent, int mirror_num)
3165 unsigned long start_i;
3169 int locked_pages = 0;
3170 int all_uptodate = 1;
3171 int inc_all_pages = 0;
3172 unsigned long num_pages;
3173 struct bio *bio = NULL;
3174 unsigned long bio_flags = 0;
3176 if (eb->flags & EXTENT_UPTODATE)
3179 if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
3180 EXTENT_UPTODATE, 1)) {
3185 WARN_ON(start < eb->start);
3186 start_i = (start >> PAGE_CACHE_SHIFT) -
3187 (eb->start >> PAGE_CACHE_SHIFT);
3192 num_pages = num_extent_pages(eb->start, eb->len);
3193 for (i = start_i; i < num_pages; i++) {
3194 page = extent_buffer_page(eb, i);
3196 if (!trylock_page(page))
3202 if (!PageUptodate(page)) {
3208 eb->flags |= EXTENT_UPTODATE;
3210 printk("all up to date but ret is %d\n", ret);
3215 for (i = start_i; i < num_pages; i++) {
3216 page = extent_buffer_page(eb, i);
3218 page_cache_get(page);
3219 if (!PageUptodate(page)) {
3222 ClearPageError(page);
3223 err = __extent_read_full_page(tree, page,
3225 mirror_num, &bio_flags);
3228 printk("err %d from __extent_read_full_page\n", ret);
3236 submit_one_bio(READ, bio, mirror_num, bio_flags);
3240 printk("ret %d wait %d returning\n", ret, wait);
3243 for (i = start_i; i < num_pages; i++) {
3244 page = extent_buffer_page(eb, i);
3245 wait_on_page_locked(page);
3246 if (!PageUptodate(page)) {
3247 printk("page not uptodate after wait_on_page_locked\n");
3252 eb->flags |= EXTENT_UPTODATE;
3257 while (locked_pages > 0) {
3258 page = extent_buffer_page(eb, i);
3265 EXPORT_SYMBOL(read_extent_buffer_pages);
3267 void read_extent_buffer(struct extent_buffer *eb, void *dstv,
3268 unsigned long start,
3275 char *dst = (char *)dstv;
3276 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3277 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3279 WARN_ON(start > eb->len);
3280 WARN_ON(start + len > eb->start + eb->len);
3282 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3285 page = extent_buffer_page(eb, i);
3287 cur = min(len, (PAGE_CACHE_SIZE - offset));
3288 kaddr = kmap_atomic(page, KM_USER1);
3289 memcpy(dst, kaddr + offset, cur);
3290 kunmap_atomic(kaddr, KM_USER1);
3298 EXPORT_SYMBOL(read_extent_buffer);
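/*
 * Usage sketch: copying one fixed-size, little-endian field out of the
 * buffer into a stack variable.  struct example_item and its generation
 * member are hypothetical; only read_extent_buffer() comes from this file.
 *
 *	__le64 raw;
 *	u64 generation;
 *
 *	read_extent_buffer(eb, &raw,
 *			   offsetof(struct example_item, generation),
 *			   sizeof(raw));
 *	generation = le64_to_cpu(raw);
 */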
3300 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
3301 unsigned long min_len, char **token, char **map,
3302 unsigned long *map_start,
3303 unsigned long *map_len, int km)
3305 size_t offset = start & (PAGE_CACHE_SIZE - 1);
3308 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3309 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3310 unsigned long end_i = (start_offset + start + min_len - 1) >>
3317 offset = start_offset;
3321 *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
3323 if (start + min_len > eb->len) {
3324 printk("bad mapping eb start %Lu len %lu, wanted %lu %lu\n", eb->start, eb->len, start, min_len);
3328 p = extent_buffer_page(eb, i);
3329 kaddr = kmap_atomic(p, km);
3331 *map = kaddr + offset;
3332 *map_len = PAGE_CACHE_SIZE - offset;
3335 EXPORT_SYMBOL(map_private_extent_buffer);
3337 int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
3338 unsigned long min_len,
3339 char **token, char **map,
3340 unsigned long *map_start,
3341 unsigned long *map_len, int km)
3345 if (eb->map_token) {
3346 unmap_extent_buffer(eb, eb->map_token, km);
3347 eb->map_token = NULL;
3350 err = map_private_extent_buffer(eb, start, min_len, token, map,
3351 map_start, map_len, km);
3353 eb->map_token = *token;
3355 eb->map_start = *map_start;
3356 eb->map_len = *map_len;
3360 EXPORT_SYMBOL(map_extent_buffer);
3362 void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
3364 kunmap_atomic(token, km);
3366 EXPORT_SYMBOL(unmap_extent_buffer);
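/*
 * Usage sketch of the map/unmap pair: the mapping covers at least min_len
 * contiguous bytes starting at map_start and sits on an atomic kmap slot,
 * so it must be released with unmap_extent_buffer() before sleeping.
 * offset here is the caller's byte offset into the buffer.
 *
 *	char *token;
 *	char *kaddr;
 *	unsigned long map_start;
 *	unsigned long map_len;
 *	u32 val;
 *
 *	if (!map_extent_buffer(eb, offset, sizeof(u32), &token, &kaddr,
 *			       &map_start, &map_len, KM_USER1)) {
 *		val = le32_to_cpu(*(__le32 *)(kaddr + offset - map_start));
 *		unmap_extent_buffer(eb, token, KM_USER1);
 *	}
 */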
3368 int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
3369 unsigned long start,
3376 char *ptr = (char *)ptrv;
3377 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3378 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3381 WARN_ON(start > eb->len);
3382 WARN_ON(start + len > eb->start + eb->len);
3384 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3387 page = extent_buffer_page(eb, i);
3389 cur = min(len, (PAGE_CACHE_SIZE - offset));
3391 kaddr = kmap_atomic(page, KM_USER0);
3392 ret = memcmp(ptr, kaddr + offset, cur);
3393 kunmap_atomic(kaddr, KM_USER0);
3404 EXPORT_SYMBOL(memcmp_extent_buffer);
3406 void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
3407 unsigned long start, unsigned long len)
3413 char *src = (char *)srcv;
3414 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3415 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3417 WARN_ON(start > eb->len);
3418 WARN_ON(start + len > eb->start + eb->len);
3420 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3423 page = extent_buffer_page(eb, i);
3424 WARN_ON(!PageUptodate(page));
3426 cur = min(len, PAGE_CACHE_SIZE - offset);
3427 kaddr = kmap_atomic(page, KM_USER1);
3428 memcpy(kaddr + offset, src, cur);
3429 kunmap_atomic(kaddr, KM_USER1);
3437 EXPORT_SYMBOL(write_extent_buffer);
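/*
 * Usage sketch: updating a field in place.  write_extent_buffer() only
 * copies bytes into the pages; the buffer still has to be marked dirty,
 * e.g. with set_extent_buffer_dirty(), before it will be written back.
 * struct example_item is hypothetical.
 *
 *	__le64 raw = cpu_to_le64(new_value);
 *
 *	write_extent_buffer(eb, &raw,
 *			    offsetof(struct example_item, generation),
 *			    sizeof(raw));
 *	set_extent_buffer_dirty(tree, eb);
 */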
3439 void memset_extent_buffer(struct extent_buffer *eb, char c,
3440 unsigned long start, unsigned long len)
3446 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3447 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3449 WARN_ON(start > eb->len);
3450 WARN_ON(start + len > eb->start + eb->len);
3452 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3455 page = extent_buffer_page(eb, i);
3456 WARN_ON(!PageUptodate(page));
3458 cur = min(len, PAGE_CACHE_SIZE - offset);
3459 kaddr = kmap_atomic(page, KM_USER0);
3460 memset(kaddr + offset, c, cur);
3461 kunmap_atomic(kaddr, KM_USER0);
3468 EXPORT_SYMBOL(memset_extent_buffer);
3470 void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
3471 unsigned long dst_offset, unsigned long src_offset,
3474 u64 dst_len = dst->len;
3479 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3480 unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3482 WARN_ON(src->len != dst_len);
3484 offset = (start_offset + dst_offset) &
3485 ((unsigned long)PAGE_CACHE_SIZE - 1);
3488 page = extent_buffer_page(dst, i);
3489 WARN_ON(!PageUptodate(page));
3491 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
3493 kaddr = kmap_atomic(page, KM_USER0);
3494 read_extent_buffer(src, kaddr + offset, src_offset, cur);
3495 kunmap_atomic(kaddr, KM_USER0);
3503 EXPORT_SYMBOL(copy_extent_buffer);
3505 static void move_pages(struct page *dst_page, struct page *src_page,
3506 unsigned long dst_off, unsigned long src_off,
3509 char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3510 if (dst_page == src_page) {
3511 memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
3513 char *src_kaddr = kmap_atomic(src_page, KM_USER1);
3514 char *p = dst_kaddr + dst_off + len;
3515 char *s = src_kaddr + src_off + len;
3520 kunmap_atomic(src_kaddr, KM_USER1);
3522 kunmap_atomic(dst_kaddr, KM_USER0);
3525 static void copy_pages(struct page *dst_page, struct page *src_page,
3526 unsigned long dst_off, unsigned long src_off,
3529 char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3532 if (dst_page != src_page)
3533 src_kaddr = kmap_atomic(src_page, KM_USER1);
3535 src_kaddr = dst_kaddr;
3537 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
3538 kunmap_atomic(dst_kaddr, KM_USER0);
3539 if (dst_page != src_page)
3540 kunmap_atomic(src_kaddr, KM_USER1);
3543 void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3544 unsigned long src_offset, unsigned long len)
3547 size_t dst_off_in_page;
3548 size_t src_off_in_page;
3549 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3550 unsigned long dst_i;
3551 unsigned long src_i;
3553 if (src_offset + len > dst->len) {
3554 printk("memcpy bogus src_offset %lu move len %lu len %lu\n",
3555 src_offset, len, dst->len);
3558 if (dst_offset + len > dst->len) {
3559 printk("memcpy bogus dst_offset %lu move len %lu len %lu\n",
3560 dst_offset, len, dst->len);
3565 dst_off_in_page = (start_offset + dst_offset) &
3566 ((unsigned long)PAGE_CACHE_SIZE - 1);
3567 src_off_in_page = (start_offset + src_offset) &
3568 ((unsigned long)PAGE_CACHE_SIZE - 1);
3570 dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3571 src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
3573 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
3575 cur = min_t(unsigned long, cur,
3576 (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
3578 copy_pages(extent_buffer_page(dst, dst_i),
3579 extent_buffer_page(dst, src_i),
3580 dst_off_in_page, src_off_in_page, cur);
3587 EXPORT_SYMBOL(memcpy_extent_buffer);
3589 void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3590 unsigned long src_offset, unsigned long len)
3593 size_t dst_off_in_page;
3594 size_t src_off_in_page;
3595 unsigned long dst_end = dst_offset + len - 1;
3596 unsigned long src_end = src_offset + len - 1;
3597 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3598 unsigned long dst_i;
3599 unsigned long src_i;
3601 if (src_offset + len > dst->len) {
3602 printk("memmove bogus src_offset %lu move len %lu len %lu\n",
3603 src_offset, len, dst->len);
3606 if (dst_offset + len > dst->len) {
3607 printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
3608 dst_offset, len, dst->len);
3611 if (dst_offset < src_offset) {
3612 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
3616 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
3617 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
3619 dst_off_in_page = (start_offset + dst_end) &
3620 ((unsigned long)PAGE_CACHE_SIZE - 1);
3621 src_off_in_page = (start_offset + src_end) &
3622 ((unsigned long)PAGE_CACHE_SIZE - 1);
3624 cur = min_t(unsigned long, len, src_off_in_page + 1);
3625 cur = min(cur, dst_off_in_page + 1);
3626 move_pages(extent_buffer_page(dst, dst_i),
3627 extent_buffer_page(dst, src_i),
3628 dst_off_in_page - cur + 1,
3629 src_off_in_page - cur + 1, cur);
3636 EXPORT_SYMBOL(memmove_extent_buffer);
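/*
 * Worked example of why the copy direction matters: moving 8 bytes from
 * offset 0 to offset 4 within one buffer overlaps, so a front-to-back copy
 * would clobber source bytes before they were read.  memmove_extent_buffer()
 * therefore only falls back to memcpy_extent_buffer() when dst is below src,
 * and otherwise copies tail-first through move_pages().
 */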
3638 int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
3640 u64 start = page_offset(page);
3641 struct extent_buffer *eb;
3644 unsigned long num_pages;
3646 spin_lock(&tree->buffer_lock);
3647 eb = buffer_search(tree, start);
3651 if (atomic_read(&eb->refs) > 1) {
3655 /* at this point we can safely release the extent buffer */
3656 num_pages = num_extent_pages(eb->start, eb->len);
3657 for (i = 0; i < num_pages; i++)
3658 page_cache_release(extent_buffer_page(eb, i));
3659 rb_erase(&eb->rb_node, &tree->buffer);
3660 __free_extent_buffer(eb);
3662 spin_unlock(&tree->buffer_lock);
3665 EXPORT_SYMBOL(try_release_extent_buffer);