1 #include <linux/bitops.h>
2 #include <linux/slab.h>
5 #include <linux/pagemap.h>
6 #include <linux/page-flags.h>
7 #include <linux/module.h>
8 #include <linux/spinlock.h>
9 #include <linux/blkdev.h>
10 #include <linux/swap.h>
11 #include <linux/writeback.h>
12 #include <linux/pagevec.h>
13 #include <linux/prefetch.h>
14 #include <linux/cleancache.h>
15 #include "extent_io.h"
16 #include "extent_map.h"
19 #include "btrfs_inode.h"
21 #include "check-integrity.h"
24 static struct kmem_cache *extent_state_cache;
25 static struct kmem_cache *extent_buffer_cache;
27 static LIST_HEAD(buffers);
28 static LIST_HEAD(states);
32 static DEFINE_SPINLOCK(leak_lock);
35 #define BUFFER_LRU_MAX 64
40 struct rb_node rb_node;
43 struct extent_page_data {
45 struct extent_io_tree *tree;
46 get_extent_t *get_extent;
48 /* tells writepage not to lock the state bits for this range
49 * it still does the unlocking
51 unsigned int extent_locked:1;
53 /* tells the submit_bio code to use a WRITE_SYNC */
54 unsigned int sync_io:1;
57 static noinline void flush_write_bio(void *data);
59 int __init extent_io_init(void)
61 extent_state_cache = kmem_cache_create("extent_state",
62 sizeof(struct extent_state), 0,
63 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
64 if (!extent_state_cache)
67 extent_buffer_cache = kmem_cache_create("extent_buffers",
68 sizeof(struct extent_buffer), 0,
69 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
70 if (!extent_buffer_cache)
71 goto free_state_cache;
75 kmem_cache_destroy(extent_state_cache);
79 void extent_io_exit(void)
81 struct extent_state *state;
82 struct extent_buffer *eb;
84 while (!list_empty(&states)) {
85 state = list_entry(states.next, struct extent_state, leak_list);
86 printk(KERN_ERR "btrfs state leak: start %llu end %llu "
87 "state %lu in tree %p refs %d\n",
88 (unsigned long long)state->start,
89 (unsigned long long)state->end,
90 state->state, state->tree, atomic_read(&state->refs));
91 list_del(&state->leak_list);
92 kmem_cache_free(extent_state_cache, state);
96 while (!list_empty(&buffers)) {
97 eb = list_entry(buffers.next, struct extent_buffer, leak_list);
98 printk(KERN_ERR "btrfs buffer leak start %llu len %lu "
99 "refs %d\n", (unsigned long long)eb->start,
100 eb->len, atomic_read(&eb->refs));
101 list_del(&eb->leak_list);
102 kmem_cache_free(extent_buffer_cache, eb);
104 if (extent_state_cache)
105 kmem_cache_destroy(extent_state_cache);
106 if (extent_buffer_cache)
107 kmem_cache_destroy(extent_buffer_cache);
110 void extent_io_tree_init(struct extent_io_tree *tree,
111 struct address_space *mapping)
113 tree->state = RB_ROOT;
114 INIT_RADIX_TREE(&tree->buffer, GFP_ATOMIC);
116 tree->dirty_bytes = 0;
117 spin_lock_init(&tree->lock);
118 spin_lock_init(&tree->buffer_lock);
119 tree->mapping = mapping;
122 static struct extent_state *alloc_extent_state(gfp_t mask)
124 struct extent_state *state;
129 state = kmem_cache_alloc(extent_state_cache, mask);
136 spin_lock_irqsave(&leak_lock, flags);
137 list_add(&state->leak_list, &states);
138 spin_unlock_irqrestore(&leak_lock, flags);
140 atomic_set(&state->refs, 1);
141 init_waitqueue_head(&state->wq);
145 void free_extent_state(struct extent_state *state)
149 if (atomic_dec_and_test(&state->refs)) {
153 WARN_ON(state->tree);
155 spin_lock_irqsave(&leak_lock, flags);
156 list_del(&state->leak_list);
157 spin_unlock_irqrestore(&leak_lock, flags);
159 kmem_cache_free(extent_state_cache, state);
163 static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
164 struct rb_node *node)
166 struct rb_node **p = &root->rb_node;
167 struct rb_node *parent = NULL;
168 struct tree_entry *entry;
172 entry = rb_entry(parent, struct tree_entry, rb_node);
174 if (offset < entry->start)
176 else if (offset > entry->end)
182 entry = rb_entry(node, struct tree_entry, rb_node);
183 rb_link_node(node, parent, p);
184 rb_insert_color(node, root);
188 static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
189 struct rb_node **prev_ret,
190 struct rb_node **next_ret)
192 struct rb_root *root = &tree->state;
193 struct rb_node *n = root->rb_node;
194 struct rb_node *prev = NULL;
195 struct rb_node *orig_prev = NULL;
196 struct tree_entry *entry;
197 struct tree_entry *prev_entry = NULL;
200 entry = rb_entry(n, struct tree_entry, rb_node);
204 if (offset < entry->start)
206 else if (offset > entry->end)
214 while (prev && offset > prev_entry->end) {
215 prev = rb_next(prev);
216 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
223 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
224 while (prev && offset < prev_entry->start) {
225 prev = rb_prev(prev);
226 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
233 static inline struct rb_node *tree_search(struct extent_io_tree *tree,
236 struct rb_node *prev = NULL;
239 ret = __etree_search(tree, offset, &prev, NULL);
245 static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
246 struct extent_state *other)
248 if (tree->ops && tree->ops->merge_extent_hook)
249 tree->ops->merge_extent_hook(tree->mapping->host, new,
254 * utility function to look for merge candidates inside a given range.
255 * Any extents with matching state are merged together into a single
256 * extent in the tree.  Extents with EXTENT_IOBITS set in their state field
257 * are not merged because the end_io handlers need to be able to do
258 * operations on them without sleeping (or doing allocations/splits).
260 * This should be called with the tree lock held.
262 static void merge_state(struct extent_io_tree *tree,
263 struct extent_state *state)
265 struct extent_state *other;
266 struct rb_node *other_node;
268 if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
271 other_node = rb_prev(&state->rb_node);
273 other = rb_entry(other_node, struct extent_state, rb_node);
274 if (other->end == state->start - 1 &&
275 other->state == state->state) {
276 merge_cb(tree, state, other);
277 state->start = other->start;
279 rb_erase(&other->rb_node, &tree->state);
280 free_extent_state(other);
283 other_node = rb_next(&state->rb_node);
285 other = rb_entry(other_node, struct extent_state, rb_node);
286 if (other->start == state->end + 1 &&
287 other->state == state->state) {
288 merge_cb(tree, state, other);
289 state->end = other->end;
291 rb_erase(&other->rb_node, &tree->state);
292 free_extent_state(other);
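/*
 * Usage sketch (illustrative only; the offsets, the EXTENT_DELALLOC bit and
 * the GFP_NOFS choice are assumptions, not taken from a real btrfs caller):
 * because the setters below call merge_state() after touching a state,
 * marking two back-to-back ranges with identical bits leaves one record.
 */
static void example_merge_adjacent_ranges(struct extent_io_tree *tree)
{
	set_extent_bits(tree, 0, 4095, EXTENT_DELALLOC, GFP_NOFS);
	set_extent_bits(tree, 4096, 8191, EXTENT_DELALLOC, GFP_NOFS);
	/* the tree now holds a single extent_state covering [0, 8191] */
}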
297 static void set_state_cb(struct extent_io_tree *tree,
298 struct extent_state *state, int *bits)
300 if (tree->ops && tree->ops->set_bit_hook)
301 tree->ops->set_bit_hook(tree->mapping->host, state, bits);
304 static void clear_state_cb(struct extent_io_tree *tree,
305 struct extent_state *state, int *bits)
307 if (tree->ops && tree->ops->clear_bit_hook)
308 tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
311 static void set_state_bits(struct extent_io_tree *tree,
312 struct extent_state *state, int *bits);
315 * insert an extent_state struct into the tree. 'bits' are set on the
316 * struct before it is inserted.
318 * This may return -EEXIST if the extent is already there, in which case the
319 * state struct is freed.
321 * The tree lock is not taken internally. This is a utility function and
322 * probably isn't what you want to call (see set/clear_extent_bit).
324 static int insert_state(struct extent_io_tree *tree,
325 struct extent_state *state, u64 start, u64 end,
328 struct rb_node *node;
331 printk(KERN_ERR "btrfs end < start %llu %llu\n",
332 (unsigned long long)end,
333 (unsigned long long)start);
336 state->start = start;
339 set_state_bits(tree, state, bits);
341 node = tree_insert(&tree->state, end, &state->rb_node);
343 struct extent_state *found;
344 found = rb_entry(node, struct extent_state, rb_node);
345 printk(KERN_ERR "btrfs found node %llu %llu on insert of "
346 "%llu %llu\n", (unsigned long long)found->start,
347 (unsigned long long)found->end,
348 (unsigned long long)start, (unsigned long long)end);
352 merge_state(tree, state);
356 static void split_cb(struct extent_io_tree *tree, struct extent_state *orig,
359 if (tree->ops && tree->ops->split_extent_hook)
360 tree->ops->split_extent_hook(tree->mapping->host, orig, split);
364 * split a given extent state struct in two, inserting the preallocated
365 * struct 'prealloc' as the newly created second half. 'split' indicates an
366 * offset inside 'orig' where it should be split.
369 * the tree has 'orig' at [orig->start, orig->end]. After calling, there
370 * are two extent state structs in the tree:
371 * prealloc: [orig->start, split - 1]
372 * orig: [ split, orig->end ]
374 * The tree locks are not taken by this function. They need to be held
377 static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
378 struct extent_state *prealloc, u64 split)
380 struct rb_node *node;
382 split_cb(tree, orig, split);
384 prealloc->start = orig->start;
385 prealloc->end = split - 1;
386 prealloc->state = orig->state;
389 node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
391 free_extent_state(prealloc);
394 prealloc->tree = tree;
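/*
 * Worked example of the layout described above (numbers are illustrative):
 * with orig covering [0, 8191] and split == 4096, the tree ends up with
 *	prealloc: [0, 4095]	orig: [4096, 8191]
 * and both halves carry orig's original bits.
 */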
399 * utility function to clear some bits in an extent state struct.
400 * it will optionally wake up anyone waiting on this state (wake == 1), or
401 * forcibly remove the state from the tree (delete == 1).
403 * If no bits are set on the state struct after clearing things, the
404 * struct is freed and removed from the tree
406 static int clear_state_bit(struct extent_io_tree *tree,
407 struct extent_state *state,
410 int bits_to_clear = *bits & ~EXTENT_CTLBITS;
411 int ret = state->state & bits_to_clear;
413 if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
414 u64 range = state->end - state->start + 1;
415 WARN_ON(range > tree->dirty_bytes);
416 tree->dirty_bytes -= range;
418 clear_state_cb(tree, state, bits);
419 state->state &= ~bits_to_clear;
422 if (state->state == 0) {
424 rb_erase(&state->rb_node, &tree->state);
426 free_extent_state(state);
431 merge_state(tree, state);
436 static struct extent_state *
437 alloc_extent_state_atomic(struct extent_state *prealloc)
440 prealloc = alloc_extent_state(GFP_ATOMIC);
446 * clear some bits on a range in the tree. This may require splitting
447 * or inserting elements in the tree, so the gfp mask is used to
448 * indicate which allocations or sleeping are allowed.
450 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
451 * the given range from the tree regardless of state (ie for truncate).
453 * the range [start, end] is inclusive.
455 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
456 * bits were already set, or zero if none of the bits were already set.
458 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
459 int bits, int wake, int delete,
460 struct extent_state **cached_state,
463 struct extent_state *state;
464 struct extent_state *cached;
465 struct extent_state *prealloc = NULL;
466 struct rb_node *next_node;
467 struct rb_node *node;
474 bits |= ~EXTENT_CTLBITS;
475 bits |= EXTENT_FIRST_DELALLOC;
477 if (bits & (EXTENT_IOBITS | EXTENT_BOUNDARY))
480 if (!prealloc && (mask & __GFP_WAIT)) {
481 prealloc = alloc_extent_state(mask);
486 spin_lock(&tree->lock);
488 cached = *cached_state;
491 *cached_state = NULL;
495 if (cached && cached->tree && cached->start <= start &&
496 cached->end > start) {
498 atomic_dec(&cached->refs);
503 free_extent_state(cached);
506 * this search will find the extents that end after
509 node = tree_search(tree, start);
512 state = rb_entry(node, struct extent_state, rb_node);
514 if (state->start > end)
516 WARN_ON(state->end < start);
517 last_end = state->end;
519 if (state->end < end && !need_resched())
520 next_node = rb_next(&state->rb_node);
524 /* the state doesn't have the wanted bits, go ahead */
525 if (!(state->state & bits))
529 * | ---- desired range ---- |
531 * | ------------- state -------------- |
533 * We need to split the extent we found, and may flip
534 * bits on second half.
536 * If the extent we found extends past our range, we
537 * just split and search again. It'll get split again
538 * the next time though.
540 * If the extent we found is inside our range, we clear
541 * the desired bit on it.
544 if (state->start < start) {
545 prealloc = alloc_extent_state_atomic(prealloc);
547 err = split_state(tree, state, prealloc, start);
548 BUG_ON(err == -EEXIST);
552 if (state->end <= end) {
553 set |= clear_state_bit(tree, state, &bits, wake);
554 if (last_end == (u64)-1)
556 start = last_end + 1;
561 * | ---- desired range ---- |
563 * We need to split the extent, and clear the bit
566 if (state->start <= end && state->end > end) {
567 prealloc = alloc_extent_state_atomic(prealloc);
569 err = split_state(tree, state, prealloc, end + 1);
570 BUG_ON(err == -EEXIST);
574 set |= clear_state_bit(tree, prealloc, &bits, wake);
580 set |= clear_state_bit(tree, state, &bits, wake);
582 if (last_end == (u64)-1)
584 start = last_end + 1;
585 if (start <= end && next_node) {
586 state = rb_entry(next_node, struct extent_state,
593 spin_unlock(&tree->lock);
595 free_extent_state(prealloc);
602 spin_unlock(&tree->lock);
603 if (mask & __GFP_WAIT)
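/*
 * Usage sketch (illustrative only; the bit choice and GFP_NOFS are
 * assumptions): clear the dirty and delalloc bits on an inclusive byte
 * range, waking any waiters.
 */
static int example_clear_dirty_range(struct extent_io_tree *tree,
				     u64 start, u64 end)
{
	/*
	 * wake == 1 kicks sleepers, delete == 0 only drops states whose
	 * bits all end up clear.  Returns > 0 if any of the bits were set,
	 * 0 if none were, < 0 on error.
	 */
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC,
				1, 0, NULL, GFP_NOFS);
}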
608 static int wait_on_state(struct extent_io_tree *tree,
609 struct extent_state *state)
610 __releases(tree->lock)
611 __acquires(tree->lock)
614 prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
615 spin_unlock(&tree->lock);
617 spin_lock(&tree->lock);
618 finish_wait(&state->wq, &wait);
623 * waits for one or more bits to clear on a range in the state tree.
624 * The range [start, end] is inclusive.
625 * The tree lock is taken by this function
627 int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
629 struct extent_state *state;
630 struct rb_node *node;
632 spin_lock(&tree->lock);
636 * this search will find all the extents that end after
639 node = tree_search(tree, start);
643 state = rb_entry(node, struct extent_state, rb_node);
645 if (state->start > end)
648 if (state->state & bits) {
649 start = state->start;
650 atomic_inc(&state->refs);
651 wait_on_state(tree, state);
652 free_extent_state(state);
655 start = state->end + 1;
660 cond_resched_lock(&tree->lock);
663 spin_unlock(&tree->lock);
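/*
 * Usage sketch (illustrative only): block until no part of an inclusive
 * byte range is still locked.  lock_extent_bits() below uses exactly this
 * call when set_extent_bit() reports an -EEXIST collision.
 */
static void example_wait_range_unlocked(struct extent_io_tree *tree,
					u64 start, u64 end)
{
	wait_extent_bit(tree, start, end, EXTENT_LOCKED);
}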
667 static void set_state_bits(struct extent_io_tree *tree,
668 struct extent_state *state,
671 int bits_to_set = *bits & ~EXTENT_CTLBITS;
673 set_state_cb(tree, state, bits);
674 if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
675 u64 range = state->end - state->start + 1;
676 tree->dirty_bytes += range;
678 state->state |= bits_to_set;
681 static void cache_state(struct extent_state *state,
682 struct extent_state **cached_ptr)
684 if (cached_ptr && !(*cached_ptr)) {
685 if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY)) {
687 atomic_inc(&state->refs);
692 static void uncache_state(struct extent_state **cached_ptr)
694 if (cached_ptr && (*cached_ptr)) {
695 struct extent_state *state = *cached_ptr;
697 free_extent_state(state);
702 * set some bits on a range in the tree. This may require allocations or
703 * sleeping, so the gfp mask is used to indicate what is allowed.
705 * If any of the exclusive bits are set, this will fail with -EEXIST if some
706 * part of the range already has the desired bits set. The start of the
707 * existing range is returned in failed_start in this case.
709 * [start, end] is inclusive.  This takes the tree lock.
712 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
713 int bits, int exclusive_bits, u64 *failed_start,
714 struct extent_state **cached_state, gfp_t mask)
716 struct extent_state *state;
717 struct extent_state *prealloc = NULL;
718 struct rb_node *node;
723 bits |= EXTENT_FIRST_DELALLOC;
725 if (!prealloc && (mask & __GFP_WAIT)) {
726 prealloc = alloc_extent_state(mask);
730 spin_lock(&tree->lock);
731 if (cached_state && *cached_state) {
732 state = *cached_state;
733 if (state->start <= start && state->end > start &&
735 node = &state->rb_node;
740 * this search will find all the extents that end after
743 node = tree_search(tree, start);
745 prealloc = alloc_extent_state_atomic(prealloc);
747 err = insert_state(tree, prealloc, start, end, &bits);
749 BUG_ON(err == -EEXIST);
752 state = rb_entry(node, struct extent_state, rb_node);
754 last_start = state->start;
755 last_end = state->end;
758 * | ---- desired range ---- |
761 * Just lock what we found and keep going
763 if (state->start == start && state->end <= end) {
764 struct rb_node *next_node;
765 if (state->state & exclusive_bits) {
766 *failed_start = state->start;
771 set_state_bits(tree, state, &bits);
773 cache_state(state, cached_state);
774 merge_state(tree, state);
775 if (last_end == (u64)-1)
778 start = last_end + 1;
779 next_node = rb_next(&state->rb_node);
780 if (next_node && start < end && prealloc && !need_resched()) {
781 state = rb_entry(next_node, struct extent_state,
783 if (state->start == start)
790 * | ---- desired range ---- |
793 * | ------------- state -------------- |
795 * We need to split the extent we found, and may flip bits on
798 * If the extent we found extends past our
799 * range, we just split and search again. It'll get split
800 * again the next time though.
802 * If the extent we found is inside our range, we set the
805 if (state->start < start) {
806 if (state->state & exclusive_bits) {
807 *failed_start = start;
812 prealloc = alloc_extent_state_atomic(prealloc);
814 err = split_state(tree, state, prealloc, start);
815 BUG_ON(err == -EEXIST);
819 if (state->end <= end) {
820 set_state_bits(tree, state, &bits);
821 cache_state(state, cached_state);
822 merge_state(tree, state);
823 if (last_end == (u64)-1)
825 start = last_end + 1;
830 * | ---- desired range ---- |
831 * | state | or | state |
833 * There's a hole, we need to insert something in it and
834 * ignore the extent we found.
836 if (state->start > start) {
838 if (end < last_start)
841 this_end = last_start - 1;
843 prealloc = alloc_extent_state_atomic(prealloc);
847 * Avoid freeing 'prealloc' if it can be merged with
850 err = insert_state(tree, prealloc, start, this_end,
852 BUG_ON(err == -EEXIST);
854 free_extent_state(prealloc);
858 cache_state(prealloc, cached_state);
860 start = this_end + 1;
864 * | ---- desired range ---- |
866 * We need to split the extent, and set the bit
869 if (state->start <= end && state->end > end) {
870 if (state->state & exclusive_bits) {
871 *failed_start = start;
876 prealloc = alloc_extent_state_atomic(prealloc);
878 err = split_state(tree, state, prealloc, end + 1);
879 BUG_ON(err == -EEXIST);
881 set_state_bits(tree, prealloc, &bits);
882 cache_state(prealloc, cached_state);
883 merge_state(tree, prealloc);
891 spin_unlock(&tree->lock);
893 free_extent_state(prealloc);
900 spin_unlock(&tree->lock);
901 if (mask & __GFP_WAIT)
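/*
 * Usage sketch (illustrative only; GFP_NOFS is an assumption): the
 * exclusive-bit behaviour described above is what the extent locking
 * further down is built on.  Passing EXTENT_LOCKED both as the bit to set
 * and as the exclusive bit makes the call fail with -EEXIST if any part
 * of the range is already locked, and *failed_start tells the caller
 * where the collision begins.
 */
static int example_try_lock_range(struct extent_io_tree *tree,
				  u64 start, u64 end, u64 *failed_start)
{
	return set_extent_bit(tree, start, end, EXTENT_LOCKED,
			      EXTENT_LOCKED, failed_start, NULL, GFP_NOFS);
}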
907 * convert_extent_bit - convert all bits in a given range from one bit to another
908 * @tree: the io tree to search
909 * @start: the start offset in bytes
910 * @end: the end offset in bytes (inclusive)
911 * @bits: the bits to set in this range
912 * @clear_bits: the bits to clear in this range
913 * @mask: the allocation mask
915 * This will go through and set bits for the given range. If any states exist
916 * already in this range they are set with the given bit and cleared of the
917 * clear_bits. This is only meant to be used by things that are mergeable, ie
918 * converting from say DELALLOC to DIRTY. This is not meant to be used with
919 * boundary bits like LOCK.
921 int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
922 int bits, int clear_bits, gfp_t mask)
924 struct extent_state *state;
925 struct extent_state *prealloc = NULL;
926 struct rb_node *node;
932 if (!prealloc && (mask & __GFP_WAIT)) {
933 prealloc = alloc_extent_state(mask);
938 spin_lock(&tree->lock);
940 * this search will find all the extents that end after
943 node = tree_search(tree, start);
945 prealloc = alloc_extent_state_atomic(prealloc);
950 err = insert_state(tree, prealloc, start, end, &bits);
952 BUG_ON(err == -EEXIST);
955 state = rb_entry(node, struct extent_state, rb_node);
957 last_start = state->start;
958 last_end = state->end;
961 * | ---- desired range ---- |
964 * Just lock what we found and keep going
966 if (state->start == start && state->end <= end) {
967 struct rb_node *next_node;
969 set_state_bits(tree, state, &bits);
970 clear_state_bit(tree, state, &clear_bits, 0);
971 if (last_end == (u64)-1)
974 start = last_end + 1;
975 next_node = rb_next(&state->rb_node);
976 if (next_node && start < end && prealloc && !need_resched()) {
977 state = rb_entry(next_node, struct extent_state,
979 if (state->start == start)
986 * | ---- desired range ---- |
989 * | ------------- state -------------- |
991 * We need to split the extent we found, and may flip bits on
994 * If the extent we found extends past our
995 * range, we just split and search again. It'll get split
996 * again the next time though.
998 * If the extent we found is inside our range, we set the
1001 if (state->start < start) {
1002 prealloc = alloc_extent_state_atomic(prealloc);
1007 err = split_state(tree, state, prealloc, start);
1008 BUG_ON(err == -EEXIST);
1012 if (state->end <= end) {
1013 set_state_bits(tree, state, &bits);
1014 clear_state_bit(tree, state, &clear_bits, 0);
1015 if (last_end == (u64)-1)
1017 start = last_end + 1;
1022 * | ---- desired range ---- |
1023 * | state | or | state |
1025 * There's a hole, we need to insert something in it and
1026 * ignore the extent we found.
1028 if (state->start > start) {
1030 if (end < last_start)
1033 this_end = last_start - 1;
1035 prealloc = alloc_extent_state_atomic(prealloc);
1042 * Avoid freeing 'prealloc' if it can be merged with
1045 err = insert_state(tree, prealloc, start, this_end,
1047 BUG_ON(err == -EEXIST);
1049 free_extent_state(prealloc);
1054 start = this_end + 1;
1058 * | ---- desired range ---- |
1060 * We need to split the extent, and set the bit
1063 if (state->start <= end && state->end > end) {
1064 prealloc = alloc_extent_state_atomic(prealloc);
1070 err = split_state(tree, state, prealloc, end + 1);
1071 BUG_ON(err == -EEXIST);
1073 set_state_bits(tree, prealloc, &bits);
1074 clear_state_bit(tree, prealloc, &clear_bits, 0);
1082 spin_unlock(&tree->lock);
1084 free_extent_state(prealloc);
1091 spin_unlock(&tree->lock);
1092 if (mask & __GFP_WAIT)
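/*
 * Usage sketch (illustrative only; GFP_NOFS is an assumption): the
 * delalloc-to-dirty conversion mentioned in the comment above, done as a
 * single pass over the range instead of a clear followed by a set.
 */
static int example_convert_delalloc_to_dirty(struct extent_io_tree *tree,
					     u64 start, u64 end)
{
	return convert_extent_bit(tree, start, end, EXTENT_DIRTY,
				  EXTENT_DELALLOC, GFP_NOFS);
}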
1097 /* wrappers around set/clear extent bit */
1098 int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
1101 return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
1105 int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1106 int bits, gfp_t mask)
1108 return set_extent_bit(tree, start, end, bits, 0, NULL,
1112 int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1113 int bits, gfp_t mask)
1115 return clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask);
1118 int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
1119 struct extent_state **cached_state, gfp_t mask)
1121 return set_extent_bit(tree, start, end,
1122 EXTENT_DELALLOC | EXTENT_UPTODATE,
1123 0, NULL, cached_state, mask);
1126 int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
1129 return clear_extent_bit(tree, start, end,
1130 EXTENT_DIRTY | EXTENT_DELALLOC |
1131 EXTENT_DO_ACCOUNTING, 0, 0, NULL, mask);
1134 int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
1137 return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
1141 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
1142 struct extent_state **cached_state, gfp_t mask)
1144 return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0,
1145 NULL, cached_state, mask);
1148 static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
1149 u64 end, struct extent_state **cached_state,
1152 return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
1153 cached_state, mask);
1157 * either insert or lock the state struct between start and end; use mask to tell
1158 * us if waiting is desired.
1160 int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1161 int bits, struct extent_state **cached_state, gfp_t mask)
1166 err = set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
1167 EXTENT_LOCKED, &failed_start,
1168 cached_state, mask);
1169 if (err == -EEXIST && (mask & __GFP_WAIT)) {
1170 wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
1171 start = failed_start;
1175 WARN_ON(start > end);
1180 int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
1182 return lock_extent_bits(tree, start, end, 0, NULL, mask);
1185 int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
1191 err = set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
1192 &failed_start, NULL, mask);
1193 if (err == -EEXIST) {
1194 if (failed_start > start)
1195 clear_extent_bit(tree, start, failed_start - 1,
1196 EXTENT_LOCKED, 1, 0, NULL, mask);
1202 int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
1203 struct extent_state **cached, gfp_t mask)
1205 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
1209 int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
1211 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
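/*
 * Usage sketch (illustrative only; GFP_NOFS is an assumption): the usual
 * pairing of the helpers above.  lock_extent() sleeps until no other
 * locker holds any part of the inclusive range, and unlock_extent()
 * clears EXTENT_LOCKED and wakes waiters.
 */
static void example_locked_region(struct extent_io_tree *tree,
				  u64 start, u64 end)
{
	lock_extent(tree, start, end, GFP_NOFS);
	/* the byte range is exclusively held here */
	unlock_extent(tree, start, end, GFP_NOFS);
}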
1216 * helper function to set both the pages and the extents in the tree to writeback
1218 static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
1220 unsigned long index = start >> PAGE_CACHE_SHIFT;
1221 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1224 while (index <= end_index) {
1225 page = find_get_page(tree->mapping, index);
1227 set_page_writeback(page);
1228 page_cache_release(page);
1234 /* find the first state struct with 'bits' set after 'start', and
1235 * return it.  tree->lock must be held.  NULL will be returned if
1236 * nothing was found after 'start'
1238 struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
1239 u64 start, int bits)
1241 struct rb_node *node;
1242 struct extent_state *state;
1245 * this search will find all the extents that end after
1248 node = tree_search(tree, start);
1253 state = rb_entry(node, struct extent_state, rb_node);
1254 if (state->end >= start && (state->state & bits))
1257 node = rb_next(node);
1266 * find the first offset in the io tree with 'bits' set. zero is
1267 * returned if we find something, and *start_ret and *end_ret are
1268 * set to reflect the state struct that was found.
1270 * If nothing was found, 1 is returned, < 0 on error
1272 int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
1273 u64 *start_ret, u64 *end_ret, int bits)
1275 struct extent_state *state;
1278 spin_lock(&tree->lock);
1279 state = find_first_extent_bit_state(tree, start, bits);
1281 *start_ret = state->start;
1282 *end_ret = state->end;
1285 spin_unlock(&tree->lock);
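/*
 * Usage sketch (illustrative only; the EXTENT_DIRTY choice is an
 * assumption): walk every range carrying a bit by repeatedly asking for
 * the first match at or after 'start'.
 */
static void example_walk_dirty_ranges(struct extent_io_tree *tree)
{
	u64 start = 0;
	u64 found_start;
	u64 found_end;

	while (find_first_extent_bit(tree, start, &found_start,
				     &found_end, EXTENT_DIRTY) == 0) {
		/* [found_start, found_end] has EXTENT_DIRTY set */
		if (found_end == (u64)-1)
			break;
		start = found_end + 1;
	}
}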
1290 * find a contiguous range of bytes in the file marked as delalloc, not
1291 * more than 'max_bytes'. start and end are used to return the range,
1293 * 1 is returned if we find something, 0 if nothing was in the tree
1295 static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
1296 u64 *start, u64 *end, u64 max_bytes,
1297 struct extent_state **cached_state)
1299 struct rb_node *node;
1300 struct extent_state *state;
1301 u64 cur_start = *start;
1303 u64 total_bytes = 0;
1305 spin_lock(&tree->lock);
1308 * this search will find all the extents that end after
1311 node = tree_search(tree, cur_start);
1319 state = rb_entry(node, struct extent_state, rb_node);
1320 if (found && (state->start != cur_start ||
1321 (state->state & EXTENT_BOUNDARY))) {
1324 if (!(state->state & EXTENT_DELALLOC)) {
1330 *start = state->start;
1331 *cached_state = state;
1332 atomic_inc(&state->refs);
1336 cur_start = state->end + 1;
1337 node = rb_next(node);
1340 total_bytes += state->end - state->start + 1;
1341 if (total_bytes >= max_bytes)
1345 spin_unlock(&tree->lock);
1349 static noinline int __unlock_for_delalloc(struct inode *inode,
1350 struct page *locked_page,
1354 struct page *pages[16];
1355 unsigned long index = start >> PAGE_CACHE_SHIFT;
1356 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1357 unsigned long nr_pages = end_index - index + 1;
1360 if (index == locked_page->index && end_index == index)
1363 while (nr_pages > 0) {
1364 ret = find_get_pages_contig(inode->i_mapping, index,
1365 min_t(unsigned long, nr_pages,
1366 ARRAY_SIZE(pages)), pages);
1367 for (i = 0; i < ret; i++) {
1368 if (pages[i] != locked_page)
1369 unlock_page(pages[i]);
1370 page_cache_release(pages[i]);
1379 static noinline int lock_delalloc_pages(struct inode *inode,
1380 struct page *locked_page,
1384 unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT;
1385 unsigned long start_index = index;
1386 unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT;
1387 unsigned long pages_locked = 0;
1388 struct page *pages[16];
1389 unsigned long nrpages;
1393 /* the caller is responsible for locking the start index */
1394 if (index == locked_page->index && index == end_index)
1397 /* skip the page at the start index */
1398 nrpages = end_index - index + 1;
1399 while (nrpages > 0) {
1400 ret = find_get_pages_contig(inode->i_mapping, index,
1401 min_t(unsigned long,
1402 nrpages, ARRAY_SIZE(pages)), pages);
1407 /* now we have an array of pages, lock them all */
1408 for (i = 0; i < ret; i++) {
1410 * the caller is taking responsibility for
1413 if (pages[i] != locked_page) {
1414 lock_page(pages[i]);
1415 if (!PageDirty(pages[i]) ||
1416 pages[i]->mapping != inode->i_mapping) {
1418 unlock_page(pages[i]);
1419 page_cache_release(pages[i]);
1423 page_cache_release(pages[i]);
1432 if (ret && pages_locked) {
1433 __unlock_for_delalloc(inode, locked_page,
1435 ((u64)(start_index + pages_locked - 1)) <<
1442 * find a contiguous range of bytes in the file marked as delalloc, not
1443 * more than 'max_bytes'. start and end are used to return the range,
1445 * 1 is returned if we find something, 0 if nothing was in the tree
1447 static noinline u64 find_lock_delalloc_range(struct inode *inode,
1448 struct extent_io_tree *tree,
1449 struct page *locked_page,
1450 u64 *start, u64 *end,
1456 struct extent_state *cached_state = NULL;
1461 /* step one, find a bunch of delalloc bytes starting at start */
1462 delalloc_start = *start;
1464 found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
1465 max_bytes, &cached_state);
1466 if (!found || delalloc_end <= *start) {
1467 *start = delalloc_start;
1468 *end = delalloc_end;
1469 free_extent_state(cached_state);
1474 * start comes from the offset of locked_page. We have to lock
1475 * pages in order, so we can't process delalloc bytes before
1478 if (delalloc_start < *start)
1479 delalloc_start = *start;
1482 * make sure to limit the number of pages we try to lock down
1485 if (delalloc_end + 1 - delalloc_start > max_bytes && loops)
1486 delalloc_end = delalloc_start + PAGE_CACHE_SIZE - 1;
1488 /* step two, lock all the pages after the page that has start */
1489 ret = lock_delalloc_pages(inode, locked_page,
1490 delalloc_start, delalloc_end);
1491 if (ret == -EAGAIN) {
1492 /* some of the pages are gone, lets avoid looping by
1493 * shortening the size of the delalloc range we're searching
1495 free_extent_state(cached_state);
1497 unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1);
1498 max_bytes = PAGE_CACHE_SIZE - offset;
1508 /* step three, lock the state bits for the whole range */
1509 lock_extent_bits(tree, delalloc_start, delalloc_end,
1510 0, &cached_state, GFP_NOFS);
1512 /* then test to make sure it is all still delalloc */
1513 ret = test_range_bit(tree, delalloc_start, delalloc_end,
1514 EXTENT_DELALLOC, 1, cached_state);
1516 unlock_extent_cached(tree, delalloc_start, delalloc_end,
1517 &cached_state, GFP_NOFS);
1518 __unlock_for_delalloc(inode, locked_page,
1519 delalloc_start, delalloc_end);
1523 free_extent_state(cached_state);
1524 *start = delalloc_start;
1525 *end = delalloc_end;
1530 int extent_clear_unlock_delalloc(struct inode *inode,
1531 struct extent_io_tree *tree,
1532 u64 start, u64 end, struct page *locked_page,
1536 struct page *pages[16];
1537 unsigned long index = start >> PAGE_CACHE_SHIFT;
1538 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1539 unsigned long nr_pages = end_index - index + 1;
1543 if (op & EXTENT_CLEAR_UNLOCK)
1544 clear_bits |= EXTENT_LOCKED;
1545 if (op & EXTENT_CLEAR_DIRTY)
1546 clear_bits |= EXTENT_DIRTY;
1548 if (op & EXTENT_CLEAR_DELALLOC)
1549 clear_bits |= EXTENT_DELALLOC;
1551 clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS);
1552 if (!(op & (EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
1553 EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK |
1554 EXTENT_SET_PRIVATE2)))
1557 while (nr_pages > 0) {
1558 ret = find_get_pages_contig(inode->i_mapping, index,
1559 min_t(unsigned long,
1560 nr_pages, ARRAY_SIZE(pages)), pages);
1561 for (i = 0; i < ret; i++) {
1563 if (op & EXTENT_SET_PRIVATE2)
1564 SetPagePrivate2(pages[i]);
1566 if (pages[i] == locked_page) {
1567 page_cache_release(pages[i]);
1570 if (op & EXTENT_CLEAR_DIRTY)
1571 clear_page_dirty_for_io(pages[i]);
1572 if (op & EXTENT_SET_WRITEBACK)
1573 set_page_writeback(pages[i]);
1574 if (op & EXTENT_END_WRITEBACK)
1575 end_page_writeback(pages[i]);
1576 if (op & EXTENT_CLEAR_UNLOCK_PAGE)
1577 unlock_page(pages[i]);
1578 page_cache_release(pages[i]);
1588 * count the number of bytes in the tree that have a given bit(s)
1589 * set. This can be fairly slow, except for EXTENT_DIRTY which is
1590 * cached. The total number found is returned.
1592 u64 count_range_bits(struct extent_io_tree *tree,
1593 u64 *start, u64 search_end, u64 max_bytes,
1594 unsigned long bits, int contig)
1596 struct rb_node *node;
1597 struct extent_state *state;
1598 u64 cur_start = *start;
1599 u64 total_bytes = 0;
1603 if (search_end <= cur_start) {
1608 spin_lock(&tree->lock);
1609 if (cur_start == 0 && bits == EXTENT_DIRTY) {
1610 total_bytes = tree->dirty_bytes;
1614 * this search will find all the extents that end after
1617 node = tree_search(tree, cur_start);
1622 state = rb_entry(node, struct extent_state, rb_node);
1623 if (state->start > search_end)
1625 if (contig && found && state->start > last + 1)
1627 if (state->end >= cur_start && (state->state & bits) == bits) {
1628 total_bytes += min(search_end, state->end) + 1 -
1629 max(cur_start, state->start);
1630 if (total_bytes >= max_bytes)
1633 *start = max(cur_start, state->start);
1637 } else if (contig && found) {
1640 node = rb_next(node);
1645 spin_unlock(&tree->lock);
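/*
 * Usage sketch (illustrative only): count the dirty bytes in the whole
 * tree.  Starting at offset 0 with bits == EXTENT_DIRTY takes the fast
 * path above that simply reads tree->dirty_bytes.
 */
static u64 example_count_dirty_bytes(struct extent_io_tree *tree)
{
	u64 start = 0;

	return count_range_bits(tree, &start, (u64)-1, (u64)-1,
				EXTENT_DIRTY, 0);
}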
1650 * set the private field for a given byte offset in the tree. If there isn't
1651 * an extent_state there already, this does nothing.
1653 int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
1655 struct rb_node *node;
1656 struct extent_state *state;
1659 spin_lock(&tree->lock);
1661 * this search will find all the extents that end after
1664 node = tree_search(tree, start);
1669 state = rb_entry(node, struct extent_state, rb_node);
1670 if (state->start != start) {
1674 state->private = private;
1676 spin_unlock(&tree->lock);
1680 int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
1682 struct rb_node *node;
1683 struct extent_state *state;
1686 spin_lock(&tree->lock);
1688 * this search will find all the extents that end after
1691 node = tree_search(tree, start);
1696 state = rb_entry(node, struct extent_state, rb_node);
1697 if (state->start != start) {
1701 *private = state->private;
1703 spin_unlock(&tree->lock);
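/*
 * Usage sketch (illustrative only; the value stored is arbitrary): stash
 * a cookie on the state that starts exactly at 'start' and read it back.
 * The io-failure handling later in this file uses this pair to attach
 * io_failure_record pointers to the failure tree.
 */
static int example_state_private(struct extent_io_tree *tree, u64 start)
{
	u64 val;
	int ret;

	ret = set_state_private(tree, start, 0xc0ffee);
	if (ret)
		return ret;	/* no state starts exactly at 'start' */
	return get_state_private(tree, start, &val);
}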
1708 * searches a range in the state tree for a given mask.
1709 * If 'filled' == 1, this returns 1 only if every extent in the range
1710 * has the bits set. Otherwise, 1 is returned if any bit in the
1711 * range is found set.
1713 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
1714 int bits, int filled, struct extent_state *cached)
1716 struct extent_state *state = NULL;
1717 struct rb_node *node;
1720 spin_lock(&tree->lock);
1721 if (cached && cached->tree && cached->start <= start &&
1722 cached->end > start)
1723 node = &cached->rb_node;
1725 node = tree_search(tree, start);
1726 while (node && start <= end) {
1727 state = rb_entry(node, struct extent_state, rb_node);
1729 if (filled && state->start > start) {
1734 if (state->start > end)
1737 if (state->state & bits) {
1741 } else if (filled) {
1746 if (state->end == (u64)-1)
1749 start = state->end + 1;
1752 node = rb_next(node);
1759 spin_unlock(&tree->lock);
1764 * helper function to set a given page up to date if all the
1765 * extents in the tree for that page are up to date
1767 static int check_page_uptodate(struct extent_io_tree *tree,
1770 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1771 u64 end = start + PAGE_CACHE_SIZE - 1;
1772 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
1773 SetPageUptodate(page);
1778 * helper function to unlock a page if all the extents in the tree
1779 * for that page are unlocked
1781 static int check_page_locked(struct extent_io_tree *tree,
1784 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1785 u64 end = start + PAGE_CACHE_SIZE - 1;
1786 if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL))
1792 * helper function to end page writeback if all the extents
1793 * in the tree for that page are done with writeback
1795 static int check_page_writeback(struct extent_io_tree *tree,
1798 end_page_writeback(page);
1803 * When IO fails, either with EIO or csum verification fails, we
1804 * try other mirrors that might have a good copy of the data. This
1805 * io_failure_record is used to record state as we go through all the
1806 * mirrors. If another mirror has good data, the page is set up to date
1807 * and things continue. If a good mirror can't be found, the original
1808 * bio end_io callback is called to indicate things have failed.
1810 struct io_failure_record {
1815 unsigned long bio_flags;
1821 static int free_io_failure(struct inode *inode, struct io_failure_record *rec,
1826 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
1828 set_state_private(failure_tree, rec->start, 0);
1829 ret = clear_extent_bits(failure_tree, rec->start,
1830 rec->start + rec->len - 1,
1831 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
1836 ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start,
1837 rec->start + rec->len - 1,
1838 EXTENT_DAMAGED, GFP_NOFS);
1847 static void repair_io_failure_callback(struct bio *bio, int err)
1849 complete(bio->bi_private);
1853 * this bypasses the standard btrfs submit functions deliberately, as
1854 * the standard behavior is to write all copies in a raid setup. here we only
1855 * want to write the one bad copy. so we do the mapping for ourselves and issue
1856 * submit_bio directly.
1857 * to avoid any synchronization issues, wait for the data after writing, which
1858 * actually prevents the read that triggered the error from finishing.
1859 * currently, there can be no more than two copies of every data bit. thus,
1860 * exactly one rewrite is required.
1862 int repair_io_failure(struct btrfs_mapping_tree *map_tree, u64 start,
1863 u64 length, u64 logical, struct page *page,
1867 struct btrfs_device *dev;
1868 DECLARE_COMPLETION_ONSTACK(compl);
1871 struct btrfs_bio *bbio = NULL;
1874 BUG_ON(!mirror_num);
1876 bio = bio_alloc(GFP_NOFS, 1);
1879 bio->bi_private = &compl;
1880 bio->bi_end_io = repair_io_failure_callback;
1882 map_length = length;
1884 ret = btrfs_map_block(map_tree, WRITE, logical,
1885 &map_length, &bbio, mirror_num);
1890 BUG_ON(mirror_num != bbio->mirror_num);
1891 sector = bbio->stripes[mirror_num-1].physical >> 9;
1892 bio->bi_sector = sector;
1893 dev = bbio->stripes[mirror_num-1].dev;
1895 if (!dev || !dev->bdev || !dev->writeable) {
1899 bio->bi_bdev = dev->bdev;
1900 bio_add_page(bio, page, length, start-page_offset(page));
1901 btrfsic_submit_bio(WRITE_SYNC, bio);
1902 wait_for_completion(&compl);
1904 if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
1905 /* try to remap that extent elsewhere? */
1910 printk(KERN_INFO "btrfs read error corrected: ino %lu off %llu (dev %s "
1911 "sector %llu)\n", page->mapping->host->i_ino, start,
1918 int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
1921 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
1922 u64 start = eb->start;
1923 unsigned long i, num_pages = num_extent_pages(eb->start, eb->len);
1926 for (i = 0; i < num_pages; i++) {
1927 struct page *p = extent_buffer_page(eb, i);
1928 ret = repair_io_failure(map_tree, start, PAGE_CACHE_SIZE,
1929 start, p, mirror_num);
1932 start += PAGE_CACHE_SIZE;
1939 * each time an IO finishes, we do a fast check in the IO failure tree
1940 * to see if we need to process or clean up an io_failure_record
1942 static int clean_io_failure(u64 start, struct page *page)
1945 u64 private_failure;
1946 struct io_failure_record *failrec;
1947 struct btrfs_mapping_tree *map_tree;
1948 struct extent_state *state;
1952 struct inode *inode = page->mapping->host;
1955 ret = count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
1956 (u64)-1, 1, EXTENT_DIRTY, 0);
1960 ret = get_state_private(&BTRFS_I(inode)->io_failure_tree, start,
1965 failrec = (struct io_failure_record *)(unsigned long) private_failure;
1966 BUG_ON(!failrec->this_mirror);
1968 if (failrec->in_validation) {
1969 /* there was no real error, just free the record */
1970 pr_debug("clean_io_failure: freeing dummy error at %llu\n",
1976 spin_lock(&BTRFS_I(inode)->io_tree.lock);
1977 state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
1980 spin_unlock(&BTRFS_I(inode)->io_tree.lock);
1982 if (state && state->start == failrec->start) {
1983 map_tree = &BTRFS_I(inode)->root->fs_info->mapping_tree;
1984 num_copies = btrfs_num_copies(map_tree, failrec->logical,
1986 if (num_copies > 1) {
1987 ret = repair_io_failure(map_tree, start, failrec->len,
1988 failrec->logical, page,
1989 failrec->failed_mirror);
1996 ret = free_io_failure(inode, failrec, did_repair);
2002 * this is a generic handler for readpage errors (default
2003 * readpage_io_failed_hook). if other copies exist, read those and write back
2004 * good data to the failed position. it does not try to remap the
2005 * failed extent elsewhere, hoping the device will be smart enough to do this as
2009 static int bio_readpage_error(struct bio *failed_bio, struct page *page,
2010 u64 start, u64 end, int failed_mirror,
2011 struct extent_state *state)
2013 struct io_failure_record *failrec = NULL;
2015 struct extent_map *em;
2016 struct inode *inode = page->mapping->host;
2017 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2018 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2019 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
2026 BUG_ON(failed_bio->bi_rw & REQ_WRITE);
2028 ret = get_state_private(failure_tree, start, &private);
2030 failrec = kzalloc(sizeof(*failrec), GFP_NOFS);
2033 failrec->start = start;
2034 failrec->len = end - start + 1;
2035 failrec->this_mirror = 0;
2036 failrec->bio_flags = 0;
2037 failrec->in_validation = 0;
2039 read_lock(&em_tree->lock);
2040 em = lookup_extent_mapping(em_tree, start, failrec->len);
2042 read_unlock(&em_tree->lock);
2047 if (em->start > start || em->start + em->len < start) {
2048 free_extent_map(em);
2051 read_unlock(&em_tree->lock);
2053 if (!em || IS_ERR(em)) {
2057 logical = start - em->start;
2058 logical = em->block_start + logical;
2059 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2060 logical = em->block_start;
2061 failrec->bio_flags = EXTENT_BIO_COMPRESSED;
2062 extent_set_compress_type(&failrec->bio_flags,
2065 pr_debug("bio_readpage_error: (new) logical=%llu, start=%llu, "
2066 "len=%llu\n", logical, start, failrec->len);
2067 failrec->logical = logical;
2068 free_extent_map(em);
2070 /* set the bits in the private failure tree */
2071 ret = set_extent_bits(failure_tree, start, end,
2072 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
2074 ret = set_state_private(failure_tree, start,
2075 (u64)(unsigned long)failrec);
2076 /* set the bits in the inode's tree */
2078 ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED,
2085 failrec = (struct io_failure_record *)(unsigned long)private;
2086 pr_debug("bio_readpage_error: (found) logical=%llu, "
2087 "start=%llu, len=%llu, validation=%d\n",
2088 failrec->logical, failrec->start, failrec->len,
2089 failrec->in_validation);
2091 * when data can be on disk more than twice, add to failrec here
2092 * (e.g. with a list for failed_mirror) to make
2093 * clean_io_failure() clean all those errors at once.
2096 num_copies = btrfs_num_copies(
2097 &BTRFS_I(inode)->root->fs_info->mapping_tree,
2098 failrec->logical, failrec->len);
2099 if (num_copies == 1) {
2101 * we only have a single copy of the data, so don't bother with
2102 * all the retry and error correction code that follows. no
2103 * matter what the error is, it is very likely to persist.
2105 pr_debug("bio_readpage_error: cannot repair, num_copies == 1. "
2106 "state=%p, num_copies=%d, next_mirror %d, "
2107 "failed_mirror %d\n", state, num_copies,
2108 failrec->this_mirror, failed_mirror);
2109 free_io_failure(inode, failrec, 0);
2114 spin_lock(&tree->lock);
2115 state = find_first_extent_bit_state(tree, failrec->start,
2117 if (state && state->start != failrec->start)
2119 spin_unlock(&tree->lock);
2123 * there are two premises:
2124 * a) deliver good data to the caller
2125 * b) correct the bad sectors on disk
2127 if (failed_bio->bi_vcnt > 1) {
2129 * to fulfill b), we need to know the exact failing sectors, as
2130 * we don't want to rewrite any more than the failed ones. thus,
2131 * we need separate read requests for the failed bio
2133 * if the following BUG_ON triggers, our validation request got
2134 * merged. we need separate requests for our algorithm to work.
2136 BUG_ON(failrec->in_validation);
2137 failrec->in_validation = 1;
2138 failrec->this_mirror = failed_mirror;
2139 read_mode = READ_SYNC | REQ_FAILFAST_DEV;
2142 * we're ready to fulfill a) and b) at the same time. get a good copy
2143 * of the failed sector and if we succeed, we have set up
2144 * everything for repair_io_failure to do the rest for us.
2146 if (failrec->in_validation) {
2147 BUG_ON(failrec->this_mirror != failed_mirror);
2148 failrec->in_validation = 0;
2149 failrec->this_mirror = 0;
2151 failrec->failed_mirror = failed_mirror;
2152 failrec->this_mirror++;
2153 if (failrec->this_mirror == failed_mirror)
2154 failrec->this_mirror++;
2155 read_mode = READ_SYNC;
2158 if (!state || failrec->this_mirror > num_copies) {
2159 pr_debug("bio_readpage_error: (fail) state=%p, num_copies=%d, "
2160 "next_mirror %d, failed_mirror %d\n", state,
2161 num_copies, failrec->this_mirror, failed_mirror);
2162 free_io_failure(inode, failrec, 0);
2166 bio = bio_alloc(GFP_NOFS, 1);
2167 bio->bi_private = state;
2168 bio->bi_end_io = failed_bio->bi_end_io;
2169 bio->bi_sector = failrec->logical >> 9;
2170 bio->bi_bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
2173 bio_add_page(bio, page, failrec->len, start - page_offset(page));
2175 pr_debug("bio_readpage_error: submitting new read[%#x] to "
2176 "this_mirror=%d, num_copies=%d, in_validation=%d\n", read_mode,
2177 failrec->this_mirror, num_copies, failrec->in_validation);
2179 ret = tree->ops->submit_bio_hook(inode, read_mode, bio,
2180 failrec->this_mirror,
2181 failrec->bio_flags, 0);
2185 /* lots and lots of room for performance fixes in the end_bio funcs */
2187 int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
2189 int uptodate = (err == 0);
2190 struct extent_io_tree *tree;
2193 tree = &BTRFS_I(page->mapping->host)->io_tree;
2195 if (tree->ops && tree->ops->writepage_end_io_hook) {
2196 ret = tree->ops->writepage_end_io_hook(page, start,
2197 end, NULL, uptodate);
2202 if (!uptodate && tree->ops &&
2203 tree->ops->writepage_io_failed_hook) {
2204 ret = tree->ops->writepage_io_failed_hook(NULL, page,
2206 /* Writeback already completed */
2212 clear_extent_uptodate(tree, start, end, NULL, GFP_NOFS);
2213 ClearPageUptodate(page);
2220 * after a writepage IO is done, we need to:
2221 * clear the uptodate bits on error
2222 * clear the writeback bits in the extent tree for this IO
2223 * end_page_writeback if the page has no more pending IO
2225 * Scheduling is not allowed, so the extent state tree is expected
2226 * to have one and only one object corresponding to this IO.
2228 static void end_bio_extent_writepage(struct bio *bio, int err)
2230 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
2231 struct extent_io_tree *tree;
2237 struct page *page = bvec->bv_page;
2238 tree = &BTRFS_I(page->mapping->host)->io_tree;
2240 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
2242 end = start + bvec->bv_len - 1;
2244 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
2249 if (--bvec >= bio->bi_io_vec)
2250 prefetchw(&bvec->bv_page->flags);
2252 if (end_extent_writepage(page, err, start, end))
2256 end_page_writeback(page);
2258 check_page_writeback(tree, page);
2259 } while (bvec >= bio->bi_io_vec);
2265 * after a readpage IO is done, we need to:
2266 * clear the uptodate bits on error
2267 * set the uptodate bits if things worked
2268 * set the page up to date if all extents in the tree are uptodate
2269 * clear the lock bit in the extent tree
2270 * unlock the page if there are no other extents locked for it
2272 * Scheduling is not allowed, so the extent state tree is expected
2273 * to have one and only one object corresponding to this IO.
2275 static void end_bio_extent_readpage(struct bio *bio, int err)
2277 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
2278 struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
2279 struct bio_vec *bvec = bio->bi_io_vec;
2280 struct extent_io_tree *tree;
2291 struct page *page = bvec->bv_page;
2292 struct extent_state *cached = NULL;
2293 struct extent_state *state;
2295 pr_debug("end_bio_extent_readpage: bi_vcnt=%d, idx=%d, err=%d, "
2296 "mirror=%ld\n", bio->bi_vcnt, bio->bi_idx, err,
2297 (long int)bio->bi_bdev);
2298 tree = &BTRFS_I(page->mapping->host)->io_tree;
2300 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
2302 end = start + bvec->bv_len - 1;
2304 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
2309 if (++bvec <= bvec_end)
2310 prefetchw(&bvec->bv_page->flags);
2312 spin_lock(&tree->lock);
2313 state = find_first_extent_bit_state(tree, start, EXTENT_LOCKED);
2314 if (state && state->start == start) {
2316 * take a reference on the state, unlock will drop
2319 cache_state(state, &cached);
2321 spin_unlock(&tree->lock);
2323 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
2324 ret = tree->ops->readpage_end_io_hook(page, start, end,
2329 clean_io_failure(start, page);
2333 failed_mirror = (int)(unsigned long)bio->bi_bdev;
2335 if (!uptodate && tree->ops && tree->ops->readpage_io_failed_hook) {
2336 ret = tree->ops->readpage_io_failed_hook(page, failed_mirror);
2338 test_bit(BIO_UPTODATE, &bio->bi_flags))
2340 } else if (!uptodate) {
2342 * The generic bio_readpage_error handles errors the
2343 * following way: If possible, new read requests are
2344 * created and submitted and will end up in
2345 * end_bio_extent_readpage as well (if we're lucky, not
2346 * in the !uptodate case). In that case it returns 0 and
2347 * we just go on with the next page in our bio. If it
2348 * can't handle the error it will return -EIO and we
2349 * remain responsible for that page.
2351 ret = bio_readpage_error(bio, page, start, end,
2352 failed_mirror, NULL);
2355 test_bit(BIO_UPTODATE, &bio->bi_flags);
2358 uncache_state(&cached);
2363 if (uptodate && tree->track_uptodate) {
2364 set_extent_uptodate(tree, start, end, &cached,
2367 unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
2371 SetPageUptodate(page);
2373 ClearPageUptodate(page);
2379 check_page_uptodate(tree, page);
2381 ClearPageUptodate(page);
2384 check_page_locked(tree, page);
2386 } while (bvec <= bvec_end);
2392 btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
2397 bio = bio_alloc(gfp_flags, nr_vecs);
2399 if (bio == NULL && (current->flags & PF_MEMALLOC)) {
2400 while (!bio && (nr_vecs /= 2))
2401 bio = bio_alloc(gfp_flags, nr_vecs);
2406 bio->bi_bdev = bdev;
2407 bio->bi_sector = first_sector;
2412 static int submit_one_bio(int rw, struct bio *bio, int mirror_num,
2413 unsigned long bio_flags)
2416 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
2417 struct page *page = bvec->bv_page;
2418 struct extent_io_tree *tree = bio->bi_private;
2421 start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
2423 bio->bi_private = NULL;
2427 if (tree->ops && tree->ops->submit_bio_hook)
2428 ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
2429 mirror_num, bio_flags, start);
2431 btrfsic_submit_bio(rw, bio);
2433 if (bio_flagged(bio, BIO_EOPNOTSUPP))
2439 static int submit_extent_page(int rw, struct extent_io_tree *tree,
2440 struct page *page, sector_t sector,
2441 size_t size, unsigned long offset,
2442 struct block_device *bdev,
2443 struct bio **bio_ret,
2444 unsigned long max_pages,
2445 bio_end_io_t end_io_func,
2447 unsigned long prev_bio_flags,
2448 unsigned long bio_flags)
2454 int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED;
2455 int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
2456 size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE);
2458 if (bio_ret && *bio_ret) {
2461 contig = bio->bi_sector == sector;
2463 contig = bio->bi_sector + (bio->bi_size >> 9) ==
2466 if (prev_bio_flags != bio_flags || !contig ||
2467 (tree->ops && tree->ops->merge_bio_hook &&
2468 tree->ops->merge_bio_hook(page, offset, page_size, bio,
2470 bio_add_page(bio, page, page_size, offset) < page_size) {
2471 ret = submit_one_bio(rw, bio, mirror_num,
2478 if (this_compressed)
2481 nr = bio_get_nr_vecs(bdev);
2483 bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
2487 bio_add_page(bio, page, page_size, offset);
2488 bio->bi_end_io = end_io_func;
2489 bio->bi_private = tree;
2494 ret = submit_one_bio(rw, bio, mirror_num, bio_flags);
2499 void attach_extent_buffer_page(struct extent_buffer *eb, struct page *page)
2501 if (!PagePrivate(page)) {
2502 SetPagePrivate(page);
2503 page_cache_get(page);
2504 set_page_private(page, (unsigned long)eb);
2506 WARN_ON(page->private != (unsigned long)eb);
2510 void set_page_extent_mapped(struct page *page)
2512 if (!PagePrivate(page)) {
2513 SetPagePrivate(page);
2514 page_cache_get(page);
2515 set_page_private(page, EXTENT_PAGE_PRIVATE);
2520 * basic readpage implementation. Locked extent state structs are inserted
2521 * into the tree and are removed when the IO is done (by the end_io
2524 static int __extent_read_full_page(struct extent_io_tree *tree,
2526 get_extent_t *get_extent,
2527 struct bio **bio, int mirror_num,
2528 unsigned long *bio_flags)
2530 struct inode *inode = page->mapping->host;
2531 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2532 u64 page_end = start + PAGE_CACHE_SIZE - 1;
2536 u64 last_byte = i_size_read(inode);
2540 struct extent_map *em;
2541 struct block_device *bdev;
2542 struct btrfs_ordered_extent *ordered;
2545 size_t pg_offset = 0;
2547 size_t disk_io_size;
2548 size_t blocksize = inode->i_sb->s_blocksize;
2549 unsigned long this_bio_flag = 0;
2551 set_page_extent_mapped(page);
2553 if (!PageUptodate(page)) {
2554 if (cleancache_get_page(page) == 0) {
2555 BUG_ON(blocksize != PAGE_SIZE);
2562 lock_extent(tree, start, end, GFP_NOFS);
2563 ordered = btrfs_lookup_ordered_extent(inode, start);
2566 unlock_extent(tree, start, end, GFP_NOFS);
2567 btrfs_start_ordered_extent(inode, ordered, 1);
2568 btrfs_put_ordered_extent(ordered);
2571 if (page->index == last_byte >> PAGE_CACHE_SHIFT) {
2573 size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1);
2576 iosize = PAGE_CACHE_SIZE - zero_offset;
2577 userpage = kmap_atomic(page, KM_USER0);
2578 memset(userpage + zero_offset, 0, iosize);
2579 flush_dcache_page(page);
2580 kunmap_atomic(userpage, KM_USER0);
2583 while (cur <= end) {
2584 if (cur >= last_byte) {
2586 struct extent_state *cached = NULL;
2588 iosize = PAGE_CACHE_SIZE - pg_offset;
2589 userpage = kmap_atomic(page, KM_USER0);
2590 memset(userpage + pg_offset, 0, iosize);
2591 flush_dcache_page(page);
2592 kunmap_atomic(userpage, KM_USER0);
2593 set_extent_uptodate(tree, cur, cur + iosize - 1,
2595 unlock_extent_cached(tree, cur, cur + iosize - 1,
2599 em = get_extent(inode, page, pg_offset, cur,
2601 if (IS_ERR_OR_NULL(em)) {
2603 unlock_extent(tree, cur, end, GFP_NOFS);
2606 extent_offset = cur - em->start;
2607 BUG_ON(extent_map_end(em) <= cur);
2610 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2611 this_bio_flag = EXTENT_BIO_COMPRESSED;
2612 extent_set_compress_type(&this_bio_flag,
2616 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2617 cur_end = min(extent_map_end(em) - 1, end);
2618 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
2619 if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
2620 disk_io_size = em->block_len;
2621 sector = em->block_start >> 9;
2623 sector = (em->block_start + extent_offset) >> 9;
2624 disk_io_size = iosize;
2627 block_start = em->block_start;
2628 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
2629 block_start = EXTENT_MAP_HOLE;
2630 free_extent_map(em);
2633 /* we've found a hole, just zero and go on */
2634 if (block_start == EXTENT_MAP_HOLE) {
2636 struct extent_state *cached = NULL;
2638 userpage = kmap_atomic(page, KM_USER0);
2639 memset(userpage + pg_offset, 0, iosize);
2640 flush_dcache_page(page);
2641 kunmap_atomic(userpage, KM_USER0);
2643 set_extent_uptodate(tree, cur, cur + iosize - 1,
2645 unlock_extent_cached(tree, cur, cur + iosize - 1,
2648 pg_offset += iosize;
2651 /* the get_extent function already copied into the page */
2652 if (test_range_bit(tree, cur, cur_end,
2653 EXTENT_UPTODATE, 1, NULL)) {
2654 check_page_uptodate(tree, page);
2655 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2657 pg_offset += iosize;
2660 /* we have an inline extent but it didn't get marked up
2661 * to date. Error out
2663 if (block_start == EXTENT_MAP_INLINE) {
2665 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2667 pg_offset += iosize;
2672 if (tree->ops && tree->ops->readpage_io_hook) {
2673 ret = tree->ops->readpage_io_hook(page, cur,
2677 unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
2679 ret = submit_extent_page(READ, tree, page,
2680 sector, disk_io_size, pg_offset,
2682 end_bio_extent_readpage, mirror_num,
2686 *bio_flags = this_bio_flag;
2691 pg_offset += iosize;
2695 if (!PageError(page))
2696 SetPageUptodate(page);
2702 int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
2703 get_extent_t *get_extent, int mirror_num)
2705 struct bio *bio = NULL;
2706 unsigned long bio_flags = 0;
2709 ret = __extent_read_full_page(tree, page, get_extent, &bio, mirror_num,
2712 ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
2716 static noinline void update_nr_written(struct page *page,
2717 struct writeback_control *wbc,
2718 unsigned long nr_written)
2720 wbc->nr_to_write -= nr_written;
2721 if (wbc->range_cyclic || (wbc->nr_to_write > 0 &&
2722 wbc->range_start == 0 && wbc->range_end == LLONG_MAX))
2723 page->mapping->writeback_index = page->index + nr_written;
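/*
 * Worked example with made-up numbers: suppose a range_cyclic writeback
 * pass reaches page->index == 100 with wbc->nr_to_write == 8 and three
 * pages were written on behalf of this call.  update_nr_written(page, wbc, 3)
 * leaves wbc->nr_to_write at 5 and, because range_cyclic satisfies the
 * condition above, records mapping->writeback_index = 103 so the next
 * cyclic pass resumes right after the work done here.
 */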
2727 * the writepage semantics are similar to regular writepage. extent
2728 * records are inserted to lock ranges in the tree, and as dirty areas
2729 * are found, they are marked writeback. Then the lock bits are removed
2730 * and the end_io handler clears the writeback ranges
2732 static int __extent_writepage(struct page *page, struct writeback_control *wbc,
2735 struct inode *inode = page->mapping->host;
2736 struct extent_page_data *epd = data;
2737 struct extent_io_tree *tree = epd->tree;
2738 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2740 u64 page_end = start + PAGE_CACHE_SIZE - 1;
2744 u64 last_byte = i_size_read(inode);
2748 struct extent_state *cached_state = NULL;
2749 struct extent_map *em;
2750 struct block_device *bdev;
2753 size_t pg_offset = 0;
2755 loff_t i_size = i_size_read(inode);
2756 unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
2762 unsigned long nr_written = 0;
2763 bool fill_delalloc = true;
2765 if (wbc->sync_mode == WB_SYNC_ALL)
2766 write_flags = WRITE_SYNC;
2768 write_flags = WRITE;
2770 trace___extent_writepage(page, inode, wbc);
2772 WARN_ON(!PageLocked(page));
2774 ClearPageError(page);
2776 pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
2777 if (page->index > end_index ||
2778 (page->index == end_index && !pg_offset)) {
2779 page->mapping->a_ops->invalidatepage(page, 0);
2784 if (page->index == end_index) {
2787 userpage = kmap_atomic(page, KM_USER0);
2788 memset(userpage + pg_offset, 0,
2789 PAGE_CACHE_SIZE - pg_offset);
2790 kunmap_atomic(userpage, KM_USER0);
2791 flush_dcache_page(page);
2795 set_page_extent_mapped(page);
2797 if (!tree->ops || !tree->ops->fill_delalloc)
2798 fill_delalloc = false;
2800 delalloc_start = start;
2803 if (!epd->extent_locked && fill_delalloc) {
2804 u64 delalloc_to_write = 0;
2806 * make sure the wbc mapping index is at least updated
2809 update_nr_written(page, wbc, 0);
2811 while (delalloc_end < page_end) {
2812 nr_delalloc = find_lock_delalloc_range(inode, tree,
2817 if (nr_delalloc == 0) {
2818 delalloc_start = delalloc_end + 1;
2821 ret = tree->ops->fill_delalloc(inode, page,
2828 * delalloc_end is already one less than the total
2829 * length, so we don't subtract one from
2832 delalloc_to_write += (delalloc_end - delalloc_start +
2835 delalloc_start = delalloc_end + 1;
2837 if (wbc->nr_to_write < delalloc_to_write) {
2840 if (delalloc_to_write < thresh * 2)
2841 thresh = delalloc_to_write;
2842 wbc->nr_to_write = min_t(u64, delalloc_to_write,
2846 /* did the fill delalloc function already unlock and start
2852 * we've unlocked the page, so we can't update
2853 * the mapping's writeback index, just update
2856 wbc->nr_to_write -= nr_written;
2860 if (tree->ops && tree->ops->writepage_start_hook) {
2861 ret = tree->ops->writepage_start_hook(page, start,
2864 /* Fixup worker will requeue */
2866 wbc->pages_skipped++;
2868 redirty_page_for_writepage(wbc, page);
2869 update_nr_written(page, wbc, nr_written);
2877 * we don't want to touch the inode after unlocking the page,
2878 * so we update the mapping writeback index now
2880 update_nr_written(page, wbc, nr_written + 1);
2883 if (last_byte <= start) {
2884 if (tree->ops && tree->ops->writepage_end_io_hook)
2885 tree->ops->writepage_end_io_hook(page, start,
2890 blocksize = inode->i_sb->s_blocksize;
2892 while (cur <= end) {
2893 if (cur >= last_byte) {
2894 if (tree->ops && tree->ops->writepage_end_io_hook)
2895 tree->ops->writepage_end_io_hook(page, cur,
2899 em = epd->get_extent(inode, page, pg_offset, cur,
2901 if (IS_ERR_OR_NULL(em)) {
2906 extent_offset = cur - em->start;
2907 BUG_ON(extent_map_end(em) <= cur);
2909 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2910 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
2911 sector = (em->block_start + extent_offset) >> 9;
2913 block_start = em->block_start;
2914 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
2915 free_extent_map(em);
2919 * compressed and inline extents are written through other
2922 if (compressed || block_start == EXTENT_MAP_HOLE ||
2923 block_start == EXTENT_MAP_INLINE) {
2925 * end_io notification does not happen here for
2926 * compressed extents
2928 if (!compressed && tree->ops &&
2929 tree->ops->writepage_end_io_hook)
2930 tree->ops->writepage_end_io_hook(page, cur,
2933 else if (compressed) {
2934 /* we don't want to end_page_writeback on
2935 * a compressed extent. this happens
2942 pg_offset += iosize;
2945 /* leave this out until we have a page_mkwrite call */
2946 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
2947 EXTENT_DIRTY, 0, NULL)) {
2949 pg_offset += iosize;
2953 if (tree->ops && tree->ops->writepage_io_hook) {
2954 ret = tree->ops->writepage_io_hook(page, cur,
2962 unsigned long max_nr = end_index + 1;
2964 set_range_writeback(tree, cur, cur + iosize - 1);
2965 if (!PageWriteback(page)) {
2966 printk(KERN_ERR "btrfs warning page %lu not "
2967 "writeback, cur %llu end %llu\n",
2968 page->index, (unsigned long long)cur,
2969 (unsigned long long)end);
2972 ret = submit_extent_page(write_flags, tree, page,
2973 sector, iosize, pg_offset,
2974 bdev, &epd->bio, max_nr,
2975 end_bio_extent_writepage,
2981 pg_offset += iosize;
2986 /* make sure the mapping tag for page dirty gets cleared */
2987 set_page_writeback(page);
2988 end_page_writeback(page);
2994 /* drop our reference on any cached states */
2995 free_extent_state(cached_state);
2999 static int eb_wait(void *word)
3005 static void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
3007 wait_on_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK, eb_wait,
3008 TASK_UNINTERRUPTIBLE);
3011 static int lock_extent_buffer_for_io(struct extent_buffer *eb,
3012 struct btrfs_fs_info *fs_info,
3013 struct extent_page_data *epd)
3015 unsigned long i, num_pages;
3019 if (!btrfs_try_tree_write_lock(eb)) {
3021 flush_write_bio(epd);
3022 btrfs_tree_lock(eb);
3025 if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
3026 btrfs_tree_unlock(eb);
3030 flush_write_bio(epd);
3034 wait_on_extent_buffer_writeback(eb);
3035 btrfs_tree_lock(eb);
3036 if (!test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags))
3038 btrfs_tree_unlock(eb);
3042 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
3043 set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3044 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
3045 spin_lock(&fs_info->delalloc_lock);
3046 if (fs_info->dirty_metadata_bytes >= eb->len)
3047 fs_info->dirty_metadata_bytes -= eb->len;
3050 spin_unlock(&fs_info->delalloc_lock);
3054 btrfs_tree_unlock(eb);
3059 num_pages = num_extent_pages(eb->start, eb->len);
3060 for (i = 0; i < num_pages; i++) {
3061 struct page *p = extent_buffer_page(eb, i);
3063 if (!trylock_page(p)) {
3065 flush_write_bio(epd);
3075 static void end_extent_buffer_writeback(struct extent_buffer *eb)
3077 clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3078 smp_mb__after_clear_bit();
3079 wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
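/*
 * Illustrative note: the waiter/waker pair above follows the usual
 * wait_on_bit() protocol.  The waker must clear the bit, issue a memory
 * barrier and only then call wake_up_bit(); otherwise the sleeper could
 * re-check a stale bit value and go back to sleep.  Generic shape, with
 * placeholder names:
 *
 *	waiter:						waker:
 *	wait_on_bit(&flags, MY_BIT, my_wait_fn,		clear_bit(MY_BIT, &flags);
 *		    TASK_UNINTERRUPTIBLE);		smp_mb__after_clear_bit();
 *							wake_up_bit(&flags, MY_BIT);
 */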
3082 static void end_bio_extent_buffer_writepage(struct bio *bio, int err)
3084 int uptodate = err == 0;
3085 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
3086 struct extent_buffer *eb;
3090 struct page *page = bvec->bv_page;
3093 eb = (struct extent_buffer *)page->private;
3095 done = atomic_dec_and_test(&eb->io_pages);
3097 if (!uptodate || test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
3098 set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3099 ClearPageUptodate(page);
3103 end_page_writeback(page);
3108 end_extent_buffer_writeback(eb);
3109 } while (bvec >= bio->bi_io_vec);
3115 static int write_one_eb(struct extent_buffer *eb,
3116 struct btrfs_fs_info *fs_info,
3117 struct writeback_control *wbc,
3118 struct extent_page_data *epd)
3120 struct block_device *bdev = fs_info->fs_devices->latest_bdev;
3121 u64 offset = eb->start;
3122 unsigned long i, num_pages;
3123 int rw = (epd->sync_io ? WRITE_SYNC : WRITE);
3126 clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3127 num_pages = num_extent_pages(eb->start, eb->len);
3128 atomic_set(&eb->io_pages, num_pages);
3129 for (i = 0; i < num_pages; i++) {
3130 struct page *p = extent_buffer_page(eb, i);
3132 clear_page_dirty_for_io(p);
3133 set_page_writeback(p);
3134 ret = submit_extent_page(rw, eb->tree, p, offset >> 9,
3135 PAGE_CACHE_SIZE, 0, bdev, &epd->bio,
3136 -1, end_bio_extent_buffer_writepage,
3139 set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3141 if (atomic_sub_and_test(num_pages - i, &eb->io_pages))
3142 end_extent_buffer_writeback(eb);
3146 offset += PAGE_CACHE_SIZE;
3147 update_nr_written(p, wbc, 1);
3151 if (unlikely(ret)) {
3152 for (; i < num_pages; i++) {
3153 struct page *p = extent_buffer_page(eb, i);
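/*
 * Worked example of the io_pages accounting with made-up numbers: for a
 * 4 page buffer, eb->io_pages starts at 4.  If pages 0 and 1 are submitted
 * and submitting page 2 fails, the atomic_sub_and_test(num_pages - i, ...)
 * above removes the 2 pages that will never be issued, while the end_io
 * callbacks for the 2 in-flight pages account for the rest; whichever path
 * brings the counter to 0 calls end_extent_buffer_writeback() and wakes
 * anyone blocked in wait_on_extent_buffer_writeback().
 */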
3161 int btree_write_cache_pages(struct address_space *mapping,
3162 struct writeback_control *wbc)
3164 struct extent_io_tree *tree = &BTRFS_I(mapping->host)->io_tree;
3165 struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
3166 struct extent_buffer *eb, *prev_eb = NULL;
3167 struct extent_page_data epd = {
3171 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
3175 int nr_to_write_done = 0;
3176 struct pagevec pvec;
3179 pgoff_t end; /* Inclusive */
3183 pagevec_init(&pvec, 0);
3184 if (wbc->range_cyclic) {
3185 index = mapping->writeback_index; /* Start from prev offset */
3188 index = wbc->range_start >> PAGE_CACHE_SHIFT;
3189 end = wbc->range_end >> PAGE_CACHE_SHIFT;
3192 if (wbc->sync_mode == WB_SYNC_ALL)
3193 tag = PAGECACHE_TAG_TOWRITE;
3195 tag = PAGECACHE_TAG_DIRTY;
3197 if (wbc->sync_mode == WB_SYNC_ALL)
3198 tag_pages_for_writeback(mapping, index, end);
3199 while (!done && !nr_to_write_done && (index <= end) &&
3200 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
3201 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
3205 for (i = 0; i < nr_pages; i++) {
3206 struct page *page = pvec.pages[i];
3208 if (!PagePrivate(page))
3211 if (!wbc->range_cyclic && page->index > end) {
3216 eb = (struct extent_buffer *)page->private;
3225 if (!atomic_inc_not_zero(&eb->refs)) {
3231 ret = lock_extent_buffer_for_io(eb, fs_info, &epd);
3233 free_extent_buffer(eb);
3237 ret = write_one_eb(eb, fs_info, wbc, &epd);
3240 free_extent_buffer(eb);
3243 free_extent_buffer(eb);
3246 * the filesystem may choose to bump up nr_to_write.
3247 * We have to make sure to honor the new nr_to_write
3250 nr_to_write_done = wbc->nr_to_write <= 0;
3252 pagevec_release(&pvec);
3255 if (!scanned && !done) {
3257 * We hit the last page and there is more work to be done: wrap
3258 * back to the start of the file
3264 flush_write_bio(&epd);
3269 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
3270 * @mapping: address space structure to write
3271 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
3272 * @writepage: function called for each page
3273 * @data: data passed to writepage function
3275 * If a page is already under I/O, write_cache_pages() skips it, even
3276 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
3277 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
3278 * and msync() need to guarantee that all the data which was dirty at the time
3279 * the call was made get new I/O started against them. If wbc->sync_mode is
3280 * WB_SYNC_ALL then we were called for data integrity and we must wait for
3281 * existing IO to complete.
3283 static int extent_write_cache_pages(struct extent_io_tree *tree,
3284 struct address_space *mapping,
3285 struct writeback_control *wbc,
3286 writepage_t writepage, void *data,
3287 void (*flush_fn)(void *))
3291 int nr_to_write_done = 0;
3292 struct pagevec pvec;
3295 pgoff_t end; /* Inclusive */
3299 pagevec_init(&pvec, 0);
3300 if (wbc->range_cyclic) {
3301 index = mapping->writeback_index; /* Start from prev offset */
3304 index = wbc->range_start >> PAGE_CACHE_SHIFT;
3305 end = wbc->range_end >> PAGE_CACHE_SHIFT;
3308 if (wbc->sync_mode == WB_SYNC_ALL)
3309 tag = PAGECACHE_TAG_TOWRITE;
3311 tag = PAGECACHE_TAG_DIRTY;
3313 if (wbc->sync_mode == WB_SYNC_ALL)
3314 tag_pages_for_writeback(mapping, index, end);
3315 while (!done && !nr_to_write_done && (index <= end) &&
3316 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
3317 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
3321 for (i = 0; i < nr_pages; i++) {
3322 struct page *page = pvec.pages[i];
3325 * At this point we hold neither mapping->tree_lock nor
3326 * lock on the page itself: the page may be truncated or
3327 * invalidated (changing page->mapping to NULL), or even
3328 * swizzled back from swapper_space to tmpfs file
3332 tree->ops->write_cache_pages_lock_hook) {
3333 tree->ops->write_cache_pages_lock_hook(page,
3336 if (!trylock_page(page)) {
3342 if (unlikely(page->mapping != mapping)) {
3347 if (!wbc->range_cyclic && page->index > end) {
3353 if (wbc->sync_mode != WB_SYNC_NONE) {
3354 if (PageWriteback(page))
3356 wait_on_page_writeback(page);
3359 if (PageWriteback(page) ||
3360 !clear_page_dirty_for_io(page)) {
3365 ret = (*writepage)(page, wbc, data);
3367 if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
3375 * the filesystem may choose to bump up nr_to_write.
3376 * We have to make sure to honor the new nr_to_write
3379 nr_to_write_done = wbc->nr_to_write <= 0;
3381 pagevec_release(&pvec);
3384 if (!scanned && !done) {
3386 * We hit the last page and there is more work to be done: wrap
3387 * back to the start of the file
3396 static void flush_epd_write_bio(struct extent_page_data *epd)
3400 submit_one_bio(WRITE_SYNC, epd->bio, 0, 0);
3402 submit_one_bio(WRITE, epd->bio, 0, 0);
3407 static noinline void flush_write_bio(void *data)
3409 struct extent_page_data *epd = data;
3410 flush_epd_write_bio(epd);
3413 int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
3414 get_extent_t *get_extent,
3415 struct writeback_control *wbc)
3418 struct extent_page_data epd = {
3421 .get_extent = get_extent,
3423 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
3426 ret = __extent_writepage(page, wbc, &epd);
3428 flush_epd_write_bio(&epd);
3432 int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
3433 u64 start, u64 end, get_extent_t *get_extent,
3437 struct address_space *mapping = inode->i_mapping;
3439 unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >>
3442 struct extent_page_data epd = {
3445 .get_extent = get_extent,
3447 .sync_io = mode == WB_SYNC_ALL,
3449 struct writeback_control wbc_writepages = {
3451 .nr_to_write = nr_pages * 2,
3452 .range_start = start,
3453 .range_end = end + 1,
3456 while (start <= end) {
3457 page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
3458 if (clear_page_dirty_for_io(page))
3459 ret = __extent_writepage(page, &wbc_writepages, &epd);
3461 if (tree->ops && tree->ops->writepage_end_io_hook)
3462 tree->ops->writepage_end_io_hook(page, start,
3463 start + PAGE_CACHE_SIZE - 1,
3467 page_cache_release(page);
3468 start += PAGE_CACHE_SIZE;
3471 flush_epd_write_bio(&epd);
3475 int extent_writepages(struct extent_io_tree *tree,
3476 struct address_space *mapping,
3477 get_extent_t *get_extent,
3478 struct writeback_control *wbc)
3481 struct extent_page_data epd = {
3484 .get_extent = get_extent,
3486 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
3489 ret = extent_write_cache_pages(tree, mapping, wbc,
3490 __extent_writepage, &epd,
3492 flush_epd_write_bio(&epd);
3496 int extent_readpages(struct extent_io_tree *tree,
3497 struct address_space *mapping,
3498 struct list_head *pages, unsigned nr_pages,
3499 get_extent_t get_extent)
3501 struct bio *bio = NULL;
3503 unsigned long bio_flags = 0;
3505 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
3506 struct page *page = list_entry(pages->prev, struct page, lru);
3508 prefetchw(&page->flags);
3509 list_del(&page->lru);
3510 if (!add_to_page_cache_lru(page, mapping,
3511 page->index, GFP_NOFS)) {
3512 __extent_read_full_page(tree, page, get_extent,
3513 &bio, 0, &bio_flags);
3515 page_cache_release(page);
3517 BUG_ON(!list_empty(pages));
3519 submit_one_bio(READ, bio, 0, bio_flags);
3524 * basic invalidatepage code, this waits on any locked or writeback
3525 * ranges corresponding to the page, and then deletes any extent state
3526 * records from the tree
3528 int extent_invalidatepage(struct extent_io_tree *tree,
3529 struct page *page, unsigned long offset)
3531 struct extent_state *cached_state = NULL;
3532 u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
3533 u64 end = start + PAGE_CACHE_SIZE - 1;
3534 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
3536 start += (offset + blocksize - 1) & ~(blocksize - 1);
3540 lock_extent_bits(tree, start, end, 0, &cached_state, GFP_NOFS);
3541 wait_on_page_writeback(page);
3542 clear_extent_bit(tree, start, end,
3543 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
3544 EXTENT_DO_ACCOUNTING,
3545 1, 1, &cached_state, GFP_NOFS);
3550 * a helper for releasepage, this tests for areas of the page that
3551 * are locked or under IO and drops the related state bits if it is safe
3554 int try_release_extent_state(struct extent_map_tree *map,
3555 struct extent_io_tree *tree, struct page *page,
3558 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
3559 u64 end = start + PAGE_CACHE_SIZE - 1;
3562 if (test_range_bit(tree, start, end,
3563 EXTENT_IOBITS, 0, NULL))
3566 if ((mask & GFP_NOFS) == GFP_NOFS)
3569 * at this point we can safely clear everything except the
3570 * locked bit and the nodatasum bit
3572 ret = clear_extent_bit(tree, start, end,
3573 ~(EXTENT_LOCKED | EXTENT_NODATASUM),
3576 /* if clear_extent_bit failed for enomem reasons,
3577 * we can't allow the release to continue.
3588 * a helper for releasepage. As long as there are no locked extents
3589 * in the range corresponding to the page, both state records and extent
3590 * map records are removed
3592 int try_release_extent_mapping(struct extent_map_tree *map,
3593 struct extent_io_tree *tree, struct page *page,
3596 struct extent_map *em;
3597 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
3598 u64 end = start + PAGE_CACHE_SIZE - 1;
3600 if ((mask & __GFP_WAIT) &&
3601 page->mapping->host->i_size > 16 * 1024 * 1024) {
3603 while (start <= end) {
3604 len = end - start + 1;
3605 write_lock(&map->lock);
3606 em = lookup_extent_mapping(map, start, len);
3608 write_unlock(&map->lock);
3611 if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
3612 em->start != start) {
3613 write_unlock(&map->lock);
3614 free_extent_map(em);
3617 if (!test_range_bit(tree, em->start,
3618 extent_map_end(em) - 1,
3619 EXTENT_LOCKED | EXTENT_WRITEBACK,
3621 remove_extent_mapping(map, em);
3622 /* once for the rb tree */
3623 free_extent_map(em);
3625 start = extent_map_end(em);
3626 write_unlock(&map->lock);
3629 free_extent_map(em);
3632 return try_release_extent_state(map, tree, page, mask);
3636 * helper function for fiemap, which doesn't want to see any holes.
3637 * This maps until we find something past 'last'
3639 static struct extent_map *get_extent_skip_holes(struct inode *inode,
3642 get_extent_t *get_extent)
3644 u64 sectorsize = BTRFS_I(inode)->root->sectorsize;
3645 struct extent_map *em;
3652 len = last - offset;
3655 len = (len + sectorsize - 1) & ~(sectorsize - 1);
3656 em = get_extent(inode, NULL, 0, offset, len, 0);
3657 if (IS_ERR_OR_NULL(em))
3660 /* if this isn't a hole return it */
3661 if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags) &&
3662 em->block_start != EXTENT_MAP_HOLE) {
3666 /* this is a hole, advance to the next extent */
3667 offset = extent_map_end(em);
3668 free_extent_map(em);
3675 int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3676 __u64 start, __u64 len, get_extent_t *get_extent)
3680 u64 max = start + len;
3684 u64 last_for_get_extent = 0;
3686 u64 isize = i_size_read(inode);
3687 struct btrfs_key found_key;
3688 struct extent_map *em = NULL;
3689 struct extent_state *cached_state = NULL;
3690 struct btrfs_path *path;
3691 struct btrfs_file_extent_item *item;
3696 unsigned long emflags;
3701 path = btrfs_alloc_path();
3704 path->leave_spinning = 1;
3706 start = ALIGN(start, BTRFS_I(inode)->root->sectorsize);
3707 len = ALIGN(len, BTRFS_I(inode)->root->sectorsize);
3710 * lookup the last file extent. We're not using i_size here
3711 * because there might be preallocation past i_size
3713 ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root,
3714 path, btrfs_ino(inode), -1, 0);
3716 btrfs_free_path(path);
3721 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3722 struct btrfs_file_extent_item);
3723 btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
3724 found_type = btrfs_key_type(&found_key);
3726 /* No extents, but there might be delalloc bits */
3727 if (found_key.objectid != btrfs_ino(inode) ||
3728 found_type != BTRFS_EXTENT_DATA_KEY) {
3729 /* have to trust i_size as the end */
3731 last_for_get_extent = isize;
3734 * remember the start of the last extent. There are a
3735 * bunch of different factors that go into the length of the
3736 * extent, so it's much less complex to remember where it started
3738 last = found_key.offset;
3739 last_for_get_extent = last + 1;
3741 btrfs_free_path(path);
3744 * we might have some extents allocated but more delalloc past those
3745 * extents. so, we trust isize unless the start of the last extent is
3750 last_for_get_extent = isize;
3753 lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
3754 &cached_state, GFP_NOFS);
3756 em = get_extent_skip_holes(inode, start, last_for_get_extent,
3766 u64 offset_in_extent;
3768 /* break if the extent we found is outside the range */
3769 if (em->start >= max || extent_map_end(em) < off)
3773 * get_extent may return an extent that starts before our
3774 * requested range. We have to make sure the ranges
3775 * we return to fiemap always move forward and don't
3776 * overlap, so adjust the offsets here
3778 em_start = max(em->start, off);
3781 * record the offset from the start of the extent
3782 * for adjusting the disk offset below
3784 offset_in_extent = em_start - em->start;
3785 em_end = extent_map_end(em);
3786 em_len = em_end - em_start;
3787 emflags = em->flags;
3792 * bump off for our next call to get_extent
3794 off = extent_map_end(em);
3798 if (em->block_start == EXTENT_MAP_LAST_BYTE) {
3800 flags |= FIEMAP_EXTENT_LAST;
3801 } else if (em->block_start == EXTENT_MAP_INLINE) {
3802 flags |= (FIEMAP_EXTENT_DATA_INLINE |
3803 FIEMAP_EXTENT_NOT_ALIGNED);
3804 } else if (em->block_start == EXTENT_MAP_DELALLOC) {
3805 flags |= (FIEMAP_EXTENT_DELALLOC |
3806 FIEMAP_EXTENT_UNKNOWN);
3808 disko = em->block_start + offset_in_extent;
3810 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
3811 flags |= FIEMAP_EXTENT_ENCODED;
3813 free_extent_map(em);
3815 if ((em_start >= last) || em_len == (u64)-1 ||
3816 (last == (u64)-1 && isize <= em_end)) {
3817 flags |= FIEMAP_EXTENT_LAST;
3821 /* now scan forward to see if this is really the last extent. */
3822 em = get_extent_skip_holes(inode, off, last_for_get_extent,
3829 flags |= FIEMAP_EXTENT_LAST;
3832 ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
3838 free_extent_map(em);
3840 unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len,
3841 &cached_state, GFP_NOFS);
3845 inline struct page *extent_buffer_page(struct extent_buffer *eb,
3848 return eb->pages[i];
3851 inline unsigned long num_extent_pages(u64 start, u64 len)
3853 return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
3854 (start >> PAGE_CACHE_SHIFT);
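/*
 * Worked example, assuming 4K pages (PAGE_CACHE_SHIFT == 12): a buffer at
 * start == 7168 with len == 4096 covers bytes 7168..11263, which touch
 * pages 1 and 2.  The formula gives
 * ((7168 + 4096 + 4095) >> 12) - (7168 >> 12) == 3 - 1 == 2,
 * so an unaligned buffer of one page's worth of data still needs two pages.
 */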
3857 static void __free_extent_buffer(struct extent_buffer *eb)
3860 unsigned long flags;
3861 spin_lock_irqsave(&leak_lock, flags);
3862 list_del(&eb->leak_list);
3863 spin_unlock_irqrestore(&leak_lock, flags);
3865 if (eb->pages && eb->pages != eb->inline_pages)
3867 kmem_cache_free(extent_buffer_cache, eb);
3870 static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
3875 struct extent_buffer *eb = NULL;
3877 unsigned long flags;
3880 eb = kmem_cache_zalloc(extent_buffer_cache, mask);
3886 rwlock_init(&eb->lock);
3887 atomic_set(&eb->write_locks, 0);
3888 atomic_set(&eb->read_locks, 0);
3889 atomic_set(&eb->blocking_readers, 0);
3890 atomic_set(&eb->blocking_writers, 0);
3891 atomic_set(&eb->spinning_readers, 0);
3892 atomic_set(&eb->spinning_writers, 0);
3893 eb->lock_nested = 0;
3894 init_waitqueue_head(&eb->write_lock_wq);
3895 init_waitqueue_head(&eb->read_lock_wq);
3898 spin_lock_irqsave(&leak_lock, flags);
3899 list_add(&eb->leak_list, &buffers);
3900 spin_unlock_irqrestore(&leak_lock, flags);
3902 spin_lock_init(&eb->refs_lock);
3903 atomic_set(&eb->refs, 1);
3904 atomic_set(&eb->io_pages, 0);
3906 if (len > MAX_INLINE_EXTENT_BUFFER_SIZE) {
3907 struct page **pages;
3908 int num_pages = (len + PAGE_CACHE_SIZE - 1) >>
3910 pages = kzalloc(num_pages, mask);
3912 __free_extent_buffer(eb);
3917 eb->pages = eb->inline_pages;
3923 static int extent_buffer_under_io(struct extent_buffer *eb)
3925 return (atomic_read(&eb->io_pages) ||
3926 test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
3927 test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
3931 * Helper for releasing extent buffer page.
3933 static void btrfs_release_extent_buffer_page(struct extent_buffer *eb,
3934 unsigned long start_idx)
3936 unsigned long index;
3939 BUG_ON(extent_buffer_under_io(eb));
3941 index = num_extent_pages(eb->start, eb->len);
3942 if (start_idx >= index)
3947 page = extent_buffer_page(eb, index);
3949 spin_lock(&page->mapping->private_lock);
3951 * We do this since we'll remove the pages after we've
3952 * removed the eb from the radix tree, so we could race
3953 * and have this page now attached to the new eb. So
3954 * only clear page_private if it's still connected to
3957 if (PagePrivate(page) &&
3958 page->private == (unsigned long)eb) {
3959 BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
3960 BUG_ON(PageDirty(page));
3961 BUG_ON(PageWriteback(page));
3963 * We need to make sure we haven't been attached to a new eb.
3966 ClearPagePrivate(page);
3967 set_page_private(page, 0);
3968 /* One for the page private */
3969 page_cache_release(page);
3971 spin_unlock(&page->mapping->private_lock);
3973 /* One for when we allocated the page */
3974 page_cache_release(page);
3976 } while (index != start_idx);
3980 * Helper for releasing the extent buffer.
3982 static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
3984 btrfs_release_extent_buffer_page(eb, 0);
3985 __free_extent_buffer(eb);
3988 static void check_buffer_tree_ref(struct extent_buffer *eb)
3990 /* the ref bit is tricky. We have to make sure it is set
3991 * if we have the buffer dirty. Otherwise the
3992 * code to free a buffer can end up dropping a dirty
3995 * Once the ref bit is set, it won't go away while the
3996 * buffer is dirty or in writeback, and it also won't
3997 * go away while we have the reference count on the
4000 * We can't just set the ref bit without bumping the
4001 * ref on the eb because free_extent_buffer might
4002 * see the ref bit and try to clear it. If this happens
4003 * free_extent_buffer might end up dropping our original
4004 * ref by mistake and freeing the page before we are able
4005 * to add one more ref.
4007 * So bump the ref count first, then set the bit. If someone
4008 * beat us to it, drop the ref we added.
4010 if (!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
4011 atomic_inc(&eb->refs);
4012 if (test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4013 atomic_dec(&eb->refs);
4017 static void mark_extent_buffer_accessed(struct extent_buffer *eb)
4019 unsigned long num_pages, i;
4021 check_buffer_tree_ref(eb);
4023 num_pages = num_extent_pages(eb->start, eb->len);
4024 for (i = 0; i < num_pages; i++) {
4025 struct page *p = extent_buffer_page(eb, i);
4026 mark_page_accessed(p);
4030 struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
4031 u64 start, unsigned long len)
4033 unsigned long num_pages = num_extent_pages(start, len);
4035 unsigned long index = start >> PAGE_CACHE_SHIFT;
4036 struct extent_buffer *eb;
4037 struct extent_buffer *exists = NULL;
4039 struct address_space *mapping = tree->mapping;
4044 eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
4045 if (eb && atomic_inc_not_zero(&eb->refs)) {
4047 mark_extent_buffer_accessed(eb);
4052 eb = __alloc_extent_buffer(tree, start, len, GFP_NOFS);
4056 for (i = 0; i < num_pages; i++, index++) {
4057 p = find_or_create_page(mapping, index, GFP_NOFS);
4063 spin_lock(&mapping->private_lock);
4064 if (PagePrivate(p)) {
4066 * We could have already allocated an eb for this page
4067 * and attached one so let's see if we can get a ref on
4068 * the existing eb, and if we can we know it's good and
4069 * we can just return that one, else we know we can just
4070 * overwrite page->private.
4072 exists = (struct extent_buffer *)p->private;
4073 if (atomic_inc_not_zero(&exists->refs)) {
4074 spin_unlock(&mapping->private_lock);
4076 mark_extent_buffer_accessed(exists);
4081 * Do this so attach doesn't complain and we need to
4082 * drop the ref the old guy had.
4084 ClearPagePrivate(p);
4085 WARN_ON(PageDirty(p));
4086 page_cache_release(p);
4088 attach_extent_buffer_page(eb, p);
4089 spin_unlock(&mapping->private_lock);
4090 WARN_ON(PageDirty(p));
4091 mark_page_accessed(p);
4093 if (!PageUptodate(p))
4097 * see below about how we avoid a nasty race with release page
4098 * and why we unlock later
4102 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4104 ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
4108 spin_lock(&tree->buffer_lock);
4109 ret = radix_tree_insert(&tree->buffer, start >> PAGE_CACHE_SHIFT, eb);
4110 if (ret == -EEXIST) {
4111 exists = radix_tree_lookup(&tree->buffer,
4112 start >> PAGE_CACHE_SHIFT);
4113 if (!atomic_inc_not_zero(&exists->refs)) {
4114 spin_unlock(&tree->buffer_lock);
4115 radix_tree_preload_end();
4119 spin_unlock(&tree->buffer_lock);
4120 radix_tree_preload_end();
4121 mark_extent_buffer_accessed(exists);
4124 /* add one reference for the tree */
4125 spin_lock(&eb->refs_lock);
4126 check_buffer_tree_ref(eb);
4127 spin_unlock(&eb->refs_lock);
4128 spin_unlock(&tree->buffer_lock);
4129 radix_tree_preload_end();
4132 * there is a race where release page may have
4133 * tried to find this extent buffer in the radix
4134 * but failed. It will tell the VM it is safe to
4135 * reclaim the page, and it will clear the page private bit.
4136 * We must make sure to set the page private bit properly
4137 * after the extent buffer is in the radix tree so
4138 * it doesn't get lost
4140 SetPageChecked(eb->pages[0]);
4141 for (i = 1; i < num_pages; i++) {
4142 p = extent_buffer_page(eb, i);
4143 ClearPageChecked(p);
4146 unlock_page(eb->pages[0]);
4150 for (i = 0; i < num_pages; i++) {
4152 unlock_page(eb->pages[i]);
4155 if (!atomic_dec_and_test(&eb->refs))
4157 btrfs_release_extent_buffer(eb);
4161 struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
4162 u64 start, unsigned long len)
4164 struct extent_buffer *eb;
4167 eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
4168 if (eb && atomic_inc_not_zero(&eb->refs)) {
4170 mark_extent_buffer_accessed(eb);
4178 static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
4180 struct extent_buffer *eb =
4181 container_of(head, struct extent_buffer, rcu_head);
4183 __free_extent_buffer(eb);
4186 /* Expects to have eb->refs_lock already held */
4187 static void release_extent_buffer(struct extent_buffer *eb, gfp_t mask)
4189 WARN_ON(atomic_read(&eb->refs) == 0);
4190 if (atomic_dec_and_test(&eb->refs)) {
4191 struct extent_io_tree *tree = eb->tree;
4193 spin_unlock(&eb->refs_lock);
4195 spin_lock(&tree->buffer_lock);
4196 radix_tree_delete(&tree->buffer,
4197 eb->start >> PAGE_CACHE_SHIFT);
4198 spin_unlock(&tree->buffer_lock);
4200 /* Should be safe to release our pages at this point */
4201 btrfs_release_extent_buffer_page(eb, 0);
4203 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
4206 spin_unlock(&eb->refs_lock);
4209 void free_extent_buffer(struct extent_buffer *eb)
4214 spin_lock(&eb->refs_lock);
4215 if (atomic_read(&eb->refs) == 2 &&
4216 test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
4217 !extent_buffer_under_io(eb) &&
4218 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4219 atomic_dec(&eb->refs);
4222 * I know this is terrible, but it's temporary until we stop tracking
4223 * the uptodate bits and such for the extent buffers.
4225 release_extent_buffer(eb, GFP_ATOMIC);
4228 void free_extent_buffer_stale(struct extent_buffer *eb)
4233 spin_lock(&eb->refs_lock);
4234 set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
4236 if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
4237 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4238 atomic_dec(&eb->refs);
4239 release_extent_buffer(eb, GFP_NOFS);
4242 int clear_extent_buffer_dirty(struct extent_buffer *eb)
4245 unsigned long num_pages;
4248 num_pages = num_extent_pages(eb->start, eb->len);
4249 WARN_ON(atomic_read(&eb->refs) == 0);
4251 for (i = 0; i < num_pages; i++) {
4252 page = extent_buffer_page(eb, i);
4253 if (!PageDirty(page))
4257 WARN_ON(!PagePrivate(page));
4259 clear_page_dirty_for_io(page);
4260 spin_lock_irq(&page->mapping->tree_lock);
4261 if (!PageDirty(page)) {
4262 radix_tree_tag_clear(&page->mapping->page_tree,
4264 PAGECACHE_TAG_DIRTY);
4266 spin_unlock_irq(&page->mapping->tree_lock);
4267 ClearPageError(page);
4270 WARN_ON(atomic_read(&eb->refs) == 0);
4274 int set_extent_buffer_dirty(struct extent_buffer *eb)
4277 unsigned long num_pages;
4280 check_buffer_tree_ref(eb);
4282 was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
4284 num_pages = num_extent_pages(eb->start, eb->len);
4285 WARN_ON(atomic_read(&eb->refs) == 0);
4286 WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
4288 for (i = 0; i < num_pages; i++)
4289 set_page_dirty(extent_buffer_page(eb, i));
4293 static int range_straddles_pages(u64 start, u64 len)
4295 if (len < PAGE_CACHE_SIZE)
4297 if (start & (PAGE_CACHE_SIZE - 1))
4299 if ((start + len) & (PAGE_CACHE_SIZE - 1))
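/*
 * Worked example, assuming the elided branches above each return 1 and the
 * fall-through returns 0, with 4K pages: start == 8192, len == 8192 is a
 * whole number of aligned pages, so nothing straddles and the result is 0;
 * start == 8192, len == 6000 ends in the middle of a page, the last check
 * fires and the result is 1, which makes extent_range_uptodate() below
 * consult test_range_bit() instead of relying on the page flags alone.
 */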
4304 int clear_extent_buffer_uptodate(struct extent_buffer *eb)
4308 unsigned long num_pages;
4310 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4311 num_pages = num_extent_pages(eb->start, eb->len);
4312 for (i = 0; i < num_pages; i++) {
4313 page = extent_buffer_page(eb, i);
4315 ClearPageUptodate(page);
4320 int set_extent_buffer_uptodate(struct extent_buffer *eb)
4324 unsigned long num_pages;
4326 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4327 num_pages = num_extent_pages(eb->start, eb->len);
4328 for (i = 0; i < num_pages; i++) {
4329 page = extent_buffer_page(eb, i);
4330 SetPageUptodate(page);
4335 int extent_range_uptodate(struct extent_io_tree *tree,
4340 int pg_uptodate = 1;
4342 unsigned long index;
4344 if (range_straddles_pages(start, end - start + 1)) {
4345 ret = test_range_bit(tree, start, end,
4346 EXTENT_UPTODATE, 1, NULL);
4350 while (start <= end) {
4351 index = start >> PAGE_CACHE_SHIFT;
4352 page = find_get_page(tree->mapping, index);
4355 uptodate = PageUptodate(page);
4356 page_cache_release(page);
4361 start += PAGE_CACHE_SIZE;
4366 int extent_buffer_uptodate(struct extent_buffer *eb)
4368 return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4371 int read_extent_buffer_pages(struct extent_io_tree *tree,
4372 struct extent_buffer *eb, u64 start, int wait,
4373 get_extent_t *get_extent, int mirror_num)
4376 unsigned long start_i;
4380 int locked_pages = 0;
4381 int all_uptodate = 1;
4382 unsigned long num_pages;
4383 unsigned long num_reads = 0;
4384 struct bio *bio = NULL;
4385 unsigned long bio_flags = 0;
4387 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
4391 WARN_ON(start < eb->start);
4392 start_i = (start >> PAGE_CACHE_SHIFT) -
4393 (eb->start >> PAGE_CACHE_SHIFT);
4398 num_pages = num_extent_pages(eb->start, eb->len);
4399 for (i = start_i; i < num_pages; i++) {
4400 page = extent_buffer_page(eb, i);
4401 if (wait == WAIT_NONE) {
4402 if (!trylock_page(page))
4408 if (!PageUptodate(page)) {
4415 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4419 clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
4420 eb->failed_mirror = 0;
4421 atomic_set(&eb->io_pages, num_reads);
4422 for (i = start_i; i < num_pages; i++) {
4423 page = extent_buffer_page(eb, i);
4424 if (!PageUptodate(page)) {
4425 ClearPageError(page);
4426 err = __extent_read_full_page(tree, page,
4428 mirror_num, &bio_flags);
4437 submit_one_bio(READ, bio, mirror_num, bio_flags);
4439 if (ret || wait != WAIT_COMPLETE)
4442 for (i = start_i; i < num_pages; i++) {
4443 page = extent_buffer_page(eb, i);
4444 wait_on_page_locked(page);
4445 if (!PageUptodate(page))
4453 while (locked_pages > 0) {
4454 page = extent_buffer_page(eb, i);
4462 void read_extent_buffer(struct extent_buffer *eb, void *dstv,
4463 unsigned long start,
4470 char *dst = (char *)dstv;
4471 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4472 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4474 WARN_ON(start > eb->len);
4475 WARN_ON(start + len > eb->start + eb->len);
4477 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
4480 page = extent_buffer_page(eb, i);
4482 cur = min(len, (PAGE_CACHE_SIZE - offset));
4483 kaddr = page_address(page);
4484 memcpy(dst, kaddr + offset, cur);
4493 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
4494 unsigned long min_len, char **map,
4495 unsigned long *map_start,
4496 unsigned long *map_len)
4498 size_t offset = start & (PAGE_CACHE_SIZE - 1);
4501 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4502 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4503 unsigned long end_i = (start_offset + start + min_len - 1) >>
4510 offset = start_offset;
4514 *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
4517 if (start + min_len > eb->len) {
4518 printk(KERN_ERR "btrfs bad mapping eb start %llu len %lu, "
4519 "wanted %lu %lu\n", (unsigned long long)eb->start,
4520 eb->len, start, min_len);
4525 p = extent_buffer_page(eb, i);
4526 kaddr = page_address(p);
4527 *map = kaddr + offset;
4528 *map_len = PAGE_CACHE_SIZE - offset;
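/*
 * Illustrative usage sketch with a hypothetical caller (not from the
 * original file), assuming the window handed back starts at buffer offset
 * *map_start as the assignments above indicate, and that a nonzero return
 * means the requested range cannot be mapped within a single page:
 */
static int read_u64_from_eb(struct extent_buffer *eb, unsigned long start,
			    u64 *result)
{
	char *kaddr;
	unsigned long map_start;
	unsigned long map_len;
	int err;

	/* ask for an 8 byte window at 'start'; fails if it spans two pages */
	err = map_private_extent_buffer(eb, start, sizeof(u64), &kaddr,
					&map_start, &map_len);
	if (err)
		return err;

	/* kaddr corresponds to buffer offset map_start, so index from there */
	memcpy(result, kaddr + (start - map_start), sizeof(u64));
	return 0;
}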
4532 int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
4533 unsigned long start,
4540 char *ptr = (char *)ptrv;
4541 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4542 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4545 WARN_ON(start > eb->len);
4546 WARN_ON(start + len > eb->start + eb->len);
4548 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
4551 page = extent_buffer_page(eb, i);
4553 cur = min(len, (PAGE_CACHE_SIZE - offset));
4555 kaddr = page_address(page);
4556 ret = memcmp(ptr, kaddr + offset, cur);
4568 void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
4569 unsigned long start, unsigned long len)
4575 char *src = (char *)srcv;
4576 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4577 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4579 WARN_ON(start > eb->len);
4580 WARN_ON(start + len > eb->start + eb->len);
4582 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
4585 page = extent_buffer_page(eb, i);
4586 WARN_ON(!PageUptodate(page));
4588 cur = min(len, PAGE_CACHE_SIZE - offset);
4589 kaddr = page_address(page);
4590 memcpy(kaddr + offset, src, cur);
4599 void memset_extent_buffer(struct extent_buffer *eb, char c,
4600 unsigned long start, unsigned long len)
4606 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4607 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4609 WARN_ON(start > eb->len);
4610 WARN_ON(start + len > eb->start + eb->len);
4612 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
4615 page = extent_buffer_page(eb, i);
4616 WARN_ON(!PageUptodate(page));
4618 cur = min(len, PAGE_CACHE_SIZE - offset);
4619 kaddr = page_address(page);
4620 memset(kaddr + offset, c, cur);
4628 void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
4629 unsigned long dst_offset, unsigned long src_offset,
4632 u64 dst_len = dst->len;
4637 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
4638 unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
4640 WARN_ON(src->len != dst_len);
4642 offset = (start_offset + dst_offset) &
4643 ((unsigned long)PAGE_CACHE_SIZE - 1);
4646 page = extent_buffer_page(dst, i);
4647 WARN_ON(!PageUptodate(page));
4649 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
4651 kaddr = page_address(page);
4652 read_extent_buffer(src, kaddr + offset, src_offset, cur);
4661 static void move_pages(struct page *dst_page, struct page *src_page,
4662 unsigned long dst_off, unsigned long src_off,
4665 char *dst_kaddr = page_address(dst_page);
4666 if (dst_page == src_page) {
4667 memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
4669 char *src_kaddr = page_address(src_page);
4670 char *p = dst_kaddr + dst_off + len;
4671 char *s = src_kaddr + src_off + len;
4678 static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
4680 unsigned long distance = (src > dst) ? src - dst : dst - src;
4681 return distance < len;
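/*
 * Worked example: with src == 100, dst == 160 and len == 80 the distance
 * is 60, which is smaller than len, so the byte ranges [100,180) and
 * [160,240) overlap and copy_pages() below has to fall back to memmove().
 * With len == 50 the same offsets do not overlap and memcpy() is safe.
 */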
4684 static void copy_pages(struct page *dst_page, struct page *src_page,
4685 unsigned long dst_off, unsigned long src_off,
4688 char *dst_kaddr = page_address(dst_page);
4690 int must_memmove = 0;
4692 if (dst_page != src_page) {
4693 src_kaddr = page_address(src_page);
4695 src_kaddr = dst_kaddr;
4696 if (areas_overlap(src_off, dst_off, len))
4701 memmove(dst_kaddr + dst_off, src_kaddr + src_off, len);
4703 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
4706 void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
4707 unsigned long src_offset, unsigned long len)
4710 size_t dst_off_in_page;
4711 size_t src_off_in_page;
4712 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
4713 unsigned long dst_i;
4714 unsigned long src_i;
4716 if (src_offset + len > dst->len) {
4717 printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
4718 "len %lu dst len %lu\n", src_offset, len, dst->len);
4721 if (dst_offset + len > dst->len) {
4722 printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
4723 "len %lu dst len %lu\n", dst_offset, len, dst->len);
4728 dst_off_in_page = (start_offset + dst_offset) &
4729 ((unsigned long)PAGE_CACHE_SIZE - 1);
4730 src_off_in_page = (start_offset + src_offset) &
4731 ((unsigned long)PAGE_CACHE_SIZE - 1);
4733 dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
4734 src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
4736 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
4738 cur = min_t(unsigned long, cur,
4739 (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
4741 copy_pages(extent_buffer_page(dst, dst_i),
4742 extent_buffer_page(dst, src_i),
4743 dst_off_in_page, src_off_in_page, cur);
4751 void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
4752 unsigned long src_offset, unsigned long len)
4755 size_t dst_off_in_page;
4756 size_t src_off_in_page;
4757 unsigned long dst_end = dst_offset + len - 1;
4758 unsigned long src_end = src_offset + len - 1;
4759 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
4760 unsigned long dst_i;
4761 unsigned long src_i;
4763 if (src_offset + len > dst->len) {
4764 printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
4765 "len %lu len %lu\n", src_offset, len, dst->len);
4768 if (dst_offset + len > dst->len) {
4769 printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
4770 "len %lu len %lu\n", dst_offset, len, dst->len);
4773 if (dst_offset < src_offset) {
4774 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
4778 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
4779 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
4781 dst_off_in_page = (start_offset + dst_end) &
4782 ((unsigned long)PAGE_CACHE_SIZE - 1);
4783 src_off_in_page = (start_offset + src_end) &
4784 ((unsigned long)PAGE_CACHE_SIZE - 1);
4786 cur = min_t(unsigned long, len, src_off_in_page + 1);
4787 cur = min(cur, dst_off_in_page + 1);
4788 move_pages(extent_buffer_page(dst, dst_i),
4789 extent_buffer_page(dst, src_i),
4790 dst_off_in_page - cur + 1,
4791 src_off_in_page - cur + 1, cur);
4799 int try_release_extent_buffer(struct page *page, gfp_t mask)
4801 struct extent_buffer *eb;
4804 * We need to make sure nobody is attaching this page to an eb right
4807 spin_lock(&page->mapping->private_lock);
4808 if (!PagePrivate(page)) {
4809 spin_unlock(&page->mapping->private_lock);
4813 eb = (struct extent_buffer *)page->private;
4817 * This is a little awful but should be ok, we need to make sure that
4818 * the eb doesn't disappear out from under us while we're looking at
4821 spin_lock(&eb->refs_lock);
4822 if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
4823 spin_unlock(&eb->refs_lock);
4824 spin_unlock(&page->mapping->private_lock);
4827 spin_unlock(&page->mapping->private_lock);
4829 if ((mask & GFP_NOFS) == GFP_NOFS)
4833 * If tree ref isn't set then we know the ref on this eb is a real ref,
4834 * so just return, this page will likely be freed soon anyway.
4836 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
4837 spin_unlock(&eb->refs_lock);
4840 release_extent_buffer(eb, mask);