btrfs: return void in functions without error conditions
fs/btrfs/extent_io.c  (linux-2.6-block.git)
1 #include <linux/bitops.h>
2 #include <linux/slab.h>
3 #include <linux/bio.h>
4 #include <linux/mm.h>
5 #include <linux/pagemap.h>
6 #include <linux/page-flags.h>
7 #include <linux/module.h>
8 #include <linux/spinlock.h>
9 #include <linux/blkdev.h>
10 #include <linux/swap.h>
11 #include <linux/writeback.h>
12 #include <linux/pagevec.h>
13 #include <linux/prefetch.h>
14 #include <linux/cleancache.h>
15 #include "extent_io.h"
16 #include "extent_map.h"
17 #include "compat.h"
18 #include "ctree.h"
19 #include "btrfs_inode.h"
20 #include "volumes.h"
21 #include "check-integrity.h"
22
23 static struct kmem_cache *extent_state_cache;
24 static struct kmem_cache *extent_buffer_cache;
25
26 static LIST_HEAD(buffers);
27 static LIST_HEAD(states);
28
29 #define LEAK_DEBUG 0
30 #if LEAK_DEBUG
31 static DEFINE_SPINLOCK(leak_lock);
32 #endif
33
34 #define BUFFER_LRU_MAX 64
35
36 struct tree_entry {
37         u64 start;
38         u64 end;
39         struct rb_node rb_node;
40 };
41
42 struct extent_page_data {
43         struct bio *bio;
44         struct extent_io_tree *tree;
45         get_extent_t *get_extent;
46
47         /* tells writepage not to lock the state bits for this range
48          * it still does the unlocking
49          */
50         unsigned int extent_locked:1;
51
52         /* tells the submit_bio code to use a WRITE_SYNC */
53         unsigned int sync_io:1;
54 };
55
56 static inline struct btrfs_fs_info *
57 tree_fs_info(struct extent_io_tree *tree)
58 {
59         return btrfs_sb(tree->mapping->host->i_sb);
60 }
61
62 int __init extent_io_init(void)
63 {
64         extent_state_cache = kmem_cache_create("extent_state",
65                         sizeof(struct extent_state), 0,
66                         SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
67         if (!extent_state_cache)
68                 return -ENOMEM;
69
70         extent_buffer_cache = kmem_cache_create("extent_buffers",
71                         sizeof(struct extent_buffer), 0,
72                         SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
73         if (!extent_buffer_cache)
74                 goto free_state_cache;
75         return 0;
76
77 free_state_cache:
78         kmem_cache_destroy(extent_state_cache);
79         return -ENOMEM;
80 }
81
82 void extent_io_exit(void)
83 {
84         struct extent_state *state;
85         struct extent_buffer *eb;
86
87         while (!list_empty(&states)) {
88                 state = list_entry(states.next, struct extent_state, leak_list);
89                 printk(KERN_ERR "btrfs state leak: start %llu end %llu "
90                        "state %lu in tree %p refs %d\n",
91                        (unsigned long long)state->start,
92                        (unsigned long long)state->end,
93                        state->state, state->tree, atomic_read(&state->refs));
94                 list_del(&state->leak_list);
95                 kmem_cache_free(extent_state_cache, state);
96
97         }
98
99         while (!list_empty(&buffers)) {
100                 eb = list_entry(buffers.next, struct extent_buffer, leak_list);
101                 printk(KERN_ERR "btrfs buffer leak start %llu len %lu "
102                        "refs %d\n", (unsigned long long)eb->start,
103                        eb->len, atomic_read(&eb->refs));
104                 list_del(&eb->leak_list);
105                 kmem_cache_free(extent_buffer_cache, eb);
106         }
107         if (extent_state_cache)
108                 kmem_cache_destroy(extent_state_cache);
109         if (extent_buffer_cache)
110                 kmem_cache_destroy(extent_buffer_cache);
111 }
112
113 void extent_io_tree_init(struct extent_io_tree *tree,
114                          struct address_space *mapping)
115 {
116         tree->state = RB_ROOT;
117         INIT_RADIX_TREE(&tree->buffer, GFP_ATOMIC);
118         tree->ops = NULL;
119         tree->dirty_bytes = 0;
120         spin_lock_init(&tree->lock);
121         spin_lock_init(&tree->buffer_lock);
122         tree->mapping = mapping;
123 }
124
125 static struct extent_state *alloc_extent_state(gfp_t mask)
126 {
127         struct extent_state *state;
128 #if LEAK_DEBUG
129         unsigned long flags;
130 #endif
131
132         state = kmem_cache_alloc(extent_state_cache, mask);
133         if (!state)
134                 return state;
135         state->state = 0;
136         state->private = 0;
137         state->tree = NULL;
138 #if LEAK_DEBUG
139         spin_lock_irqsave(&leak_lock, flags);
140         list_add(&state->leak_list, &states);
141         spin_unlock_irqrestore(&leak_lock, flags);
142 #endif
143         atomic_set(&state->refs, 1);
144         init_waitqueue_head(&state->wq);
145         trace_alloc_extent_state(state, mask, _RET_IP_);
146         return state;
147 }
148
149 void free_extent_state(struct extent_state *state)
150 {
151         if (!state)
152                 return;
153         if (atomic_dec_and_test(&state->refs)) {
154 #if LEAK_DEBUG
155                 unsigned long flags;
156 #endif
157                 WARN_ON(state->tree);
158 #if LEAK_DEBUG
159                 spin_lock_irqsave(&leak_lock, flags);
160                 list_del(&state->leak_list);
161                 spin_unlock_irqrestore(&leak_lock, flags);
162 #endif
163                 trace_free_extent_state(state, _RET_IP_);
164                 kmem_cache_free(extent_state_cache, state);
165         }
166 }
167
168 static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
169                                    struct rb_node *node)
170 {
171         struct rb_node **p = &root->rb_node;
172         struct rb_node *parent = NULL;
173         struct tree_entry *entry;
174
175         while (*p) {
176                 parent = *p;
177                 entry = rb_entry(parent, struct tree_entry, rb_node);
178
179                 if (offset < entry->start)
180                         p = &(*p)->rb_left;
181                 else if (offset > entry->end)
182                         p = &(*p)->rb_right;
183                 else
184                         return parent;
185         }
186
187         entry = rb_entry(node, struct tree_entry, rb_node);
188         rb_link_node(node, parent, p);
189         rb_insert_color(node, root);
190         return NULL;
191 }
192
193 static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
194                                      struct rb_node **prev_ret,
195                                      struct rb_node **next_ret)
196 {
197         struct rb_root *root = &tree->state;
198         struct rb_node *n = root->rb_node;
199         struct rb_node *prev = NULL;
200         struct rb_node *orig_prev = NULL;
201         struct tree_entry *entry;
202         struct tree_entry *prev_entry = NULL;
203
204         while (n) {
205                 entry = rb_entry(n, struct tree_entry, rb_node);
206                 prev = n;
207                 prev_entry = entry;
208
209                 if (offset < entry->start)
210                         n = n->rb_left;
211                 else if (offset > entry->end)
212                         n = n->rb_right;
213                 else
214                         return n;
215         }
216
217         if (prev_ret) {
218                 orig_prev = prev;
219                 while (prev && offset > prev_entry->end) {
220                         prev = rb_next(prev);
221                         prev_entry = rb_entry(prev, struct tree_entry, rb_node);
222                 }
223                 *prev_ret = prev;
224                 prev = orig_prev;
225         }
226
227         if (next_ret) {
228                 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
229                 while (prev && offset < prev_entry->start) {
230                         prev = rb_prev(prev);
231                         prev_entry = rb_entry(prev, struct tree_entry, rb_node);
232                 }
233                 *next_ret = prev;
234         }
235         return NULL;
236 }
237
238 static inline struct rb_node *tree_search(struct extent_io_tree *tree,
239                                           u64 offset)
240 {
241         struct rb_node *prev = NULL;
242         struct rb_node *ret;
243
244         ret = __etree_search(tree, offset, &prev, NULL);
245         if (!ret)
246                 return prev;
247         return ret;
248 }
249
250 static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
251                      struct extent_state *other)
252 {
253         if (tree->ops && tree->ops->merge_extent_hook)
254                 tree->ops->merge_extent_hook(tree->mapping->host, new,
255                                              other);
256 }
257
258 /*
259  * utility function to look for merge candidates inside a given range.
260  * Any extents with matching state are merged together into a single
261  * extent in the tree.  Extents with EXTENT_IOBITS set in their state field
262  * are not merged because the end_io handlers need to be able to do
263  * operations on them without sleeping (or doing allocations/splits).
264  *
265  * This should be called with the tree lock held.
266  */
267 static void merge_state(struct extent_io_tree *tree,
268                         struct extent_state *state)
269 {
270         struct extent_state *other;
271         struct rb_node *other_node;
272
273         if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
274                 return;
275
276         other_node = rb_prev(&state->rb_node);
277         if (other_node) {
278                 other = rb_entry(other_node, struct extent_state, rb_node);
279                 if (other->end == state->start - 1 &&
280                     other->state == state->state) {
281                         merge_cb(tree, state, other);
282                         state->start = other->start;
283                         other->tree = NULL;
284                         rb_erase(&other->rb_node, &tree->state);
285                         free_extent_state(other);
286                 }
287         }
288         other_node = rb_next(&state->rb_node);
289         if (other_node) {
290                 other = rb_entry(other_node, struct extent_state, rb_node);
291                 if (other->start == state->end + 1 &&
292                     other->state == state->state) {
293                         merge_cb(tree, state, other);
294                         state->end = other->end;
295                         other->tree = NULL;
296                         rb_erase(&other->rb_node, &tree->state);
297                         free_extent_state(other);
298                 }
299         }
300 }
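
/*
 * Worked example (editorial note, not part of the original file): given two
 * adjacent states [0, 4095] and [4096, 8191] that carry an identical ->state
 * value with neither EXTENT_IOBITS nor EXTENT_BOUNDARY set, calling
 * merge_state() on either of them leaves a single state [0, 8191] in the
 * tree and frees the other.  States that differ in any bit are left alone.
 */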
301
302 static void set_state_cb(struct extent_io_tree *tree,
303                          struct extent_state *state, int *bits)
304 {
305         if (tree->ops && tree->ops->set_bit_hook)
306                 tree->ops->set_bit_hook(tree->mapping->host, state, bits);
307 }
308
309 static void clear_state_cb(struct extent_io_tree *tree,
310                            struct extent_state *state, int *bits)
311 {
312         if (tree->ops && tree->ops->clear_bit_hook)
313                 tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
314 }
315
316 static void set_state_bits(struct extent_io_tree *tree,
317                            struct extent_state *state, int *bits);
318
319 /*
320  * insert an extent_state struct into the tree.  'bits' are set on the
321  * struct before it is inserted.
322  *
323  * This may return -EEXIST if the extent is already there, in which case the
324  * state struct is freed.
325  *
326  * The tree lock is not taken internally.  This is a utility function and
327  * probably isn't what you want to call (see set/clear_extent_bit).
328  */
329 static int insert_state(struct extent_io_tree *tree,
330                         struct extent_state *state, u64 start, u64 end,
331                         int *bits)
332 {
333         struct rb_node *node;
334
335         if (end < start) {
336                 printk(KERN_ERR "btrfs end < start %llu %llu\n",
337                        (unsigned long long)end,
338                        (unsigned long long)start);
339                 WARN_ON(1);
340         }
341         state->start = start;
342         state->end = end;
343
344         set_state_bits(tree, state, bits);
345
346         node = tree_insert(&tree->state, end, &state->rb_node);
347         if (node) {
348                 struct extent_state *found;
349                 found = rb_entry(node, struct extent_state, rb_node);
350                 printk(KERN_ERR "btrfs found node %llu %llu on insert of "
351                        "%llu %llu\n", (unsigned long long)found->start,
352                        (unsigned long long)found->end,
353                        (unsigned long long)start, (unsigned long long)end);
354                 return -EEXIST;
355         }
356         state->tree = tree;
357         merge_state(tree, state);
358         return 0;
359 }
360
361 static void split_cb(struct extent_io_tree *tree, struct extent_state *orig,
362                      u64 split)
363 {
364         if (tree->ops && tree->ops->split_extent_hook)
365                 tree->ops->split_extent_hook(tree->mapping->host, orig, split);
366 }
367
368 /*
369  * split a given extent state struct in two, inserting the preallocated
370  * struct 'prealloc' as the newly created second half.  'split' indicates an
371  * offset inside 'orig' where it should be split.
372  *
373  * Before calling,
374  * the tree has 'orig' at [orig->start, orig->end].  After calling, there
375  * are two extent state structs in the tree:
376  * prealloc: [orig->start, split - 1]
377  * orig: [ split, orig->end ]
378  *
379  * The tree locks are not taken by this function. They need to be held
380  * by the caller.
381  */
382 static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
383                        struct extent_state *prealloc, u64 split)
384 {
385         struct rb_node *node;
386
387         split_cb(tree, orig, split);
388
389         prealloc->start = orig->start;
390         prealloc->end = split - 1;
391         prealloc->state = orig->state;
392         orig->start = split;
393
394         node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
395         if (node) {
396                 free_extent_state(prealloc);
397                 return -EEXIST;
398         }
399         prealloc->tree = tree;
400         return 0;
401 }
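
/*
 * Illustrative sketch (editorial addition, not in the original file): how a
 * caller such as clear_extent_bit() or set_extent_bit() below drives
 * split_state().  The offsets are made up; tree->lock must already be held
 * and 'prealloc' must have been allocated while sleeping was still allowed.
 */
static int __maybe_unused example_split_usage(struct extent_io_tree *tree,
					      struct extent_state *state,
					      struct extent_state *prealloc)
{
	/*
	 * Suppose 'state' covers [0, 8191].  After this call 'prealloc'
	 * holds [0, 4095] and 'state' holds [4096, 8191], both linked into
	 * the tree.  -EEXIST is only possible if the tree changed under us.
	 */
	return split_state(tree, state, prealloc, 4096);
}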
402
403 /*
404  * utility function to clear some bits in an extent state struct.
405  * it will optionally wake up any one waiting on this state (wake == 1), or
406  * forcibly remove the state from the tree (delete == 1).
407  *
408  * If no bits are set on the state struct after clearing things, the
409  * struct is freed and removed from the tree
410  */
411 static int clear_state_bit(struct extent_io_tree *tree,
412                             struct extent_state *state,
413                             int *bits, int wake)
414 {
415         int bits_to_clear = *bits & ~EXTENT_CTLBITS;
416         int ret = state->state & bits_to_clear;
417
418         if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
419                 u64 range = state->end - state->start + 1;
420                 WARN_ON(range > tree->dirty_bytes);
421                 tree->dirty_bytes -= range;
422         }
423         clear_state_cb(tree, state, bits);
424         state->state &= ~bits_to_clear;
425         if (wake)
426                 wake_up(&state->wq);
427         if (state->state == 0) {
428                 if (state->tree) {
429                         rb_erase(&state->rb_node, &tree->state);
430                         state->tree = NULL;
431                         free_extent_state(state);
432                 } else {
433                         WARN_ON(1);
434                 }
435         } else {
436                 merge_state(tree, state);
437         }
438         return ret;
439 }
440
441 static struct extent_state *
442 alloc_extent_state_atomic(struct extent_state *prealloc)
443 {
444         if (!prealloc)
445                 prealloc = alloc_extent_state(GFP_ATOMIC);
446
447         return prealloc;
448 }
449
450 void extent_io_tree_panic(struct extent_io_tree *tree, int err)
451 {
452         btrfs_panic(tree_fs_info(tree), err, "Locking error: "
453                     "Extent tree was modified by another "
454                     "thread while locked.");
455 }
456
457 /*
458  * clear some bits on a range in the tree.  This may require splitting
459  * or inserting elements in the tree, so the gfp mask is used to
460  * indicate which allocations or sleeping are allowed.
461  *
462  * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
463  * the given range from the tree regardless of state (ie for truncate).
464  *
465  * the range [start, end] is inclusive.
466  *
467  * This takes the tree lock, and returns 0 on success and < 0 on error.
468  */
469 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
470                      int bits, int wake, int delete,
471                      struct extent_state **cached_state,
472                      gfp_t mask)
473 {
474         struct extent_state *state;
475         struct extent_state *cached;
476         struct extent_state *prealloc = NULL;
477         struct rb_node *next_node;
478         struct rb_node *node;
479         u64 last_end;
480         int err;
481         int clear = 0;
482
483         if (delete)
484                 bits |= ~EXTENT_CTLBITS;
485         bits |= EXTENT_FIRST_DELALLOC;
486
487         if (bits & (EXTENT_IOBITS | EXTENT_BOUNDARY))
488                 clear = 1;
489 again:
490         if (!prealloc && (mask & __GFP_WAIT)) {
491                 prealloc = alloc_extent_state(mask);
492                 if (!prealloc)
493                         return -ENOMEM;
494         }
495
496         spin_lock(&tree->lock);
497         if (cached_state) {
498                 cached = *cached_state;
499
500                 if (clear) {
501                         *cached_state = NULL;
502                         cached_state = NULL;
503                 }
504
505                 if (cached && cached->tree && cached->start <= start &&
506                     cached->end > start) {
507                         if (clear)
508                                 atomic_dec(&cached->refs);
509                         state = cached;
510                         goto hit_next;
511                 }
512                 if (clear)
513                         free_extent_state(cached);
514         }
515         /*
516          * this search will find the extents that end after
517          * our range starts
518          */
519         node = tree_search(tree, start);
520         if (!node)
521                 goto out;
522         state = rb_entry(node, struct extent_state, rb_node);
523 hit_next:
524         if (state->start > end)
525                 goto out;
526         WARN_ON(state->end < start);
527         last_end = state->end;
528
529         if (state->end < end && !need_resched())
530                 next_node = rb_next(&state->rb_node);
531         else
532                 next_node = NULL;
533
534         /* the state doesn't have the wanted bits, go ahead */
535         if (!(state->state & bits))
536                 goto next;
537
538         /*
539          *     | ---- desired range ---- |
540          *  | state | or
541          *  | ------------- state -------------- |
542          *
543          * We need to split the extent we found, and may flip
544          * bits on second half.
545          *
546          * If the extent we found extends past our range, we
547          * just split and search again.  It'll get split again
548          * the next time though.
549          *
550          * If the extent we found is inside our range, we clear
551          * the desired bit on it.
552          */
553
554         if (state->start < start) {
555                 prealloc = alloc_extent_state_atomic(prealloc);
556                 BUG_ON(!prealloc);
557                 err = split_state(tree, state, prealloc, start);
558                 if (err)
559                         extent_io_tree_panic(tree, err);
560
561                 prealloc = NULL;
562                 if (err)
563                         goto out;
564                 if (state->end <= end) {
565                         clear_state_bit(tree, state, &bits, wake);
566                         if (last_end == (u64)-1)
567                                 goto out;
568                         start = last_end + 1;
569                 }
570                 goto search_again;
571         }
572         /*
573          * | ---- desired range ---- |
574          *                        | state |
575          * We need to split the extent, and clear the bit
576          * on the first half
577          */
578         if (state->start <= end && state->end > end) {
579                 prealloc = alloc_extent_state_atomic(prealloc);
580                 BUG_ON(!prealloc);
581                 err = split_state(tree, state, prealloc, end + 1);
582                 if (err)
583                         extent_io_tree_panic(tree, err);
584
585                 if (wake)
586                         wake_up(&state->wq);
587
588                 clear_state_bit(tree, prealloc, &bits, wake);
589
590                 prealloc = NULL;
591                 goto out;
592         }
593
594         clear_state_bit(tree, state, &bits, wake);
595 next:
596         if (last_end == (u64)-1)
597                 goto out;
598         start = last_end + 1;
599         if (start <= end && next_node) {
600                 state = rb_entry(next_node, struct extent_state,
601                                  rb_node);
602                 goto hit_next;
603         }
604         goto search_again;
605
606 out:
607         spin_unlock(&tree->lock);
608         if (prealloc)
609                 free_extent_state(prealloc);
610
611         return 0;
612
613 search_again:
614         if (start > end)
615                 goto out;
616         spin_unlock(&tree->lock);
617         if (mask & __GFP_WAIT)
618                 cond_resched();
619         goto again;
620 }
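
/*
 * Usage sketch (editorial addition, not in the original file): clearing
 * dirty-tracking bits on an inclusive byte range, the same pattern the
 * clear_extent_dirty() wrapper below uses.  'tree' and the offsets are
 * assumed to come from the caller.
 */
static int __maybe_unused example_clear_usage(struct extent_io_tree *tree,
					      u64 start, u64 end)
{
	/* wake == 0, delete == 0, no cached state; may sleep under GFP_NOFS */
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0,
				NULL, GFP_NOFS);
}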
621
622 static void wait_on_state(struct extent_io_tree *tree,
623                           struct extent_state *state)
624                 __releases(tree->lock)
625                 __acquires(tree->lock)
626 {
627         DEFINE_WAIT(wait);
628         prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
629         spin_unlock(&tree->lock);
630         schedule();
631         spin_lock(&tree->lock);
632         finish_wait(&state->wq, &wait);
633 }
634
635 /*
636  * waits for one or more bits to clear on a range in the state tree.
637  * The range [start, end] is inclusive.
638  * The tree lock is taken by this function
639  */
640 void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
641 {
642         struct extent_state *state;
643         struct rb_node *node;
644
645         spin_lock(&tree->lock);
646 again:
647         while (1) {
648                 /*
649                  * this search will find all the extents that end after
650                  * our range starts
651                  */
652                 node = tree_search(tree, start);
653                 if (!node)
654                         break;
655
656                 state = rb_entry(node, struct extent_state, rb_node);
657
658                 if (state->start > end)
659                         goto out;
660
661                 if (state->state & bits) {
662                         start = state->start;
663                         atomic_inc(&state->refs);
664                         wait_on_state(tree, state);
665                         free_extent_state(state);
666                         goto again;
667                 }
668                 start = state->end + 1;
669
670                 if (start > end)
671                         break;
672
673                 cond_resched_lock(&tree->lock);
674         }
675 out:
676         spin_unlock(&tree->lock);
677 }
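
/*
 * Usage sketch (editorial addition, not in the original file): block until
 * every EXTENT_LOCKED range inside [start, end] has been cleared, the way
 * lock_extent_bits() below waits before retrying.  The offsets are
 * placeholders supplied by the caller.
 */
static void __maybe_unused example_wait_usage(struct extent_io_tree *tree,
					      u64 start, u64 end)
{
	/* sleeps uninterruptibly; the caller must not hold tree->lock */
	wait_extent_bit(tree, start, end, EXTENT_LOCKED);
}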
678
679 static void set_state_bits(struct extent_io_tree *tree,
680                            struct extent_state *state,
681                            int *bits)
682 {
683         int bits_to_set = *bits & ~EXTENT_CTLBITS;
684
685         set_state_cb(tree, state, bits);
686         if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
687                 u64 range = state->end - state->start + 1;
688                 tree->dirty_bytes += range;
689         }
690         state->state |= bits_to_set;
691 }
692
693 static void cache_state(struct extent_state *state,
694                         struct extent_state **cached_ptr)
695 {
696         if (cached_ptr && !(*cached_ptr)) {
697                 if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY)) {
698                         *cached_ptr = state;
699                         atomic_inc(&state->refs);
700                 }
701         }
702 }
703
704 static void uncache_state(struct extent_state **cached_ptr)
705 {
706         if (cached_ptr && (*cached_ptr)) {
707                 struct extent_state *state = *cached_ptr;
708                 *cached_ptr = NULL;
709                 free_extent_state(state);
710         }
711 }
712
713 /*
714  * set some bits on a range in the tree.  This may require allocations or
715  * sleeping, so the gfp mask is used to indicate what is allowed.
716  *
717  * If any of the exclusive bits are set, this will fail with -EEXIST if some
718  * part of the range already has the desired bits set.  The start of the
719  * existing range is returned in failed_start in this case.
720  *
722  * [start, end] is inclusive.  This takes the tree lock.
722  */
723
724 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
725                    int bits, int exclusive_bits, u64 *failed_start,
726                    struct extent_state **cached_state, gfp_t mask)
727 {
728         struct extent_state *state;
729         struct extent_state *prealloc = NULL;
730         struct rb_node *node;
731         int err = 0;
732         u64 last_start;
733         u64 last_end;
734
735         bits |= EXTENT_FIRST_DELALLOC;
736 again:
737         if (!prealloc && (mask & __GFP_WAIT)) {
738                 prealloc = alloc_extent_state(mask);
739                 BUG_ON(!prealloc);
740         }
741
742         spin_lock(&tree->lock);
743         if (cached_state && *cached_state) {
744                 state = *cached_state;
745                 if (state->start <= start && state->end > start &&
746                     state->tree) {
747                         node = &state->rb_node;
748                         goto hit_next;
749                 }
750         }
751         /*
752          * this search will find all the extents that end after
753          * our range starts.
754          */
755         node = tree_search(tree, start);
756         if (!node) {
757                 prealloc = alloc_extent_state_atomic(prealloc);
758                 BUG_ON(!prealloc);
759                 err = insert_state(tree, prealloc, start, end, &bits);
760                 if (err)
761                         extent_io_tree_panic(tree, err);
762
763                 prealloc = NULL;
764                 goto out;
765         }
766         state = rb_entry(node, struct extent_state, rb_node);
767 hit_next:
768         last_start = state->start;
769         last_end = state->end;
770
771         /*
772          * | ---- desired range ---- |
773          * | state |
774          *
775          * Just lock what we found and keep going
776          */
777         if (state->start == start && state->end <= end) {
778                 struct rb_node *next_node;
779                 if (state->state & exclusive_bits) {
780                         *failed_start = state->start;
781                         err = -EEXIST;
782                         goto out;
783                 }
784
785                 set_state_bits(tree, state, &bits);
786
787                 cache_state(state, cached_state);
788                 merge_state(tree, state);
789                 if (last_end == (u64)-1)
790                         goto out;
791
792                 start = last_end + 1;
793                 next_node = rb_next(&state->rb_node);
794                 if (next_node && start < end && prealloc && !need_resched()) {
795                         state = rb_entry(next_node, struct extent_state,
796                                          rb_node);
797                         if (state->start == start)
798                                 goto hit_next;
799                 }
800                 goto search_again;
801         }
802
803         /*
804          *     | ---- desired range ---- |
805          * | state |
806          *   or
807          * | ------------- state -------------- |
808          *
809          * We need to split the extent we found, and may flip bits on
810          * second half.
811          *
812          * If the extent we found extends past our
813          * range, we just split and search again.  It'll get split
814          * again the next time though.
815          *
816          * If the extent we found is inside our range, we set the
817          * desired bit on it.
818          */
819         if (state->start < start) {
820                 if (state->state & exclusive_bits) {
821                         *failed_start = start;
822                         err = -EEXIST;
823                         goto out;
824                 }
825
826                 prealloc = alloc_extent_state_atomic(prealloc);
827                 BUG_ON(!prealloc);
828                 err = split_state(tree, state, prealloc, start);
829                 if (err)
830                         extent_io_tree_panic(tree, err);
831
832                 prealloc = NULL;
833                 if (err)
834                         goto out;
835                 if (state->end <= end) {
836                         set_state_bits(tree, state, &bits);
837                         cache_state(state, cached_state);
838                         merge_state(tree, state);
839                         if (last_end == (u64)-1)
840                                 goto out;
841                         start = last_end + 1;
842                 }
843                 goto search_again;
844         }
845         /*
846          * | ---- desired range ---- |
847          *     | state | or               | state |
848          *
849          * There's a hole, we need to insert something in it and
850          * ignore the extent we found.
851          */
852         if (state->start > start) {
853                 u64 this_end;
854                 if (end < last_start)
855                         this_end = end;
856                 else
857                         this_end = last_start - 1;
858
859                 prealloc = alloc_extent_state_atomic(prealloc);
860                 BUG_ON(!prealloc);
861
862                 /*
863                  * Avoid to free 'prealloc' if it can be merged with
864                  * the later extent.
865                  */
866                 err = insert_state(tree, prealloc, start, this_end,
867                                    &bits);
868                 if (err)
869                         extent_io_tree_panic(tree, err);
870
871                 cache_state(prealloc, cached_state);
872                 prealloc = NULL;
873                 start = this_end + 1;
874                 goto search_again;
875         }
876         /*
877          * | ---- desired range ---- |
878          *                        | state |
879          * We need to split the extent, and set the bit
880          * on the first half
881          */
882         if (state->start <= end && state->end > end) {
883                 if (state->state & exclusive_bits) {
884                         *failed_start = start;
885                         err = -EEXIST;
886                         goto out;
887                 }
888
889                 prealloc = alloc_extent_state_atomic(prealloc);
890                 BUG_ON(!prealloc);
891                 err = split_state(tree, state, prealloc, end + 1);
892                 if (err)
893                         extent_io_tree_panic(tree, err);
894
895                 set_state_bits(tree, prealloc, &bits);
896                 cache_state(prealloc, cached_state);
897                 merge_state(tree, prealloc);
898                 prealloc = NULL;
899                 goto out;
900         }
901
902         goto search_again;
903
904 out:
905         spin_unlock(&tree->lock);
906         if (prealloc)
907                 free_extent_state(prealloc);
908
909         return err;
910
911 search_again:
912         if (start > end)
913                 goto out;
914         spin_unlock(&tree->lock);
915         if (mask & __GFP_WAIT)
916                 cond_resched();
917         goto again;
918 }
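
/*
 * Usage sketch (editorial addition, not in the original file): setting a bit
 * exclusively, the pattern lock_extent_bits() below is built on.  If part of
 * the range already has EXTENT_LOCKED set, -EEXIST comes back and
 * 'failed_start' reports where the conflict begins so the caller can wait
 * and retry.
 */
static int __maybe_unused example_set_exclusive(struct extent_io_tree *tree,
						u64 start, u64 end)
{
	u64 failed_start;
	int err;

	err = set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
			     &failed_start, NULL, GFP_NOFS);
	if (err == -EEXIST)
		wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
	return err;
}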
919
920 /**
921  * convert_extent_bit - convert all bits in a given range from one bit to another
922  * @tree:       the io tree to search
923  * @start:      the start offset in bytes
924  * @end:        the end offset in bytes (inclusive)
925  * @bits:       the bits to set in this range
926  * @clear_bits: the bits to clear in this range
927  * @mask:       the allocation mask
928  *
929  * This will go through and set bits for the given range.  If any states exist
930  * already in this range they are set with the given bit and cleared of the
931  * clear_bits.  This is only meant to be used by things that are mergeable, ie
932  * converting from say DELALLOC to DIRTY.  This is not meant to be used with
933  * boundary bits like LOCK.
934  */
935 int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
936                        int bits, int clear_bits, gfp_t mask)
937 {
938         struct extent_state *state;
939         struct extent_state *prealloc = NULL;
940         struct rb_node *node;
941         int err = 0;
942         u64 last_start;
943         u64 last_end;
944
945 again:
946         if (!prealloc && (mask & __GFP_WAIT)) {
947                 prealloc = alloc_extent_state(mask);
948                 if (!prealloc)
949                         return -ENOMEM;
950         }
951
952         spin_lock(&tree->lock);
953         /*
954          * this search will find all the extents that end after
955          * our range starts.
956          */
957         node = tree_search(tree, start);
958         if (!node) {
959                 prealloc = alloc_extent_state_atomic(prealloc);
960                 if (!prealloc) {
961                         err = -ENOMEM;
962                         goto out;
963                 }
964                 err = insert_state(tree, prealloc, start, end, &bits);
965                 prealloc = NULL;
966                 if (err)
967                         extent_io_tree_panic(tree, err);
968                 goto out;
969         }
970         state = rb_entry(node, struct extent_state, rb_node);
971 hit_next:
972         last_start = state->start;
973         last_end = state->end;
974
975         /*
976          * | ---- desired range ---- |
977          * | state |
978          *
979          * Just lock what we found and keep going
980          */
981         if (state->start == start && state->end <= end) {
982                 struct rb_node *next_node;
983
984                 set_state_bits(tree, state, &bits);
985                 clear_state_bit(tree, state, &clear_bits, 0);
986                 if (last_end == (u64)-1)
987                         goto out;
988
989                 start = last_end + 1;
990                 next_node = rb_next(&state->rb_node);
991                 if (next_node && start < end && prealloc && !need_resched()) {
992                         state = rb_entry(next_node, struct extent_state,
993                                          rb_node);
994                         if (state->start == start)
995                                 goto hit_next;
996                 }
997                 goto search_again;
998         }
999
1000         /*
1001          *     | ---- desired range ---- |
1002          * | state |
1003          *   or
1004          * | ------------- state -------------- |
1005          *
1006          * We need to split the extent we found, and may flip bits on
1007          * second half.
1008          *
1009          * If the extent we found extends past our
1010          * range, we just split and search again.  It'll get split
1011          * again the next time though.
1012          *
1013          * If the extent we found is inside our range, we set the
1014          * desired bit on it.
1015          */
1016         if (state->start < start) {
1017                 prealloc = alloc_extent_state_atomic(prealloc);
1018                 if (!prealloc) {
1019                         err = -ENOMEM;
1020                         goto out;
1021                 }
1022                 err = split_state(tree, state, prealloc, start);
1023                 if (err)
1024                         extent_io_tree_panic(tree, err);
1025                 prealloc = NULL;
1026                 if (err)
1027                         goto out;
1028                 if (state->end <= end) {
1029                         set_state_bits(tree, state, &bits);
1030                         clear_state_bit(tree, state, &clear_bits, 0);
1031                         if (last_end == (u64)-1)
1032                                 goto out;
1033                         start = last_end + 1;
1034                 }
1035                 goto search_again;
1036         }
1037         /*
1038          * | ---- desired range ---- |
1039          *     | state | or               | state |
1040          *
1041          * There's a hole, we need to insert something in it and
1042          * ignore the extent we found.
1043          */
1044         if (state->start > start) {
1045                 u64 this_end;
1046                 if (end < last_start)
1047                         this_end = end;
1048                 else
1049                         this_end = last_start - 1;
1050
1051                 prealloc = alloc_extent_state_atomic(prealloc);
1052                 if (!prealloc) {
1053                         err = -ENOMEM;
1054                         goto out;
1055                 }
1056
1057                 /*
1058                  * Avoid to free 'prealloc' if it can be merged with
1059                  * the later extent.
1060                  */
1061                 err = insert_state(tree, prealloc, start, this_end,
1062                                    &bits);
1063                 if (err)
1064                         extent_io_tree_panic(tree, err);
1065                 prealloc = NULL;
1066                 start = this_end + 1;
1067                 goto search_again;
1068         }
1069         /*
1070          * | ---- desired range ---- |
1071          *                        | state |
1072          * We need to split the extent, and set the bit
1073          * on the first half
1074          */
1075         if (state->start <= end && state->end > end) {
1076                 prealloc = alloc_extent_state_atomic(prealloc);
1077                 if (!prealloc) {
1078                         err = -ENOMEM;
1079                         goto out;
1080                 }
1081
1082                 err = split_state(tree, state, prealloc, end + 1);
1083                 if (err)
1084                         extent_io_tree_panic(tree, err);
1085
1086                 set_state_bits(tree, prealloc, &bits);
1087                 clear_state_bit(tree, prealloc, &clear_bits, 0);
1088                 prealloc = NULL;
1089                 goto out;
1090         }
1091
1092         goto search_again;
1093
1094 out:
1095         spin_unlock(&tree->lock);
1096         if (prealloc)
1097                 free_extent_state(prealloc);
1098
1099         return err;
1100
1101 search_again:
1102         if (start > end)
1103                 goto out;
1104         spin_unlock(&tree->lock);
1105         if (mask & __GFP_WAIT)
1106                 cond_resched();
1107         goto again;
1108 }
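
/*
 * Usage sketch (editorial addition, not in the original file): flipping every
 * state in a range from one mergeable bit to another in one pass, e.g.
 * marking a delalloc range dirty while dropping the delalloc bit.  Boundary
 * bits such as EXTENT_LOCKED must not be used here, as the comment above
 * explains; the offsets are placeholders.
 */
static int __maybe_unused example_convert_usage(struct extent_io_tree *tree,
						u64 start, u64 end)
{
	return convert_extent_bit(tree, start, end, EXTENT_DIRTY,
				  EXTENT_DELALLOC, GFP_NOFS);
}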
1109
1110 /* wrappers around set/clear extent bit */
1111 int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
1112                      gfp_t mask)
1113 {
1114         return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
1115                               NULL, mask);
1116 }
1117
1118 int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1119                     int bits, gfp_t mask)
1120 {
1121         return set_extent_bit(tree, start, end, bits, 0, NULL,
1122                               NULL, mask);
1123 }
1124
1125 int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1126                       int bits, gfp_t mask)
1127 {
1128         return clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask);
1129 }
1130
1131 int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
1132                         struct extent_state **cached_state, gfp_t mask)
1133 {
1134         return set_extent_bit(tree, start, end,
1135                               EXTENT_DELALLOC | EXTENT_UPTODATE,
1136                               0, NULL, cached_state, mask);
1137 }
1138
1139 int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
1140                        gfp_t mask)
1141 {
1142         return clear_extent_bit(tree, start, end,
1143                                 EXTENT_DIRTY | EXTENT_DELALLOC |
1144                                 EXTENT_DO_ACCOUNTING, 0, 0, NULL, mask);
1145 }
1146
1147 int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
1148                      gfp_t mask)
1149 {
1150         return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
1151                               NULL, mask);
1152 }
1153
1154 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
1155                         struct extent_state **cached_state, gfp_t mask)
1156 {
1157         return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0,
1158                               NULL, cached_state, mask);
1159 }
1160
1161 static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
1162                                  u64 end, struct extent_state **cached_state,
1163                                  gfp_t mask)
1164 {
1165         return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
1166                                 cached_state, mask);
1167 }
1168
1169 /*
1170  * either insert or lock state struct between start and end.  Use mask to tell
1171  * us if waiting is desired.
1172  */
1173 int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1174                      int bits, struct extent_state **cached_state, gfp_t mask)
1175 {
1176         int err;
1177         u64 failed_start;
1178         while (1) {
1179                 err = set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
1180                                      EXTENT_LOCKED, &failed_start,
1181                                      cached_state, mask);
1182                 if (err == -EEXIST && (mask & __GFP_WAIT)) {
1183                         wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
1184                         start = failed_start;
1185                 } else {
1186                         break;
1187                 }
1188                 WARN_ON(start > end);
1189         }
1190         return err;
1191 }
1192
1193 int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
1194 {
1195         return lock_extent_bits(tree, start, end, 0, NULL, mask);
1196 }
1197
1198 int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
1199                     gfp_t mask)
1200 {
1201         int err;
1202         u64 failed_start;
1203
1204         err = set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
1205                              &failed_start, NULL, mask);
1206         if (err == -EEXIST) {
1207                 if (failed_start > start)
1208                         clear_extent_bit(tree, start, failed_start - 1,
1209                                          EXTENT_LOCKED, 1, 0, NULL, mask);
1210                 return 0;
1211         }
1212         return 1;
1213 }
1214
1215 int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
1216                          struct extent_state **cached, gfp_t mask)
1217 {
1218         return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
1219                                 mask);
1220 }
1221
1222 int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
1223 {
1224         return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
1225                                 mask);
1226 }
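
/*
 * Usage sketch (editorial addition, not in the original file): the
 * lock/unlock pairing used throughout btrfs.  The cached state filled in by
 * lock_extent_bits() lets the matching unlock skip the tree search; 'tree',
 * 'start' and 'end' are placeholders from the caller.
 */
static int __maybe_unused example_lock_usage(struct extent_io_tree *tree,
					     u64 start, u64 end)
{
	struct extent_state *cached_state = NULL;

	lock_extent_bits(tree, start, end, 0, &cached_state, GFP_NOFS);

	/* ... operate on the locked range ... */

	return unlock_extent_cached(tree, start, end, &cached_state, GFP_NOFS);
}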
1227
1228 /*
1229  * helper function to set both pages and extents in the tree writeback
1230  */
1231 static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
1232 {
1233         unsigned long index = start >> PAGE_CACHE_SHIFT;
1234         unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1235         struct page *page;
1236
1237         while (index <= end_index) {
1238                 page = find_get_page(tree->mapping, index);
1239                 BUG_ON(!page);
1240                 set_page_writeback(page);
1241                 page_cache_release(page);
1242                 index++;
1243         }
1244         return 0;
1245 }
1246
1247 /* find the first state struct with 'bits' set after 'start', and
1248  * return it.  tree->lock must be held.  NULL will be returned if
1249  * nothing was found after 'start'
1250  */
1251 struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
1252                                                  u64 start, int bits)
1253 {
1254         struct rb_node *node;
1255         struct extent_state *state;
1256
1257         /*
1258          * this search will find all the extents that end after
1259          * our range starts.
1260          */
1261         node = tree_search(tree, start);
1262         if (!node)
1263                 goto out;
1264
1265         while (1) {
1266                 state = rb_entry(node, struct extent_state, rb_node);
1267                 if (state->end >= start && (state->state & bits))
1268                         return state;
1269
1270                 node = rb_next(node);
1271                 if (!node)
1272                         break;
1273         }
1274 out:
1275         return NULL;
1276 }
1277
1278 /*
1279  * find the first offset in the io tree with 'bits' set. zero is
1280  * returned if we find something, and *start_ret and *end_ret are
1281  * set to reflect the state struct that was found.
1282  *
1283  * If nothing was found, 1 is returned, < 0 on error
1284  */
1285 int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
1286                           u64 *start_ret, u64 *end_ret, int bits)
1287 {
1288         struct extent_state *state;
1289         int ret = 1;
1290
1291         spin_lock(&tree->lock);
1292         state = find_first_extent_bit_state(tree, start, bits);
1293         if (state) {
1294                 *start_ret = state->start;
1295                 *end_ret = state->end;
1296                 ret = 0;
1297         }
1298         spin_unlock(&tree->lock);
1299         return ret;
1300 }
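
/*
 * Usage sketch (editorial addition, not in the original file): walking every
 * range with a given bit set, restarting the search just past each hit.  The
 * EXTENT_DIRTY bit here is an arbitrary choice.
 */
static void __maybe_unused example_find_usage(struct extent_io_tree *tree)
{
	u64 start = 0;
	u64 found_start;
	u64 found_end;

	while (!find_first_extent_bit(tree, start, &found_start, &found_end,
				      EXTENT_DIRTY)) {
		/* [found_start, found_end] is inclusive */
		if (found_end == (u64)-1)
			break;
		start = found_end + 1;
	}
}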
1301
1302 /*
1303  * find a contiguous range of bytes in the file marked as delalloc, not
1304  * more than 'max_bytes'.  start and end are used to return the range.
1305  *
1306  * 1 is returned if we find something, 0 if nothing was in the tree
1307  */
1308 static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
1309                                         u64 *start, u64 *end, u64 max_bytes,
1310                                         struct extent_state **cached_state)
1311 {
1312         struct rb_node *node;
1313         struct extent_state *state;
1314         u64 cur_start = *start;
1315         u64 found = 0;
1316         u64 total_bytes = 0;
1317
1318         spin_lock(&tree->lock);
1319
1320         /*
1321          * this search will find all the extents that end after
1322          * our range starts.
1323          */
1324         node = tree_search(tree, cur_start);
1325         if (!node) {
1326                 if (!found)
1327                         *end = (u64)-1;
1328                 goto out;
1329         }
1330
1331         while (1) {
1332                 state = rb_entry(node, struct extent_state, rb_node);
1333                 if (found && (state->start != cur_start ||
1334                               (state->state & EXTENT_BOUNDARY))) {
1335                         goto out;
1336                 }
1337                 if (!(state->state & EXTENT_DELALLOC)) {
1338                         if (!found)
1339                                 *end = state->end;
1340                         goto out;
1341                 }
1342                 if (!found) {
1343                         *start = state->start;
1344                         *cached_state = state;
1345                         atomic_inc(&state->refs);
1346                 }
1347                 found++;
1348                 *end = state->end;
1349                 cur_start = state->end + 1;
1350                 node = rb_next(node);
1351                 if (!node)
1352                         break;
1353                 total_bytes += state->end - state->start + 1;
1354                 if (total_bytes >= max_bytes)
1355                         break;
1356         }
1357 out:
1358         spin_unlock(&tree->lock);
1359         return found;
1360 }
1361
1362 static noinline void __unlock_for_delalloc(struct inode *inode,
1363                                            struct page *locked_page,
1364                                            u64 start, u64 end)
1365 {
1366         int ret;
1367         struct page *pages[16];
1368         unsigned long index = start >> PAGE_CACHE_SHIFT;
1369         unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1370         unsigned long nr_pages = end_index - index + 1;
1371         int i;
1372
1373         if (index == locked_page->index && end_index == index)
1374                 return;
1375
1376         while (nr_pages > 0) {
1377                 ret = find_get_pages_contig(inode->i_mapping, index,
1378                                      min_t(unsigned long, nr_pages,
1379                                      ARRAY_SIZE(pages)), pages);
1380                 for (i = 0; i < ret; i++) {
1381                         if (pages[i] != locked_page)
1382                                 unlock_page(pages[i]);
1383                         page_cache_release(pages[i]);
1384                 }
1385                 nr_pages -= ret;
1386                 index += ret;
1387                 cond_resched();
1388         }
1389 }
1390
1391 static noinline int lock_delalloc_pages(struct inode *inode,
1392                                         struct page *locked_page,
1393                                         u64 delalloc_start,
1394                                         u64 delalloc_end)
1395 {
1396         unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT;
1397         unsigned long start_index = index;
1398         unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT;
1399         unsigned long pages_locked = 0;
1400         struct page *pages[16];
1401         unsigned long nrpages;
1402         int ret;
1403         int i;
1404
1405         /* the caller is responsible for locking the start index */
1406         if (index == locked_page->index && index == end_index)
1407                 return 0;
1408
1409         /* skip the page at the start index */
1410         nrpages = end_index - index + 1;
1411         while (nrpages > 0) {
1412                 ret = find_get_pages_contig(inode->i_mapping, index,
1413                                      min_t(unsigned long,
1414                                      nrpages, ARRAY_SIZE(pages)), pages);
1415                 if (ret == 0) {
1416                         ret = -EAGAIN;
1417                         goto done;
1418                 }
1419                 /* now we have an array of pages, lock them all */
1420                 for (i = 0; i < ret; i++) {
1421                         /*
1422                          * the caller is taking responsibility for
1423                          * locked_page
1424                          */
1425                         if (pages[i] != locked_page) {
1426                                 lock_page(pages[i]);
1427                                 if (!PageDirty(pages[i]) ||
1428                                     pages[i]->mapping != inode->i_mapping) {
1429                                         ret = -EAGAIN;
1430                                         unlock_page(pages[i]);
1431                                         page_cache_release(pages[i]);
1432                                         goto done;
1433                                 }
1434                         }
1435                         page_cache_release(pages[i]);
1436                         pages_locked++;
1437                 }
1438                 nrpages -= ret;
1439                 index += ret;
1440                 cond_resched();
1441         }
1442         ret = 0;
1443 done:
1444         if (ret && pages_locked) {
1445                 __unlock_for_delalloc(inode, locked_page,
1446                               delalloc_start,
1447                               ((u64)(start_index + pages_locked - 1)) <<
1448                               PAGE_CACHE_SHIFT);
1449         }
1450         return ret;
1451 }
1452
1453 /*
1454  * find a contiguous range of bytes in the file marked as delalloc, not
1455  * more than 'max_bytes'.  start and end are used to return the range.
1456  *
1457  * 1 is returned if we find something, 0 if nothing was in the tree
1458  */
1459 static noinline u64 find_lock_delalloc_range(struct inode *inode,
1460                                              struct extent_io_tree *tree,
1461                                              struct page *locked_page,
1462                                              u64 *start, u64 *end,
1463                                              u64 max_bytes)
1464 {
1465         u64 delalloc_start;
1466         u64 delalloc_end;
1467         u64 found;
1468         struct extent_state *cached_state = NULL;
1469         int ret;
1470         int loops = 0;
1471
1472 again:
1473         /* step one, find a bunch of delalloc bytes starting at start */
1474         delalloc_start = *start;
1475         delalloc_end = 0;
1476         found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
1477                                     max_bytes, &cached_state);
1478         if (!found || delalloc_end <= *start) {
1479                 *start = delalloc_start;
1480                 *end = delalloc_end;
1481                 free_extent_state(cached_state);
1482                 return found;
1483         }
1484
1485         /*
1486          * start comes from the offset of locked_page.  We have to lock
1487          * pages in order, so we can't process delalloc bytes before
1488          * locked_page
1489          */
1490         if (delalloc_start < *start)
1491                 delalloc_start = *start;
1492
1493         /*
1494          * make sure to limit the number of pages we try to lock down
1495          * if we're looping.
1496          */
1497         if (delalloc_end + 1 - delalloc_start > max_bytes && loops)
1498                 delalloc_end = delalloc_start + PAGE_CACHE_SIZE - 1;
1499
1500         /* step two, lock all the pages after the page that has start */
1501         ret = lock_delalloc_pages(inode, locked_page,
1502                                   delalloc_start, delalloc_end);
1503         if (ret == -EAGAIN) {
1504                 /* some of the pages are gone, lets avoid looping by
1505                  * shortening the size of the delalloc range we're searching
1506                  */
1507                 free_extent_state(cached_state);
1508                 if (!loops) {
1509                         unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1);
1510                         max_bytes = PAGE_CACHE_SIZE - offset;
1511                         loops = 1;
1512                         goto again;
1513                 } else {
1514                         found = 0;
1515                         goto out_failed;
1516                 }
1517         }
1518         BUG_ON(ret);
1519
1520         /* step three, lock the state bits for the whole range */
1521         lock_extent_bits(tree, delalloc_start, delalloc_end,
1522                          0, &cached_state, GFP_NOFS);
1523
1524         /* then test to make sure it is all still delalloc */
1525         ret = test_range_bit(tree, delalloc_start, delalloc_end,
1526                              EXTENT_DELALLOC, 1, cached_state);
1527         if (!ret) {
1528                 unlock_extent_cached(tree, delalloc_start, delalloc_end,
1529                                      &cached_state, GFP_NOFS);
1530                 __unlock_for_delalloc(inode, locked_page,
1531                               delalloc_start, delalloc_end);
1532                 cond_resched();
1533                 goto again;
1534         }
1535         free_extent_state(cached_state);
1536         *start = delalloc_start;
1537         *end = delalloc_end;
1538 out_failed:
1539         return found;
1540 }
1541
1542 int extent_clear_unlock_delalloc(struct inode *inode,
1543                                 struct extent_io_tree *tree,
1544                                 u64 start, u64 end, struct page *locked_page,
1545                                 unsigned long op)
1546 {
1547         int ret;
1548         struct page *pages[16];
1549         unsigned long index = start >> PAGE_CACHE_SHIFT;
1550         unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1551         unsigned long nr_pages = end_index - index + 1;
1552         int i;
1553         int clear_bits = 0;
1554
1555         if (op & EXTENT_CLEAR_UNLOCK)
1556                 clear_bits |= EXTENT_LOCKED;
1557         if (op & EXTENT_CLEAR_DIRTY)
1558                 clear_bits |= EXTENT_DIRTY;
1559
1560         if (op & EXTENT_CLEAR_DELALLOC)
1561                 clear_bits |= EXTENT_DELALLOC;
1562
1563         clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS);
1564         if (!(op & (EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
1565                     EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK |
1566                     EXTENT_SET_PRIVATE2)))
1567                 return 0;
1568
1569         while (nr_pages > 0) {
1570                 ret = find_get_pages_contig(inode->i_mapping, index,
1571                                      min_t(unsigned long,
1572                                      nr_pages, ARRAY_SIZE(pages)), pages);
1573                 for (i = 0; i < ret; i++) {
1574
1575                         if (op & EXTENT_SET_PRIVATE2)
1576                                 SetPagePrivate2(pages[i]);
1577
1578                         if (pages[i] == locked_page) {
1579                                 page_cache_release(pages[i]);
1580                                 continue;
1581                         }
1582                         if (op & EXTENT_CLEAR_DIRTY)
1583                                 clear_page_dirty_for_io(pages[i]);
1584                         if (op & EXTENT_SET_WRITEBACK)
1585                                 set_page_writeback(pages[i]);
1586                         if (op & EXTENT_END_WRITEBACK)
1587                                 end_page_writeback(pages[i]);
1588                         if (op & EXTENT_CLEAR_UNLOCK_PAGE)
1589                                 unlock_page(pages[i]);
1590                         page_cache_release(pages[i]);
1591                 }
1592                 nr_pages -= ret;
1593                 index += ret;
1594                 cond_resched();
1595         }
1596         return 0;
1597 }
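
/*
 * Example (illustrative sketch): an error path tearing down a delalloc
 * range would typically combine several of the op flags above, roughly
 * like this; the exact mix depends on how far the caller got:
 *
 *	extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
 *				     start, end, locked_page,
 *				     EXTENT_CLEAR_UNLOCK_PAGE |
 *				     EXTENT_CLEAR_UNLOCK |
 *				     EXTENT_CLEAR_DELALLOC |
 *				     EXTENT_CLEAR_DIRTY |
 *				     EXTENT_SET_WRITEBACK |
 *				     EXTENT_END_WRITEBACK);
 */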
1598
1599 /*
1600  * count the number of bytes in the tree that have a given bit(s)
1601  * set.  This can be fairly slow, except for EXTENT_DIRTY which is
1602  * cached.  The total number found is returned.
1603  */
1604 u64 count_range_bits(struct extent_io_tree *tree,
1605                      u64 *start, u64 search_end, u64 max_bytes,
1606                      unsigned long bits, int contig)
1607 {
1608         struct rb_node *node;
1609         struct extent_state *state;
1610         u64 cur_start = *start;
1611         u64 total_bytes = 0;
1612         u64 last = 0;
1613         int found = 0;
1614
1615         if (search_end <= cur_start) {
1616                 WARN_ON(1);
1617                 return 0;
1618         }
1619
1620         spin_lock(&tree->lock);
1621         if (cur_start == 0 && bits == EXTENT_DIRTY) {
1622                 total_bytes = tree->dirty_bytes;
1623                 goto out;
1624         }
1625         /*
1626          * this search will find all the extents that end after
1627          * our range starts.
1628          */
1629         node = tree_search(tree, cur_start);
1630         if (!node)
1631                 goto out;
1632
1633         while (1) {
1634                 state = rb_entry(node, struct extent_state, rb_node);
1635                 if (state->start > search_end)
1636                         break;
1637                 if (contig && found && state->start > last + 1)
1638                         break;
1639                 if (state->end >= cur_start && (state->state & bits) == bits) {
1640                         total_bytes += min(search_end, state->end) + 1 -
1641                                        max(cur_start, state->start);
1642                         if (total_bytes >= max_bytes)
1643                                 break;
1644                         if (!found) {
1645                                 *start = max(cur_start, state->start);
1646                                 found = 1;
1647                         }
1648                         last = state->end;
1649                 } else if (contig && found) {
1650                         break;
1651                 }
1652                 node = rb_next(node);
1653                 if (!node)
1654                         break;
1655         }
1656 out:
1657         spin_unlock(&tree->lock);
1658         return total_bytes;
1659 }
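
/*
 * Example (illustrative sketch, placeholder names): count up to 'limit'
 * bytes of contiguous delalloc starting at or after 'offset'; 'offset' is
 * moved forward to the start of the first matching extent:
 *
 *	u64 offset = 0;
 *	u64 bytes = count_range_bits(&BTRFS_I(inode)->io_tree, &offset,
 *				     (u64)-1, limit, EXTENT_DELALLOC, 1);
 */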
1660
1661 /*
1662  * set the private field for a given byte offset in the tree.  If there isn't
1663  * an extent_state starting exactly at that offset, -ENOENT is returned.
1664  */
1665 int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
1666 {
1667         struct rb_node *node;
1668         struct extent_state *state;
1669         int ret = 0;
1670
1671         spin_lock(&tree->lock);
1672         /*
1673          * this search will find all the extents that end after
1674          * our range starts.
1675          */
1676         node = tree_search(tree, start);
1677         if (!node) {
1678                 ret = -ENOENT;
1679                 goto out;
1680         }
1681         state = rb_entry(node, struct extent_state, rb_node);
1682         if (state->start != start) {
1683                 ret = -ENOENT;
1684                 goto out;
1685         }
1686         state->private = private;
1687 out:
1688         spin_unlock(&tree->lock);
1689         return ret;
1690 }
1691
1692 int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
1693 {
1694         struct rb_node *node;
1695         struct extent_state *state;
1696         int ret = 0;
1697
1698         spin_lock(&tree->lock);
1699         /*
1700          * this search will find all the extents that end after
1701          * our range starts.
1702          */
1703         node = tree_search(tree, start);
1704         if (!node) {
1705                 ret = -ENOENT;
1706                 goto out;
1707         }
1708         state = rb_entry(node, struct extent_state, rb_node);
1709         if (state->start != start) {
1710                 ret = -ENOENT;
1711                 goto out;
1712         }
1713         *private = state->private;
1714 out:
1715         spin_unlock(&tree->lock);
1716         return ret;
1717 }
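
/*
 * Example (illustrative sketch): a store and load of the per-state private
 * value, assuming an extent_state begins exactly at 'start' (both calls
 * return -ENOENT otherwise):
 *
 *	ret = set_state_private(tree, start, val);
 *	...
 *	ret = get_state_private(tree, start, &val);
 */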
1718
1719 /*
1720  * searches a range in the state tree for a given mask.
1721  * If 'filled' == 1, this returns 1 only if the whole range is covered by
1722  * extents that have the bits set.  Otherwise, 1 is returned if any bit in the
1723  * range is found set.
1724  */
1725 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
1726                    int bits, int filled, struct extent_state *cached)
1727 {
1728         struct extent_state *state = NULL;
1729         struct rb_node *node;
1730         int bitset = 0;
1731
1732         spin_lock(&tree->lock);
1733         if (cached && cached->tree && cached->start <= start &&
1734             cached->end > start)
1735                 node = &cached->rb_node;
1736         else
1737                 node = tree_search(tree, start);
1738         while (node && start <= end) {
1739                 state = rb_entry(node, struct extent_state, rb_node);
1740
1741                 if (filled && state->start > start) {
1742                         bitset = 0;
1743                         break;
1744                 }
1745
1746                 if (state->start > end)
1747                         break;
1748
1749                 if (state->state & bits) {
1750                         bitset = 1;
1751                         if (!filled)
1752                                 break;
1753                 } else if (filled) {
1754                         bitset = 0;
1755                         break;
1756                 }
1757
1758                 if (state->end == (u64)-1)
1759                         break;
1760
1761                 start = state->end + 1;
1762                 if (start > end)
1763                         break;
1764                 node = rb_next(node);
1765                 if (!node) {
1766                         if (filled)
1767                                 bitset = 0;
1768                         break;
1769                 }
1770         }
1771         spin_unlock(&tree->lock);
1772         return bitset;
1773 }
1774
1775 /*
1776  * helper function to set a given page up to date if all the
1777  * extents in the tree for that page are up to date
1778  */
1779 static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
1780 {
1781         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1782         u64 end = start + PAGE_CACHE_SIZE - 1;
1783         if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
1784                 SetPageUptodate(page);
1785 }
1786
1787 /*
1788  * helper function to unlock a page if all the extents in the tree
1789  * for that page are unlocked
1790  */
1791 static void check_page_locked(struct extent_io_tree *tree, struct page *page)
1792 {
1793         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1794         u64 end = start + PAGE_CACHE_SIZE - 1;
1795         if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL))
1796                 unlock_page(page);
1797 }
1798
1799 /*
1800  * helper function to end page writeback for a page that was only
1801  * partially covered by the bio that just completed
1802  */
1803 static void check_page_writeback(struct extent_io_tree *tree,
1804                                  struct page *page)
1805 {
1806         end_page_writeback(page);
1807 }
1808
1809 /*
1810  * When IO fails, either with EIO or csum verification fails, we
1811  * try other mirrors that might have a good copy of the data.  This
1812  * io_failure_record is used to record state as we go through all the
1813  * mirrors.  If another mirror has good data, the page is set up to date
1814  * and things continue.  If a good mirror can't be found, the original
1815  * bio end_io callback is called to indicate things have failed.
1816  */
1817 struct io_failure_record {
1818         struct page *page;
1819         u64 start;
1820         u64 len;
1821         u64 logical;
1822         unsigned long bio_flags;
1823         int this_mirror;
1824         int failed_mirror;
1825         int in_validation;
1826 };
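
/*
 * Records are keyed by byte offset in the inode's io_failure_tree, with the
 * record's address squeezed into the per-state private u64.  Sketch of the
 * pattern used further down:
 *
 *	set_state_private(failure_tree, failrec->start,
 *			  (u64)(unsigned long)failrec);
 *	...
 *	get_state_private(failure_tree, start, &private);
 *	failrec = (struct io_failure_record *)(unsigned long)private;
 */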
1827
1828 static int free_io_failure(struct inode *inode, struct io_failure_record *rec,
1829                                 int did_repair)
1830 {
1831         int ret;
1832         int err = 0;
1833         struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
1834
1835         set_state_private(failure_tree, rec->start, 0);
1836         ret = clear_extent_bits(failure_tree, rec->start,
1837                                 rec->start + rec->len - 1,
1838                                 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
1839         if (ret)
1840                 err = ret;
1841
1842         if (did_repair) {
1843                 ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start,
1844                                         rec->start + rec->len - 1,
1845                                         EXTENT_DAMAGED, GFP_NOFS);
1846                 if (ret && !err)
1847                         err = ret;
1848         }
1849
1850         kfree(rec);
1851         return err;
1852 }
1853
1854 static void repair_io_failure_callback(struct bio *bio, int err)
1855 {
1856         complete(bio->bi_private);
1857 }
1858
1859 /*
1860  * this bypasses the standard btrfs submit functions deliberately, as
1861  * the standard behavior is to write all copies in a raid setup. here we only
1862  * want to write the one bad copy. so we do the mapping for ourselves and issue
1863  * submit_bio directly.
1864  * to avoid any synchronization issues, wait for the data after writing, which
1865  * actually prevents the read that triggered the error from finishing.
1866  * currently, there can be no more than two copies of every data bit. thus,
1867  * exactly one rewrite is required.
1868  */
1869 int repair_io_failure(struct btrfs_mapping_tree *map_tree, u64 start,
1870                         u64 length, u64 logical, struct page *page,
1871                         int mirror_num)
1872 {
1873         struct bio *bio;
1874         struct btrfs_device *dev;
1875         DECLARE_COMPLETION_ONSTACK(compl);
1876         u64 map_length = 0;
1877         u64 sector;
1878         struct btrfs_bio *bbio = NULL;
1879         int ret;
1880
1881         BUG_ON(!mirror_num);
1882
1883         bio = bio_alloc(GFP_NOFS, 1);
1884         if (!bio)
1885                 return -EIO;
1886         bio->bi_private = &compl;
1887         bio->bi_end_io = repair_io_failure_callback;
1888         bio->bi_size = 0;
1889         map_length = length;
1890
1891         ret = btrfs_map_block(map_tree, WRITE, logical,
1892                               &map_length, &bbio, mirror_num);
1893         if (ret) {
1894                 bio_put(bio);
1895                 return -EIO;
1896         }
1897         BUG_ON(mirror_num != bbio->mirror_num);
1898         sector = bbio->stripes[mirror_num-1].physical >> 9;
1899         bio->bi_sector = sector;
1900         dev = bbio->stripes[mirror_num-1].dev;
1901         kfree(bbio);
1902         if (!dev || !dev->bdev || !dev->writeable) {
1903                 bio_put(bio);
1904                 return -EIO;
1905         }
1906         bio->bi_bdev = dev->bdev;
1907         bio_add_page(bio, page, length, start-page_offset(page));
1908         btrfsic_submit_bio(WRITE_SYNC, bio);
1909         wait_for_completion(&compl);
1910
1911         if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
1912                 /* try to remap that extent elsewhere? */
1913                 bio_put(bio);
1914                 return -EIO;
1915         }
1916
1917         printk(KERN_INFO "btrfs read error corrected: ino %lu off %llu (dev %s "
1918                         "sector %llu)\n", page->mapping->host->i_ino, start,
1919                         dev->name, sector);
1920
1921         bio_put(bio);
1922         return 0;
1923 }
1924
1925 /*
1926  * each time an IO finishes, we do a fast check in the IO failure tree
1927  * to see if we need to process or clean up an io_failure_record
1928  */
1929 static int clean_io_failure(u64 start, struct page *page)
1930 {
1931         u64 private;
1932         u64 private_failure;
1933         struct io_failure_record *failrec;
1934         struct btrfs_mapping_tree *map_tree;
1935         struct extent_state *state;
1936         int num_copies;
1937         int did_repair = 0;
1938         int ret;
1939         struct inode *inode = page->mapping->host;
1940
1941         private = 0;
1942         ret = count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
1943                                 (u64)-1, 1, EXTENT_DIRTY, 0);
1944         if (!ret)
1945                 return 0;
1946
1947         ret = get_state_private(&BTRFS_I(inode)->io_failure_tree, start,
1948                                 &private_failure);
1949         if (ret)
1950                 return 0;
1951
1952         failrec = (struct io_failure_record *)(unsigned long) private_failure;
1953         BUG_ON(!failrec->this_mirror);
1954
1955         if (failrec->in_validation) {
1956                 /* there was no real error, just free the record */
1957                 pr_debug("clean_io_failure: freeing dummy error at %llu\n",
1958                          failrec->start);
1959                 did_repair = 1;
1960                 goto out;
1961         }
1962
1963         spin_lock(&BTRFS_I(inode)->io_tree.lock);
1964         state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
1965                                             failrec->start,
1966                                             EXTENT_LOCKED);
1967         spin_unlock(&BTRFS_I(inode)->io_tree.lock);
1968
1969         if (state && state->start == failrec->start) {
1970                 map_tree = &BTRFS_I(inode)->root->fs_info->mapping_tree;
1971                 num_copies = btrfs_num_copies(map_tree, failrec->logical,
1972                                                 failrec->len);
1973                 if (num_copies > 1)  {
1974                         ret = repair_io_failure(map_tree, start, failrec->len,
1975                                                 failrec->logical, page,
1976                                                 failrec->failed_mirror);
1977                         did_repair = !ret;
1978                 }
1979         }
1980
1981 out:
1982         if (!ret)
1983                 ret = free_io_failure(inode, failrec, did_repair);
1984
1985         return ret;
1986 }
1987
1988 /*
1989  * this is a generic handler for readpage errors (default
1990  * readpage_io_failed_hook). if other copies exist, read those and write back
1991  * good data to the failed position. does not try to remap the
1992  * failed extent elsewhere, hoping the device will be smart enough to do this as
1993  * needed
1994  */
1995
1996 static int bio_readpage_error(struct bio *failed_bio, struct page *page,
1997                                 u64 start, u64 end, int failed_mirror,
1998                                 struct extent_state *state)
1999 {
2000         struct io_failure_record *failrec = NULL;
2001         u64 private;
2002         struct extent_map *em;
2003         struct inode *inode = page->mapping->host;
2004         struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2005         struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2006         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
2007         struct bio *bio;
2008         int num_copies;
2009         int ret;
2010         int read_mode;
2011         u64 logical;
2012
2013         BUG_ON(failed_bio->bi_rw & REQ_WRITE);
2014
2015         ret = get_state_private(failure_tree, start, &private);
2016         if (ret) {
2017                 failrec = kzalloc(sizeof(*failrec), GFP_NOFS);
2018                 if (!failrec)
2019                         return -ENOMEM;
2020                 failrec->start = start;
2021                 failrec->len = end - start + 1;
2022                 failrec->this_mirror = 0;
2023                 failrec->bio_flags = 0;
2024                 failrec->in_validation = 0;
2025
2026                 read_lock(&em_tree->lock);
2027                 em = lookup_extent_mapping(em_tree, start, failrec->len);
2028                 if (!em) {
2029                         read_unlock(&em_tree->lock);
2030                         kfree(failrec);
2031                         return -EIO;
2032                 }
2033
2034                 if (em->start > start || em->start + em->len < start) {
2035                         free_extent_map(em);
2036                         em = NULL;
2037                 }
2038                 read_unlock(&em_tree->lock);
2039
2040                 if (!em || IS_ERR(em)) {
2041                         kfree(failrec);
2042                         return -EIO;
2043                 }
2044                 logical = start - em->start;
2045                 logical = em->block_start + logical;
2046                 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2047                         logical = em->block_start;
2048                         failrec->bio_flags = EXTENT_BIO_COMPRESSED;
2049                         extent_set_compress_type(&failrec->bio_flags,
2050                                                  em->compress_type);
2051                 }
2052                 pr_debug("bio_readpage_error: (new) logical=%llu, start=%llu, "
2053                          "len=%llu\n", logical, start, failrec->len);
2054                 failrec->logical = logical;
2055                 free_extent_map(em);
2056
2057                 /* set the bits in the private failure tree */
2058                 ret = set_extent_bits(failure_tree, start, end,
2059                                         EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
2060                 if (ret >= 0)
2061                         ret = set_state_private(failure_tree, start,
2062                                                 (u64)(unsigned long)failrec);
2063                 /* set the bits in the inode's tree */
2064                 if (ret >= 0)
2065                         ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED,
2066                                                 GFP_NOFS);
2067                 if (ret < 0) {
2068                         kfree(failrec);
2069                         return ret;
2070                 }
2071         } else {
2072                 failrec = (struct io_failure_record *)(unsigned long)private;
2073                 pr_debug("bio_readpage_error: (found) logical=%llu, "
2074                          "start=%llu, len=%llu, validation=%d\n",
2075                          failrec->logical, failrec->start, failrec->len,
2076                          failrec->in_validation);
2077                 /*
2078                  * when data can be on disk more than twice, add to failrec here
2079                  * (e.g. with a list for failed_mirror) to make
2080                  * clean_io_failure() clean all those errors at once.
2081                  */
2082         }
2083         num_copies = btrfs_num_copies(
2084                               &BTRFS_I(inode)->root->fs_info->mapping_tree,
2085                               failrec->logical, failrec->len);
2086         if (num_copies == 1) {
2087                 /*
2088                  * we only have a single copy of the data, so don't bother with
2089                  * all the retry and error correction code that follows. no
2090                  * matter what the error is, it is very likely to persist.
2091                  */
2092                 pr_debug("bio_readpage_error: cannot repair, num_copies == 1. "
2093                          "state=%p, num_copies=%d, next_mirror %d, "
2094                          "failed_mirror %d\n", state, num_copies,
2095                          failrec->this_mirror, failed_mirror);
2096                 free_io_failure(inode, failrec, 0);
2097                 return -EIO;
2098         }
2099
2100         if (!state) {
2101                 spin_lock(&tree->lock);
2102                 state = find_first_extent_bit_state(tree, failrec->start,
2103                                                     EXTENT_LOCKED);
2104                 if (state && state->start != failrec->start)
2105                         state = NULL;
2106                 spin_unlock(&tree->lock);
2107         }
2108
2109         /*
2110          * there are two goals here:
2111          *      a) deliver good data to the caller
2112          *      b) correct the bad sectors on disk
2113          */
2114         if (failed_bio->bi_vcnt > 1) {
2115                 /*
2116                  * to fulfill b), we need to know the exact failing sectors, as
2117                  * we don't want to rewrite any more than the failed ones. thus,
2118                  * we need separate read requests for the failed bio
2119                  *
2120                  * if the following BUG_ON triggers, our validation request got
2121                  * merged. we need separate requests for our algorithm to work.
2122                  */
2123                 BUG_ON(failrec->in_validation);
2124                 failrec->in_validation = 1;
2125                 failrec->this_mirror = failed_mirror;
2126                 read_mode = READ_SYNC | REQ_FAILFAST_DEV;
2127         } else {
2128                 /*
2129                  * we're ready to fulfill a) and b) at the same time. get a good copy
2130                  * of the failed sector and if we succeed, we have setup
2131                  * everything for repair_io_failure to do the rest for us.
2132                  */
2133                 if (failrec->in_validation) {
2134                         BUG_ON(failrec->this_mirror != failed_mirror);
2135                         failrec->in_validation = 0;
2136                         failrec->this_mirror = 0;
2137                 }
2138                 failrec->failed_mirror = failed_mirror;
2139                 failrec->this_mirror++;
2140                 if (failrec->this_mirror == failed_mirror)
2141                         failrec->this_mirror++;
2142                 read_mode = READ_SYNC;
2143         }
2144
2145         if (!state || failrec->this_mirror > num_copies) {
2146                 pr_debug("bio_readpage_error: (fail) state=%p, num_copies=%d, "
2147                          "next_mirror %d, failed_mirror %d\n", state,
2148                          num_copies, failrec->this_mirror, failed_mirror);
2149                 free_io_failure(inode, failrec, 0);
2150                 return -EIO;
2151         }
2152
2153         bio = bio_alloc(GFP_NOFS, 1);
2154         bio->bi_private = state;
2155         bio->bi_end_io = failed_bio->bi_end_io;
2156         bio->bi_sector = failrec->logical >> 9;
2157         bio->bi_bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
2158         bio->bi_size = 0;
2159
2160         bio_add_page(bio, page, failrec->len, start - page_offset(page));
2161
2162         pr_debug("bio_readpage_error: submitting new read[%#x] to "
2163                  "this_mirror=%d, num_copies=%d, in_validation=%d\n", read_mode,
2164                  failrec->this_mirror, num_copies, failrec->in_validation);
2165
2166         ret = tree->ops->submit_bio_hook(inode, read_mode, bio,
2167                                          failrec->this_mirror,
2168                                          failrec->bio_flags, 0);
2169         return ret;
2170 }
2171
2172 /* lots and lots of room for performance fixes in the end_bio funcs */
2173
2174 int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
2175 {
2176         int uptodate = (err == 0);
2177         struct extent_io_tree *tree;
2178         int ret;
2179
2180         tree = &BTRFS_I(page->mapping->host)->io_tree;
2181
2182         if (tree->ops && tree->ops->writepage_end_io_hook) {
2183                 ret = tree->ops->writepage_end_io_hook(page, start,
2184                                                end, NULL, uptodate);
2185                 if (ret)
2186                         uptodate = 0;
2187         }
2188
2189         if (!uptodate && tree->ops &&
2190             tree->ops->writepage_io_failed_hook) {
2191                 ret = tree->ops->writepage_io_failed_hook(NULL, page,
2192                                                  start, end, NULL);
2193                 /* Writeback already completed */
2194                 if (ret == 0)
2195                         return 1;
2196                 BUG_ON(ret < 0);
2197         }
2198
2199         if (!uptodate) {
2200                 clear_extent_uptodate(tree, start, end, NULL, GFP_NOFS);
2201                 ClearPageUptodate(page);
2202                 SetPageError(page);
2203         }
2204         return 0;
2205 }
2206
2207 /*
2208  * after a writepage IO is done, we need to:
2209  * clear the uptodate bits on error
2210  * clear the writeback bits in the extent tree for this IO
2211  * end_page_writeback if the page has no more pending IO
2212  *
2213  * Scheduling is not allowed, so the extent state tree is expected
2214  * to have one and only one object corresponding to this IO.
2215  */
2216 static void end_bio_extent_writepage(struct bio *bio, int err)
2217 {
2218         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
2219         struct extent_io_tree *tree;
2220         u64 start;
2221         u64 end;
2222         int whole_page;
2223
2224         do {
2225                 struct page *page = bvec->bv_page;
2226                 tree = &BTRFS_I(page->mapping->host)->io_tree;
2227
2228                 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
2229                          bvec->bv_offset;
2230                 end = start + bvec->bv_len - 1;
2231
2232                 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
2233                         whole_page = 1;
2234                 else
2235                         whole_page = 0;
2236
2237                 if (--bvec >= bio->bi_io_vec)
2238                         prefetchw(&bvec->bv_page->flags);
2239
2240                 if (end_extent_writepage(page, err, start, end))
2241                         continue;
2242
2243                 if (whole_page)
2244                         end_page_writeback(page);
2245                 else
2246                         check_page_writeback(tree, page);
2247         } while (bvec >= bio->bi_io_vec);
2248
2249         bio_put(bio);
2250 }
2251
2252 /*
2253  * after a readpage IO is done, we need to:
2254  * clear the uptodate bits on error
2255  * set the uptodate bits if things worked
2256  * set the page up to date if all extents in the tree are uptodate
2257  * clear the lock bit in the extent tree
2258  * unlock the page if there are no other extents locked for it
2259  *
2260  * Scheduling is not allowed, so the extent state tree is expected
2261  * to have one and only one object corresponding to this IO.
2262  */
2263 static void end_bio_extent_readpage(struct bio *bio, int err)
2264 {
2265         int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
2266         struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
2267         struct bio_vec *bvec = bio->bi_io_vec;
2268         struct extent_io_tree *tree;
2269         u64 start;
2270         u64 end;
2271         int whole_page;
2272         int ret;
2273
2274         if (err)
2275                 uptodate = 0;
2276
2277         do {
2278                 struct page *page = bvec->bv_page;
2279                 struct extent_state *cached = NULL;
2280                 struct extent_state *state;
2281
2282                 pr_debug("end_bio_extent_readpage: bi_vcnt=%d, idx=%d, err=%d, "
2283                          "mirror=%ld\n", bio->bi_vcnt, bio->bi_idx, err,
2284                          (long int)bio->bi_bdev);
2285                 tree = &BTRFS_I(page->mapping->host)->io_tree;
2286
2287                 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
2288                         bvec->bv_offset;
2289                 end = start + bvec->bv_len - 1;
2290
2291                 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
2292                         whole_page = 1;
2293                 else
2294                         whole_page = 0;
2295
2296                 if (++bvec <= bvec_end)
2297                         prefetchw(&bvec->bv_page->flags);
2298
2299                 spin_lock(&tree->lock);
2300                 state = find_first_extent_bit_state(tree, start, EXTENT_LOCKED);
2301                 if (state && state->start == start) {
2302                         /*
2303                          * take a reference on the state, unlock will drop
2304                          * the ref
2305                          */
2306                         cache_state(state, &cached);
2307                 }
2308                 spin_unlock(&tree->lock);
2309
2310                 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
2311                         ret = tree->ops->readpage_end_io_hook(page, start, end,
2312                                                               state);
2313                         if (ret)
2314                                 uptodate = 0;
2315                         else
2316                                 clean_io_failure(start, page);
2317                 }
2318                 if (!uptodate) {
2319                         int failed_mirror;
2320                         failed_mirror = (int)(unsigned long)bio->bi_bdev;
2321                         /*
2322                          * The generic bio_readpage_error handles errors the
2323                          * following way: If possible, new read requests are
2324                          * created and submitted and will end up in
2325                          * end_bio_extent_readpage as well (if we're lucky, not
2326                          * in the !uptodate case). In that case it returns 0 and
2327                          * we just go on with the next page in our bio. If it
2328                          * can't handle the error it will return -EIO and we
2329                          * remain responsible for that page.
2330                          */
2331                         ret = bio_readpage_error(bio, page, start, end,
2332                                                         failed_mirror, NULL);
2333                         if (ret == 0) {
2334 error_handled:
2335                                 uptodate =
2336                                         test_bit(BIO_UPTODATE, &bio->bi_flags);
2337                                 if (err)
2338                                         uptodate = 0;
2339                                 uncache_state(&cached);
2340                                 continue;
2341                         }
2342                         if (tree->ops && tree->ops->readpage_io_failed_hook) {
2343                                 ret = tree->ops->readpage_io_failed_hook(
2344                                                         bio, page, start, end,
2345                                                         failed_mirror, state);
2346                                 if (ret == 0)
2347                                         goto error_handled;
2348                         }
2349                         BUG_ON(ret < 0);
2350                 }
2351
2352                 if (uptodate) {
2353                         set_extent_uptodate(tree, start, end, &cached,
2354                                             GFP_ATOMIC);
2355                 }
2356                 unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
2357
2358                 if (whole_page) {
2359                         if (uptodate) {
2360                                 SetPageUptodate(page);
2361                         } else {
2362                                 ClearPageUptodate(page);
2363                                 SetPageError(page);
2364                         }
2365                         unlock_page(page);
2366                 } else {
2367                         if (uptodate) {
2368                                 check_page_uptodate(tree, page);
2369                         } else {
2370                                 ClearPageUptodate(page);
2371                                 SetPageError(page);
2372                         }
2373                         check_page_locked(tree, page);
2374                 }
2375         } while (bvec <= bvec_end);
2376
2377         bio_put(bio);
2378 }
2379
2380 struct bio *
2381 btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
2382                 gfp_t gfp_flags)
2383 {
2384         struct bio *bio;
2385
2386         bio = bio_alloc(gfp_flags, nr_vecs);
2387
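        /*
         * under memory pressure, keep halving the number of vecs until the
         * allocation succeeds or we run out of vecs to drop
         */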
2388         if (bio == NULL && (current->flags & PF_MEMALLOC)) {
2389                 while (!bio && (nr_vecs /= 2))
2390                         bio = bio_alloc(gfp_flags, nr_vecs);
2391         }
2392
2393         if (bio) {
2394                 bio->bi_size = 0;
2395                 bio->bi_bdev = bdev;
2396                 bio->bi_sector = first_sector;
2397         }
2398         return bio;
2399 }
2400
2401 static int __must_check submit_one_bio(int rw, struct bio *bio,
2402                                        int mirror_num, unsigned long bio_flags)
2403 {
2404         int ret = 0;
2405         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
2406         struct page *page = bvec->bv_page;
2407         struct extent_io_tree *tree = bio->bi_private;
2408         u64 start;
2409
2410         start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
2411
2412         bio->bi_private = NULL;
2413
2414         bio_get(bio);
2415
2416         if (tree->ops && tree->ops->submit_bio_hook)
2417                 ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
2418                                            mirror_num, bio_flags, start);
2419         else
2420                 btrfsic_submit_bio(rw, bio);
2421
2422         if (bio_flagged(bio, BIO_EOPNOTSUPP))
2423                 ret = -EOPNOTSUPP;
2424         bio_put(bio);
2425         return ret;
2426 }
2427
2428 static int merge_bio(struct extent_io_tree *tree, struct page *page,
2429                      unsigned long offset, size_t size, struct bio *bio,
2430                      unsigned long bio_flags)
2431 {
2432         int ret = 0;
2433         if (tree->ops && tree->ops->merge_bio_hook)
2434                 ret = tree->ops->merge_bio_hook(page, offset, size, bio,
2435                                                 bio_flags);
2436         BUG_ON(ret < 0);
2437         return ret;
2438
2439 }
2440
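/*
 * add 'page' to the bio being built up in *bio_ret when it is contiguous
 * and the bio flags match; otherwise (or when the merge hook or
 * bio_add_page rejects it) the old bio is submitted and a new one is
 * started for this page.  If bio_ret is NULL the new bio is submitted
 * immediately instead of being handed back to the caller.
 */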
2441 static int submit_extent_page(int rw, struct extent_io_tree *tree,
2442                               struct page *page, sector_t sector,
2443                               size_t size, unsigned long offset,
2444                               struct block_device *bdev,
2445                               struct bio **bio_ret,
2446                               unsigned long max_pages,
2447                               bio_end_io_t end_io_func,
2448                               int mirror_num,
2449                               unsigned long prev_bio_flags,
2450                               unsigned long bio_flags)
2451 {
2452         int ret = 0;
2453         struct bio *bio;
2454         int nr;
2455         int contig = 0;
2456         int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED;
2457         int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
2458         size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE);
2459
2460         if (bio_ret && *bio_ret) {
2461                 bio = *bio_ret;
2462                 if (old_compressed)
2463                         contig = bio->bi_sector == sector;
2464                 else
2465                         contig = bio->bi_sector + (bio->bi_size >> 9) ==
2466                                 sector;
2467
2468                 if (prev_bio_flags != bio_flags || !contig ||
2469                     merge_bio(tree, page, offset, page_size, bio, bio_flags) ||
2470                     bio_add_page(bio, page, page_size, offset) < page_size) {
2471                         ret = submit_one_bio(rw, bio, mirror_num,
2472                                              prev_bio_flags);
2473                         BUG_ON(ret < 0);
2474                         bio = NULL;
2475                 } else {
2476                         return 0;
2477                 }
2478         }
2479         if (this_compressed)
2480                 nr = BIO_MAX_PAGES;
2481         else
2482                 nr = bio_get_nr_vecs(bdev);
2483
2484         bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
2485         if (!bio)
2486                 return -ENOMEM;
2487
2488         bio_add_page(bio, page, page_size, offset);
2489         bio->bi_end_io = end_io_func;
2490         bio->bi_private = tree;
2491
2492         if (bio_ret)
2493                 *bio_ret = bio;
2494         else {
2495                 ret = submit_one_bio(rw, bio, mirror_num, bio_flags);
2496                 BUG_ON(ret < 0);
2497         }
2498
2499         return ret;
2500 }
2501
2502 void set_page_extent_mapped(struct page *page)
2503 {
2504         if (!PagePrivate(page)) {
2505                 SetPagePrivate(page);
2506                 page_cache_get(page);
2507                 set_page_private(page, EXTENT_PAGE_PRIVATE);
2508         }
2509 }
2510
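/*
 * tag 'page' as the head page of an extent buffer by encoding the buffer
 * length (shifted past the flag bits) together with the FIRST_PAGE marker
 * in page->private
 */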
2511 static void set_page_extent_head(struct page *page, unsigned long len)
2512 {
2513         WARN_ON(!PagePrivate(page));
2514         set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
2515 }
2516
2517 /*
2518  * basic readpage implementation.  Locked extent state structs are inserted
2519  * into the tree and are removed when the IO is done (by the end_io
2520  * handlers)
2521  */
2522 static int __extent_read_full_page(struct extent_io_tree *tree,
2523                                    struct page *page,
2524                                    get_extent_t *get_extent,
2525                                    struct bio **bio, int mirror_num,
2526                                    unsigned long *bio_flags)
2527 {
2528         struct inode *inode = page->mapping->host;
2529         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2530         u64 page_end = start + PAGE_CACHE_SIZE - 1;
2531         u64 end;
2532         u64 cur = start;
2533         u64 extent_offset;
2534         u64 last_byte = i_size_read(inode);
2535         u64 block_start;
2536         u64 cur_end;
2537         sector_t sector;
2538         struct extent_map *em;
2539         struct block_device *bdev;
2540         struct btrfs_ordered_extent *ordered;
2541         int ret;
2542         int nr = 0;
2543         size_t pg_offset = 0;
2544         size_t iosize;
2545         size_t disk_io_size;
2546         size_t blocksize = inode->i_sb->s_blocksize;
2547         unsigned long this_bio_flag = 0;
2548
2549         set_page_extent_mapped(page);
2550
2551         if (!PageUptodate(page)) {
2552                 if (cleancache_get_page(page) == 0) {
2553                         BUG_ON(blocksize != PAGE_SIZE);
2554                         goto out;
2555                 }
2556         }
2557
2558         end = page_end;
2559         while (1) {
2560                 lock_extent(tree, start, end, GFP_NOFS);
2561                 ordered = btrfs_lookup_ordered_extent(inode, start);
2562                 if (!ordered)
2563                         break;
2564                 unlock_extent(tree, start, end, GFP_NOFS);
2565                 btrfs_start_ordered_extent(inode, ordered, 1);
2566                 btrfs_put_ordered_extent(ordered);
2567         }
2568
2569         if (page->index == last_byte >> PAGE_CACHE_SHIFT) {
2570                 char *userpage;
2571                 size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1);
2572
2573                 if (zero_offset) {
2574                         iosize = PAGE_CACHE_SIZE - zero_offset;
2575                         userpage = kmap_atomic(page, KM_USER0);
2576                         memset(userpage + zero_offset, 0, iosize);
2577                         flush_dcache_page(page);
2578                         kunmap_atomic(userpage, KM_USER0);
2579                 }
2580         }
2581         while (cur <= end) {
2582                 if (cur >= last_byte) {
2583                         char *userpage;
2584                         struct extent_state *cached = NULL;
2585
2586                         iosize = PAGE_CACHE_SIZE - pg_offset;
2587                         userpage = kmap_atomic(page, KM_USER0);
2588                         memset(userpage + pg_offset, 0, iosize);
2589                         flush_dcache_page(page);
2590                         kunmap_atomic(userpage, KM_USER0);
2591                         set_extent_uptodate(tree, cur, cur + iosize - 1,
2592                                             &cached, GFP_NOFS);
2593                         unlock_extent_cached(tree, cur, cur + iosize - 1,
2594                                              &cached, GFP_NOFS);
2595                         break;
2596                 }
2597                 em = get_extent(inode, page, pg_offset, cur,
2598                                 end - cur + 1, 0);
2599                 if (IS_ERR_OR_NULL(em)) {
2600                         SetPageError(page);
2601                         unlock_extent(tree, cur, end, GFP_NOFS);
2602                         break;
2603                 }
2604                 extent_offset = cur - em->start;
2605                 BUG_ON(extent_map_end(em) <= cur);
2606                 BUG_ON(end < cur);
2607
2608                 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2609                         this_bio_flag = EXTENT_BIO_COMPRESSED;
2610                         extent_set_compress_type(&this_bio_flag,
2611                                                  em->compress_type);
2612                 }
2613
2614                 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2615                 cur_end = min(extent_map_end(em) - 1, end);
2616                 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
2617                 if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
2618                         disk_io_size = em->block_len;
2619                         sector = em->block_start >> 9;
2620                 } else {
2621                         sector = (em->block_start + extent_offset) >> 9;
2622                         disk_io_size = iosize;
2623                 }
2624                 bdev = em->bdev;
2625                 block_start = em->block_start;
2626                 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
2627                         block_start = EXTENT_MAP_HOLE;
2628                 free_extent_map(em);
2629                 em = NULL;
2630
2631                 /* we've found a hole, just zero and go on */
2632                 if (block_start == EXTENT_MAP_HOLE) {
2633                         char *userpage;
2634                         struct extent_state *cached = NULL;
2635
2636                         userpage = kmap_atomic(page, KM_USER0);
2637                         memset(userpage + pg_offset, 0, iosize);
2638                         flush_dcache_page(page);
2639                         kunmap_atomic(userpage, KM_USER0);
2640
2641                         set_extent_uptodate(tree, cur, cur + iosize - 1,
2642                                             &cached, GFP_NOFS);
2643                         unlock_extent_cached(tree, cur, cur + iosize - 1,
2644                                              &cached, GFP_NOFS);
2645                         cur = cur + iosize;
2646                         pg_offset += iosize;
2647                         continue;
2648                 }
2649                 /* the get_extent function already copied into the page */
2650                 if (test_range_bit(tree, cur, cur_end,
2651                                    EXTENT_UPTODATE, 1, NULL)) {
2652                         check_page_uptodate(tree, page);
2653                         unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2654                         cur = cur + iosize;
2655                         pg_offset += iosize;
2656                         continue;
2657                 }
2658                 /* we have an inline extent but it didn't get marked up
2659                  * to date.  Error out
2660                  */
2661                 if (block_start == EXTENT_MAP_INLINE) {
2662                         SetPageError(page);
2663                         unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2664                         cur = cur + iosize;
2665                         pg_offset += iosize;
2666                         continue;
2667                 }
2668
2669                 ret = 0;
2670                 if (tree->ops && tree->ops->readpage_io_hook) {
2671                         ret = tree->ops->readpage_io_hook(page, cur,
2672                                                           cur + iosize - 1);
2673                 }
2674                 if (!ret) {
2675                         unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
2676                         pnr -= page->index;
2677                         ret = submit_extent_page(READ, tree, page,
2678                                          sector, disk_io_size, pg_offset,
2679                                          bdev, bio, pnr,
2680                                          end_bio_extent_readpage, mirror_num,
2681                                          *bio_flags,
2682                                          this_bio_flag);
2683                         nr++;
2684                         *bio_flags = this_bio_flag;
2685                 }
2686                 if (ret)
2687                         SetPageError(page);
2688                 cur = cur + iosize;
2689                 pg_offset += iosize;
2690         }
2691 out:
2692         if (!nr) {
2693                 if (!PageError(page))
2694                         SetPageUptodate(page);
2695                 unlock_page(page);
2696         }
2697         return 0;
2698 }
2699
2700 int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
2701                             get_extent_t *get_extent, int mirror_num)
2702 {
2703         struct bio *bio = NULL;
2704         unsigned long bio_flags = 0;
2705         int ret;
2706
2707         ret = __extent_read_full_page(tree, page, get_extent, &bio, mirror_num,
2708                                       &bio_flags);
2709         if (bio) {
2710                 ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
2711                 BUG_ON(ret < 0);
2712         }
2713         return ret;
2714 }
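
/*
 * Example (illustrative sketch): an address_space ->readpage implementation
 * drives this helper roughly the way btrfs_readpage() in inode.c does:
 *
 *	static int btrfs_readpage(struct file *file, struct page *page)
 *	{
 *		struct extent_io_tree *tree;
 *		tree = &BTRFS_I(page->mapping->host)->io_tree;
 *		return extent_read_full_page(tree, page, btrfs_get_extent, 0);
 *	}
 */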
2715
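/*
 * account pages handed off to the block layer against wbc->nr_to_write and,
 * for cyclic or whole-file writeback, advance the mapping's writeback_index
 * past what was just written
 */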
2716 static noinline void update_nr_written(struct page *page,
2717                                       struct writeback_control *wbc,
2718                                       unsigned long nr_written)
2719 {
2720         wbc->nr_to_write -= nr_written;
2721         if (wbc->range_cyclic || (wbc->nr_to_write > 0 &&
2722             wbc->range_start == 0 && wbc->range_end == LLONG_MAX))
2723                 page->mapping->writeback_index = page->index + nr_written;
2724 }
2725
2726 /*
2727  * the writepage semantics are similar to regular writepage.  extent
2728  * records are inserted to lock ranges in the tree, and as dirty areas
2729  * are found, they are marked writeback.  Then the lock bits are removed
2730  * and the end_io handler clears the writeback ranges
2731  */
2732 static int __extent_writepage(struct page *page, struct writeback_control *wbc,
2733                               void *data)
2734 {
2735         struct inode *inode = page->mapping->host;
2736         struct extent_page_data *epd = data;
2737         struct extent_io_tree *tree = epd->tree;
2738         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2739         u64 delalloc_start;
2740         u64 page_end = start + PAGE_CACHE_SIZE - 1;
2741         u64 end;
2742         u64 cur = start;
2743         u64 extent_offset;
2744         u64 last_byte = i_size_read(inode);
2745         u64 block_start;
2746         u64 iosize;
2747         sector_t sector;
2748         struct extent_state *cached_state = NULL;
2749         struct extent_map *em;
2750         struct block_device *bdev;
2751         int ret;
2752         int nr = 0;
2753         size_t pg_offset = 0;
2754         size_t blocksize;
2755         loff_t i_size = i_size_read(inode);
2756         unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
2757         u64 nr_delalloc;
2758         u64 delalloc_end;
2759         int page_started;
2760         int compressed;
2761         int write_flags;
2762         unsigned long nr_written = 0;
2763         bool fill_delalloc = true;
2764
2765         if (wbc->sync_mode == WB_SYNC_ALL)
2766                 write_flags = WRITE_SYNC;
2767         else
2768                 write_flags = WRITE;
2769
2770         trace___extent_writepage(page, inode, wbc);
2771
2772         WARN_ON(!PageLocked(page));
2773
2774         ClearPageError(page);
2775
2776         pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
2777         if (page->index > end_index ||
2778            (page->index == end_index && !pg_offset)) {
2779                 page->mapping->a_ops->invalidatepage(page, 0);
2780                 unlock_page(page);
2781                 return 0;
2782         }
2783
2784         if (page->index == end_index) {
2785                 char *userpage;
2786
2787                 userpage = kmap_atomic(page, KM_USER0);
2788                 memset(userpage + pg_offset, 0,
2789                        PAGE_CACHE_SIZE - pg_offset);
2790                 kunmap_atomic(userpage, KM_USER0);
2791                 flush_dcache_page(page);
2792         }
2793         pg_offset = 0;
2794
2795         set_page_extent_mapped(page);
2796
2797         if (!tree->ops || !tree->ops->fill_delalloc)
2798                 fill_delalloc = false;
2799
2800         delalloc_start = start;
2801         delalloc_end = 0;
2802         page_started = 0;
2803         if (!epd->extent_locked && fill_delalloc) {
2804                 u64 delalloc_to_write = 0;
2805                 /*
2806                  * make sure the wbc mapping index is at least updated
2807                  * to this page.
2808                  */
2809                 update_nr_written(page, wbc, 0);
2810
2811                 while (delalloc_end < page_end) {
2812                         nr_delalloc = find_lock_delalloc_range(inode, tree,
2813                                                        page,
2814                                                        &delalloc_start,
2815                                                        &delalloc_end,
2816                                                        128 * 1024 * 1024);
2817                         if (nr_delalloc == 0) {
2818                                 delalloc_start = delalloc_end + 1;
2819                                 continue;
2820                         }
2821                         ret = tree->ops->fill_delalloc(inode, page,
2822                                                        delalloc_start,
2823                                                        delalloc_end,
2824                                                        &page_started,
2825                                                        &nr_written);
2826                         BUG_ON(ret);
2827                         /*
2828                          * delalloc_end is already one less than the total
2829                          * length, so we don't subtract one from
2830                          * PAGE_CACHE_SIZE
2831                          */
2832                         delalloc_to_write += (delalloc_end - delalloc_start +
2833                                               PAGE_CACHE_SIZE) >>
2834                                               PAGE_CACHE_SHIFT;
2835                         delalloc_start = delalloc_end + 1;
2836                 }
2837                 if (wbc->nr_to_write < delalloc_to_write) {
2838                         int thresh = 8192;
2839
2840                         if (delalloc_to_write < thresh * 2)
2841                                 thresh = delalloc_to_write;
2842                         wbc->nr_to_write = min_t(u64, delalloc_to_write,
2843                                                  thresh);
2844                 }
2845
2846                 /* did the fill delalloc function already unlock and start
2847                  * the IO?
2848                  */
2849                 if (page_started) {
2850                         ret = 0;
2851                         /*
2852                          * we've unlocked the page, so we can't update
2853                          * the mapping's writeback index, just update
2854                          * nr_to_write.
2855                          */
2856                         wbc->nr_to_write -= nr_written;
2857                         goto done_unlocked;
2858                 }
2859         }
2860         if (tree->ops && tree->ops->writepage_start_hook) {
2861                 ret = tree->ops->writepage_start_hook(page, start,
2862                                                       page_end);
2863                 if (ret) {
2864                         /* Fixup worker will requeue */
2865                         if (ret == -EBUSY)
2866                                 wbc->pages_skipped++;
2867                         else
2868                                 redirty_page_for_writepage(wbc, page);
2869                         update_nr_written(page, wbc, nr_written);
2870                         unlock_page(page);
2871                         ret = 0;
2872                         goto done_unlocked;
2873                 }
2874         }
2875
2876         /*
2877          * we don't want to touch the inode after unlocking the page,
2878          * so we update the mapping writeback index now
2879          */
2880         update_nr_written(page, wbc, nr_written + 1);
2881
2882         end = page_end;
2883         if (last_byte <= start) {
2884                 if (tree->ops && tree->ops->writepage_end_io_hook)
2885                         tree->ops->writepage_end_io_hook(page, start,
2886                                                          page_end, NULL, 1);
2887                 goto done;
2888         }
2889
2890         blocksize = inode->i_sb->s_blocksize;
2891
2892         while (cur <= end) {
2893                 if (cur >= last_byte) {
2894                         if (tree->ops && tree->ops->writepage_end_io_hook)
2895                                 tree->ops->writepage_end_io_hook(page, cur,
2896                                                          page_end, NULL, 1);
2897                         break;
2898                 }
2899                 em = epd->get_extent(inode, page, pg_offset, cur,
2900                                      end - cur + 1, 1);
2901                 if (IS_ERR_OR_NULL(em)) {
2902                         SetPageError(page);
2903                         break;
2904                 }
2905
2906                 extent_offset = cur - em->start;
2907                 BUG_ON(extent_map_end(em) <= cur);
2908                 BUG_ON(end < cur);
2909                 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2910                 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
2911                 sector = (em->block_start + extent_offset) >> 9;
2912                 bdev = em->bdev;
2913                 block_start = em->block_start;
2914                 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
2915                 free_extent_map(em);
2916                 em = NULL;
2917
2918                 /*
2919                  * compressed and inline extents are written through other
2920                  * paths in the FS
2921                  */
2922                 if (compressed || block_start == EXTENT_MAP_HOLE ||
2923                     block_start == EXTENT_MAP_INLINE) {
2924                         /*
2925                          * end_io notification does not happen here for
2926                          * compressed extents
2927                          */
2928                         if (!compressed && tree->ops &&
2929                             tree->ops->writepage_end_io_hook)
2930                                 tree->ops->writepage_end_io_hook(page, cur,
2931                                                          cur + iosize - 1,
2932                                                          NULL, 1);
2933                         else if (compressed) {
2934                                 /* we don't want to end_page_writeback on
2935                                  * a compressed extent.  this happens
2936                                  * elsewhere
2937                                  */
2938                                 nr++;
2939                         }
2940
2941                         cur += iosize;
2942                         pg_offset += iosize;
2943                         continue;
2944                 }
2945                 /* leave this out until we have a page_mkwrite call */
2946                 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
2947                                    EXTENT_DIRTY, 0, NULL)) {
2948                         cur = cur + iosize;
2949                         pg_offset += iosize;
2950                         continue;
2951                 }
2952
2953                 if (tree->ops && tree->ops->writepage_io_hook) {
2954                         ret = tree->ops->writepage_io_hook(page, cur,
2955                                                 cur + iosize - 1);
2956                 } else {
2957                         ret = 0;
2958                 }
2959                 if (ret) {
2960                         SetPageError(page);
2961                 } else {
2962                         unsigned long max_nr = end_index + 1;
2963
2964                         set_range_writeback(tree, cur, cur + iosize - 1);
2965                         if (!PageWriteback(page)) {
2966                                 printk(KERN_ERR "btrfs warning page %lu not "
2967                                        "writeback, cur %llu end %llu\n",
2968                                        page->index, (unsigned long long)cur,
2969                                        (unsigned long long)end);
2970                         }
2971
2972                         ret = submit_extent_page(write_flags, tree, page,
2973                                                  sector, iosize, pg_offset,
2974                                                  bdev, &epd->bio, max_nr,
2975                                                  end_bio_extent_writepage,
2976                                                  0, 0, 0);
2977                         if (ret)
2978                                 SetPageError(page);
2979                 }
2980                 cur = cur + iosize;
2981                 pg_offset += iosize;
2982                 nr++;
2983         }
2984 done:
2985         if (nr == 0) {
2986                 /* make sure the mapping tag for page dirty gets cleared */
2987                 set_page_writeback(page);
2988                 end_page_writeback(page);
2989         }
2990         unlock_page(page);
2991
2992 done_unlocked:
2993
2994         /* drop our reference on any cached states */
2995         free_extent_state(cached_state);
2996         return 0;
2997 }
2998
2999 /**
3000  * extent_write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
3001  * @mapping: address space structure to write
3002  * @wbc: subtract the number of written pages from *@wbc->nr_to_write
3003  * @writepage: function called for each page
3004  * @data: data passed to writepage function
3005  *
3006  * If a page is already under I/O, write_cache_pages() skips it, even
3007  * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
3008  * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
3009  * and msync() need to guarantee that all the data which was dirty at the time
3010  * the call was made get new I/O started against them.  If wbc->sync_mode is
3011  * WB_SYNC_ALL then we were called for data integrity and we must wait for
3012  * existing IO to complete.
3013  */
3014 static int extent_write_cache_pages(struct extent_io_tree *tree,
3015                              struct address_space *mapping,
3016                              struct writeback_control *wbc,
3017                              writepage_t writepage, void *data,
3018                              void (*flush_fn)(void *))
3019 {
3020         int ret = 0;
3021         int done = 0;
3022         int nr_to_write_done = 0;
3023         struct pagevec pvec;
3024         int nr_pages;
3025         pgoff_t index;
3026         pgoff_t end;            /* Inclusive */
3027         int scanned = 0;
3028         int tag;
3029
3030         pagevec_init(&pvec, 0);
3031         if (wbc->range_cyclic) {
3032                 index = mapping->writeback_index; /* Start from prev offset */
3033                 end = -1;
3034         } else {
3035                 index = wbc->range_start >> PAGE_CACHE_SHIFT;
3036                 end = wbc->range_end >> PAGE_CACHE_SHIFT;
3037                 scanned = 1;
3038         }
3039         if (wbc->sync_mode == WB_SYNC_ALL)
3040                 tag = PAGECACHE_TAG_TOWRITE;
3041         else
3042                 tag = PAGECACHE_TAG_DIRTY;
3043 retry:
3044         if (wbc->sync_mode == WB_SYNC_ALL)
3045                 tag_pages_for_writeback(mapping, index, end);
3046         while (!done && !nr_to_write_done && (index <= end) &&
3047                (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
3048                         min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
3049                 unsigned i;
3050
3051                 scanned = 1;
3052                 for (i = 0; i < nr_pages; i++) {
3053                         struct page *page = pvec.pages[i];
3054
3055                         /*
3056                          * At this point we hold neither mapping->tree_lock nor
3057                          * lock on the page itself: the page may be truncated or
3058                          * invalidated (changing page->mapping to NULL), or even
3059                          * swizzled back from swapper_space to tmpfs file
3060                          * mapping
3061                          */
3062                         if (tree->ops &&
3063                             tree->ops->write_cache_pages_lock_hook) {
3064                                 tree->ops->write_cache_pages_lock_hook(page,
3065                                                                data, flush_fn);
3066                         } else {
3067                                 if (!trylock_page(page)) {
3068                                         flush_fn(data);
3069                                         lock_page(page);
3070                                 }
3071                         }
3072
3073                         if (unlikely(page->mapping != mapping)) {
3074                                 unlock_page(page);
3075                                 continue;
3076                         }
3077
3078                         if (!wbc->range_cyclic && page->index > end) {
3079                                 done = 1;
3080                                 unlock_page(page);
3081                                 continue;
3082                         }
3083
3084                         if (wbc->sync_mode != WB_SYNC_NONE) {
3085                                 if (PageWriteback(page))
3086                                         flush_fn(data);
3087                                 wait_on_page_writeback(page);
3088                         }
3089
3090                         if (PageWriteback(page) ||
3091                             !clear_page_dirty_for_io(page)) {
3092                                 unlock_page(page);
3093                                 continue;
3094                         }
3095
3096                         ret = (*writepage)(page, wbc, data);
3097
3098                         if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
3099                                 unlock_page(page);
3100                                 ret = 0;
3101                         }
3102                         if (ret)
3103                                 done = 1;
3104
3105                         /*
3106                          * the filesystem may choose to bump up nr_to_write.
3107                          * We have to make sure to honor the new nr_to_write
3108                          * at any time
3109                          */
3110                         nr_to_write_done = wbc->nr_to_write <= 0;
3111                 }
3112                 pagevec_release(&pvec);
3113                 cond_resched();
3114         }
3115         if (!scanned && !done) {
3116                 /*
3117                  * We hit the last page and there is more work to be done: wrap
3118                  * back to the start of the file
3119                  */
3120                 scanned = 1;
3121                 index = 0;
3122                 goto retry;
3123         }
3124         return ret;
3125 }
3126
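/*
 * Submit any bio that has been built up in the extent_page_data, using
 * WRITE_SYNC when the caller asked for synchronous writeback.
 */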
3127 static void flush_epd_write_bio(struct extent_page_data *epd)
3128 {
3129         if (epd->bio) {
3130                 int rw = WRITE;
3131                 int ret;
3132
3133                 if (epd->sync_io)
3134                         rw = WRITE_SYNC;
3135
3136                 ret = submit_one_bio(rw, epd->bio, 0, 0);
3137                 BUG_ON(ret < 0);
3138                 epd->bio = NULL;
3139         }
3140 }
3141
3142 static noinline void flush_write_bio(void *data)
3143 {
3144         struct extent_page_data *epd = data;
3145         flush_epd_write_bio(epd);
3146 }
3147
3148 int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
3149                           get_extent_t *get_extent,
3150                           struct writeback_control *wbc)
3151 {
3152         int ret;
3153         struct extent_page_data epd = {
3154                 .bio = NULL,
3155                 .tree = tree,
3156                 .get_extent = get_extent,
3157                 .extent_locked = 0,
3158                 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
3159         };
3160
3161         ret = __extent_writepage(page, wbc, &epd);
3162
3163         flush_epd_write_bio(&epd);
3164         return ret;
3165 }
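
/*
 * Hedged sketch of a typical caller: btrfs wires this into its
 * address_space ops in inode.c, roughly like this (simplified):
 *
 *	static int btrfs_writepage(struct page *page,
 *				   struct writeback_control *wbc)
 *	{
 *		struct extent_io_tree *tree;
 *
 *		tree = &BTRFS_I(page->mapping->host)->io_tree;
 *		return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
 *	}
 */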
3166
3167 int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
3168                               u64 start, u64 end, get_extent_t *get_extent,
3169                               int mode)
3170 {
3171         int ret = 0;
3172         struct address_space *mapping = inode->i_mapping;
3173         struct page *page;
3174         unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >>
3175                 PAGE_CACHE_SHIFT;
3176
3177         struct extent_page_data epd = {
3178                 .bio = NULL,
3179                 .tree = tree,
3180                 .get_extent = get_extent,
3181                 .extent_locked = 1,
3182                 .sync_io = mode == WB_SYNC_ALL,
3183         };
3184         struct writeback_control wbc_writepages = {
3185                 .sync_mode      = mode,
3186                 .nr_to_write    = nr_pages * 2,
3187                 .range_start    = start,
3188                 .range_end      = end + 1,
3189         };
3190
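        /*
         * The caller has already locked the extent range, so walk it one
         * page at a time: pages that are still dirty go through
         * __extent_writepage, clean ones just get the end_io hook called
         * so accounting for the range stays consistent.
         */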
3191         while (start <= end) {
3192                 page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
3193                 if (clear_page_dirty_for_io(page))
3194                         ret = __extent_writepage(page, &wbc_writepages, &epd);
3195                 else {
3196                         if (tree->ops && tree->ops->writepage_end_io_hook)
3197                                 tree->ops->writepage_end_io_hook(page, start,
3198                                                  start + PAGE_CACHE_SIZE - 1,
3199                                                  NULL, 1);
3200                         unlock_page(page);
3201                 }
3202                 page_cache_release(page);
3203                 start += PAGE_CACHE_SIZE;
3204         }
3205
3206         flush_epd_write_bio(&epd);
3207         return ret;
3208 }
3209
3210 int extent_writepages(struct extent_io_tree *tree,
3211                       struct address_space *mapping,
3212                       get_extent_t *get_extent,
3213                       struct writeback_control *wbc)
3214 {
3215         int ret = 0;
3216         struct extent_page_data epd = {
3217                 .bio = NULL,
3218                 .tree = tree,
3219                 .get_extent = get_extent,
3220                 .extent_locked = 0,
3221                 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
3222         };
3223
3224         ret = extent_write_cache_pages(tree, mapping, wbc,
3225                                        __extent_writepage, &epd,
3226                                        flush_write_bio);
3227         flush_epd_write_bio(&epd);
3228         return ret;
3229 }
3230
3231 int extent_readpages(struct extent_io_tree *tree,
3232                      struct address_space *mapping,
3233                      struct list_head *pages, unsigned nr_pages,
3234                      get_extent_t get_extent)
3235 {
3236         struct bio *bio = NULL;
3237         unsigned page_idx;
3238         unsigned long bio_flags = 0;
3239
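        /*
         * Take each page off the list, add it to the page cache and chain
         * its read into one shared bio where possible; any page that cannot
         * be added (e.g. already cached) is simply dropped.
         */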
3240         for (page_idx = 0; page_idx < nr_pages; page_idx++) {
3241                 struct page *page = list_entry(pages->prev, struct page, lru);
3242
3243                 prefetchw(&page->flags);
3244                 list_del(&page->lru);
3245                 if (!add_to_page_cache_lru(page, mapping,
3246                                         page->index, GFP_NOFS)) {
3247                         __extent_read_full_page(tree, page, get_extent,
3248                                                 &bio, 0, &bio_flags);
3249                 }
3250                 page_cache_release(page);
3251         }
3252         BUG_ON(!list_empty(pages));
3253         if (bio) {
3254                 int ret = submit_one_bio(READ, bio, 0, bio_flags);
3255                 BUG_ON(ret < 0);
3256         }
3257         return 0;
3258 }
3259
3260 /*
3261  * basic invalidatepage code; this waits on any locked or writeback
3262  * ranges corresponding to the page, and then deletes any extent state
3263  * records from the tree
3264  */
3265 int extent_invalidatepage(struct extent_io_tree *tree,
3266                           struct page *page, unsigned long offset)
3267 {
3268         struct extent_state *cached_state = NULL;
3269         u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
3270         u64 end = start + PAGE_CACHE_SIZE - 1;
3271         size_t blocksize = page->mapping->host->i_sb->s_blocksize;
3272
3273         start += (offset + blocksize - 1) & ~(blocksize - 1);
3274         if (start > end)
3275                 return 0;
3276
3277         lock_extent_bits(tree, start, end, 0, &cached_state, GFP_NOFS);
3278         wait_on_page_writeback(page);
3279         clear_extent_bit(tree, start, end,
3280                          EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
3281                          EXTENT_DO_ACCOUNTING,
3282                          1, 1, &cached_state, GFP_NOFS);
3283         return 0;
3284 }
3285
3286 /*
3287  * a helper for releasepage; it tests for areas of the page that
3288  * are locked or under IO and drops the related state bits if it is safe
3289  * to drop the page.
3290  */
3291 int try_release_extent_state(struct extent_map_tree *map,
3292                              struct extent_io_tree *tree, struct page *page,
3293                              gfp_t mask)
3294 {
3295         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
3296         u64 end = start + PAGE_CACHE_SIZE - 1;
3297         int ret = 1;
3298
3299         if (test_range_bit(tree, start, end,
3300                            EXTENT_IOBITS, 0, NULL))
3301                 ret = 0;
3302         else {
3303                 if ((mask & GFP_NOFS) == GFP_NOFS)
3304                         mask = GFP_NOFS;
3305                 /*
3306                  * at this point we can safely clear everything except the
3307                  * locked bit and the nodatasum bit
3308                  */
3309                 ret = clear_extent_bit(tree, start, end,
3310                                  ~(EXTENT_LOCKED | EXTENT_NODATASUM),
3311                                  0, 0, NULL, mask);
3312
3313                 /* if clear_extent_bit failed for ENOMEM reasons,
3314                  * we can't allow the release to continue.
3315                  */
3316                 if (ret < 0)
3317                         ret = 0;
3318                 else
3319                         ret = 1;
3320         }
3321         return ret;
3322 }
3323
3324 /*
3325  * a helper for releasepage.  As long as there are no locked extents
3326  * in the range corresponding to the page, both state records and extent
3327  * map records are removed
3328  */
3329 int try_release_extent_mapping(struct extent_map_tree *map,
3330                                struct extent_io_tree *tree, struct page *page,
3331                                gfp_t mask)
3332 {
3333         struct extent_map *em;
3334         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
3335         u64 end = start + PAGE_CACHE_SIZE - 1;
3336
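        /*
         * Only bother dropping extent maps when the caller may sleep and
         * the file is reasonably large; otherwise just fall through to the
         * extent state check below.
         */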
3337         if ((mask & __GFP_WAIT) &&
3338             page->mapping->host->i_size > 16 * 1024 * 1024) {
3339                 u64 len;
3340                 while (start <= end) {
3341                         len = end - start + 1;
3342                         write_lock(&map->lock);
3343                         em = lookup_extent_mapping(map, start, len);
3344                         if (!em) {
3345                                 write_unlock(&map->lock);
3346                                 break;
3347                         }
3348                         if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
3349                             em->start != start) {
3350                                 write_unlock(&map->lock);
3351                                 free_extent_map(em);
3352                                 break;
3353                         }
3354                         if (!test_range_bit(tree, em->start,
3355                                             extent_map_end(em) - 1,
3356                                             EXTENT_LOCKED | EXTENT_WRITEBACK,
3357                                             0, NULL)) {
3358                                 remove_extent_mapping(map, em);
3359                                 /* once for the rb tree */
3360                                 free_extent_map(em);
3361                         }
3362                         start = extent_map_end(em);
3363                         write_unlock(&map->lock);
3364
3365                         /* once for us */
3366                         free_extent_map(em);
3367                 }
3368         }
3369         return try_release_extent_state(map, tree, page, mask);
3370 }
3371
3372 /*
3373  * helper function for fiemap, which doesn't want to see any holes.
3374  * This maps until we find something past 'last'
3375  */
3376 static struct extent_map *get_extent_skip_holes(struct inode *inode,
3377                                                 u64 offset,
3378                                                 u64 last,
3379                                                 get_extent_t *get_extent)
3380 {
3381         u64 sectorsize = BTRFS_I(inode)->root->sectorsize;
3382         struct extent_map *em;
3383         u64 len;
3384
3385         if (offset >= last)
3386                 return NULL;
3387
3388         while (1) {
3389                 len = last - offset;
3390                 if (len == 0)
3391                         break;
3392                 len = (len + sectorsize - 1) & ~(sectorsize - 1);
3393                 em = get_extent(inode, NULL, 0, offset, len, 0);
3394                 if (IS_ERR_OR_NULL(em))
3395                         return em;
3396
3397                 /* if this isn't a hole return it */
3398                 if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags) &&
3399                     em->block_start != EXTENT_MAP_HOLE) {
3400                         return em;
3401                 }
3402
3403                 /* this is a hole, advance to the next extent */
3404                 offset = extent_map_end(em);
3405                 free_extent_map(em);
3406                 if (offset >= last)
3407                         break;
3408         }
3409         return NULL;
3410 }
3411
3412 int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3413                 __u64 start, __u64 len, get_extent_t *get_extent)
3414 {
3415         int ret = 0;
3416         u64 off = start;
3417         u64 max = start + len;
3418         u32 flags = 0;
3419         u32 found_type;
3420         u64 last;
3421         u64 last_for_get_extent = 0;
3422         u64 disko = 0;
3423         u64 isize = i_size_read(inode);
3424         struct btrfs_key found_key;
3425         struct extent_map *em = NULL;
3426         struct extent_state *cached_state = NULL;
3427         struct btrfs_path *path;
3428         struct btrfs_file_extent_item *item;
3429         int end = 0;
3430         u64 em_start = 0;
3431         u64 em_len = 0;
3432         u64 em_end = 0;
3433         unsigned long emflags;
3434
3435         if (len == 0)
3436                 return -EINVAL;
3437
3438         path = btrfs_alloc_path();
3439         if (!path)
3440                 return -ENOMEM;
3441         path->leave_spinning = 1;
3442
3443         start = ALIGN(start, BTRFS_I(inode)->root->sectorsize);
3444         len = ALIGN(len, BTRFS_I(inode)->root->sectorsize);
3445
3446         /*
3447          * lookup the last file extent.  We're not using i_size here
3448          * because there might be preallocation past i_size
3449          */
3450         ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root,
3451                                        path, btrfs_ino(inode), -1, 0);
3452         if (ret < 0) {
3453                 btrfs_free_path(path);
3454                 return ret;
3455         }
3456         WARN_ON(!ret);
3457         path->slots[0]--;
3458         item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3459                               struct btrfs_file_extent_item);
3460         btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
3461         found_type = btrfs_key_type(&found_key);
3462
3463         /* No extents, but there might be delalloc bits */
3464         if (found_key.objectid != btrfs_ino(inode) ||
3465             found_type != BTRFS_EXTENT_DATA_KEY) {
3466                 /* have to trust i_size as the end */
3467                 last = (u64)-1;
3468                 last_for_get_extent = isize;
3469         } else {
3470                 /*
3471                  * remember the start of the last extent.  There are a
3472                  * bunch of different factors that go into the length of the
3473                  * extent, so it's much less complex to remember where it started
3474                  */
3475                 last = found_key.offset;
3476                 last_for_get_extent = last + 1;
3477         }
3478         btrfs_free_path(path);
3479
3480         /*
3481          * we might have some extents allocated but more delalloc past those
3482          * extents.  So, we trust isize unless the start of the last extent is
3483          * beyond isize
3484          */
3485         if (last < isize) {
3486                 last = (u64)-1;
3487                 last_for_get_extent = isize;
3488         }
3489
3490         lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
3491                          &cached_state, GFP_NOFS);
3492
3493         em = get_extent_skip_holes(inode, start, last_for_get_extent,
3494                                    get_extent);
3495         if (!em)
3496                 goto out;
3497         if (IS_ERR(em)) {
3498                 ret = PTR_ERR(em);
3499                 goto out;
3500         }
3501
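        /*
         * Walk forward from 'off' towards 'max', reporting one extent per
         * iteration.  get_extent_skip_holes has already skipped any leading
         * hole, and is called again at the bottom of the loop to look ahead
         * and decide whether the extent just found is the last one.
         */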
3502         while (!end) {
3503                 u64 offset_in_extent;
3504
3505                 /* break if the extent we found is outside the range */
3506                 if (em->start >= max || extent_map_end(em) < off)
3507                         break;
3508
3509                 /*
3510                  * get_extent may return an extent that starts before our
3511                  * requested range.  We have to make sure the ranges
3512                  * we return to fiemap always move forward and don't
3513                  * overlap, so adjust the offsets here
3514                  */
3515                 em_start = max(em->start, off);
3516
3517                 /*
3518                  * record the offset from the start of the extent
3519                  * for adjusting the disk offset below
3520                  */
3521                 offset_in_extent = em_start - em->start;
3522                 em_end = extent_map_end(em);
3523                 em_len = em_end - em_start;
3524                 emflags = em->flags;
3525                 disko = 0;
3526                 flags = 0;
3527
3528                 /*
3529                  * bump off for our next call to get_extent
3530                  */
3531                 off = extent_map_end(em);
3532                 if (off >= max)
3533                         end = 1;
3534
3535                 if (em->block_start == EXTENT_MAP_LAST_BYTE) {
3536                         end = 1;
3537                         flags |= FIEMAP_EXTENT_LAST;
3538                 } else if (em->block_start == EXTENT_MAP_INLINE) {
3539                         flags |= (FIEMAP_EXTENT_DATA_INLINE |
3540                                   FIEMAP_EXTENT_NOT_ALIGNED);
3541                 } else if (em->block_start == EXTENT_MAP_DELALLOC) {
3542                         flags |= (FIEMAP_EXTENT_DELALLOC |
3543                                   FIEMAP_EXTENT_UNKNOWN);
3544                 } else {
3545                         disko = em->block_start + offset_in_extent;
3546                 }
3547                 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
3548                         flags |= FIEMAP_EXTENT_ENCODED;
3549
3550                 free_extent_map(em);
3551                 em = NULL;
3552                 if ((em_start >= last) || em_len == (u64)-1 ||
3553                    (last == (u64)-1 && isize <= em_end)) {
3554                         flags |= FIEMAP_EXTENT_LAST;
3555                         end = 1;
3556                 }
3557
3558                 /* now scan forward to see if this is really the last extent. */
3559                 em = get_extent_skip_holes(inode, off, last_for_get_extent,
3560                                            get_extent);
3561                 if (IS_ERR(em)) {
3562                         ret = PTR_ERR(em);
3563                         goto out;
3564                 }
3565                 if (!em) {
3566                         flags |= FIEMAP_EXTENT_LAST;
3567                         end = 1;
3568                 }
3569                 ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
3570                                               em_len, flags);
3571                 if (ret)
3572                         goto out_free;
3573         }
3574 out_free:
3575         free_extent_map(em);
3576 out:
3577         unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len,
3578                              &cached_state, GFP_NOFS);
3579         return ret;
3580 }
3581
3582 inline struct page *extent_buffer_page(struct extent_buffer *eb,
3583                                               unsigned long i)
3584 {
3585         struct page *p;
3586         struct address_space *mapping;
3587
3588         if (i == 0)
3589                 return eb->first_page;
3590         i += eb->start >> PAGE_CACHE_SHIFT;
3591         mapping = eb->first_page->mapping;
3592         if (!mapping)
3593                 return NULL;
3594
3595         /*
3596          * extent_buffer_page is only called after pinning the page
3597          * by increasing the reference count.  So we know the page must
3598          * be in the radix tree.
3599          */
3600         rcu_read_lock();
3601         p = radix_tree_lookup(&mapping->page_tree, i);
3602         rcu_read_unlock();
3603
3604         return p;
3605 }
3606
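/*
 * Number of pages spanned by an extent buffer.  For example, assuming 4K
 * pages, a 16K buffer starting at byte 6144 covers bytes 6144..22527,
 * i.e. pages 1 through 5:
 *
 *	((6144 + 16384 + 4095) >> 12) - (6144 >> 12) = 6 - 1 = 5
 */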
3607 inline unsigned long num_extent_pages(u64 start, u64 len)
3608 {
3609         return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
3610                 (start >> PAGE_CACHE_SHIFT);
3611 }
3612
3613 static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
3614                                                    u64 start,
3615                                                    unsigned long len,
3616                                                    gfp_t mask)
3617 {
3618         struct extent_buffer *eb = NULL;
3619 #if LEAK_DEBUG
3620         unsigned long flags;
3621 #endif
3622
3623         eb = kmem_cache_zalloc(extent_buffer_cache, mask);
3624         if (eb == NULL)
3625                 return NULL;
3626         eb->start = start;
3627         eb->len = len;
3628         rwlock_init(&eb->lock);
3629         atomic_set(&eb->write_locks, 0);
3630         atomic_set(&eb->read_locks, 0);
3631         atomic_set(&eb->blocking_readers, 0);
3632         atomic_set(&eb->blocking_writers, 0);
3633         atomic_set(&eb->spinning_readers, 0);
3634         atomic_set(&eb->spinning_writers, 0);
3635         eb->lock_nested = 0;
3636         init_waitqueue_head(&eb->write_lock_wq);
3637         init_waitqueue_head(&eb->read_lock_wq);
3638
3639 #if LEAK_DEBUG
3640         spin_lock_irqsave(&leak_lock, flags);
3641         list_add(&eb->leak_list, &buffers);
3642         spin_unlock_irqrestore(&leak_lock, flags);
3643 #endif
3644         atomic_set(&eb->refs, 1);
3645
3646         return eb;
3647 }
3648
3649 static void __free_extent_buffer(struct extent_buffer *eb)
3650 {
3651 #if LEAK_DEBUG
3652         unsigned long flags;
3653         spin_lock_irqsave(&leak_lock, flags);
3654         list_del(&eb->leak_list);
3655         spin_unlock_irqrestore(&leak_lock, flags);
3656 #endif
3657         kmem_cache_free(extent_buffer_cache, eb);
3658 }
3659
3660 /*
3661  * Helper for releasing extent buffer page.
3662  */
3663 static void btrfs_release_extent_buffer_page(struct extent_buffer *eb,
3664                                                 unsigned long start_idx)
3665 {
3666         unsigned long index;
3667         struct page *page;
3668
3669         if (!eb->first_page)
3670                 return;
3671
3672         index = num_extent_pages(eb->start, eb->len);
3673         if (start_idx >= index)
3674                 return;
3675
3676         do {
3677                 index--;
3678                 page = extent_buffer_page(eb, index);
3679                 if (page)
3680                         page_cache_release(page);
3681         } while (index != start_idx);
3682 }
3683
3684 /*
3685  * Helper for releasing the extent buffer.
3686  */
3687 static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
3688 {
3689         btrfs_release_extent_buffer_page(eb, 0);
3690         __free_extent_buffer(eb);
3691 }
3692
3693 struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
3694                                           u64 start, unsigned long len,
3695                                           struct page *page0)
3696 {
3697         unsigned long num_pages = num_extent_pages(start, len);
3698         unsigned long i;
3699         unsigned long index = start >> PAGE_CACHE_SHIFT;
3700         struct extent_buffer *eb;
3701         struct extent_buffer *exists = NULL;
3702         struct page *p;
3703         struct address_space *mapping = tree->mapping;
3704         int uptodate = 1;
3705         int ret;
3706
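        /*
         * Fast path: the buffer may already be cached in the radix tree.
         * If so, grab a reference under RCU and return it.
         */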
3707         rcu_read_lock();
3708         eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
3709         if (eb && atomic_inc_not_zero(&eb->refs)) {
3710                 rcu_read_unlock();
3711                 mark_page_accessed(eb->first_page);
3712                 return eb;
3713         }
3714         rcu_read_unlock();
3715
3716         eb = __alloc_extent_buffer(tree, start, len, GFP_NOFS);
3717         if (!eb)
3718                 return NULL;
3719
3720         if (page0) {
3721                 eb->first_page = page0;
3722                 i = 1;
3723                 index++;
3724                 page_cache_get(page0);
3725                 mark_page_accessed(page0);
3726                 set_page_extent_mapped(page0);
3727                 set_page_extent_head(page0, len);
3728                 uptodate = PageUptodate(page0);
3729         } else {
3730                 i = 0;
3731         }
3732         for (; i < num_pages; i++, index++) {
3733                 p = find_or_create_page(mapping, index, GFP_NOFS);
3734                 if (!p) {
3735                         WARN_ON(1);
3736                         goto free_eb;
3737                 }
3738                 set_page_extent_mapped(p);
3739                 mark_page_accessed(p);
3740                 if (i == 0) {
3741                         eb->first_page = p;
3742                         set_page_extent_head(p, len);
3743                 } else {
3744                         set_page_private(p, EXTENT_PAGE_PRIVATE);
3745                 }
3746                 if (!PageUptodate(p))
3747                         uptodate = 0;
3748
3749                 /*
3750                  * see below about how we avoid a nasty race with release page
3751                  * and why we unlock later
3752                  */
3753                 if (i != 0)
3754                         unlock_page(p);
3755         }
3756         if (uptodate)
3757                 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3758
3759         ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
3760         if (ret)
3761                 goto free_eb;
3762
3763         spin_lock(&tree->buffer_lock);
3764         ret = radix_tree_insert(&tree->buffer, start >> PAGE_CACHE_SHIFT, eb);
3765         if (ret == -EEXIST) {
3766                 exists = radix_tree_lookup(&tree->buffer,
3767                                                 start >> PAGE_CACHE_SHIFT);
3768                 /* add one reference for the caller */
3769                 atomic_inc(&exists->refs);
3770                 spin_unlock(&tree->buffer_lock);
3771                 radix_tree_preload_end();
3772                 goto free_eb;
3773         }
3774         /* add one reference for the tree */
3775         atomic_inc(&eb->refs);
3776         spin_unlock(&tree->buffer_lock);
3777         radix_tree_preload_end();
3778
3779         /*
3780          * there is a race where release page may have
3781          * tried to find this extent buffer in the radix
3782          * but failed.  It will tell the VM it is safe to
3783          * reclaim the page, and it will clear the page private bit.
3784          * We must make sure to set the page private bit properly
3785          * after the extent buffer is in the radix tree so
3786          * it doesn't get lost
3787          */
3788         set_page_extent_mapped(eb->first_page);
3789         set_page_extent_head(eb->first_page, eb->len);
3790         if (!page0)
3791                 unlock_page(eb->first_page);
3792         return eb;
3793
3794 free_eb:
3795         if (eb->first_page && !page0)
3796                 unlock_page(eb->first_page);
3797
3798         if (!atomic_dec_and_test(&eb->refs))
3799                 return exists;
3800         btrfs_release_extent_buffer(eb);
3801         return exists;
3802 }
3803
3804 struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
3805                                          u64 start, unsigned long len)
3806 {
3807         struct extent_buffer *eb;
3808
3809         rcu_read_lock();
3810         eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
3811         if (eb && atomic_inc_not_zero(&eb->refs)) {
3812                 rcu_read_unlock();
3813                 mark_page_accessed(eb->first_page);
3814                 return eb;
3815         }
3816         rcu_read_unlock();
3817
3818         return NULL;
3819 }
3820
3821 void free_extent_buffer(struct extent_buffer *eb)
3822 {
3823         if (!eb)
3824                 return;
3825
3826         if (!atomic_dec_and_test(&eb->refs))
3827                 return;
3828
3829         WARN_ON(1);
3830 }
3831
3832 void clear_extent_buffer_dirty(struct extent_io_tree *tree,
3833                               struct extent_buffer *eb)
3834 {
3835         unsigned long i;
3836         unsigned long num_pages;
3837         struct page *page;
3838
3839         num_pages = num_extent_pages(eb->start, eb->len);
3840
3841         for (i = 0; i < num_pages; i++) {
3842                 page = extent_buffer_page(eb, i);
3843                 if (!PageDirty(page))
3844                         continue;
3845
3846                 lock_page(page);
3847                 WARN_ON(!PagePrivate(page));
3848
3849                 set_page_extent_mapped(page);
3850                 if (i == 0)
3851                         set_page_extent_head(page, eb->len);
3852
3853                 clear_page_dirty_for_io(page);
3854                 spin_lock_irq(&page->mapping->tree_lock);
3855                 if (!PageDirty(page)) {
3856                         radix_tree_tag_clear(&page->mapping->page_tree,
3857                                                 page_index(page),
3858                                                 PAGECACHE_TAG_DIRTY);
3859                 }
3860                 spin_unlock_irq(&page->mapping->tree_lock);
3861                 ClearPageError(page);
3862                 unlock_page(page);
3863         }
3864 }
3865
3866 int set_extent_buffer_dirty(struct extent_io_tree *tree,
3867                              struct extent_buffer *eb)
3868 {
3869         unsigned long i;
3870         unsigned long num_pages;
3871         int was_dirty = 0;
3872
3873         was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
3874         num_pages = num_extent_pages(eb->start, eb->len);
3875         for (i = 0; i < num_pages; i++)
3876                 __set_page_dirty_nobuffers(extent_buffer_page(eb, i));
3877         return was_dirty;
3878 }
3879
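/*
 * Returns 1 when the buffer does not line up exactly with page boundaries,
 * i.e. when it shares a page with other data; callers then consult the io
 * tree's per-range uptodate bits instead of relying on page flags alone.
 */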
3880 static int __eb_straddles_pages(u64 start, u64 len)
3881 {
3882         if (len < PAGE_CACHE_SIZE)
3883                 return 1;
3884         if (start & (PAGE_CACHE_SIZE - 1))
3885                 return 1;
3886         if ((start + len) & (PAGE_CACHE_SIZE - 1))
3887                 return 1;
3888         return 0;
3889 }
3890
3891 static int eb_straddles_pages(struct extent_buffer *eb)
3892 {
3893         return __eb_straddles_pages(eb->start, eb->len);
3894 }
3895
3896 int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
3897                                 struct extent_buffer *eb,
3898                                 struct extent_state **cached_state)
3899 {
3900         unsigned long i;
3901         struct page *page;
3902         unsigned long num_pages;
3903
3904         num_pages = num_extent_pages(eb->start, eb->len);
3905         clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3906
3907         clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
3908                               cached_state, GFP_NOFS);
3909
3910         for (i = 0; i < num_pages; i++) {
3911                 page = extent_buffer_page(eb, i);
3912                 if (page)
3913                         ClearPageUptodate(page);
3914         }
3915         return 0;
3916 }
3917
3918 int set_extent_buffer_uptodate(struct extent_io_tree *tree,
3919                                 struct extent_buffer *eb)
3920 {
3921         unsigned long i;
3922         struct page *page;
3923         unsigned long num_pages;
3924
3925         num_pages = num_extent_pages(eb->start, eb->len);
3926
3927         if (eb_straddles_pages(eb)) {
3928                 set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
3929                                     NULL, GFP_NOFS);
3930         }
3931         for (i = 0; i < num_pages; i++) {
3932                 page = extent_buffer_page(eb, i);
3933                 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
3934                     ((i == num_pages - 1) &&
3935                      ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
3936                         check_page_uptodate(tree, page);
3937                         continue;
3938                 }
3939                 SetPageUptodate(page);
3940         }
3941         return 0;
3942 }
3943
3944 int extent_range_uptodate(struct extent_io_tree *tree,
3945                           u64 start, u64 end)
3946 {
3947         struct page *page;
3948         int ret;
3949         int pg_uptodate = 1;
3950         int uptodate;
3951         unsigned long index;
3952
3953         if (__eb_straddles_pages(start, end - start + 1)) {
3954                 ret = test_range_bit(tree, start, end,
3955                                      EXTENT_UPTODATE, 1, NULL);
3956                 if (ret)
3957                         return 1;
3958         }
3959         while (start <= end) {
3960                 index = start >> PAGE_CACHE_SHIFT;
3961                 page = find_get_page(tree->mapping, index);
3962                 if (!page)
3963                         return 1;
3964                 uptodate = PageUptodate(page);
3965                 page_cache_release(page);
3966                 if (!uptodate) {
3967                         pg_uptodate = 0;
3968                         break;
3969                 }
3970                 start += PAGE_CACHE_SIZE;
3971         }
3972         return pg_uptodate;
3973 }
3974
3975 int extent_buffer_uptodate(struct extent_io_tree *tree,
3976                            struct extent_buffer *eb,
3977                            struct extent_state *cached_state)
3978 {
3979         int ret = 0;
3980         unsigned long num_pages;
3981         unsigned long i;
3982         struct page *page;
3983         int pg_uptodate = 1;
3984
3985         if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
3986                 return 1;
3987
3988         if (eb_straddles_pages(eb)) {
3989                 ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
3990                                    EXTENT_UPTODATE, 1, cached_state);
3991                 if (ret)
3992                         return ret;
3993         }
3994
3995         num_pages = num_extent_pages(eb->start, eb->len);
3996         for (i = 0; i < num_pages; i++) {
3997                 page = extent_buffer_page(eb, i);
3998                 if (!PageUptodate(page)) {
3999                         pg_uptodate = 0;
4000                         break;
4001                 }
4002         }
4003         return pg_uptodate;
4004 }
4005
4006 int read_extent_buffer_pages(struct extent_io_tree *tree,
4007                              struct extent_buffer *eb, u64 start, int wait,
4008                              get_extent_t *get_extent, int mirror_num)
4009 {
4010         unsigned long i;
4011         unsigned long start_i;
4012         struct page *page;
4013         int err;
4014         int ret = 0;
4015         int locked_pages = 0;
4016         int all_uptodate = 1;
4017         int inc_all_pages = 0;
4018         unsigned long num_pages;
4019         struct bio *bio = NULL;
4020         unsigned long bio_flags = 0;
4021
4022         if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
4023                 return 0;
4024
4025         if (eb_straddles_pages(eb)) {
4026                 if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
4027                                    EXTENT_UPTODATE, 1, NULL)) {
4028                         return 0;
4029                 }
4030         }
4031
4032         if (start) {
4033                 WARN_ON(start < eb->start);
4034                 start_i = (start >> PAGE_CACHE_SHIFT) -
4035                         (eb->start >> PAGE_CACHE_SHIFT);
4036         } else {
4037                 start_i = 0;
4038         }
4039
4040         num_pages = num_extent_pages(eb->start, eb->len);
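        /*
         * First pass: lock every page in the buffer (trylock only for
         * WAIT_NONE) and note whether they are all already uptodate.
         */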
4041         for (i = start_i; i < num_pages; i++) {
4042                 page = extent_buffer_page(eb, i);
4043                 if (wait == WAIT_NONE) {
4044                         if (!trylock_page(page))
4045                                 goto unlock_exit;
4046                 } else {
4047                         lock_page(page);
4048                 }
4049                 locked_pages++;
4050                 if (!PageUptodate(page))
4051                         all_uptodate = 0;
4052         }
4053         if (all_uptodate) {
4054                 if (start_i == 0)
4055                         set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4056                 goto unlock_exit;
4057         }
4058
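        /*
         * Second pass: kick off reads for the pages that still need them,
         * chaining them into a single bio where possible.  Pages that are
         * already uptodate are just unlocked again.
         */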
4059         for (i = start_i; i < num_pages; i++) {
4060                 page = extent_buffer_page(eb, i);
4061
4062                 WARN_ON(!PagePrivate(page));
4063
4064                 set_page_extent_mapped(page);
4065                 if (i == 0)
4066                         set_page_extent_head(page, eb->len);
4067
4068                 if (inc_all_pages)
4069                         page_cache_get(page);
4070                 if (!PageUptodate(page)) {
4071                         if (start_i == 0)
4072                                 inc_all_pages = 1;
4073                         ClearPageError(page);
4074                         err = __extent_read_full_page(tree, page,
4075                                                       get_extent, &bio,
4076                                                       mirror_num, &bio_flags);
4077                         if (err)
4078                                 ret = err;
4079                 } else {
4080                         unlock_page(page);
4081                 }
4082         }
4083
4084         if (bio) {
4085                 err = submit_one_bio(READ, bio, mirror_num, bio_flags);
4086                 BUG_ON(err < 0);
4087         }
4088
4089         if (ret || wait != WAIT_COMPLETE)
4090                 return ret;
4091
4092         for (i = start_i; i < num_pages; i++) {
4093                 page = extent_buffer_page(eb, i);
4094                 wait_on_page_locked(page);
4095                 if (!PageUptodate(page))
4096                         ret = -EIO;
4097         }
4098
4099         if (!ret)
4100                 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4101         return ret;
4102
4103 unlock_exit:
4104         i = start_i;
4105         while (locked_pages > 0) {
4106                 page = extent_buffer_page(eb, i);
4107                 i++;
4108                 unlock_page(page);
4109                 locked_pages--;
4110         }
4111         return ret;
4112 }
4113
4114 void read_extent_buffer(struct extent_buffer *eb, void *dstv,
4115                         unsigned long start,
4116                         unsigned long len)
4117 {
4118         size_t cur;
4119         size_t offset;
4120         struct page *page;
4121         char *kaddr;
4122         char *dst = (char *)dstv;
4123         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4124         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4125
4126         WARN_ON(start > eb->len);
4127         WARN_ON(start + len > eb->start + eb->len);
4128
4129         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
4130
4131         while (len > 0) {
4132                 page = extent_buffer_page(eb, i);
4133
4134                 cur = min(len, (PAGE_CACHE_SIZE - offset));
4135                 kaddr = page_address(page);
4136                 memcpy(dst, kaddr + offset, cur);
4137
4138                 dst += cur;
4139                 len -= cur;
4140                 offset = 0;
4141                 i++;
4142         }
4143 }
4144
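/*
 * Map min_len bytes of the buffer starting at 'start', provided the range
 * lives entirely within one page; returns -EINVAL when it crosses a page
 * boundary, so callers need a fallback for such ranges.
 */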
4145 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
4146                                unsigned long min_len, char **map,
4147                                unsigned long *map_start,
4148                                unsigned long *map_len)
4149 {
4150         size_t offset = start & (PAGE_CACHE_SIZE - 1);
4151         char *kaddr;
4152         struct page *p;
4153         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4154         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4155         unsigned long end_i = (start_offset + start + min_len - 1) >>
4156                 PAGE_CACHE_SHIFT;
4157
4158         if (i != end_i)
4159                 return -EINVAL;
4160
4161         if (i == 0) {
4162                 offset = start_offset;
4163                 *map_start = 0;
4164         } else {
4165                 offset = 0;
4166                 *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
4167         }
4168
4169         if (start + min_len > eb->len) {
4170                 printk(KERN_ERR "btrfs bad mapping eb start %llu len %lu, "
4171                        "wanted %lu %lu\n", (unsigned long long)eb->start,
4172                        eb->len, start, min_len);
4173                 WARN_ON(1);
4174                 return -EINVAL;
4175         }
4176
4177         p = extent_buffer_page(eb, i);
4178         kaddr = page_address(p);
4179         *map = kaddr + offset;
4180         *map_len = PAGE_CACHE_SIZE - offset;
4181         return 0;
4182 }
4183
4184 int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
4185                           unsigned long start,
4186                           unsigned long len)
4187 {
4188         size_t cur;
4189         size_t offset;
4190         struct page *page;
4191         char *kaddr;
4192         char *ptr = (char *)ptrv;
4193         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4194         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4195         int ret = 0;
4196
4197         WARN_ON(start > eb->len);
4198         WARN_ON(start + len > eb->start + eb->len);
4199
4200         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
4201
4202         while (len > 0) {
4203                 page = extent_buffer_page(eb, i);
4204
4205                 cur = min(len, (PAGE_CACHE_SIZE - offset));
4206
4207                 kaddr = page_address(page);
4208                 ret = memcmp(ptr, kaddr + offset, cur);
4209                 if (ret)
4210                         break;
4211
4212                 ptr += cur;
4213                 len -= cur;
4214                 offset = 0;
4215                 i++;
4216         }
4217         return ret;
4218 }
4219
4220 void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
4221                          unsigned long start, unsigned long len)
4222 {
4223         size_t cur;
4224         size_t offset;
4225         struct page *page;
4226         char *kaddr;
4227         char *src = (char *)srcv;
4228         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4229         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4230
4231         WARN_ON(start > eb->len);
4232         WARN_ON(start + len > eb->start + eb->len);
4233
4234         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
4235
4236         while (len > 0) {
4237                 page = extent_buffer_page(eb, i);
4238                 WARN_ON(!PageUptodate(page));
4239
4240                 cur = min(len, PAGE_CACHE_SIZE - offset);
4241                 kaddr = page_address(page);
4242                 memcpy(kaddr + offset, src, cur);
4243
4244                 src += cur;
4245                 len -= cur;
4246                 offset = 0;
4247                 i++;
4248         }
4249 }
4250
4251 void memset_extent_buffer(struct extent_buffer *eb, char c,
4252                           unsigned long start, unsigned long len)
4253 {
4254         size_t cur;
4255         size_t offset;
4256         struct page *page;
4257         char *kaddr;
4258         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4259         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4260
4261         WARN_ON(start > eb->len);
4262         WARN_ON(start + len > eb->start + eb->len);
4263
4264         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
4265
4266         while (len > 0) {
4267                 page = extent_buffer_page(eb, i);
4268                 WARN_ON(!PageUptodate(page));
4269
4270                 cur = min(len, PAGE_CACHE_SIZE - offset);
4271                 kaddr = page_address(page);
4272                 memset(kaddr + offset, c, cur);
4273
4274                 len -= cur;
4275                 offset = 0;
4276                 i++;
4277         }
4278 }
4279
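/*
 * Copy 'len' bytes from 'src' at src_offset into 'dst' at dst_offset,
 * one destination page at a time; read_extent_buffer() performs the
 * paged read on the source side.
 */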
4280 void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
4281                         unsigned long dst_offset, unsigned long src_offset,
4282                         unsigned long len)
4283 {
4284         u64 dst_len = dst->len;
4285         size_t cur;
4286         size_t offset;
4287         struct page *page;
4288         char *kaddr;
4289         size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
4290         unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
4291
4292         WARN_ON(src->len != dst_len);
4293
4294         offset = (start_offset + dst_offset) &
4295                 ((unsigned long)PAGE_CACHE_SIZE - 1);
4296
4297         while (len > 0) {
4298                 page = extent_buffer_page(dst, i);
4299                 WARN_ON(!PageUptodate(page));
4300
4301                 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
4302
4303                 kaddr = page_address(page);
4304                 read_extent_buffer(src, kaddr + offset, src_offset, cur);
4305
4306                 src_offset += cur;
4307                 len -= cur;
4308                 offset = 0;
4309                 i++;
4310         }
4311 }
4312
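/*
 * Helper for memmove_extent_buffer(): move one chunk of up to a page.
 * When the chunk's source and destination sit in the same page, memmove()
 * already copes with any overlap; across two different pages the bytes
 * are copied back to front.
 */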
4313 static void move_pages(struct page *dst_page, struct page *src_page,
4314                        unsigned long dst_off, unsigned long src_off,
4315                        unsigned long len)
4316 {
4317         char *dst_kaddr = page_address(dst_page);
4318         if (dst_page == src_page) {
4319                 memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
4320         } else {
4321                 char *src_kaddr = page_address(src_page);
4322                 char *p = dst_kaddr + dst_off + len;
4323                 char *s = src_kaddr + src_off + len;
4324
4325                 while (len--)
4326                         *--p = *--s;
4327         }
4328 }
4329
4330 static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
4331 {
4332         unsigned long distance = (src > dst) ? src - dst : dst - src;
4333         return distance < len;
4334 }
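
Two ranges of equal length len starting at src and dst overlap exactly when the distance between their start offsets is smaller than len; when the distance equals len the ranges merely touch. A tiny stand-alone check of that predicate (the same logic restated in user-space C, illustrative only):

#include <assert.h>
#include <stdbool.h>

static bool ranges_overlap(unsigned long src, unsigned long dst,
			   unsigned long len)
{
	unsigned long distance = (src > dst) ? src - dst : dst - src;

	return distance < len;
}

int main(void)
{
	assert(ranges_overlap(10, 14, 8));	/* [10,18) and [14,22) share [14,18) */
	assert(!ranges_overlap(10, 14, 4));	/* [10,14) and [14,18) only touch   */
	return 0;
}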
4335
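/*
 * Helper for memcpy_extent_buffer(): copy one chunk of up to a page.
 * Overlapping source and destination within the same page are a caller
 * bug here, hence the BUG_ON(); overlapping moves go through
 * memmove_extent_buffer() and move_pages() instead.
 */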
4336 static void copy_pages(struct page *dst_page, struct page *src_page,
4337                        unsigned long dst_off, unsigned long src_off,
4338                        unsigned long len)
4339 {
4340         char *dst_kaddr = page_address(dst_page);
4341         char *src_kaddr;
4342
4343         if (dst_page != src_page) {
4344                 src_kaddr = page_address(src_page);
4345         } else {
4346                 src_kaddr = dst_kaddr;
4347                 BUG_ON(areas_overlap(src_off, dst_off, len));
4348         }
4349
4350         memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
4351 }
4352
4353 void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
4354                            unsigned long src_offset, unsigned long len)
4355 {
4356         size_t cur;
4357         size_t dst_off_in_page;
4358         size_t src_off_in_page;
4359         size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
4360         unsigned long dst_i;
4361         unsigned long src_i;
4362
4363         if (src_offset + len > dst->len) {
4364                 printk(KERN_ERR "btrfs memcpy bogus src_offset %lu move "
4365                        "len %lu dst len %lu\n", src_offset, len, dst->len);
4366                 BUG_ON(1);
4367         }
4368         if (dst_offset + len > dst->len) {
4369                 printk(KERN_ERR "btrfs memcpy bogus dst_offset %lu move "
4370                        "len %lu dst len %lu\n", dst_offset, len, dst->len);
4371                 BUG_ON(1);
4372         }
4373
4374         while (len > 0) {
4375                 dst_off_in_page = (start_offset + dst_offset) &
4376                         ((unsigned long)PAGE_CACHE_SIZE - 1);
4377                 src_off_in_page = (start_offset + src_offset) &
4378                         ((unsigned long)PAGE_CACHE_SIZE - 1);
4379
4380                 dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
4381                 src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
4382
4383                 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
4384                                                src_off_in_page));
4385                 cur = min_t(unsigned long, cur,
4386                         (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
4387
4388                 copy_pages(extent_buffer_page(dst, dst_i),
4389                            extent_buffer_page(dst, src_i),
4390                            dst_off_in_page, src_off_in_page, cur);
4391
4392                 src_offset += cur;
4393                 dst_offset += cur;
4394                 len -= cur;
4395         }
4396 }
4397
4398 void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
4399                            unsigned long src_offset, unsigned long len)
4400 {
4401         size_t cur;
4402         size_t dst_off_in_page;
4403         size_t src_off_in_page;
4404         unsigned long dst_end = dst_offset + len - 1;
4405         unsigned long src_end = src_offset + len - 1;
4406         size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
4407         unsigned long dst_i;
4408         unsigned long src_i;
4409
4410         if (src_offset + len > dst->len) {
4411                 printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
4412                        "len %lu dst len %lu\n", src_offset, len, dst->len);
4413                 BUG_ON(1);
4414         }
4415         if (dst_offset + len > dst->len) {
4416                 printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
4417                        "len %lu dst len %lu\n", dst_offset, len, dst->len);
4418                 BUG_ON(1);
4419         }
4420         if (!areas_overlap(src_offset, dst_offset, len)) {
4421                 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
4422                 return;
4423         }
4424         while (len > 0) {
4425                 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
4426                 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
4427
4428                 dst_off_in_page = (start_offset + dst_end) &
4429                         ((unsigned long)PAGE_CACHE_SIZE - 1);
4430                 src_off_in_page = (start_offset + src_end) &
4431                         ((unsigned long)PAGE_CACHE_SIZE - 1);
4432
4433                 cur = min_t(unsigned long, len, src_off_in_page + 1);
4434                 cur = min(cur, dst_off_in_page + 1);
4435                 move_pages(extent_buffer_page(dst, dst_i),
4436                            extent_buffer_page(dst, src_i),
4437                            dst_off_in_page - cur + 1,
4438                            src_off_in_page - cur + 1, cur);
4439
4440                 dst_end -= cur;
4441                 src_end -= cur;
4442                 len -= cur;
4443         }
4444 }
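
The overlapping case above walks from the last byte downwards (dst_end/src_end) because, when the destination of an overlapping move sits above the source, a front-to-back copy would overwrite source bytes before they have been read. A flat-buffer sketch of that rule, illustrative only and not the paged kernel version:

#include <stddef.h>

/* Overlap-aware move inside one flat buffer: when the destination starts
 * above the source, copy the tail first so no source byte is clobbered
 * before it has been read; otherwise a plain forward copy is safe. */
static void overlap_safe_move(char *buf, size_t dst_off, size_t src_off,
			      size_t len)
{
	size_t i;

	if (dst_off <= src_off) {
		for (i = 0; i < len; i++)
			buf[dst_off + i] = buf[src_off + i];
	} else {
		while (len--)
			buf[dst_off + len] = buf[src_off + len];
	}
}

For example, moving the first four bytes of "abcdef" up by two this way yields "ababcd", whereas a naive front-to-back copy would produce "ababab".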
4445
4446 static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
4447 {
4448         struct extent_buffer *eb =
4449                         container_of(head, struct extent_buffer, rcu_head);
4450
4451         btrfs_release_extent_buffer(eb);
4452 }
4453
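/*
 * Try to free the extent buffer registered for the range backing 'page'.
 * Returns 1 when no buffer is registered, or when the buffer is clean,
 * its reference count was exactly one and it has been removed from the
 * radix tree (the actual free is deferred through RCU).  Returns 0 while
 * the buffer is dirty or still referenced.
 */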
4454 int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
4455 {
4456         u64 start = page_offset(page);
4457         struct extent_buffer *eb;
4458         int ret = 1;
4459
4460         spin_lock(&tree->buffer_lock);
4461         eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
4462         if (!eb) {
4463                 spin_unlock(&tree->buffer_lock);
4464                 return ret;
4465         }
4466
4467         if (test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
4468                 ret = 0;
4469                 goto out;
4470         }
4471
4472         /*
4473          * Atomically drop @eb->refs from 1 to 0 so the buffer can be
4474          * released below; if anyone else still holds a reference, back off.
4475          */
4476         if (atomic_cmpxchg(&eb->refs, 1, 0) != 1) {
4477                 ret = 0;
4478                 goto out;
4479         }
4480
4481         radix_tree_delete(&tree->buffer, start >> PAGE_CACHE_SHIFT);
4482 out:
4483         spin_unlock(&tree->buffer_lock);
4484
4485         /* at this point we can safely release the extent buffer */
4486         if (atomic_read(&eb->refs) == 0)
4487                 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
4488         return ret;
4489 }
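
The one subtle step above is atomic_cmpxchg(&eb->refs, 1, 0): the buffer is torn down only if this caller observes the reference count at exactly one and wins the race to drop it to zero; any other holder makes the exchange fail and the buffer is left in place. A stand-alone restatement of that last-reference test using C11 atomics (illustrative only, not kernel code):

#include <stdatomic.h>
#include <stdbool.h>

/* Succeeds (and lets the caller free the object) only when the caller saw
 * the last remaining reference; any concurrent holder makes this fail. */
static bool put_if_last(atomic_int *refs)
{
	int expected = 1;

	return atomic_compare_exchange_strong(refs, &expected, 0);
}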