/* fs/btrfs/extent_io.c */
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/prefetch.h>
#include <linux/cleancache.h>
#include "extent_io.h"
#include "extent_map.h"
#include "compat.h"
#include "ctree.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "check-integrity.h"

static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;

static LIST_HEAD(buffers);
static LIST_HEAD(states);

#define LEAK_DEBUG 0
#if LEAK_DEBUG
static DEFINE_SPINLOCK(leak_lock);
#endif

#define BUFFER_LRU_MAX 64

struct tree_entry {
	u64 start;
	u64 end;
	struct rb_node rb_node;
};

struct extent_page_data {
	struct bio *bio;
	struct extent_io_tree *tree;
	get_extent_t *get_extent;

	/* tells writepage not to lock the state bits for this range
	 * it still does the unlocking
	 */
	unsigned int extent_locked:1;

	/* tells the submit_bio code to use a WRITE_SYNC */
	unsigned int sync_io:1;
};

static inline struct btrfs_fs_info *
tree_fs_info(struct extent_io_tree *tree)
{
	return btrfs_sb(tree->mapping->host->i_sb);
}

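/*
 * create the slab caches used for extent_state and extent_buffer
 * objects; called once when the btrfs module is initialized
 */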
int __init extent_io_init(void)
{
	extent_state_cache = kmem_cache_create("extent_state",
			sizeof(struct extent_state), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!extent_state_cache)
		return -ENOMEM;

	extent_buffer_cache = kmem_cache_create("extent_buffers",
			sizeof(struct extent_buffer), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!extent_buffer_cache)
		goto free_state_cache;
	return 0;

free_state_cache:
	kmem_cache_destroy(extent_state_cache);
	return -ENOMEM;
}

void extent_io_exit(void)
{
	struct extent_state *state;
	struct extent_buffer *eb;

	while (!list_empty(&states)) {
		state = list_entry(states.next, struct extent_state, leak_list);
		printk(KERN_ERR "btrfs state leak: start %llu end %llu "
		       "state %lu in tree %p refs %d\n",
		       (unsigned long long)state->start,
		       (unsigned long long)state->end,
		       state->state, state->tree, atomic_read(&state->refs));
		list_del(&state->leak_list);
		kmem_cache_free(extent_state_cache, state);

	}

	while (!list_empty(&buffers)) {
		eb = list_entry(buffers.next, struct extent_buffer, leak_list);
		printk(KERN_ERR "btrfs buffer leak start %llu len %lu "
		       "refs %d\n", (unsigned long long)eb->start,
		       eb->len, atomic_read(&eb->refs));
		list_del(&eb->leak_list);
		kmem_cache_free(extent_buffer_cache, eb);
	}
	if (extent_state_cache)
		kmem_cache_destroy(extent_state_cache);
	if (extent_buffer_cache)
		kmem_cache_destroy(extent_buffer_cache);
}

void extent_io_tree_init(struct extent_io_tree *tree,
			 struct address_space *mapping)
{
	tree->state = RB_ROOT;
	INIT_RADIX_TREE(&tree->buffer, GFP_ATOMIC);
	tree->ops = NULL;
	tree->dirty_bytes = 0;
	spin_lock_init(&tree->lock);
	spin_lock_init(&tree->buffer_lock);
	tree->mapping = mapping;
}

static struct extent_state *alloc_extent_state(gfp_t mask)
{
	struct extent_state *state;
#if LEAK_DEBUG
	unsigned long flags;
#endif

	state = kmem_cache_alloc(extent_state_cache, mask);
	if (!state)
		return state;
	state->state = 0;
	state->private = 0;
	state->tree = NULL;
#if LEAK_DEBUG
	spin_lock_irqsave(&leak_lock, flags);
	list_add(&state->leak_list, &states);
	spin_unlock_irqrestore(&leak_lock, flags);
#endif
	atomic_set(&state->refs, 1);
	init_waitqueue_head(&state->wq);
	return state;
}

void free_extent_state(struct extent_state *state)
{
	if (!state)
		return;
	if (atomic_dec_and_test(&state->refs)) {
#if LEAK_DEBUG
		unsigned long flags;
#endif
		WARN_ON(state->tree);
#if LEAK_DEBUG
		spin_lock_irqsave(&leak_lock, flags);
		list_del(&state->leak_list);
		spin_unlock_irqrestore(&leak_lock, flags);
#endif
		kmem_cache_free(extent_state_cache, state);
	}
}

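/*
 * link 'node' into the rb-tree, keyed by the end offset of each entry.
 * If an existing entry already covers 'offset', that node is returned
 * and the new node is not inserted; otherwise NULL is returned.
 */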
static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset > entry->end)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	entry = rb_entry(node, struct tree_entry, rb_node);
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

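/*
 * search the tree for the entry containing 'offset'.  If there is no
 * exact match, NULL is returned and *prev_ret is set to the first entry
 * that ends at or after 'offset' while *next_ret is set to the last
 * entry that starts at or before it (either may be NULL).
 */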
static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
				      struct rb_node **prev_ret,
				      struct rb_node **next_ret)
{
	struct rb_root *root = &tree->state;
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct tree_entry *entry;
	struct tree_entry *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct tree_entry, rb_node);
		prev = n;
		prev_entry = entry;

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset > entry->end)
			n = n->rb_right;
		else
			return n;
	}

	if (prev_ret) {
		orig_prev = prev;
		while (prev && offset > prev_entry->end) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*prev_ret = prev;
		prev = orig_prev;
	}

	if (next_ret) {
		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*next_ret = prev;
	}
	return NULL;
}

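/*
 * like __etree_search, but when there is no exact match return the
 * closest entry that ends after 'offset' instead of NULL
 */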
static inline struct rb_node *tree_search(struct extent_io_tree *tree,
					  u64 offset)
{
	struct rb_node *prev = NULL;
	struct rb_node *ret;

	ret = __etree_search(tree, offset, &prev, NULL);
	if (!ret)
		return prev;
	return ret;
}

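/*
 * notify the owning inode (via the merge_extent_hook callback, when one
 * is registered) that two adjacent extent states are about to be merged
 */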
static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
		     struct extent_state *other)
{
	if (tree->ops && tree->ops->merge_extent_hook)
		tree->ops->merge_extent_hook(tree->mapping->host, new,
					     other);
}

/*
 * utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree.  Extents with EXTENT_IO in their state field
 * are not merged because the end_io handlers need to be able to do
 * operations on them without sleeping (or doing allocations/splits).
 *
 * This should be called with the tree lock held.
 */
static void merge_state(struct extent_io_tree *tree,
			struct extent_state *state)
{
	struct extent_state *other;
	struct rb_node *other_node;

	if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
		return;

	other_node = rb_prev(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->end == state->start - 1 &&
		    other->state == state->state) {
			merge_cb(tree, state, other);
			state->start = other->start;
			other->tree = NULL;
			rb_erase(&other->rb_node, &tree->state);
			free_extent_state(other);
		}
	}
	other_node = rb_next(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->start == state->end + 1 &&
		    other->state == state->state) {
			merge_cb(tree, state, other);
			state->end = other->end;
			other->tree = NULL;
			rb_erase(&other->rb_node, &tree->state);
			free_extent_state(other);
		}
	}
}

static void set_state_cb(struct extent_io_tree *tree,
			 struct extent_state *state, int *bits)
{
	if (tree->ops && tree->ops->set_bit_hook)
		tree->ops->set_bit_hook(tree->mapping->host, state, bits);
}

static void clear_state_cb(struct extent_io_tree *tree,
			   struct extent_state *state, int *bits)
{
	if (tree->ops && tree->ops->clear_bit_hook)
		tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
}

static void set_state_bits(struct extent_io_tree *tree,
			   struct extent_state *state, int *bits);

/*
 * insert an extent_state struct into the tree.  'bits' are set on the
 * struct before it is inserted.
 *
 * This may return -EEXIST if the extent is already there, in which case the
 * state struct is freed.
 *
 * The tree lock is not taken internally.  This is a utility function and
 * probably isn't what you want to call (see set/clear_extent_bit).
 */
static int insert_state(struct extent_io_tree *tree,
			struct extent_state *state, u64 start, u64 end,
			int *bits)
{
	struct rb_node *node;

	if (end < start) {
		printk(KERN_ERR "btrfs end < start %llu %llu\n",
		       (unsigned long long)end,
		       (unsigned long long)start);
		WARN_ON(1);
	}
	state->start = start;
	state->end = end;

	set_state_bits(tree, state, bits);

	node = tree_insert(&tree->state, end, &state->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk(KERN_ERR "btrfs found node %llu %llu on insert of "
		       "%llu %llu\n", (unsigned long long)found->start,
		       (unsigned long long)found->end,
		       (unsigned long long)start, (unsigned long long)end);
		return -EEXIST;
	}
	state->tree = tree;
	merge_state(tree, state);
	return 0;
}

static void split_cb(struct extent_io_tree *tree, struct extent_state *orig,
		     u64 split)
{
	if (tree->ops && tree->ops->split_extent_hook)
		tree->ops->split_extent_hook(tree->mapping->host, orig, split);
}

/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created second half.  'split' indicates an
 * offset inside 'orig' where it should be split.
 *
 * Before calling,
 * the tree has 'orig' at [orig->start, orig->end].  After calling, there
 * are two extent state structs in the tree:
 * prealloc: [orig->start, split - 1]
 * orig: [ split, orig->end ]
 *
 * The tree locks are not taken by this function. They need to be held
 * by the caller.
 */
static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
		       struct extent_state *prealloc, u64 split)
{
	struct rb_node *node;

	split_cb(tree, orig, split);

	prealloc->start = orig->start;
	prealloc->end = split - 1;
	prealloc->state = orig->state;
	orig->start = split;

	node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
	if (node) {
		free_extent_state(prealloc);
		return -EEXIST;
	}
	prealloc->tree = tree;
	return 0;
}

/*
 * utility function to clear some bits in an extent state struct.
 * it will optionally wake up any one waiting on this state (wake == 1).
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree
 */
static int clear_state_bit(struct extent_io_tree *tree,
			   struct extent_state *state,
			   int *bits, int wake)
{
	int bits_to_clear = *bits & ~EXTENT_CTLBITS;
	int ret = state->state & bits_to_clear;

	if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		WARN_ON(range > tree->dirty_bytes);
		tree->dirty_bytes -= range;
	}
	clear_state_cb(tree, state, bits);
	state->state &= ~bits_to_clear;
	if (wake)
		wake_up(&state->wq);
	if (state->state == 0) {
		if (state->tree) {
			rb_erase(&state->rb_node, &tree->state);
			state->tree = NULL;
			free_extent_state(state);
		} else {
			WARN_ON(1);
		}
	} else {
		merge_state(tree, state);
	}
	return ret;
}

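/*
 * helper for callers that preallocate an extent_state before taking the
 * tree lock: reuse 'prealloc' if it is already set, otherwise try an
 * atomic allocation
 */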
static struct extent_state *
alloc_extent_state_atomic(struct extent_state *prealloc)
{
	if (!prealloc)
		prealloc = alloc_extent_state(GFP_ATOMIC);

	return prealloc;
}

void extent_io_tree_panic(struct extent_io_tree *tree, int err)
{
	btrfs_panic(tree_fs_info(tree), err, "Locking error: "
		    "Extent tree was modified by another "
		    "thread while locked.");
}

/*
 * clear some bits on a range in the tree.  This may require splitting
 * or inserting elements in the tree, so the gfp mask is used to
 * indicate which allocations or sleeping are allowed.
 *
 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 * the given range from the tree regardless of state (ie for truncate).
 *
 * the range [start, end] is inclusive.
 *
 * This takes the tree lock, and returns 0 on success and < 0 on error.
 */
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		     int bits, int wake, int delete,
		     struct extent_state **cached_state,
		     gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *cached;
	struct extent_state *prealloc = NULL;
	struct rb_node *next_node;
	struct rb_node *node;
	u64 last_end;
	int err;
	int clear = 0;

	if (delete)
		bits |= ~EXTENT_CTLBITS;
	bits |= EXTENT_FIRST_DELALLOC;

	if (bits & (EXTENT_IOBITS | EXTENT_BOUNDARY))
		clear = 1;
again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	spin_lock(&tree->lock);
	if (cached_state) {
		cached = *cached_state;

		if (clear) {
			*cached_state = NULL;
			cached_state = NULL;
		}

		if (cached && cached->tree && cached->start <= start &&
		    cached->end > start) {
			if (clear)
				atomic_dec(&cached->refs);
			state = cached;
			goto hit_next;
		}
		if (clear)
			free_extent_state(cached);
	}
	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	if (state->start > end)
		goto out;
	WARN_ON(state->end < start);
	last_end = state->end;

	if (state->end < end && !need_resched())
		next_node = rb_next(&state->rb_node);
	else
		next_node = NULL;

	/* the state doesn't have the wanted bits, go ahead */
	if (!(state->state & bits))
		goto next;

	/*
	 *     | ---- desired range ---- |
	 *  | state | or
	 *  | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip
	 * bits on second half.
	 *
	 * If the extent we found extends past our range, we
	 * just split and search again.  It'll get split again
	 * the next time though.
	 *
	 * If the extent we found is inside our range, we clear
	 * the desired bit on it.
	 */

	if (state->start < start) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, start);
		if (err)
			extent_io_tree_panic(tree, err);

		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			clear_state_bit(tree, state, &bits, wake);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and clear the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, end + 1);
		if (err)
			extent_io_tree_panic(tree, err);

		if (wake)
			wake_up(&state->wq);

		clear_state_bit(tree, prealloc, &bits, wake);

		prealloc = NULL;
		goto out;
	}

	clear_state_bit(tree, state, &bits, wake);
next:
	if (last_end == (u64)-1)
		goto out;
	start = last_end + 1;
	if (start <= end && next_node) {
		state = rb_entry(next_node, struct extent_state,
				 rb_node);
		goto hit_next;
	}
	goto search_again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return 0;

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}

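/*
 * drop the tree lock, wait for this extent_state to change, then re-take
 * the lock; the caller must hold a reference on 'state'
 */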
static int wait_on_state(struct extent_io_tree *tree,
			 struct extent_state *state)
		__releases(tree->lock)
		__acquires(tree->lock)
{
	DEFINE_WAIT(wait);
	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
	spin_unlock(&tree->lock);
	schedule();
	spin_lock(&tree->lock);
	finish_wait(&state->wq, &wait);
	return 0;
}

/*
 * waits for one or more bits to clear on a range in the state tree.
 * The range [start, end] is inclusive.
 * The tree lock is taken by this function
 */
int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
{
	struct extent_state *state;
	struct rb_node *node;

	spin_lock(&tree->lock);
again:
	while (1) {
		/*
		 * this search will find all the extents that end after
		 * our range starts
		 */
		node = tree_search(tree, start);
		if (!node)
			break;

		state = rb_entry(node, struct extent_state, rb_node);

		if (state->start > end)
			goto out;

		if (state->state & bits) {
			start = state->start;
			atomic_inc(&state->refs);
			wait_on_state(tree, state);
			free_extent_state(state);
			goto again;
		}
		start = state->end + 1;

		if (start > end)
			break;

		cond_resched_lock(&tree->lock);
	}
out:
	spin_unlock(&tree->lock);
	return 0;
}

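/*
 * set bits on an extent_state struct, updating the per-tree dirty byte
 * count when EXTENT_DIRTY is newly set; control bits in EXTENT_CTLBITS
 * are filtered out before being stored
 */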
static void set_state_bits(struct extent_io_tree *tree,
			   struct extent_state *state,
			   int *bits)
{
	int bits_to_set = *bits & ~EXTENT_CTLBITS;

	set_state_cb(tree, state, bits);
	if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		tree->dirty_bytes += range;
	}
	state->state |= bits_to_set;
}

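/*
 * remember a locked/boundary extent_state in *cached_ptr (taking a
 * reference) so later operations on the same range can skip the tree
 * search; uncache_state drops that reference again
 */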
static void cache_state(struct extent_state *state,
			struct extent_state **cached_ptr)
{
	if (cached_ptr && !(*cached_ptr)) {
		if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY)) {
			*cached_ptr = state;
			atomic_inc(&state->refs);
		}
	}
}

static void uncache_state(struct extent_state **cached_ptr)
{
	if (cached_ptr && (*cached_ptr)) {
		struct extent_state *state = *cached_ptr;
		*cached_ptr = NULL;
		free_extent_state(state);
	}
}

/*
 * set some bits on a range in the tree.  This may require allocations or
 * sleeping, so the gfp mask is used to indicate what is allowed.
 *
 * If any of the exclusive bits are set, this will fail with -EEXIST if some
 * part of the range already has the desired bits set.  The start of the
 * existing range is returned in failed_start in this case.
 *
 * [start, end] is inclusive.  This takes the tree lock.
 */

int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   int bits, int exclusive_bits, u64 *failed_start,
		   struct extent_state **cached_state, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	int err = 0;
	u64 last_start;
	u64 last_end;

	bits |= EXTENT_FIRST_DELALLOC;
again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		BUG_ON(!prealloc);
	}

	spin_lock(&tree->lock);
	if (cached_state && *cached_state) {
		state = *cached_state;
		if (state->start <= start && state->end > start &&
		    state->tree) {
			node = &state->rb_node;
			goto hit_next;
		}
	}
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = insert_state(tree, prealloc, start, end, &bits);
		if (err)
			extent_io_tree_panic(tree, err);

		prealloc = NULL;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		struct rb_node *next_node;
		if (state->state & exclusive_bits) {
			*failed_start = state->start;
			err = -EEXIST;
			goto out;
		}

		set_state_bits(tree, state, &bits);

		cache_state(state, cached_state);
		merge_state(tree, state);
		if (last_end == (u64)-1)
			goto out;

		start = last_end + 1;
		next_node = rb_next(&state->rb_node);
		if (next_node && start < end && prealloc && !need_resched()) {
			state = rb_entry(next_node, struct extent_state,
					 rb_node);
			if (state->start == start)
				goto hit_next;
		}
		goto search_again;
	}

	/*
	 *     | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		if (state->state & exclusive_bits) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, start);
		if (err)
			extent_io_tree_panic(tree, err);

		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set_state_bits(tree, state, &bits);
			cache_state(state, cached_state);
			merge_state(tree, state);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or               | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);

		/*
		 * Avoid to free 'prealloc' if it can be merged with
		 * the later extent.
		 */
		err = insert_state(tree, prealloc, start, this_end,
				   &bits);
		if (err)
			extent_io_tree_panic(tree, err);

		cache_state(prealloc, cached_state);
		prealloc = NULL;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		if (state->state & exclusive_bits) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, end + 1);
		if (err)
			extent_io_tree_panic(tree, err);

		set_state_bits(tree, prealloc, &bits);
		cache_state(prealloc, cached_state);
		merge_state(tree, prealloc);
		prealloc = NULL;
		goto out;
	}

	goto search_again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return err;

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}

/**
 * convert_extent_bit - convert all bits in a given range from one bit to
 *			another
 * @tree:	the io tree to search
 * @start:	the start offset in bytes
 * @end:	the end offset in bytes (inclusive)
 * @bits:	the bits to set in this range
 * @clear_bits:	the bits to clear in this range
 * @mask:	the allocation mask
 *
 * This will go through and set bits for the given range.  If any states exist
 * already in this range they are set with the given bit and cleared of the
 * clear_bits.  This is only meant to be used by things that are mergeable, ie
 * converting from say DELALLOC to DIRTY.  This is not meant to be used with
 * boundary bits like LOCK.
 */
int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       int bits, int clear_bits, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	int err = 0;
	u64 last_start;
	u64 last_end;

again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	spin_lock(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node) {
		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}
		err = insert_state(tree, prealloc, start, end, &bits);
		prealloc = NULL;
		if (err)
			extent_io_tree_panic(tree, err);
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		struct rb_node *next_node;

		set_state_bits(tree, state, &bits);
		clear_state_bit(tree, state, &clear_bits, 0);
		if (last_end == (u64)-1)
			goto out;

		start = last_end + 1;
		next_node = rb_next(&state->rb_node);
		if (next_node && start < end && prealloc && !need_resched()) {
			state = rb_entry(next_node, struct extent_state,
					 rb_node);
			if (state->start == start)
				goto hit_next;
		}
		goto search_again;
	}

	/*
	 *     | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}
		err = split_state(tree, state, prealloc, start);
		if (err)
			extent_io_tree_panic(tree, err);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set_state_bits(tree, state, &bits);
			clear_state_bit(tree, state, &clear_bits, 0);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or               | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;

		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}

		/*
		 * Avoid to free 'prealloc' if it can be merged with
		 * the later extent.
		 */
		err = insert_state(tree, prealloc, start, this_end,
				   &bits);
		if (err)
			extent_io_tree_panic(tree, err);
		prealloc = NULL;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}

		err = split_state(tree, state, prealloc, end + 1);
		if (err)
			extent_io_tree_panic(tree, err);

		set_state_bits(tree, prealloc, &bits);
		clear_state_bit(tree, prealloc, &clear_bits, 0);
		prealloc = NULL;
		goto out;
	}

	goto search_again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return err;

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}

/* wrappers around set/clear extent bit */
int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
			      NULL, mask);
}

int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		    int bits, gfp_t mask)
{
	return set_extent_bit(tree, start, end, bits, 0, NULL,
			      NULL, mask);
}

int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		      int bits, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask);
}

int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
			struct extent_state **cached_state, gfp_t mask)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_UPTODATE,
			      0, NULL, cached_state, mask);
}

int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
		       gfp_t mask)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC |
				EXTENT_DO_ACCOUNTING, 0, 0, NULL, mask);
}

int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
		   gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
			      NULL, mask);
}

int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
			struct extent_state **cached_state, gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0,
			      NULL, cached_state, mask);
}

static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
				 u64 end, struct extent_state **cached_state,
				 gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
				cached_state, mask);
}

/*
 * either insert or lock state struct between start and end use mask to tell
 * us if waiting is desired.
 */
int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		     int bits, struct extent_state **cached_state, gfp_t mask)
{
	int err;
	u64 failed_start;
	while (1) {
		err = set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
				     EXTENT_LOCKED, &failed_start,
				     cached_state, mask);
		if (err == -EEXIST && (mask & __GFP_WAIT)) {
			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
			start = failed_start;
		} else {
			break;
		}
		WARN_ON(start > end);
	}
	return err;
}

int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
{
	return lock_extent_bits(tree, start, end, 0, NULL, mask);
}

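/*
 * non-blocking variant of lock_extent: returns 1 when the whole range was
 * locked, 0 when part of it was already locked (in which case anything we
 * did manage to lock is unlocked again)
 */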
int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
		    gfp_t mask)
{
	int err;
	u64 failed_start;

	err = set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
			     &failed_start, NULL, mask);
	if (err == -EEXIST) {
		if (failed_start > start)
			clear_extent_bit(tree, start, failed_start - 1,
					 EXTENT_LOCKED, 1, 0, NULL, mask);
		return 0;
	}
	return 1;
}

int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
			 struct extent_state **cached, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
				mask);
}

int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
				mask);
}

/*
 * helper function to set both pages and extents in the tree writeback
 */
static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		set_page_writeback(page);
		page_cache_release(page);
		index++;
	}
	return 0;
}

/* find the first state struct with 'bits' set after 'start', and
 * return it.  tree->lock must be held.  NULL will be returned if
 * nothing was found after 'start'
 */
struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
						 u64 start, int bits)
{
	struct rb_node *node;
	struct extent_state *state;

	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->end >= start && (state->state & bits))
			return state;

		node = rb_next(node);
		if (!node)
			break;
	}
out:
	return NULL;
}

/*
 * find the first offset in the io tree with 'bits' set. zero is
 * returned if we find something, and *start_ret and *end_ret are
 * set to reflect the state struct that was found.
 *
 * If nothing was found, 1 is returned, < 0 on error
 */
int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, int bits)
{
	struct extent_state *state;
	int ret = 1;

	spin_lock(&tree->lock);
	state = find_first_extent_bit_state(tree, start, bits);
	if (state) {
		*start_ret = state->start;
		*end_ret = state->end;
		ret = 0;
	}
	spin_unlock(&tree->lock);
	return ret;
}

/*
 * find a contiguous range of bytes in the file marked as delalloc, not
 * more than 'max_bytes'.  start and end are used to return the range,
 *
 * 1 is returned if we find something, 0 if nothing was in the tree
 */
static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
					u64 *start, u64 *end, u64 max_bytes,
					struct extent_state **cached_state)
{
	struct rb_node *node;
	struct extent_state *state;
	u64 cur_start = *start;
	u64 found = 0;
	u64 total_bytes = 0;

	spin_lock(&tree->lock);

	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, cur_start);
	if (!node) {
		if (!found)
			*end = (u64)-1;
		goto out;
	}

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (found && (state->start != cur_start ||
			      (state->state & EXTENT_BOUNDARY))) {
			goto out;
		}
		if (!(state->state & EXTENT_DELALLOC)) {
			if (!found)
				*end = state->end;
			goto out;
		}
		if (!found) {
			*start = state->start;
			*cached_state = state;
			atomic_inc(&state->refs);
		}
		found++;
		*end = state->end;
		cur_start = state->end + 1;
		node = rb_next(node);
		if (!node)
			break;
		total_bytes += state->end - state->start + 1;
		if (total_bytes >= max_bytes)
			break;
	}
out:
	spin_unlock(&tree->lock);
	return found;
}

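/*
 * helper for releasing the page locks taken by lock_delalloc_pages; every
 * page in the byte range except 'locked_page' is unlocked and its
 * reference dropped
 */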
static noinline int __unlock_for_delalloc(struct inode *inode,
					  struct page *locked_page,
					  u64 start, u64 end)
{
	int ret;
	struct page *pages[16];
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	unsigned long nr_pages = end_index - index + 1;
	int i;

	if (index == locked_page->index && end_index == index)
		return 0;

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long, nr_pages,
				     ARRAY_SIZE(pages)), pages);
		for (i = 0; i < ret; i++) {
			if (pages[i] != locked_page)
				unlock_page(pages[i]);
			page_cache_release(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
		cond_resched();
	}
	return 0;
}

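/*
 * lock every page in the delalloc byte range except 'locked_page', which
 * the caller already holds.  Returns -EAGAIN (after unlocking whatever was
 * locked so far) if any page has gone away or is no longer dirty.
 */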
static noinline int lock_delalloc_pages(struct inode *inode,
					struct page *locked_page,
					u64 delalloc_start,
					u64 delalloc_end)
{
	unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT;
	unsigned long start_index = index;
	unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT;
	unsigned long pages_locked = 0;
	struct page *pages[16];
	unsigned long nrpages;
	int ret;
	int i;

	/* the caller is responsible for locking the start index */
	if (index == locked_page->index && index == end_index)
		return 0;

	/* skip the page at the start index */
	nrpages = end_index - index + 1;
	while (nrpages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long,
				     nrpages, ARRAY_SIZE(pages)), pages);
		if (ret == 0) {
			ret = -EAGAIN;
			goto done;
		}
		/* now we have an array of pages, lock them all */
		for (i = 0; i < ret; i++) {
			/*
			 * the caller is taking responsibility for
			 * locked_page
			 */
			if (pages[i] != locked_page) {
				lock_page(pages[i]);
				if (!PageDirty(pages[i]) ||
				    pages[i]->mapping != inode->i_mapping) {
					ret = -EAGAIN;
					unlock_page(pages[i]);
					page_cache_release(pages[i]);
					goto done;
				}
			}
			page_cache_release(pages[i]);
			pages_locked++;
		}
		nrpages -= ret;
		index += ret;
		cond_resched();
	}
	ret = 0;
done:
	if (ret && pages_locked) {
		__unlock_for_delalloc(inode, locked_page,
			      delalloc_start,
			      ((u64)(start_index + pages_locked - 1)) <<
			      PAGE_CACHE_SHIFT);
	}
	return ret;
}

/*
 * find a contiguous range of bytes in the file marked as delalloc, not
 * more than 'max_bytes'.  start and end are used to return the range,
 *
 * 1 is returned if we find something, 0 if nothing was in the tree
 */
static noinline u64 find_lock_delalloc_range(struct inode *inode,
					     struct extent_io_tree *tree,
					     struct page *locked_page,
					     u64 *start, u64 *end,
					     u64 max_bytes)
{
	u64 delalloc_start;
	u64 delalloc_end;
	u64 found;
	struct extent_state *cached_state = NULL;
	int ret;
	int loops = 0;

again:
	/* step one, find a bunch of delalloc bytes starting at start */
	delalloc_start = *start;
	delalloc_end = 0;
	found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
				    max_bytes, &cached_state);
	if (!found || delalloc_end <= *start) {
		*start = delalloc_start;
		*end = delalloc_end;
		free_extent_state(cached_state);
		return found;
	}

	/*
	 * start comes from the offset of locked_page.  We have to lock
	 * pages in order, so we can't process delalloc bytes before
	 * locked_page
	 */
	if (delalloc_start < *start)
		delalloc_start = *start;

	/*
	 * make sure to limit the number of pages we try to lock down
	 * if we're looping.
	 */
	if (delalloc_end + 1 - delalloc_start > max_bytes && loops)
		delalloc_end = delalloc_start + PAGE_CACHE_SIZE - 1;

	/* step two, lock all the pages after the page that has start */
	ret = lock_delalloc_pages(inode, locked_page,
				  delalloc_start, delalloc_end);
	if (ret == -EAGAIN) {
		/* some of the pages are gone, lets avoid looping by
		 * shortening the size of the delalloc range we're searching
		 */
		free_extent_state(cached_state);
		if (!loops) {
			unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1);
			max_bytes = PAGE_CACHE_SIZE - offset;
			loops = 1;
			goto again;
		} else {
			found = 0;
			goto out_failed;
		}
	}
	BUG_ON(ret);

	/* step three, lock the state bits for the whole range */
	lock_extent_bits(tree, delalloc_start, delalloc_end,
			 0, &cached_state, GFP_NOFS);

	/* then test to make sure it is all still delalloc */
	ret = test_range_bit(tree, delalloc_start, delalloc_end,
			     EXTENT_DELALLOC, 1, cached_state);
	if (!ret) {
		unlock_extent_cached(tree, delalloc_start, delalloc_end,
				     &cached_state, GFP_NOFS);
		__unlock_for_delalloc(inode, locked_page,
				      delalloc_start, delalloc_end);
		cond_resched();
		goto again;
	}
	free_extent_state(cached_state);
	*start = delalloc_start;
	*end = delalloc_end;
out_failed:
	return found;
}

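/*
 * clear the given extent bits on a delalloc range and then walk the pages
 * in that range, applying the page-level operations requested in 'op'
 * (clear dirty, set/end writeback, set private2, unlock); 'locked_page'
 * is only released, never modified
 */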
int extent_clear_unlock_delalloc(struct inode *inode,
				 struct extent_io_tree *tree,
				 u64 start, u64 end, struct page *locked_page,
				 unsigned long op)
{
	int ret;
	struct page *pages[16];
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	unsigned long nr_pages = end_index - index + 1;
	int i;
	int clear_bits = 0;

	if (op & EXTENT_CLEAR_UNLOCK)
		clear_bits |= EXTENT_LOCKED;
	if (op & EXTENT_CLEAR_DIRTY)
		clear_bits |= EXTENT_DIRTY;

	if (op & EXTENT_CLEAR_DELALLOC)
		clear_bits |= EXTENT_DELALLOC;

	clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS);
	if (!(op & (EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
		    EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK |
		    EXTENT_SET_PRIVATE2)))
		return 0;

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long,
				     nr_pages, ARRAY_SIZE(pages)), pages);
		for (i = 0; i < ret; i++) {

			if (op & EXTENT_SET_PRIVATE2)
				SetPagePrivate2(pages[i]);

			if (pages[i] == locked_page) {
				page_cache_release(pages[i]);
				continue;
			}
			if (op & EXTENT_CLEAR_DIRTY)
				clear_page_dirty_for_io(pages[i]);
			if (op & EXTENT_SET_WRITEBACK)
				set_page_writeback(pages[i]);
			if (op & EXTENT_END_WRITEBACK)
				end_page_writeback(pages[i]);
			if (op & EXTENT_CLEAR_UNLOCK_PAGE)
				unlock_page(pages[i]);
			page_cache_release(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
		cond_resched();
	}
	return 0;
}

/*
 * count the number of bytes in the tree that have a given bit(s)
 * set.  This can be fairly slow, except for EXTENT_DIRTY which is
 * cached.  The total number found is returned.
 */
u64 count_range_bits(struct extent_io_tree *tree,
		     u64 *start, u64 search_end, u64 max_bytes,
		     unsigned long bits, int contig)
{
	struct rb_node *node;
	struct extent_state *state;
	u64 cur_start = *start;
	u64 total_bytes = 0;
	u64 last = 0;
	int found = 0;

	if (search_end <= cur_start) {
		WARN_ON(1);
		return 0;
	}

	spin_lock(&tree->lock);
	if (cur_start == 0 && bits == EXTENT_DIRTY) {
		total_bytes = tree->dirty_bytes;
		goto out;
	}
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, cur_start);
	if (!node)
		goto out;

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->start > search_end)
			break;
		if (contig && found && state->start > last + 1)
			break;
		if (state->end >= cur_start && (state->state & bits) == bits) {
			total_bytes += min(search_end, state->end) + 1 -
				       max(cur_start, state->start);
			if (total_bytes >= max_bytes)
				break;
			if (!found) {
				*start = max(cur_start, state->start);
				found = 1;
			}
			last = state->end;
		} else if (contig && found) {
			break;
		}
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	spin_unlock(&tree->lock);
	return total_bytes;
}

/*
 * set the private field for a given byte offset in the tree.  If there isn't
 * an extent_state there already, this does nothing.
 */
int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	spin_lock(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	state->private = private;
out:
	spin_unlock(&tree->lock);
	return ret;
}

int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	spin_lock(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	*private = state->private;
out:
	spin_unlock(&tree->lock);
	return ret;
}

/*
 * searches a range in the state tree for a given mask.
 * If 'filled' == 1, this returns 1 only if every extent in the tree
 * has the bits set.  Otherwise, 1 is returned if any bit in the
 * range is found set.
 */
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   int bits, int filled, struct extent_state *cached)
{
	struct extent_state *state = NULL;
	struct rb_node *node;
	int bitset = 0;

	spin_lock(&tree->lock);
	if (cached && cached->tree && cached->start <= start &&
	    cached->end > start)
		node = &cached->rb_node;
	else
		node = tree_search(tree, start);
	while (node && start <= end) {
		state = rb_entry(node, struct extent_state, rb_node);

		if (filled && state->start > start) {
			bitset = 0;
			break;
		}

		if (state->start > end)
			break;

		if (state->state & bits) {
			bitset = 1;
			if (!filled)
				break;
		} else if (filled) {
			bitset = 0;
			break;
		}

		if (state->end == (u64)-1)
			break;

		start = state->end + 1;
		if (start > end)
			break;
		node = rb_next(node);
		if (!node) {
			if (filled)
				bitset = 0;
			break;
		}
	}
	spin_unlock(&tree->lock);
	return bitset;
}

/*
 * helper function to set a given page up to date if all the
 * extents in the tree for that page are up to date
 */
static int check_page_uptodate(struct extent_io_tree *tree,
			       struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
		SetPageUptodate(page);
	return 0;
}

/*
 * helper function to unlock a page if all the extents in the tree
 * for that page are unlocked
 */
static int check_page_locked(struct extent_io_tree *tree,
			     struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL))
		unlock_page(page);
	return 0;
}

/*
 * helper function to end page writeback if all the extents
 * in the tree for that page are done with writeback
 */
static int check_page_writeback(struct extent_io_tree *tree,
				struct page *page)
{
	end_page_writeback(page);
	return 0;
}

/*
 * When IO fails, either with EIO or csum verification fails, we
 * try other mirrors that might have a good copy of the data.  This
 * io_failure_record is used to record state as we go through all the
 * mirrors.  If another mirror has good data, the page is set up to date
 * and things continue.  If a good mirror can't be found, the original
 * bio end_io callback is called to indicate things have failed.
 */
struct io_failure_record {
	struct page *page;
	u64 start;
	u64 len;
	u64 logical;
	unsigned long bio_flags;
	int this_mirror;
	int failed_mirror;
	int in_validation;
};

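/*
 * drop an io_failure_record: clear its private pointer and tracking bits
 * in the io_failure_tree and, if a repair was attempted, clear
 * EXTENT_DAMAGED on the data range as well
 */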
1834static int free_io_failure(struct inode *inode, struct io_failure_record *rec,
1835 int did_repair)
1836{
1837 int ret;
1838 int err = 0;
1839 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
1840
1841 set_state_private(failure_tree, rec->start, 0);
1842 ret = clear_extent_bits(failure_tree, rec->start,
1843 rec->start + rec->len - 1,
1844 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
1845 if (ret)
1846 err = ret;
1847
1848 if (did_repair) {
1849 ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start,
1850 rec->start + rec->len - 1,
1851 EXTENT_DAMAGED, GFP_NOFS);
1852 if (ret && !err)
1853 err = ret;
1854 }
1855
1856 kfree(rec);
1857 return err;
1858}
1859
1860static void repair_io_failure_callback(struct bio *bio, int err)
1861{
1862 complete(bio->bi_private);
1863}
1864
1865/*
1866 * this bypasses the standard btrfs submit functions deliberately, as
1867 * the standard behavior is to write all copies in a raid setup. here we only
1868 * want to write the one bad copy. so we do the mapping for ourselves and issue
1869 * submit_bio directly.
1870 * to avoid any synchonization issues, wait for the data after writing, which
1871 * actually prevents the read that triggered the error from finishing.
1872 * currently, there can be no more than two copies of every data bit. thus,
1873 * exactly one rewrite is required.
1874 */
1875int repair_io_failure(struct btrfs_mapping_tree *map_tree, u64 start,
1876 u64 length, u64 logical, struct page *page,
1877 int mirror_num)
1878{
1879 struct bio *bio;
1880 struct btrfs_device *dev;
1881 DECLARE_COMPLETION_ONSTACK(compl);
1882 u64 map_length = 0;
1883 u64 sector;
1884 struct btrfs_bio *bbio = NULL;
1885 int ret;
1886
1887 BUG_ON(!mirror_num);
1888
1889 bio = bio_alloc(GFP_NOFS, 1);
1890 if (!bio)
1891 return -EIO;
1892 bio->bi_private = &compl;
1893 bio->bi_end_io = repair_io_failure_callback;
1894 bio->bi_size = 0;
1895 map_length = length;
1896
1897 ret = btrfs_map_block(map_tree, WRITE, logical,
1898 &map_length, &bbio, mirror_num);
1899 if (ret) {
1900 bio_put(bio);
1901 return -EIO;
1902 }
1903 BUG_ON(mirror_num != bbio->mirror_num);
1904 sector = bbio->stripes[mirror_num-1].physical >> 9;
1905 bio->bi_sector = sector;
1906 dev = bbio->stripes[mirror_num-1].dev;
1907 kfree(bbio);
1908 if (!dev || !dev->bdev || !dev->writeable) {
1909 bio_put(bio);
1910 return -EIO;
1911 }
1912 bio->bi_bdev = dev->bdev;
1913 bio_add_page(bio, page, length, start-page_offset(page));
21adbd5c 1914 btrfsic_submit_bio(WRITE_SYNC, bio);
4a54c8c1
JS
1915 wait_for_completion(&compl);
1916
1917 if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
1918 /* try to remap that extent elsewhere? */
1919 bio_put(bio);
1920 return -EIO;
1921 }
1922
1923 printk(KERN_INFO "btrfs read error corrected: ino %lu off %llu (dev %s "
1924 "sector %llu)\n", page->mapping->host->i_ino, start,
1925 dev->name, sector);
1926
1927 bio_put(bio);
1928 return 0;
1929}
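As a worked example of the sector arithmetic above (numbers are illustrative): bi_sector counts 512-byte units, so a stripe physical offset of 1 MiB maps to sector 2048:

	u64 physical = 1024 * 1024;	/* byte offset within the target device */
	u64 sector = physical >> 9;	/* 1048576 / 512 = 2048 */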
1930
1931/*
1932 * each time an IO finishes, we do a fast check in the IO failure tree
1933 * to see if we need to process or clean up an io_failure_record
1934 */
1935static int clean_io_failure(u64 start, struct page *page)
1936{
1937 u64 private;
1938 u64 private_failure;
1939 struct io_failure_record *failrec;
1940 struct btrfs_mapping_tree *map_tree;
1941 struct extent_state *state;
1942 int num_copies;
1943 int did_repair = 0;
1944 int ret;
1945 struct inode *inode = page->mapping->host;
1946
1947 private = 0;
1948 ret = count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
1949 (u64)-1, 1, EXTENT_DIRTY, 0);
1950 if (!ret)
1951 return 0;
1952
1953 ret = get_state_private(&BTRFS_I(inode)->io_failure_tree, start,
1954 &private_failure);
1955 if (ret)
1956 return 0;
1957
1958 failrec = (struct io_failure_record *)(unsigned long) private_failure;
1959 BUG_ON(!failrec->this_mirror);
1960
1961 if (failrec->in_validation) {
1962 /* there was no real error, just free the record */
1963 pr_debug("clean_io_failure: freeing dummy error at %llu\n",
1964 failrec->start);
1965 did_repair = 1;
1966 goto out;
1967 }
1968
1969 spin_lock(&BTRFS_I(inode)->io_tree.lock);
1970 state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
1971 failrec->start,
1972 EXTENT_LOCKED);
1973 spin_unlock(&BTRFS_I(inode)->io_tree.lock);
1974
1975 if (state && state->start == failrec->start) {
1976 map_tree = &BTRFS_I(inode)->root->fs_info->mapping_tree;
1977 num_copies = btrfs_num_copies(map_tree, failrec->logical,
1978 failrec->len);
1979 if (num_copies > 1) {
1980 ret = repair_io_failure(map_tree, start, failrec->len,
1981 failrec->logical, page,
1982 failrec->failed_mirror);
1983 did_repair = !ret;
1984 }
1985 }
1986
1987out:
1988 if (!ret)
1989 ret = free_io_failure(inode, failrec, did_repair);
1990
1991 return ret;
1992}
1993
1994/*
1995 * this is a generic handler for readpage errors (default
1996 * readpage_io_failed_hook). if other copies exist, read those and write back
 1997 * good data to the failed position. it does not attempt to remap the failed
 1998 * extent elsewhere, but relies on the device being smart enough to do this as
 1999 * needed
2000 */
2001
2002static int bio_readpage_error(struct bio *failed_bio, struct page *page,
2003 u64 start, u64 end, int failed_mirror,
2004 struct extent_state *state)
2005{
2006 struct io_failure_record *failrec = NULL;
2007 u64 private;
2008 struct extent_map *em;
2009 struct inode *inode = page->mapping->host;
2010 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2011 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2012 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
2013 struct bio *bio;
2014 int num_copies;
2015 int ret;
2016 int read_mode;
2017 u64 logical;
2018
2019 BUG_ON(failed_bio->bi_rw & REQ_WRITE);
2020
2021 ret = get_state_private(failure_tree, start, &private);
2022 if (ret) {
2023 failrec = kzalloc(sizeof(*failrec), GFP_NOFS);
2024 if (!failrec)
2025 return -ENOMEM;
2026 failrec->start = start;
2027 failrec->len = end - start + 1;
2028 failrec->this_mirror = 0;
2029 failrec->bio_flags = 0;
2030 failrec->in_validation = 0;
2031
2032 read_lock(&em_tree->lock);
2033 em = lookup_extent_mapping(em_tree, start, failrec->len);
2034 if (!em) {
2035 read_unlock(&em_tree->lock);
2036 kfree(failrec);
2037 return -EIO;
2038 }
2039
2040 if (em->start > start || em->start + em->len < start) {
2041 free_extent_map(em);
2042 em = NULL;
2043 }
2044 read_unlock(&em_tree->lock);
2045
2046 if (!em || IS_ERR(em)) {
2047 kfree(failrec);
2048 return -EIO;
2049 }
2050 logical = start - em->start;
2051 logical = em->block_start + logical;
2052 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2053 logical = em->block_start;
2054 failrec->bio_flags = EXTENT_BIO_COMPRESSED;
2055 extent_set_compress_type(&failrec->bio_flags,
2056 em->compress_type);
2057 }
2058 pr_debug("bio_readpage_error: (new) logical=%llu, start=%llu, "
2059 "len=%llu\n", logical, start, failrec->len);
2060 failrec->logical = logical;
2061 free_extent_map(em);
2062
2063 /* set the bits in the private failure tree */
2064 ret = set_extent_bits(failure_tree, start, end,
2065 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
2066 if (ret >= 0)
2067 ret = set_state_private(failure_tree, start,
2068 (u64)(unsigned long)failrec);
2069 /* set the bits in the inode's tree */
2070 if (ret >= 0)
2071 ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED,
2072 GFP_NOFS);
2073 if (ret < 0) {
2074 kfree(failrec);
2075 return ret;
2076 }
2077 } else {
2078 failrec = (struct io_failure_record *)(unsigned long)private;
2079 pr_debug("bio_readpage_error: (found) logical=%llu, "
2080 "start=%llu, len=%llu, validation=%d\n",
2081 failrec->logical, failrec->start, failrec->len,
2082 failrec->in_validation);
2083 /*
2084 * when data can be on disk more than twice, add to failrec here
2085 * (e.g. with a list for failed_mirror) to make
2086 * clean_io_failure() clean all those errors at once.
2087 */
2088 }
2089 num_copies = btrfs_num_copies(
2090 &BTRFS_I(inode)->root->fs_info->mapping_tree,
2091 failrec->logical, failrec->len);
2092 if (num_copies == 1) {
2093 /*
2094 * we only have a single copy of the data, so don't bother with
2095 * all the retry and error correction code that follows. no
2096 * matter what the error is, it is very likely to persist.
2097 */
2098 pr_debug("bio_readpage_error: cannot repair, num_copies == 1. "
2099 "state=%p, num_copies=%d, next_mirror %d, "
2100 "failed_mirror %d\n", state, num_copies,
2101 failrec->this_mirror, failed_mirror);
2102 free_io_failure(inode, failrec, 0);
2103 return -EIO;
2104 }
2105
2106 if (!state) {
2107 spin_lock(&tree->lock);
2108 state = find_first_extent_bit_state(tree, failrec->start,
2109 EXTENT_LOCKED);
2110 if (state && state->start != failrec->start)
2111 state = NULL;
2112 spin_unlock(&tree->lock);
2113 }
2114
2115 /*
 2116 * there are two goals here:
2117 * a) deliver good data to the caller
2118 * b) correct the bad sectors on disk
2119 */
2120 if (failed_bio->bi_vcnt > 1) {
2121 /*
2122 * to fulfill b), we need to know the exact failing sectors, as
2123 * we don't want to rewrite any more than the failed ones. thus,
2124 * we need separate read requests for the failed bio
2125 *
2126 * if the following BUG_ON triggers, our validation request got
2127 * merged. we need separate requests for our algorithm to work.
2128 */
2129 BUG_ON(failrec->in_validation);
2130 failrec->in_validation = 1;
2131 failrec->this_mirror = failed_mirror;
2132 read_mode = READ_SYNC | REQ_FAILFAST_DEV;
2133 } else {
2134 /*
 2135 * we're ready to fulfill a) and b) together. get a good copy
 2136 * of the failed sector and if we succeed, we have set up
2137 * everything for repair_io_failure to do the rest for us.
2138 */
2139 if (failrec->in_validation) {
2140 BUG_ON(failrec->this_mirror != failed_mirror);
2141 failrec->in_validation = 0;
2142 failrec->this_mirror = 0;
2143 }
2144 failrec->failed_mirror = failed_mirror;
2145 failrec->this_mirror++;
2146 if (failrec->this_mirror == failed_mirror)
2147 failrec->this_mirror++;
2148 read_mode = READ_SYNC;
2149 }
2150
2151 if (!state || failrec->this_mirror > num_copies) {
2152 pr_debug("bio_readpage_error: (fail) state=%p, num_copies=%d, "
2153 "next_mirror %d, failed_mirror %d\n", state,
2154 num_copies, failrec->this_mirror, failed_mirror);
2155 free_io_failure(inode, failrec, 0);
2156 return -EIO;
2157 }
2158
2159 bio = bio_alloc(GFP_NOFS, 1);
2160 bio->bi_private = state;
2161 bio->bi_end_io = failed_bio->bi_end_io;
2162 bio->bi_sector = failrec->logical >> 9;
2163 bio->bi_bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
2164 bio->bi_size = 0;
2165
2166 bio_add_page(bio, page, failrec->len, start - page_offset(page));
2167
2168 pr_debug("bio_readpage_error: submitting new read[%#x] to "
2169 "this_mirror=%d, num_copies=%d, in_validation=%d\n", read_mode,
2170 failrec->this_mirror, num_copies, failrec->in_validation);
2171
013bd4c3
TI
2172 ret = tree->ops->submit_bio_hook(inode, read_mode, bio,
2173 failrec->this_mirror,
2174 failrec->bio_flags, 0);
2175 return ret;
4a54c8c1
JS
2176}
2177
d1310b2e
CM
2178/* lots and lots of room for performance fixes in the end_bio funcs */
2179
87826df0
JM
2180int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
2181{
2182 int uptodate = (err == 0);
2183 struct extent_io_tree *tree;
2184 int ret;
2185
2186 tree = &BTRFS_I(page->mapping->host)->io_tree;
2187
2188 if (tree->ops && tree->ops->writepage_end_io_hook) {
2189 ret = tree->ops->writepage_end_io_hook(page, start,
2190 end, NULL, uptodate);
2191 if (ret)
2192 uptodate = 0;
2193 }
2194
2195 if (!uptodate && tree->ops &&
2196 tree->ops->writepage_io_failed_hook) {
2197 ret = tree->ops->writepage_io_failed_hook(NULL, page,
2198 start, end, NULL);
2199 /* Writeback already completed */
2200 if (ret == 0)
2201 return 1;
355808c2 2202 BUG_ON(ret < 0);
87826df0
JM
2203 }
2204
2205 if (!uptodate) {
2206 clear_extent_uptodate(tree, start, end, NULL, GFP_NOFS);
2207 ClearPageUptodate(page);
2208 SetPageError(page);
2209 }
2210 return 0;
2211}
2212
d1310b2e
CM
2213/*
2214 * after a writepage IO is done, we need to:
2215 * clear the uptodate bits on error
2216 * clear the writeback bits in the extent tree for this IO
2217 * end_page_writeback if the page has no more pending IO
2218 *
2219 * Scheduling is not allowed, so the extent state tree is expected
2220 * to have one and only one object corresponding to this IO.
2221 */
d1310b2e 2222static void end_bio_extent_writepage(struct bio *bio, int err)
d1310b2e 2223{
d1310b2e 2224 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
902b22f3 2225 struct extent_io_tree *tree;
d1310b2e
CM
2226 u64 start;
2227 u64 end;
2228 int whole_page;
2229
d1310b2e
CM
2230 do {
2231 struct page *page = bvec->bv_page;
902b22f3
DW
2232 tree = &BTRFS_I(page->mapping->host)->io_tree;
2233
d1310b2e
CM
2234 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
2235 bvec->bv_offset;
2236 end = start + bvec->bv_len - 1;
2237
2238 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
2239 whole_page = 1;
2240 else
2241 whole_page = 0;
2242
2243 if (--bvec >= bio->bi_io_vec)
2244 prefetchw(&bvec->bv_page->flags);
1259ab75 2245
87826df0
JM
2246 if (end_extent_writepage(page, err, start, end))
2247 continue;
70dec807 2248
d1310b2e
CM
2249 if (whole_page)
2250 end_page_writeback(page);
2251 else
2252 check_page_writeback(tree, page);
d1310b2e 2253 } while (bvec >= bio->bi_io_vec);
2b1f55b0 2254
d1310b2e 2255 bio_put(bio);
d1310b2e
CM
2256}
2257
2258/*
2259 * after a readpage IO is done, we need to:
2260 * clear the uptodate bits on error
2261 * set the uptodate bits if things worked
2262 * set the page up to date if all extents in the tree are uptodate
2263 * clear the lock bit in the extent tree
2264 * unlock the page if there are no other extents locked for it
2265 *
2266 * Scheduling is not allowed, so the extent state tree is expected
2267 * to have one and only one object corresponding to this IO.
2268 */
d1310b2e 2269static void end_bio_extent_readpage(struct bio *bio, int err)
d1310b2e
CM
2270{
2271 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
4125bf76
CM
2272 struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
2273 struct bio_vec *bvec = bio->bi_io_vec;
902b22f3 2274 struct extent_io_tree *tree;
d1310b2e
CM
2275 u64 start;
2276 u64 end;
2277 int whole_page;
2278 int ret;
2279
d20f7043
CM
2280 if (err)
2281 uptodate = 0;
2282
d1310b2e
CM
2283 do {
2284 struct page *page = bvec->bv_page;
507903b8
AJ
2285 struct extent_state *cached = NULL;
2286 struct extent_state *state;
2287
4a54c8c1
JS
2288 pr_debug("end_bio_extent_readpage: bi_vcnt=%d, idx=%d, err=%d, "
2289 "mirror=%ld\n", bio->bi_vcnt, bio->bi_idx, err,
2290 (long int)bio->bi_bdev);
902b22f3
DW
2291 tree = &BTRFS_I(page->mapping->host)->io_tree;
2292
d1310b2e
CM
2293 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
2294 bvec->bv_offset;
2295 end = start + bvec->bv_len - 1;
2296
2297 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
2298 whole_page = 1;
2299 else
2300 whole_page = 0;
2301
4125bf76 2302 if (++bvec <= bvec_end)
d1310b2e
CM
2303 prefetchw(&bvec->bv_page->flags);
2304
507903b8 2305 spin_lock(&tree->lock);
0d399205 2306 state = find_first_extent_bit_state(tree, start, EXTENT_LOCKED);
109b36a2 2307 if (state && state->start == start) {
507903b8
AJ
2308 /*
2309 * take a reference on the state, unlock will drop
2310 * the ref
2311 */
2312 cache_state(state, &cached);
2313 }
2314 spin_unlock(&tree->lock);
2315
d1310b2e 2316 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
70dec807 2317 ret = tree->ops->readpage_end_io_hook(page, start, end,
507903b8 2318 state);
d1310b2e
CM
2319 if (ret)
2320 uptodate = 0;
4a54c8c1
JS
2321 else
2322 clean_io_failure(start, page);
d1310b2e 2323 }
4a54c8c1 2324 if (!uptodate) {
32240a91
JS
2325 int failed_mirror;
2326 failed_mirror = (int)(unsigned long)bio->bi_bdev;
f4a8e656
JS
2327 /*
2328 * The generic bio_readpage_error handles errors the
2329 * following way: If possible, new read requests are
2330 * created and submitted and will end up in
2331 * end_bio_extent_readpage as well (if we're lucky, not
2332 * in the !uptodate case). In that case it returns 0 and
2333 * we just go on with the next page in our bio. If it
2334 * can't handle the error it will return -EIO and we
2335 * remain responsible for that page.
2336 */
2337 ret = bio_readpage_error(bio, page, start, end,
2338 failed_mirror, NULL);
7e38326f 2339 if (ret == 0) {
f4a8e656 2340error_handled:
3b951516
CM
2341 uptodate =
2342 test_bit(BIO_UPTODATE, &bio->bi_flags);
d20f7043
CM
2343 if (err)
2344 uptodate = 0;
507903b8 2345 uncache_state(&cached);
7e38326f
CM
2346 continue;
2347 }
f4a8e656
JS
2348 if (tree->ops && tree->ops->readpage_io_failed_hook) {
2349 ret = tree->ops->readpage_io_failed_hook(
2350 bio, page, start, end,
2351 failed_mirror, state);
2352 if (ret == 0)
2353 goto error_handled;
2354 }
355808c2 2355 BUG_ON(ret < 0);
7e38326f 2356 }
d1310b2e 2357
771ed689 2358 if (uptodate) {
507903b8 2359 set_extent_uptodate(tree, start, end, &cached,
902b22f3 2360 GFP_ATOMIC);
771ed689 2361 }
507903b8 2362 unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
d1310b2e 2363
70dec807
CM
2364 if (whole_page) {
2365 if (uptodate) {
2366 SetPageUptodate(page);
2367 } else {
2368 ClearPageUptodate(page);
2369 SetPageError(page);
2370 }
d1310b2e 2371 unlock_page(page);
70dec807
CM
2372 } else {
2373 if (uptodate) {
2374 check_page_uptodate(tree, page);
2375 } else {
2376 ClearPageUptodate(page);
2377 SetPageError(page);
2378 }
d1310b2e 2379 check_page_locked(tree, page);
70dec807 2380 }
4125bf76 2381 } while (bvec <= bvec_end);
d1310b2e
CM
2382
2383 bio_put(bio);
d1310b2e
CM
2384}
2385
88f794ed
MX
2386struct bio *
2387btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
2388 gfp_t gfp_flags)
d1310b2e
CM
2389{
2390 struct bio *bio;
2391
2392 bio = bio_alloc(gfp_flags, nr_vecs);
2393
2394 if (bio == NULL && (current->flags & PF_MEMALLOC)) {
2395 while (!bio && (nr_vecs /= 2))
2396 bio = bio_alloc(gfp_flags, nr_vecs);
2397 }
2398
2399 if (bio) {
e1c4b745 2400 bio->bi_size = 0;
d1310b2e
CM
2401 bio->bi_bdev = bdev;
2402 bio->bi_sector = first_sector;
2403 }
2404 return bio;
2405}
2406
355808c2
JM
2407static int __must_check submit_one_bio(int rw, struct bio *bio,
2408 int mirror_num, unsigned long bio_flags)
d1310b2e 2409{
d1310b2e 2410 int ret = 0;
70dec807
CM
2411 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
2412 struct page *page = bvec->bv_page;
2413 struct extent_io_tree *tree = bio->bi_private;
70dec807 2414 u64 start;
70dec807
CM
2415
2416 start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
70dec807 2417
902b22f3 2418 bio->bi_private = NULL;
d1310b2e
CM
2419
2420 bio_get(bio);
2421
065631f6 2422 if (tree->ops && tree->ops->submit_bio_hook)
6b82ce8d 2423 ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
eaf25d93 2424 mirror_num, bio_flags, start);
0b86a832 2425 else
21adbd5c 2426 btrfsic_submit_bio(rw, bio);
4a54c8c1 2427
d1310b2e
CM
2428 if (bio_flagged(bio, BIO_EOPNOTSUPP))
2429 ret = -EOPNOTSUPP;
2430 bio_put(bio);
2431 return ret;
2432}
2433
3444a972
JM
2434static int merge_bio(struct extent_io_tree *tree, struct page *page,
2435 unsigned long offset, size_t size, struct bio *bio,
2436 unsigned long bio_flags)
2437{
2438 int ret = 0;
2439 if (tree->ops && tree->ops->merge_bio_hook)
2440 ret = tree->ops->merge_bio_hook(page, offset, size, bio,
2441 bio_flags);
2442 BUG_ON(ret < 0);
2443 return ret;
2444
2445}
2446
d1310b2e
CM
2447static int submit_extent_page(int rw, struct extent_io_tree *tree,
2448 struct page *page, sector_t sector,
2449 size_t size, unsigned long offset,
2450 struct block_device *bdev,
2451 struct bio **bio_ret,
2452 unsigned long max_pages,
f188591e 2453 bio_end_io_t end_io_func,
c8b97818
CM
2454 int mirror_num,
2455 unsigned long prev_bio_flags,
2456 unsigned long bio_flags)
d1310b2e
CM
2457{
2458 int ret = 0;
2459 struct bio *bio;
2460 int nr;
c8b97818
CM
2461 int contig = 0;
2462 int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED;
2463 int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
5b050f04 2464 size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE);
d1310b2e
CM
2465
2466 if (bio_ret && *bio_ret) {
2467 bio = *bio_ret;
c8b97818
CM
2468 if (old_compressed)
2469 contig = bio->bi_sector == sector;
2470 else
2471 contig = bio->bi_sector + (bio->bi_size >> 9) ==
2472 sector;
2473
2474 if (prev_bio_flags != bio_flags || !contig ||
3444a972 2475 merge_bio(tree, page, offset, page_size, bio, bio_flags) ||
c8b97818
CM
2476 bio_add_page(bio, page, page_size, offset) < page_size) {
2477 ret = submit_one_bio(rw, bio, mirror_num,
2478 prev_bio_flags);
355808c2 2479 BUG_ON(ret < 0);
d1310b2e
CM
2480 bio = NULL;
2481 } else {
2482 return 0;
2483 }
2484 }
c8b97818
CM
2485 if (this_compressed)
2486 nr = BIO_MAX_PAGES;
2487 else
2488 nr = bio_get_nr_vecs(bdev);
2489
88f794ed 2490 bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
5df67083
TI
2491 if (!bio)
2492 return -ENOMEM;
70dec807 2493
c8b97818 2494 bio_add_page(bio, page, page_size, offset);
d1310b2e
CM
2495 bio->bi_end_io = end_io_func;
2496 bio->bi_private = tree;
70dec807 2497
d397712b 2498 if (bio_ret)
d1310b2e 2499 *bio_ret = bio;
355808c2 2500 else {
c8b97818 2501 ret = submit_one_bio(rw, bio, mirror_num, bio_flags);
355808c2
JM
2502 BUG_ON(ret < 0);
2503 }
d1310b2e
CM
2504
2505 return ret;
2506}
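A small worked example of the contiguity test used above (illustrative numbers): a bio that currently starts at sector 1000 and holds 8K of data ends at sector 1016, so only a page mapped to sector 1016 can be appended without submitting first:

	/* bi_sector == 1000, bi_size == 8192 bytes, i.e. 16 sectors */
	int contig = (1000 + (8192 >> 9)) == 1016;	/* true: the new page is adjacent */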
2507
2508void set_page_extent_mapped(struct page *page)
2509{
2510 if (!PagePrivate(page)) {
2511 SetPagePrivate(page);
d1310b2e 2512 page_cache_get(page);
6af118ce 2513 set_page_private(page, EXTENT_PAGE_PRIVATE);
d1310b2e
CM
2514 }
2515}
2516
b2950863 2517static void set_page_extent_head(struct page *page, unsigned long len)
d1310b2e 2518{
eb14ab8e 2519 WARN_ON(!PagePrivate(page));
d1310b2e
CM
2520 set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
2521}
2522
2523/*
2524 * basic readpage implementation. Locked extent state structs are inserted
 2525 * into the tree; they are removed when the IO is done (by the end_io
2526 * handlers)
2527 */
2528static int __extent_read_full_page(struct extent_io_tree *tree,
2529 struct page *page,
2530 get_extent_t *get_extent,
c8b97818
CM
2531 struct bio **bio, int mirror_num,
2532 unsigned long *bio_flags)
d1310b2e
CM
2533{
2534 struct inode *inode = page->mapping->host;
2535 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2536 u64 page_end = start + PAGE_CACHE_SIZE - 1;
2537 u64 end;
2538 u64 cur = start;
2539 u64 extent_offset;
2540 u64 last_byte = i_size_read(inode);
2541 u64 block_start;
2542 u64 cur_end;
2543 sector_t sector;
2544 struct extent_map *em;
2545 struct block_device *bdev;
11c65dcc 2546 struct btrfs_ordered_extent *ordered;
d1310b2e
CM
2547 int ret;
2548 int nr = 0;
306e16ce 2549 size_t pg_offset = 0;
d1310b2e 2550 size_t iosize;
c8b97818 2551 size_t disk_io_size;
d1310b2e 2552 size_t blocksize = inode->i_sb->s_blocksize;
c8b97818 2553 unsigned long this_bio_flag = 0;
d1310b2e
CM
2554
2555 set_page_extent_mapped(page);
2556
90a887c9
DM
2557 if (!PageUptodate(page)) {
2558 if (cleancache_get_page(page) == 0) {
2559 BUG_ON(blocksize != PAGE_SIZE);
2560 goto out;
2561 }
2562 }
2563
d1310b2e 2564 end = page_end;
11c65dcc
JB
2565 while (1) {
2566 lock_extent(tree, start, end, GFP_NOFS);
2567 ordered = btrfs_lookup_ordered_extent(inode, start);
2568 if (!ordered)
2569 break;
2570 unlock_extent(tree, start, end, GFP_NOFS);
2571 btrfs_start_ordered_extent(inode, ordered, 1);
2572 btrfs_put_ordered_extent(ordered);
2573 }
d1310b2e 2574
c8b97818
CM
2575 if (page->index == last_byte >> PAGE_CACHE_SHIFT) {
2576 char *userpage;
2577 size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1);
2578
2579 if (zero_offset) {
2580 iosize = PAGE_CACHE_SIZE - zero_offset;
2581 userpage = kmap_atomic(page, KM_USER0);
2582 memset(userpage + zero_offset, 0, iosize);
2583 flush_dcache_page(page);
2584 kunmap_atomic(userpage, KM_USER0);
2585 }
2586 }
d1310b2e
CM
2587 while (cur <= end) {
2588 if (cur >= last_byte) {
2589 char *userpage;
507903b8
AJ
2590 struct extent_state *cached = NULL;
2591
306e16ce 2592 iosize = PAGE_CACHE_SIZE - pg_offset;
d1310b2e 2593 userpage = kmap_atomic(page, KM_USER0);
306e16ce 2594 memset(userpage + pg_offset, 0, iosize);
d1310b2e
CM
2595 flush_dcache_page(page);
2596 kunmap_atomic(userpage, KM_USER0);
2597 set_extent_uptodate(tree, cur, cur + iosize - 1,
507903b8
AJ
2598 &cached, GFP_NOFS);
2599 unlock_extent_cached(tree, cur, cur + iosize - 1,
2600 &cached, GFP_NOFS);
d1310b2e
CM
2601 break;
2602 }
306e16ce 2603 em = get_extent(inode, page, pg_offset, cur,
d1310b2e 2604 end - cur + 1, 0);
c704005d 2605 if (IS_ERR_OR_NULL(em)) {
d1310b2e
CM
2606 SetPageError(page);
2607 unlock_extent(tree, cur, end, GFP_NOFS);
2608 break;
2609 }
d1310b2e
CM
2610 extent_offset = cur - em->start;
2611 BUG_ON(extent_map_end(em) <= cur);
2612 BUG_ON(end < cur);
2613
261507a0 2614 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
c8b97818 2615 this_bio_flag = EXTENT_BIO_COMPRESSED;
261507a0
LZ
2616 extent_set_compress_type(&this_bio_flag,
2617 em->compress_type);
2618 }
c8b97818 2619
d1310b2e
CM
2620 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2621 cur_end = min(extent_map_end(em) - 1, end);
2622 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
c8b97818
CM
2623 if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
2624 disk_io_size = em->block_len;
2625 sector = em->block_start >> 9;
2626 } else {
2627 sector = (em->block_start + extent_offset) >> 9;
2628 disk_io_size = iosize;
2629 }
d1310b2e
CM
2630 bdev = em->bdev;
2631 block_start = em->block_start;
d899e052
YZ
2632 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
2633 block_start = EXTENT_MAP_HOLE;
d1310b2e
CM
2634 free_extent_map(em);
2635 em = NULL;
2636
2637 /* we've found a hole, just zero and go on */
2638 if (block_start == EXTENT_MAP_HOLE) {
2639 char *userpage;
507903b8
AJ
2640 struct extent_state *cached = NULL;
2641
d1310b2e 2642 userpage = kmap_atomic(page, KM_USER0);
306e16ce 2643 memset(userpage + pg_offset, 0, iosize);
d1310b2e
CM
2644 flush_dcache_page(page);
2645 kunmap_atomic(userpage, KM_USER0);
2646
2647 set_extent_uptodate(tree, cur, cur + iosize - 1,
507903b8
AJ
2648 &cached, GFP_NOFS);
2649 unlock_extent_cached(tree, cur, cur + iosize - 1,
2650 &cached, GFP_NOFS);
d1310b2e 2651 cur = cur + iosize;
306e16ce 2652 pg_offset += iosize;
d1310b2e
CM
2653 continue;
2654 }
2655 /* the get_extent function already copied into the page */
9655d298
CM
2656 if (test_range_bit(tree, cur, cur_end,
2657 EXTENT_UPTODATE, 1, NULL)) {
a1b32a59 2658 check_page_uptodate(tree, page);
d1310b2e
CM
2659 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2660 cur = cur + iosize;
306e16ce 2661 pg_offset += iosize;
d1310b2e
CM
2662 continue;
2663 }
70dec807
CM
2664 /* we have an inline extent but it didn't get marked up
2665 * to date. Error out
2666 */
2667 if (block_start == EXTENT_MAP_INLINE) {
2668 SetPageError(page);
2669 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2670 cur = cur + iosize;
306e16ce 2671 pg_offset += iosize;
70dec807
CM
2672 continue;
2673 }
d1310b2e
CM
2674
2675 ret = 0;
2676 if (tree->ops && tree->ops->readpage_io_hook) {
2677 ret = tree->ops->readpage_io_hook(page, cur,
2678 cur + iosize - 1);
2679 }
2680 if (!ret) {
89642229
CM
2681 unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
2682 pnr -= page->index;
d1310b2e 2683 ret = submit_extent_page(READ, tree, page,
306e16ce 2684 sector, disk_io_size, pg_offset,
89642229 2685 bdev, bio, pnr,
c8b97818
CM
2686 end_bio_extent_readpage, mirror_num,
2687 *bio_flags,
2688 this_bio_flag);
89642229 2689 nr++;
c8b97818 2690 *bio_flags = this_bio_flag;
d1310b2e
CM
2691 }
2692 if (ret)
2693 SetPageError(page);
2694 cur = cur + iosize;
306e16ce 2695 pg_offset += iosize;
d1310b2e 2696 }
90a887c9 2697out:
d1310b2e
CM
2698 if (!nr) {
2699 if (!PageError(page))
2700 SetPageUptodate(page);
2701 unlock_page(page);
2702 }
2703 return 0;
2704}
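A worked example of the tail zeroing at the top of __extent_read_full_page (illustrative, 4K pages): with i_size == 10000 the last page has index 2, and everything on that page past EOF is zeroed before any extents are read:

	size_t zero_offset = 10000 & (4096 - 1);	/* 1808 */
	size_t iosize = 4096 - zero_offset;		/* 2288 bytes memset to 0 */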
2705
2706int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
8ddc7d9c 2707 get_extent_t *get_extent, int mirror_num)
d1310b2e
CM
2708{
2709 struct bio *bio = NULL;
c8b97818 2710 unsigned long bio_flags = 0;
d1310b2e
CM
2711 int ret;
2712
8ddc7d9c 2713 ret = __extent_read_full_page(tree, page, get_extent, &bio, mirror_num,
c8b97818 2714 &bio_flags);
355808c2 2715 if (bio) {
8ddc7d9c 2716 ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
355808c2
JM
2717 BUG_ON(ret < 0);
2718 }
d1310b2e
CM
2719 return ret;
2720}
d1310b2e 2721
11c8349b
CM
2722static noinline void update_nr_written(struct page *page,
2723 struct writeback_control *wbc,
2724 unsigned long nr_written)
2725{
2726 wbc->nr_to_write -= nr_written;
2727 if (wbc->range_cyclic || (wbc->nr_to_write > 0 &&
2728 wbc->range_start == 0 && wbc->range_end == LLONG_MAX))
2729 page->mapping->writeback_index = page->index + nr_written;
2730}
2731
d1310b2e
CM
2732/*
2733 * the writepage semantics are similar to regular writepage. extent
2734 * records are inserted to lock ranges in the tree, and as dirty areas
2735 * are found, they are marked writeback. Then the lock bits are removed
2736 * and the end_io handler clears the writeback ranges
2737 */
2738static int __extent_writepage(struct page *page, struct writeback_control *wbc,
2739 void *data)
2740{
2741 struct inode *inode = page->mapping->host;
2742 struct extent_page_data *epd = data;
2743 struct extent_io_tree *tree = epd->tree;
2744 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2745 u64 delalloc_start;
2746 u64 page_end = start + PAGE_CACHE_SIZE - 1;
2747 u64 end;
2748 u64 cur = start;
2749 u64 extent_offset;
2750 u64 last_byte = i_size_read(inode);
2751 u64 block_start;
2752 u64 iosize;
2753 sector_t sector;
2c64c53d 2754 struct extent_state *cached_state = NULL;
d1310b2e
CM
2755 struct extent_map *em;
2756 struct block_device *bdev;
2757 int ret;
2758 int nr = 0;
7f3c74fb 2759 size_t pg_offset = 0;
d1310b2e
CM
2760 size_t blocksize;
2761 loff_t i_size = i_size_read(inode);
2762 unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
2763 u64 nr_delalloc;
2764 u64 delalloc_end;
c8b97818
CM
2765 int page_started;
2766 int compressed;
ffbd517d 2767 int write_flags;
771ed689 2768 unsigned long nr_written = 0;
9e487107 2769 bool fill_delalloc = true;
d1310b2e 2770
ffbd517d 2771 if (wbc->sync_mode == WB_SYNC_ALL)
721a9602 2772 write_flags = WRITE_SYNC;
ffbd517d
CM
2773 else
2774 write_flags = WRITE;
2775
1abe9b8a 2776 trace___extent_writepage(page, inode, wbc);
2777
d1310b2e 2778 WARN_ON(!PageLocked(page));
bf0da8c1
CM
2779
2780 ClearPageError(page);
2781
7f3c74fb 2782 pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
211c17f5 2783 if (page->index > end_index ||
7f3c74fb 2784 (page->index == end_index && !pg_offset)) {
39be25cd 2785 page->mapping->a_ops->invalidatepage(page, 0);
d1310b2e
CM
2786 unlock_page(page);
2787 return 0;
2788 }
2789
2790 if (page->index == end_index) {
2791 char *userpage;
2792
d1310b2e 2793 userpage = kmap_atomic(page, KM_USER0);
7f3c74fb
CM
2794 memset(userpage + pg_offset, 0,
2795 PAGE_CACHE_SIZE - pg_offset);
d1310b2e 2796 kunmap_atomic(userpage, KM_USER0);
211c17f5 2797 flush_dcache_page(page);
d1310b2e 2798 }
7f3c74fb 2799 pg_offset = 0;
d1310b2e
CM
2800
2801 set_page_extent_mapped(page);
2802
9e487107
JB
2803 if (!tree->ops || !tree->ops->fill_delalloc)
2804 fill_delalloc = false;
2805
d1310b2e
CM
2806 delalloc_start = start;
2807 delalloc_end = 0;
c8b97818 2808 page_started = 0;
9e487107 2809 if (!epd->extent_locked && fill_delalloc) {
f85d7d6c 2810 u64 delalloc_to_write = 0;
11c8349b
CM
2811 /*
2812 * make sure the wbc mapping index is at least updated
2813 * to this page.
2814 */
2815 update_nr_written(page, wbc, 0);
2816
d397712b 2817 while (delalloc_end < page_end) {
771ed689 2818 nr_delalloc = find_lock_delalloc_range(inode, tree,
c8b97818
CM
2819 page,
2820 &delalloc_start,
d1310b2e
CM
2821 &delalloc_end,
2822 128 * 1024 * 1024);
771ed689
CM
2823 if (nr_delalloc == 0) {
2824 delalloc_start = delalloc_end + 1;
2825 continue;
2826 }
013bd4c3
TI
2827 ret = tree->ops->fill_delalloc(inode, page,
2828 delalloc_start,
2829 delalloc_end,
2830 &page_started,
2831 &nr_written);
2832 BUG_ON(ret);
f85d7d6c
CM
2833 /*
2834 * delalloc_end is already one less than the total
2835 * length, so we don't subtract one from
2836 * PAGE_CACHE_SIZE
2837 */
2838 delalloc_to_write += (delalloc_end - delalloc_start +
2839 PAGE_CACHE_SIZE) >>
2840 PAGE_CACHE_SHIFT;
d1310b2e 2841 delalloc_start = delalloc_end + 1;
d1310b2e 2842 }
f85d7d6c
CM
2843 if (wbc->nr_to_write < delalloc_to_write) {
2844 int thresh = 8192;
2845
2846 if (delalloc_to_write < thresh * 2)
2847 thresh = delalloc_to_write;
2848 wbc->nr_to_write = min_t(u64, delalloc_to_write,
2849 thresh);
2850 }
c8b97818 2851
771ed689
CM
2852 /* did the fill delalloc function already unlock and start
2853 * the IO?
2854 */
2855 if (page_started) {
2856 ret = 0;
11c8349b
CM
2857 /*
2858 * we've unlocked the page, so we can't update
2859 * the mapping's writeback index, just update
2860 * nr_to_write.
2861 */
2862 wbc->nr_to_write -= nr_written;
2863 goto done_unlocked;
771ed689 2864 }
c8b97818 2865 }
247e743c 2866 if (tree->ops && tree->ops->writepage_start_hook) {
c8b97818
CM
2867 ret = tree->ops->writepage_start_hook(page, start,
2868 page_end);
87826df0
JM
2869 if (ret) {
2870 /* Fixup worker will requeue */
2871 if (ret == -EBUSY)
2872 wbc->pages_skipped++;
2873 else
2874 redirty_page_for_writepage(wbc, page);
11c8349b 2875 update_nr_written(page, wbc, nr_written);
247e743c 2876 unlock_page(page);
771ed689 2877 ret = 0;
11c8349b 2878 goto done_unlocked;
247e743c
CM
2879 }
2880 }
2881
11c8349b
CM
2882 /*
2883 * we don't want to touch the inode after unlocking the page,
2884 * so we update the mapping writeback index now
2885 */
2886 update_nr_written(page, wbc, nr_written + 1);
771ed689 2887
d1310b2e 2888 end = page_end;
d1310b2e 2889 if (last_byte <= start) {
e6dcd2dc
CM
2890 if (tree->ops && tree->ops->writepage_end_io_hook)
2891 tree->ops->writepage_end_io_hook(page, start,
2892 page_end, NULL, 1);
d1310b2e
CM
2893 goto done;
2894 }
2895
d1310b2e
CM
2896 blocksize = inode->i_sb->s_blocksize;
2897
2898 while (cur <= end) {
2899 if (cur >= last_byte) {
e6dcd2dc
CM
2900 if (tree->ops && tree->ops->writepage_end_io_hook)
2901 tree->ops->writepage_end_io_hook(page, cur,
2902 page_end, NULL, 1);
d1310b2e
CM
2903 break;
2904 }
7f3c74fb 2905 em = epd->get_extent(inode, page, pg_offset, cur,
d1310b2e 2906 end - cur + 1, 1);
c704005d 2907 if (IS_ERR_OR_NULL(em)) {
d1310b2e
CM
2908 SetPageError(page);
2909 break;
2910 }
2911
2912 extent_offset = cur - em->start;
2913 BUG_ON(extent_map_end(em) <= cur);
2914 BUG_ON(end < cur);
2915 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2916 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
2917 sector = (em->block_start + extent_offset) >> 9;
2918 bdev = em->bdev;
2919 block_start = em->block_start;
c8b97818 2920 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
d1310b2e
CM
2921 free_extent_map(em);
2922 em = NULL;
2923
c8b97818
CM
2924 /*
2925 * compressed and inline extents are written through other
2926 * paths in the FS
2927 */
2928 if (compressed || block_start == EXTENT_MAP_HOLE ||
d1310b2e 2929 block_start == EXTENT_MAP_INLINE) {
c8b97818
CM
2930 /*
2931 * end_io notification does not happen here for
2932 * compressed extents
2933 */
2934 if (!compressed && tree->ops &&
2935 tree->ops->writepage_end_io_hook)
e6dcd2dc
CM
2936 tree->ops->writepage_end_io_hook(page, cur,
2937 cur + iosize - 1,
2938 NULL, 1);
c8b97818
CM
2939 else if (compressed) {
2940 /* we don't want to end_page_writeback on
2941 * a compressed extent. this happens
2942 * elsewhere
2943 */
2944 nr++;
2945 }
2946
2947 cur += iosize;
7f3c74fb 2948 pg_offset += iosize;
d1310b2e
CM
2949 continue;
2950 }
d1310b2e
CM
2951 /* leave this out until we have a page_mkwrite call */
2952 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
9655d298 2953 EXTENT_DIRTY, 0, NULL)) {
d1310b2e 2954 cur = cur + iosize;
7f3c74fb 2955 pg_offset += iosize;
d1310b2e
CM
2956 continue;
2957 }
c8b97818 2958
d1310b2e
CM
2959 if (tree->ops && tree->ops->writepage_io_hook) {
2960 ret = tree->ops->writepage_io_hook(page, cur,
2961 cur + iosize - 1);
2962 } else {
2963 ret = 0;
2964 }
1259ab75 2965 if (ret) {
d1310b2e 2966 SetPageError(page);
1259ab75 2967 } else {
d1310b2e 2968 unsigned long max_nr = end_index + 1;
7f3c74fb 2969
d1310b2e
CM
2970 set_range_writeback(tree, cur, cur + iosize - 1);
2971 if (!PageWriteback(page)) {
d397712b
CM
2972 printk(KERN_ERR "btrfs warning page %lu not "
2973 "writeback, cur %llu end %llu\n",
2974 page->index, (unsigned long long)cur,
d1310b2e
CM
2975 (unsigned long long)end);
2976 }
2977
ffbd517d
CM
2978 ret = submit_extent_page(write_flags, tree, page,
2979 sector, iosize, pg_offset,
2980 bdev, &epd->bio, max_nr,
c8b97818
CM
2981 end_bio_extent_writepage,
2982 0, 0, 0);
d1310b2e
CM
2983 if (ret)
2984 SetPageError(page);
2985 }
2986 cur = cur + iosize;
7f3c74fb 2987 pg_offset += iosize;
d1310b2e
CM
2988 nr++;
2989 }
2990done:
2991 if (nr == 0) {
2992 /* make sure the mapping tag for page dirty gets cleared */
2993 set_page_writeback(page);
2994 end_page_writeback(page);
2995 }
d1310b2e 2996 unlock_page(page);
771ed689 2997
11c8349b
CM
2998done_unlocked:
2999
2c64c53d
CM
3000 /* drop our reference on any cached states */
3001 free_extent_state(cached_state);
d1310b2e
CM
3002 return 0;
3003}
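A worked example of the past-EOF check near the top of __extent_writepage (illustrative, 4K pages): with i_size == 8192, end_index is 2 and pg_offset is 0, so a dirty page with index 2 starts exactly at EOF and is invalidated instead of written:

	unsigned long end_index = 8192 >> 12;		/* 2 */
	size_t pg_offset = 8192 & (4096 - 1);		/* 0 -> page 2 is fully past EOF */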
3004
d1310b2e 3005/**
4bef0848 3006 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
d1310b2e
CM
3007 * @mapping: address space structure to write
3008 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
3009 * @writepage: function called for each page
3010 * @data: data passed to writepage function
3011 *
3012 * If a page is already under I/O, write_cache_pages() skips it, even
3013 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
3014 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
3015 * and msync() need to guarantee that all the data which was dirty at the time
3016 * the call was made get new I/O started against them. If wbc->sync_mode is
3017 * WB_SYNC_ALL then we were called for data integrity and we must wait for
3018 * existing IO to complete.
3019 */
b2950863 3020static int extent_write_cache_pages(struct extent_io_tree *tree,
4bef0848
CM
3021 struct address_space *mapping,
3022 struct writeback_control *wbc,
d2c3f4f6
CM
3023 writepage_t writepage, void *data,
3024 void (*flush_fn)(void *))
d1310b2e 3025{
d1310b2e
CM
3026 int ret = 0;
3027 int done = 0;
f85d7d6c 3028 int nr_to_write_done = 0;
d1310b2e
CM
3029 struct pagevec pvec;
3030 int nr_pages;
3031 pgoff_t index;
3032 pgoff_t end; /* Inclusive */
3033 int scanned = 0;
f7aaa06b 3034 int tag;
d1310b2e 3035
d1310b2e
CM
3036 pagevec_init(&pvec, 0);
3037 if (wbc->range_cyclic) {
3038 index = mapping->writeback_index; /* Start from prev offset */
3039 end = -1;
3040 } else {
3041 index = wbc->range_start >> PAGE_CACHE_SHIFT;
3042 end = wbc->range_end >> PAGE_CACHE_SHIFT;
d1310b2e
CM
3043 scanned = 1;
3044 }
f7aaa06b
JB
3045 if (wbc->sync_mode == WB_SYNC_ALL)
3046 tag = PAGECACHE_TAG_TOWRITE;
3047 else
3048 tag = PAGECACHE_TAG_DIRTY;
d1310b2e 3049retry:
f7aaa06b
JB
3050 if (wbc->sync_mode == WB_SYNC_ALL)
3051 tag_pages_for_writeback(mapping, index, end);
f85d7d6c 3052 while (!done && !nr_to_write_done && (index <= end) &&
f7aaa06b
JB
3053 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
3054 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
d1310b2e
CM
3055 unsigned i;
3056
3057 scanned = 1;
3058 for (i = 0; i < nr_pages; i++) {
3059 struct page *page = pvec.pages[i];
3060
3061 /*
3062 * At this point we hold neither mapping->tree_lock nor
3063 * lock on the page itself: the page may be truncated or
3064 * invalidated (changing page->mapping to NULL), or even
3065 * swizzled back from swapper_space to tmpfs file
3066 * mapping
3067 */
01d658f2
CM
3068 if (tree->ops &&
3069 tree->ops->write_cache_pages_lock_hook) {
3070 tree->ops->write_cache_pages_lock_hook(page,
3071 data, flush_fn);
3072 } else {
3073 if (!trylock_page(page)) {
3074 flush_fn(data);
3075 lock_page(page);
3076 }
3077 }
d1310b2e
CM
3078
3079 if (unlikely(page->mapping != mapping)) {
3080 unlock_page(page);
3081 continue;
3082 }
3083
3084 if (!wbc->range_cyclic && page->index > end) {
3085 done = 1;
3086 unlock_page(page);
3087 continue;
3088 }
3089
d2c3f4f6 3090 if (wbc->sync_mode != WB_SYNC_NONE) {
0e6bd956
CM
3091 if (PageWriteback(page))
3092 flush_fn(data);
d1310b2e 3093 wait_on_page_writeback(page);
d2c3f4f6 3094 }
d1310b2e
CM
3095
3096 if (PageWriteback(page) ||
3097 !clear_page_dirty_for_io(page)) {
3098 unlock_page(page);
3099 continue;
3100 }
3101
3102 ret = (*writepage)(page, wbc, data);
3103
3104 if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
3105 unlock_page(page);
3106 ret = 0;
3107 }
f85d7d6c 3108 if (ret)
d1310b2e 3109 done = 1;
f85d7d6c
CM
3110
3111 /*
3112 * the filesystem may choose to bump up nr_to_write.
3113 * We have to make sure to honor the new nr_to_write
3114 * at any time
3115 */
3116 nr_to_write_done = wbc->nr_to_write <= 0;
d1310b2e
CM
3117 }
3118 pagevec_release(&pvec);
3119 cond_resched();
3120 }
3121 if (!scanned && !done) {
3122 /*
3123 * We hit the last page and there is more work to be done: wrap
3124 * back to the start of the file
3125 */
3126 scanned = 1;
3127 index = 0;
3128 goto retry;
3129 }
d1310b2e
CM
3130 return ret;
3131}
d1310b2e 3132
ffbd517d 3133static void flush_epd_write_bio(struct extent_page_data *epd)
d2c3f4f6 3134{
d2c3f4f6 3135 if (epd->bio) {
355808c2
JM
3136 int rw = WRITE;
3137 int ret;
3138
ffbd517d 3139 if (epd->sync_io)
355808c2
JM
3140 rw = WRITE_SYNC;
3141
3142 ret = submit_one_bio(rw, epd->bio, 0, 0);
3143 BUG_ON(ret < 0);
d2c3f4f6
CM
3144 epd->bio = NULL;
3145 }
3146}
3147
ffbd517d
CM
3148static noinline void flush_write_bio(void *data)
3149{
3150 struct extent_page_data *epd = data;
3151 flush_epd_write_bio(epd);
3152}
3153
d1310b2e
CM
3154int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
3155 get_extent_t *get_extent,
3156 struct writeback_control *wbc)
3157{
3158 int ret;
d1310b2e
CM
3159 struct extent_page_data epd = {
3160 .bio = NULL,
3161 .tree = tree,
3162 .get_extent = get_extent,
771ed689 3163 .extent_locked = 0,
ffbd517d 3164 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
d1310b2e 3165 };
d1310b2e 3166
d1310b2e
CM
3167 ret = __extent_writepage(page, wbc, &epd);
3168
ffbd517d 3169 flush_epd_write_bio(&epd);
d1310b2e
CM
3170 return ret;
3171}
d1310b2e 3172
771ed689
CM
3173int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
3174 u64 start, u64 end, get_extent_t *get_extent,
3175 int mode)
3176{
3177 int ret = 0;
3178 struct address_space *mapping = inode->i_mapping;
3179 struct page *page;
3180 unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >>
3181 PAGE_CACHE_SHIFT;
3182
3183 struct extent_page_data epd = {
3184 .bio = NULL,
3185 .tree = tree,
3186 .get_extent = get_extent,
3187 .extent_locked = 1,
ffbd517d 3188 .sync_io = mode == WB_SYNC_ALL,
771ed689
CM
3189 };
3190 struct writeback_control wbc_writepages = {
771ed689 3191 .sync_mode = mode,
771ed689
CM
3192 .nr_to_write = nr_pages * 2,
3193 .range_start = start,
3194 .range_end = end + 1,
3195 };
3196
d397712b 3197 while (start <= end) {
771ed689
CM
3198 page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
3199 if (clear_page_dirty_for_io(page))
3200 ret = __extent_writepage(page, &wbc_writepages, &epd);
3201 else {
3202 if (tree->ops && tree->ops->writepage_end_io_hook)
3203 tree->ops->writepage_end_io_hook(page, start,
3204 start + PAGE_CACHE_SIZE - 1,
3205 NULL, 1);
3206 unlock_page(page);
3207 }
3208 page_cache_release(page);
3209 start += PAGE_CACHE_SIZE;
3210 }
3211
ffbd517d 3212 flush_epd_write_bio(&epd);
771ed689
CM
3213 return ret;
3214}
d1310b2e
CM
3215
3216int extent_writepages(struct extent_io_tree *tree,
3217 struct address_space *mapping,
3218 get_extent_t *get_extent,
3219 struct writeback_control *wbc)
3220{
3221 int ret = 0;
3222 struct extent_page_data epd = {
3223 .bio = NULL,
3224 .tree = tree,
3225 .get_extent = get_extent,
771ed689 3226 .extent_locked = 0,
ffbd517d 3227 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
d1310b2e
CM
3228 };
3229
4bef0848 3230 ret = extent_write_cache_pages(tree, mapping, wbc,
d2c3f4f6
CM
3231 __extent_writepage, &epd,
3232 flush_write_bio);
ffbd517d 3233 flush_epd_write_bio(&epd);
d1310b2e
CM
3234 return ret;
3235}
d1310b2e
CM
3236
3237int extent_readpages(struct extent_io_tree *tree,
3238 struct address_space *mapping,
3239 struct list_head *pages, unsigned nr_pages,
3240 get_extent_t get_extent)
3241{
3242 struct bio *bio = NULL;
3243 unsigned page_idx;
c8b97818 3244 unsigned long bio_flags = 0;
d1310b2e 3245
d1310b2e
CM
3246 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
3247 struct page *page = list_entry(pages->prev, struct page, lru);
3248
3249 prefetchw(&page->flags);
3250 list_del(&page->lru);
28ecb609 3251 if (!add_to_page_cache_lru(page, mapping,
43e817a1 3252 page->index, GFP_NOFS)) {
f188591e 3253 __extent_read_full_page(tree, page, get_extent,
c8b97818 3254 &bio, 0, &bio_flags);
d1310b2e
CM
3255 }
3256 page_cache_release(page);
3257 }
d1310b2e 3258 BUG_ON(!list_empty(pages));
355808c2
JM
3259 if (bio) {
3260 int ret = submit_one_bio(READ, bio, 0, bio_flags);
3261 BUG_ON(ret < 0);
3262 }
d1310b2e
CM
3263 return 0;
3264}
d1310b2e
CM
3265
3266/*
3267 * basic invalidatepage code, this waits on any locked or writeback
3268 * ranges corresponding to the page, and then deletes any extent state
3269 * records from the tree
3270 */
3271int extent_invalidatepage(struct extent_io_tree *tree,
3272 struct page *page, unsigned long offset)
3273{
2ac55d41 3274 struct extent_state *cached_state = NULL;
d1310b2e
CM
3275 u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
3276 u64 end = start + PAGE_CACHE_SIZE - 1;
3277 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
3278
d397712b 3279 start += (offset + blocksize - 1) & ~(blocksize - 1);
d1310b2e
CM
3280 if (start > end)
3281 return 0;
3282
2ac55d41 3283 lock_extent_bits(tree, start, end, 0, &cached_state, GFP_NOFS);
1edbb734 3284 wait_on_page_writeback(page);
d1310b2e 3285 clear_extent_bit(tree, start, end,
32c00aff
JB
3286 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
3287 EXTENT_DO_ACCOUNTING,
2ac55d41 3288 1, 1, &cached_state, GFP_NOFS);
d1310b2e
CM
3289 return 0;
3290}
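The rounding on 'start' above aligns the invalidation offset up to the next block boundary; as a worked example (assuming a 4K blocksize), invalidating from offset 100 rounds the starting offset up to 4096, so the block containing byte 100 is kept:

	/* (100 + 4096 - 1) & ~(4096 - 1) == 4096: clearing starts at the next block */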
d1310b2e 3291
7b13b7b1
CM
3292/*
3293 * a helper for releasepage, this tests for areas of the page that
3294 * are locked or under IO and drops the related state bits if it is safe
3295 * to drop the page.
3296 */
3297int try_release_extent_state(struct extent_map_tree *map,
3298 struct extent_io_tree *tree, struct page *page,
3299 gfp_t mask)
3300{
3301 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
3302 u64 end = start + PAGE_CACHE_SIZE - 1;
3303 int ret = 1;
3304
211f90e6 3305 if (test_range_bit(tree, start, end,
8b62b72b 3306 EXTENT_IOBITS, 0, NULL))
7b13b7b1
CM
3307 ret = 0;
3308 else {
3309 if ((mask & GFP_NOFS) == GFP_NOFS)
3310 mask = GFP_NOFS;
11ef160f
CM
3311 /*
3312 * at this point we can safely clear everything except the
3313 * locked bit and the nodatasum bit
3314 */
e3f24cc5 3315 ret = clear_extent_bit(tree, start, end,
11ef160f
CM
3316 ~(EXTENT_LOCKED | EXTENT_NODATASUM),
3317 0, 0, NULL, mask);
e3f24cc5
CM
3318
3319 /* if clear_extent_bit failed for enomem reasons,
3320 * we can't allow the release to continue.
3321 */
3322 if (ret < 0)
3323 ret = 0;
3324 else
3325 ret = 1;
7b13b7b1
CM
3326 }
3327 return ret;
3328}
7b13b7b1 3329
d1310b2e
CM
3330/*
3331 * a helper for releasepage. As long as there are no locked extents
3332 * in the range corresponding to the page, both state records and extent
3333 * map records are removed
3334 */
3335int try_release_extent_mapping(struct extent_map_tree *map,
70dec807
CM
3336 struct extent_io_tree *tree, struct page *page,
3337 gfp_t mask)
d1310b2e
CM
3338{
3339 struct extent_map *em;
3340 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
3341 u64 end = start + PAGE_CACHE_SIZE - 1;
7b13b7b1 3342
70dec807
CM
3343 if ((mask & __GFP_WAIT) &&
3344 page->mapping->host->i_size > 16 * 1024 * 1024) {
39b5637f 3345 u64 len;
70dec807 3346 while (start <= end) {
39b5637f 3347 len = end - start + 1;
890871be 3348 write_lock(&map->lock);
39b5637f 3349 em = lookup_extent_mapping(map, start, len);
285190d9 3350 if (!em) {
890871be 3351 write_unlock(&map->lock);
70dec807
CM
3352 break;
3353 }
7f3c74fb
CM
3354 if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
3355 em->start != start) {
890871be 3356 write_unlock(&map->lock);
70dec807
CM
3357 free_extent_map(em);
3358 break;
3359 }
3360 if (!test_range_bit(tree, em->start,
3361 extent_map_end(em) - 1,
8b62b72b 3362 EXTENT_LOCKED | EXTENT_WRITEBACK,
9655d298 3363 0, NULL)) {
70dec807
CM
3364 remove_extent_mapping(map, em);
3365 /* once for the rb tree */
3366 free_extent_map(em);
3367 }
3368 start = extent_map_end(em);
890871be 3369 write_unlock(&map->lock);
70dec807
CM
3370
3371 /* once for us */
d1310b2e
CM
3372 free_extent_map(em);
3373 }
d1310b2e 3374 }
7b13b7b1 3375 return try_release_extent_state(map, tree, page, mask);
d1310b2e 3376}
d1310b2e 3377
ec29ed5b
CM
3378/*
3379 * helper function for fiemap, which doesn't want to see any holes.
3380 * This maps until we find something past 'last'
3381 */
3382static struct extent_map *get_extent_skip_holes(struct inode *inode,
3383 u64 offset,
3384 u64 last,
3385 get_extent_t *get_extent)
3386{
3387 u64 sectorsize = BTRFS_I(inode)->root->sectorsize;
3388 struct extent_map *em;
3389 u64 len;
3390
3391 if (offset >= last)
3392 return NULL;
3393
 3394 while (1) {
3395 len = last - offset;
3396 if (len == 0)
3397 break;
3398 len = (len + sectorsize - 1) & ~(sectorsize - 1);
3399 em = get_extent(inode, NULL, 0, offset, len, 0);
c704005d 3400 if (IS_ERR_OR_NULL(em))
ec29ed5b
CM
3401 return em;
3402
3403 /* if this isn't a hole return it */
3404 if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags) &&
3405 em->block_start != EXTENT_MAP_HOLE) {
3406 return em;
3407 }
3408
3409 /* this is a hole, advance to the next extent */
3410 offset = extent_map_end(em);
3411 free_extent_map(em);
3412 if (offset >= last)
3413 break;
3414 }
3415 return NULL;
3416}
3417
1506fcc8
YS
3418int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3419 __u64 start, __u64 len, get_extent_t *get_extent)
3420{
975f84fe 3421 int ret = 0;
1506fcc8
YS
3422 u64 off = start;
3423 u64 max = start + len;
3424 u32 flags = 0;
975f84fe
JB
3425 u32 found_type;
3426 u64 last;
ec29ed5b 3427 u64 last_for_get_extent = 0;
1506fcc8 3428 u64 disko = 0;
ec29ed5b 3429 u64 isize = i_size_read(inode);
975f84fe 3430 struct btrfs_key found_key;
1506fcc8 3431 struct extent_map *em = NULL;
2ac55d41 3432 struct extent_state *cached_state = NULL;
975f84fe
JB
3433 struct btrfs_path *path;
3434 struct btrfs_file_extent_item *item;
1506fcc8 3435 int end = 0;
ec29ed5b
CM
3436 u64 em_start = 0;
3437 u64 em_len = 0;
3438 u64 em_end = 0;
1506fcc8 3439 unsigned long emflags;
1506fcc8
YS
3440
3441 if (len == 0)
3442 return -EINVAL;
3443
975f84fe
JB
3444 path = btrfs_alloc_path();
3445 if (!path)
3446 return -ENOMEM;
3447 path->leave_spinning = 1;
3448
4d479cf0
JB
3449 start = ALIGN(start, BTRFS_I(inode)->root->sectorsize);
3450 len = ALIGN(len, BTRFS_I(inode)->root->sectorsize);
3451
ec29ed5b
CM
3452 /*
3453 * lookup the last file extent. We're not using i_size here
3454 * because there might be preallocation past i_size
3455 */
975f84fe 3456 ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root,
33345d01 3457 path, btrfs_ino(inode), -1, 0);
975f84fe
JB
3458 if (ret < 0) {
3459 btrfs_free_path(path);
3460 return ret;
3461 }
3462 WARN_ON(!ret);
3463 path->slots[0]--;
3464 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3465 struct btrfs_file_extent_item);
3466 btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
3467 found_type = btrfs_key_type(&found_key);
3468
ec29ed5b 3469 /* No extents, but there might be delalloc bits */
33345d01 3470 if (found_key.objectid != btrfs_ino(inode) ||
975f84fe 3471 found_type != BTRFS_EXTENT_DATA_KEY) {
ec29ed5b
CM
3472 /* have to trust i_size as the end */
3473 last = (u64)-1;
3474 last_for_get_extent = isize;
3475 } else {
3476 /*
3477 * remember the start of the last extent. There are a
3478 * bunch of different factors that go into the length of the
 3479 * extent, so it's much less complex to remember where it started
3480 */
3481 last = found_key.offset;
3482 last_for_get_extent = last + 1;
975f84fe 3483 }
975f84fe
JB
3484 btrfs_free_path(path);
3485
ec29ed5b
CM
3486 /*
3487 * we might have some extents allocated but more delalloc past those
3488 * extents. so, we trust isize unless the start of the last extent is
3489 * beyond isize
3490 */
3491 if (last < isize) {
3492 last = (u64)-1;
3493 last_for_get_extent = isize;
3494 }
3495
2ac55d41
JB
3496 lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
3497 &cached_state, GFP_NOFS);
ec29ed5b 3498
4d479cf0 3499 em = get_extent_skip_holes(inode, start, last_for_get_extent,
ec29ed5b 3500 get_extent);
1506fcc8
YS
3501 if (!em)
3502 goto out;
3503 if (IS_ERR(em)) {
3504 ret = PTR_ERR(em);
3505 goto out;
3506 }
975f84fe 3507
1506fcc8 3508 while (!end) {
ea8efc74
CM
3509 u64 offset_in_extent;
3510
3511 /* break if the extent we found is outside the range */
3512 if (em->start >= max || extent_map_end(em) < off)
3513 break;
3514
3515 /*
3516 * get_extent may return an extent that starts before our
3517 * requested range. We have to make sure the ranges
3518 * we return to fiemap always move forward and don't
3519 * overlap, so adjust the offsets here
3520 */
3521 em_start = max(em->start, off);
1506fcc8 3522
ea8efc74
CM
3523 /*
3524 * record the offset from the start of the extent
3525 * for adjusting the disk offset below
3526 */
3527 offset_in_extent = em_start - em->start;
ec29ed5b 3528 em_end = extent_map_end(em);
ea8efc74 3529 em_len = em_end - em_start;
ec29ed5b 3530 emflags = em->flags;
1506fcc8
YS
3531 disko = 0;
3532 flags = 0;
3533
ea8efc74
CM
3534 /*
3535 * bump off for our next call to get_extent
3536 */
3537 off = extent_map_end(em);
3538 if (off >= max)
3539 end = 1;
3540
93dbfad7 3541 if (em->block_start == EXTENT_MAP_LAST_BYTE) {
1506fcc8
YS
3542 end = 1;
3543 flags |= FIEMAP_EXTENT_LAST;
93dbfad7 3544 } else if (em->block_start == EXTENT_MAP_INLINE) {
1506fcc8
YS
3545 flags |= (FIEMAP_EXTENT_DATA_INLINE |
3546 FIEMAP_EXTENT_NOT_ALIGNED);
93dbfad7 3547 } else if (em->block_start == EXTENT_MAP_DELALLOC) {
1506fcc8
YS
3548 flags |= (FIEMAP_EXTENT_DELALLOC |
3549 FIEMAP_EXTENT_UNKNOWN);
93dbfad7 3550 } else {
ea8efc74 3551 disko = em->block_start + offset_in_extent;
1506fcc8
YS
3552 }
3553 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
3554 flags |= FIEMAP_EXTENT_ENCODED;
3555
1506fcc8
YS
3556 free_extent_map(em);
3557 em = NULL;
ec29ed5b
CM
3558 if ((em_start >= last) || em_len == (u64)-1 ||
3559 (last == (u64)-1 && isize <= em_end)) {
1506fcc8
YS
3560 flags |= FIEMAP_EXTENT_LAST;
3561 end = 1;
3562 }
3563
ec29ed5b
CM
3564 /* now scan forward to see if this is really the last extent. */
3565 em = get_extent_skip_holes(inode, off, last_for_get_extent,
3566 get_extent);
3567 if (IS_ERR(em)) {
3568 ret = PTR_ERR(em);
3569 goto out;
3570 }
3571 if (!em) {
975f84fe
JB
3572 flags |= FIEMAP_EXTENT_LAST;
3573 end = 1;
3574 }
ec29ed5b
CM
3575 ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
3576 em_len, flags);
3577 if (ret)
3578 goto out_free;
1506fcc8
YS
3579 }
3580out_free:
3581 free_extent_map(em);
3582out:
2ac55d41
JB
3583 unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len,
3584 &cached_state, GFP_NOFS);
1506fcc8
YS
3585 return ret;
3586}
3587
4a54c8c1 3588inline struct page *extent_buffer_page(struct extent_buffer *eb,
d1310b2e
CM
3589 unsigned long i)
3590{
3591 struct page *p;
3592 struct address_space *mapping;
3593
3594 if (i == 0)
3595 return eb->first_page;
3596 i += eb->start >> PAGE_CACHE_SHIFT;
3597 mapping = eb->first_page->mapping;
33958dc6
CM
3598 if (!mapping)
3599 return NULL;
0ee0fda0
SW
3600
3601 /*
3602 * extent_buffer_page is only called after pinning the page
3603 * by increasing the reference count. So we know the page must
3604 * be in the radix tree.
3605 */
0ee0fda0 3606 rcu_read_lock();
d1310b2e 3607 p = radix_tree_lookup(&mapping->page_tree, i);
0ee0fda0 3608 rcu_read_unlock();
2b1f55b0 3609
d1310b2e
CM
3610 return p;
3611}
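To make the radix-tree index computation above concrete (illustrative, 4K pages): for an extent buffer starting at byte 16384, i.e. page 4, extent_buffer_page(eb, 2) looks up mapping index 2 + 4 == 6:

	unsigned long idx = 2 + (16384 >> 12);	/* 6 */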
3612
4a54c8c1 3613inline unsigned long num_extent_pages(u64 start, u64 len)
728131d8 3614{
6af118ce
CM
3615 return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
3616 (start >> PAGE_CACHE_SHIFT);
728131d8
CM
3617}
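A worked example of the page-count formula above (illustrative, 4K pages): an extent starting at byte 6144 with length 8192 ends at byte 14335 and spans pages 1 through 3:

	/* ((6144 + 8192 + 4095) >> 12) - (6144 >> 12) == 4 - 1 == 3 pages */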
3618
d1310b2e
CM
3619static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
3620 u64 start,
3621 unsigned long len,
3622 gfp_t mask)
3623{
3624 struct extent_buffer *eb = NULL;
3935127c 3625#if LEAK_DEBUG
2d2ae547 3626 unsigned long flags;
4bef0848 3627#endif
d1310b2e 3628
d1310b2e 3629 eb = kmem_cache_zalloc(extent_buffer_cache, mask);
91ca338d
TI
3630 if (eb == NULL)
3631 return NULL;
d1310b2e
CM
3632 eb->start = start;
3633 eb->len = len;
bd681513
CM
3634 rwlock_init(&eb->lock);
3635 atomic_set(&eb->write_locks, 0);
3636 atomic_set(&eb->read_locks, 0);
3637 atomic_set(&eb->blocking_readers, 0);
3638 atomic_set(&eb->blocking_writers, 0);
3639 atomic_set(&eb->spinning_readers, 0);
3640 atomic_set(&eb->spinning_writers, 0);
5b25f70f 3641 eb->lock_nested = 0;
bd681513
CM
3642 init_waitqueue_head(&eb->write_lock_wq);
3643 init_waitqueue_head(&eb->read_lock_wq);
b4ce94de 3644
3935127c 3645#if LEAK_DEBUG
2d2ae547
CM
3646 spin_lock_irqsave(&leak_lock, flags);
3647 list_add(&eb->leak_list, &buffers);
3648 spin_unlock_irqrestore(&leak_lock, flags);
4bef0848 3649#endif
d1310b2e
CM
3650 atomic_set(&eb->refs, 1);
3651
3652 return eb;
3653}
3654
3655static void __free_extent_buffer(struct extent_buffer *eb)
3656{
3935127c 3657#if LEAK_DEBUG
2d2ae547
CM
3658 unsigned long flags;
3659 spin_lock_irqsave(&leak_lock, flags);
3660 list_del(&eb->leak_list);
3661 spin_unlock_irqrestore(&leak_lock, flags);
4bef0848 3662#endif
d1310b2e
CM
3663 kmem_cache_free(extent_buffer_cache, eb);
3664}
3665
897ca6e9
MX
3666/*
 3667 * Helper for releasing the pages that back an extent buffer.
3668 */
3669static void btrfs_release_extent_buffer_page(struct extent_buffer *eb,
3670 unsigned long start_idx)
3671{
3672 unsigned long index;
3673 struct page *page;
3674
3675 if (!eb->first_page)
3676 return;
3677
3678 index = num_extent_pages(eb->start, eb->len);
3679 if (start_idx >= index)
3680 return;
3681
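 /*
  * walk backwards from the last page down to start_idx, dropping the
  * reference this buffer holds on each page.
  */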
3682 do {
3683 index--;
3684 page = extent_buffer_page(eb, index);
3685 if (page)
3686 page_cache_release(page);
3687 } while (index != start_idx);
3688}
3689
3690/*
3691 * Helper for releasing the extent buffer.
3692 */
3693static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
3694{
3695 btrfs_release_extent_buffer_page(eb, 0);
3696 __free_extent_buffer(eb);
3697}
3698
d1310b2e
CM
3699struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
3700 u64 start, unsigned long len,
ba144192 3701 struct page *page0)
d1310b2e
CM
3702{
3703 unsigned long num_pages = num_extent_pages(start, len);
3704 unsigned long i;
3705 unsigned long index = start >> PAGE_CACHE_SHIFT;
3706 struct extent_buffer *eb;
6af118ce 3707 struct extent_buffer *exists = NULL;
d1310b2e
CM
3708 struct page *p;
3709 struct address_space *mapping = tree->mapping;
3710 int uptodate = 1;
19fe0a8b 3711 int ret;
d1310b2e 3712
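 /*
  * fast path: the buffer may already be cached in the radix tree.
  * If so, take a reference under RCU and return it.
  */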
19fe0a8b
MX
3713 rcu_read_lock();
3714 eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
3715 if (eb && atomic_inc_not_zero(&eb->refs)) {
3716 rcu_read_unlock();
0f9dd46c 3717 mark_page_accessed(eb->first_page);
6af118ce
CM
3718 return eb;
3719 }
19fe0a8b 3720 rcu_read_unlock();
6af118ce 3721
ba144192 3722 eb = __alloc_extent_buffer(tree, start, len, GFP_NOFS);
2b114d1d 3723 if (!eb)
d1310b2e
CM
3724 return NULL;
3725
d1310b2e
CM
3726 if (page0) {
3727 eb->first_page = page0;
3728 i = 1;
3729 index++;
3730 page_cache_get(page0);
3731 mark_page_accessed(page0);
3732 set_page_extent_mapped(page0);
d1310b2e 3733 set_page_extent_head(page0, len);
f188591e 3734 uptodate = PageUptodate(page0);
d1310b2e
CM
3735 } else {
3736 i = 0;
3737 }
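 /*
  * attach the remaining backing pages: find or create each page in the
  * mapping, mark it extent mapped and track whether every page is
  * already uptodate.
  */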
3738 for (; i < num_pages; i++, index++) {
a6591715 3739 p = find_or_create_page(mapping, index, GFP_NOFS);
d1310b2e
CM
3740 if (!p) {
3741 WARN_ON(1);
6af118ce 3742 goto free_eb;
d1310b2e
CM
3743 }
3744 set_page_extent_mapped(p);
3745 mark_page_accessed(p);
3746 if (i == 0) {
3747 eb->first_page = p;
3748 set_page_extent_head(p, len);
3749 } else {
3750 set_page_private(p, EXTENT_PAGE_PRIVATE);
3751 }
3752 if (!PageUptodate(p))
3753 uptodate = 0;
eb14ab8e
CM
3754
3755 /*
3756 * see below about how we avoid a nasty race with release page
3757 * and why we unlock later
3758 */
3759 if (i != 0)
3760 unlock_page(p);
d1310b2e
CM
3761 }
3762 if (uptodate)
b4ce94de 3763 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
d1310b2e 3764
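 /*
  * insert the new buffer into the tree's radix under buffer_lock.
  * -EEXIST means another thread raced us with the same buffer, so take
  * a reference on the existing one and free ours via free_eb.
  */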
19fe0a8b
MX
3765 ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
3766 if (ret)
3767 goto free_eb;
3768
6af118ce 3769 spin_lock(&tree->buffer_lock);
19fe0a8b
MX
3770 ret = radix_tree_insert(&tree->buffer, start >> PAGE_CACHE_SHIFT, eb);
3771 if (ret == -EEXIST) {
3772 exists = radix_tree_lookup(&tree->buffer,
3773 start >> PAGE_CACHE_SHIFT);
6af118ce
CM
3774 /* add one reference for the caller */
3775 atomic_inc(&exists->refs);
3776 spin_unlock(&tree->buffer_lock);
19fe0a8b 3777 radix_tree_preload_end();
6af118ce
CM
3778 goto free_eb;
3779 }
6af118ce
CM
3780 /* add one reference for the tree */
3781 atomic_inc(&eb->refs);
f044ba78 3782 spin_unlock(&tree->buffer_lock);
19fe0a8b 3783 radix_tree_preload_end();
eb14ab8e
CM
3784
3785 /*
3786 * there is a race where release page may have
3787 * tried to find this extent buffer in the radix
3788 * but failed. It will tell the VM it is safe to
 3789 * reclaim the page, and it will clear the page private bit.
3790 * We must make sure to set the page private bit properly
3791 * after the extent buffer is in the radix tree so
3792 * it doesn't get lost
3793 */
3794 set_page_extent_mapped(eb->first_page);
3795 set_page_extent_head(eb->first_page, eb->len);
3796 if (!page0)
3797 unlock_page(eb->first_page);
d1310b2e
CM
3798 return eb;
3799
6af118ce 3800free_eb:
eb14ab8e
CM
3801 if (eb->first_page && !page0)
3802 unlock_page(eb->first_page);
3803
d1310b2e 3804 if (!atomic_dec_and_test(&eb->refs))
6af118ce 3805 return exists;
897ca6e9 3806 btrfs_release_extent_buffer(eb);
6af118ce 3807 return exists;
d1310b2e 3808}
d1310b2e
CM
3809
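/*
 * look up an extent buffer already cached in the tree; returns it with
 * an extra reference, or NULL if it is not present.
 */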
3810struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
f09d1f60 3811 u64 start, unsigned long len)
d1310b2e 3812{
d1310b2e 3813 struct extent_buffer *eb;
d1310b2e 3814
19fe0a8b
MX
3815 rcu_read_lock();
3816 eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
3817 if (eb && atomic_inc_not_zero(&eb->refs)) {
3818 rcu_read_unlock();
0f9dd46c 3819 mark_page_accessed(eb->first_page);
19fe0a8b
MX
3820 return eb;
3821 }
3822 rcu_read_unlock();
0f9dd46c 3823
19fe0a8b 3824 return NULL;
d1310b2e 3825}
d1310b2e
CM
3826
3827void free_extent_buffer(struct extent_buffer *eb)
3828{
d1310b2e
CM
3829 if (!eb)
3830 return;
3831
3832 if (!atomic_dec_and_test(&eb->refs))
3833 return;
3834
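 /*
  * the tree holds its own reference, dropped only from
  * try_release_extent_buffer(), so the count is not expected to reach
  * zero here; warn if it does.
  */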
6af118ce 3835 WARN_ON(1);
d1310b2e 3836}
d1310b2e
CM
3837
3838int clear_extent_buffer_dirty(struct extent_io_tree *tree,
3839 struct extent_buffer *eb)
3840{
d1310b2e
CM
3841 unsigned long i;
3842 unsigned long num_pages;
3843 struct page *page;
3844
d1310b2e
CM
3845 num_pages = num_extent_pages(eb->start, eb->len);
3846
3847 for (i = 0; i < num_pages; i++) {
3848 page = extent_buffer_page(eb, i);
b9473439 3849 if (!PageDirty(page))
d2c3f4f6
CM
3850 continue;
3851
a61e6f29 3852 lock_page(page);
eb14ab8e
CM
3853 WARN_ON(!PagePrivate(page));
3854
3855 set_page_extent_mapped(page);
d1310b2e
CM
3856 if (i == 0)
3857 set_page_extent_head(page, eb->len);
d1310b2e 3858
d1310b2e 3859 clear_page_dirty_for_io(page);
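 /*
  * the page dirty flag was cleared above; also clear the dirty tag in
  * the mapping's radix tree so dirty-page lookups (e.g. writeback) no
  * longer see this page.
  */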
0ee0fda0 3860 spin_lock_irq(&page->mapping->tree_lock);
d1310b2e
CM
3861 if (!PageDirty(page)) {
3862 radix_tree_tag_clear(&page->mapping->page_tree,
3863 page_index(page),
3864 PAGECACHE_TAG_DIRTY);
3865 }
0ee0fda0 3866 spin_unlock_irq(&page->mapping->tree_lock);
bf0da8c1 3867 ClearPageError(page);
a61e6f29 3868 unlock_page(page);
d1310b2e
CM
3869 }
3870 return 0;
3871}
d1310b2e 3872
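/*
 * mark the buffer and every backing page dirty; returns whether the
 * buffer was already dirty.
 */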
d1310b2e
CM
3873int set_extent_buffer_dirty(struct extent_io_tree *tree,
3874 struct extent_buffer *eb)
3875{
3876 unsigned long i;
3877 unsigned long num_pages;
b9473439 3878 int was_dirty = 0;
d1310b2e 3879
b9473439 3880 was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
d1310b2e 3881 num_pages = num_extent_pages(eb->start, eb->len);
b9473439 3882 for (i = 0; i < num_pages; i++)
d1310b2e 3883 __set_page_dirty_nobuffers(extent_buffer_page(eb, i));
b9473439 3884 return was_dirty;
d1310b2e 3885}
d1310b2e 3886
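/*
 * a buffer "straddles" pages when it is smaller than a page or not
 * aligned to page boundaries; only then do the callers also track
 * EXTENT_UPTODATE state in the io tree, since per-page flags cannot
 * describe a partial page.
 */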
19b6caf4
CM
3887static int __eb_straddles_pages(u64 start, u64 len)
3888{
3889 if (len < PAGE_CACHE_SIZE)
3890 return 1;
3891 if (start & (PAGE_CACHE_SIZE - 1))
3892 return 1;
3893 if ((start + len) & (PAGE_CACHE_SIZE - 1))
3894 return 1;
3895 return 0;
3896}
3897
3898static int eb_straddles_pages(struct extent_buffer *eb)
3899{
3900 return __eb_straddles_pages(eb->start, eb->len);
3901}
3902
1259ab75 3903int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
2ac55d41
JB
3904 struct extent_buffer *eb,
3905 struct extent_state **cached_state)
1259ab75
CM
3906{
3907 unsigned long i;
3908 struct page *page;
3909 unsigned long num_pages;
3910
3911 num_pages = num_extent_pages(eb->start, eb->len);
b4ce94de 3912 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
1259ab75 3913
50653190
CM
3914 clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
3915 cached_state, GFP_NOFS);
3916
1259ab75
CM
3917 for (i = 0; i < num_pages; i++) {
3918 page = extent_buffer_page(eb, i);
33958dc6
CM
3919 if (page)
3920 ClearPageUptodate(page);
1259ab75
CM
3921 }
3922 return 0;
3923}
3924
d1310b2e
CM
3925int set_extent_buffer_uptodate(struct extent_io_tree *tree,
3926 struct extent_buffer *eb)
3927{
3928 unsigned long i;
3929 struct page *page;
3930 unsigned long num_pages;
3931
3932 num_pages = num_extent_pages(eb->start, eb->len);
3933
19b6caf4
CM
3934 if (eb_straddles_pages(eb)) {
3935 set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
3936 NULL, GFP_NOFS);
3937 }
d1310b2e
CM
3938 for (i = 0; i < num_pages; i++) {
3939 page = extent_buffer_page(eb, i);
3940 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
3941 ((i == num_pages - 1) &&
3942 ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
3943 check_page_uptodate(tree, page);
3944 continue;
3945 }
3946 SetPageUptodate(page);
3947 }
3948 return 0;
3949}
d1310b2e 3950
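/*
 * return 1 when every page in [start, end] is uptodate; if the range
 * straddles page boundaries the EXTENT_UPTODATE bits are checked first.
 */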
ce9adaa5
CM
3951int extent_range_uptodate(struct extent_io_tree *tree,
3952 u64 start, u64 end)
3953{
3954 struct page *page;
3955 int ret;
3956 int pg_uptodate = 1;
3957 int uptodate;
3958 unsigned long index;
3959
19b6caf4
CM
3960 if (__eb_straddles_pages(start, end - start + 1)) {
3961 ret = test_range_bit(tree, start, end,
3962 EXTENT_UPTODATE, 1, NULL);
3963 if (ret)
3964 return 1;
3965 }
d397712b 3966 while (start <= end) {
ce9adaa5
CM
3967 index = start >> PAGE_CACHE_SHIFT;
3968 page = find_get_page(tree->mapping, index);
8bedd51b
MH
3969 if (!page)
3970 return 1;
ce9adaa5
CM
3971 uptodate = PageUptodate(page);
3972 page_cache_release(page);
3973 if (!uptodate) {
3974 pg_uptodate = 0;
3975 break;
3976 }
3977 start += PAGE_CACHE_SIZE;
3978 }
3979 return pg_uptodate;
3980}
3981
d1310b2e 3982int extent_buffer_uptodate(struct extent_io_tree *tree,
2ac55d41
JB
3983 struct extent_buffer *eb,
3984 struct extent_state *cached_state)
d1310b2e 3985{
728131d8 3986 int ret = 0;
ce9adaa5
CM
3987 unsigned long num_pages;
3988 unsigned long i;
728131d8
CM
3989 struct page *page;
3990 int pg_uptodate = 1;
3991
b4ce94de 3992 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
4235298e 3993 return 1;
728131d8 3994
19b6caf4
CM
3995 if (eb_straddles_pages(eb)) {
3996 ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
3997 EXTENT_UPTODATE, 1, cached_state);
3998 if (ret)
3999 return ret;
4000 }
728131d8
CM
4001
4002 num_pages = num_extent_pages(eb->start, eb->len);
4003 for (i = 0; i < num_pages; i++) {
4004 page = extent_buffer_page(eb, i);
4005 if (!PageUptodate(page)) {
4006 pg_uptodate = 0;
4007 break;
4008 }
4009 }
4235298e 4010 return pg_uptodate;
d1310b2e 4011}
d1310b2e
CM
4012
4013int read_extent_buffer_pages(struct extent_io_tree *tree,
bb82ab88 4014 struct extent_buffer *eb, u64 start, int wait,
f188591e 4015 get_extent_t *get_extent, int mirror_num)
d1310b2e
CM
4016{
4017 unsigned long i;
4018 unsigned long start_i;
4019 struct page *page;
4020 int err;
4021 int ret = 0;
ce9adaa5
CM
4022 int locked_pages = 0;
4023 int all_uptodate = 1;
4024 int inc_all_pages = 0;
d1310b2e 4025 unsigned long num_pages;
a86c12c7 4026 struct bio *bio = NULL;
c8b97818 4027 unsigned long bio_flags = 0;
a86c12c7 4028
b4ce94de 4029 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
d1310b2e
CM
4030 return 0;
4031
19b6caf4
CM
4032 if (eb_straddles_pages(eb)) {
4033 if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
4034 EXTENT_UPTODATE, 1, NULL)) {
4035 return 0;
4036 }
d1310b2e
CM
4037 }
4038
4039 if (start) {
4040 WARN_ON(start < eb->start);
4041 start_i = (start >> PAGE_CACHE_SHIFT) -
4042 (eb->start >> PAGE_CACHE_SHIFT);
4043 } else {
4044 start_i = 0;
4045 }
4046
4047 num_pages = num_extent_pages(eb->start, eb->len);
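 /*
  * first pass: lock every page (trylock only for WAIT_NONE) and note
  * whether the whole buffer is already uptodate.
  */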
4048 for (i = start_i; i < num_pages; i++) {
4049 page = extent_buffer_page(eb, i);
bb82ab88 4050 if (wait == WAIT_NONE) {
2db04966 4051 if (!trylock_page(page))
ce9adaa5 4052 goto unlock_exit;
d1310b2e
CM
4053 } else {
4054 lock_page(page);
4055 }
ce9adaa5 4056 locked_pages++;
d397712b 4057 if (!PageUptodate(page))
ce9adaa5 4058 all_uptodate = 0;
ce9adaa5
CM
4059 }
4060 if (all_uptodate) {
4061 if (start_i == 0)
b4ce94de 4062 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
ce9adaa5
CM
4063 goto unlock_exit;
4064 }
4065
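 /*
  * second pass: submit reads for pages that are not uptodate, merging
  * them into a single bio where possible; pages that are already
  * uptodate are simply unlocked.
  */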
4066 for (i = start_i; i < num_pages; i++) {
4067 page = extent_buffer_page(eb, i);
eb14ab8e
CM
4068
4069 WARN_ON(!PagePrivate(page));
4070
4071 set_page_extent_mapped(page);
4072 if (i == 0)
4073 set_page_extent_head(page, eb->len);
4074
ce9adaa5
CM
4075 if (inc_all_pages)
4076 page_cache_get(page);
4077 if (!PageUptodate(page)) {
4078 if (start_i == 0)
4079 inc_all_pages = 1;
f188591e 4080 ClearPageError(page);
a86c12c7 4081 err = __extent_read_full_page(tree, page,
f188591e 4082 get_extent, &bio,
c8b97818 4083 mirror_num, &bio_flags);
d397712b 4084 if (err)
d1310b2e 4085 ret = err;
d1310b2e
CM
4086 } else {
4087 unlock_page(page);
4088 }
4089 }
4090
355808c2
JM
4091 if (bio) {
4092 err = submit_one_bio(READ, bio, mirror_num, bio_flags);
4093 BUG_ON(err < 0);
4094 }
a86c12c7 4095
bb82ab88 4096 if (ret || wait != WAIT_COMPLETE)
d1310b2e 4097 return ret;
d397712b 4098
d1310b2e
CM
4099 for (i = start_i; i < num_pages; i++) {
4100 page = extent_buffer_page(eb, i);
4101 wait_on_page_locked(page);
d397712b 4102 if (!PageUptodate(page))
d1310b2e 4103 ret = -EIO;
d1310b2e 4104 }
d397712b 4105
d1310b2e 4106 if (!ret)
b4ce94de 4107 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
d1310b2e 4108 return ret;
ce9adaa5
CM
4109
4110unlock_exit:
4111 i = start_i;
d397712b 4112 while (locked_pages > 0) {
ce9adaa5
CM
4113 page = extent_buffer_page(eb, i);
4114 i++;
4115 unlock_page(page);
4116 locked_pages--;
4117 }
4118 return ret;
d1310b2e 4119}
d1310b2e
CM
4120
4121void read_extent_buffer(struct extent_buffer *eb, void *dstv,
4122 unsigned long start,
4123 unsigned long len)
4124{
4125 size_t cur;
4126 size_t offset;
4127 struct page *page;
4128 char *kaddr;
4129 char *dst = (char *)dstv;
4130 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4131 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
d1310b2e
CM
4132
4133 WARN_ON(start > eb->len);
4134 WARN_ON(start + len > eb->start + eb->len);
4135
4136 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
4137
d397712b 4138 while (len > 0) {
d1310b2e 4139 page = extent_buffer_page(eb, i);
d1310b2e
CM
4140
4141 cur = min(len, (PAGE_CACHE_SIZE - offset));
a6591715 4142 kaddr = page_address(page);
d1310b2e 4143 memcpy(dst, kaddr + offset, cur);
d1310b2e
CM
4144
4145 dst += cur;
4146 len -= cur;
4147 offset = 0;
4148 i++;
4149 }
4150}
d1310b2e
CM
4151
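/*
 * map @min_len bytes of the buffer starting at @start; the range must
 * fit within a single page, otherwise -EINVAL is returned. *map points
 * into the page, *map_start and *map_len describe the mapped region.
 */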
4152int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
a6591715 4153 unsigned long min_len, char **map,
d1310b2e 4154 unsigned long *map_start,
a6591715 4155 unsigned long *map_len)
d1310b2e
CM
4156{
4157 size_t offset = start & (PAGE_CACHE_SIZE - 1);
4158 char *kaddr;
4159 struct page *p;
4160 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4161 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4162 unsigned long end_i = (start_offset + start + min_len - 1) >>
4163 PAGE_CACHE_SHIFT;
4164
4165 if (i != end_i)
4166 return -EINVAL;
4167
4168 if (i == 0) {
4169 offset = start_offset;
4170 *map_start = 0;
4171 } else {
4172 offset = 0;
4173 *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
4174 }
d397712b 4175
d1310b2e 4176 if (start + min_len > eb->len) {
d397712b
CM
4177 printk(KERN_ERR "btrfs bad mapping eb start %llu len %lu, "
4178 "wanted %lu %lu\n", (unsigned long long)eb->start,
4179 eb->len, start, min_len);
d1310b2e 4180 WARN_ON(1);
85026533 4181 return -EINVAL;
d1310b2e
CM
4182 }
4183
4184 p = extent_buffer_page(eb, i);
a6591715 4185 kaddr = page_address(p);
d1310b2e
CM
4186 *map = kaddr + offset;
4187 *map_len = PAGE_CACHE_SIZE - offset;
4188 return 0;
4189}
d1310b2e 4190
d1310b2e
CM
4191int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
4192 unsigned long start,
4193 unsigned long len)
4194{
4195 size_t cur;
4196 size_t offset;
4197 struct page *page;
4198 char *kaddr;
4199 char *ptr = (char *)ptrv;
4200 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4201 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4202 int ret = 0;
4203
4204 WARN_ON(start > eb->len);
4205 WARN_ON(start + len > eb->start + eb->len);
4206
4207 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
4208
d397712b 4209 while (len > 0) {
d1310b2e 4210 page = extent_buffer_page(eb, i);
d1310b2e
CM
4211
4212 cur = min(len, (PAGE_CACHE_SIZE - offset));
4213
a6591715 4214 kaddr = page_address(page);
d1310b2e 4215 ret = memcmp(ptr, kaddr + offset, cur);
d1310b2e
CM
4216 if (ret)
4217 break;
4218
4219 ptr += cur;
4220 len -= cur;
4221 offset = 0;
4222 i++;
4223 }
4224 return ret;
4225}
d1310b2e
CM
4226
4227void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
4228 unsigned long start, unsigned long len)
4229{
4230 size_t cur;
4231 size_t offset;
4232 struct page *page;
4233 char *kaddr;
4234 char *src = (char *)srcv;
4235 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4236 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4237
4238 WARN_ON(start > eb->len);
4239 WARN_ON(start + len > eb->start + eb->len);
4240
4241 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
4242
d397712b 4243 while (len > 0) {
d1310b2e
CM
4244 page = extent_buffer_page(eb, i);
4245 WARN_ON(!PageUptodate(page));
4246
4247 cur = min(len, PAGE_CACHE_SIZE - offset);
a6591715 4248 kaddr = page_address(page);
d1310b2e 4249 memcpy(kaddr + offset, src, cur);
d1310b2e
CM
4250
4251 src += cur;
4252 len -= cur;
4253 offset = 0;
4254 i++;
4255 }
4256}
d1310b2e
CM
4257
4258void memset_extent_buffer(struct extent_buffer *eb, char c,
4259 unsigned long start, unsigned long len)
4260{
4261 size_t cur;
4262 size_t offset;
4263 struct page *page;
4264 char *kaddr;
4265 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4266 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4267
4268 WARN_ON(start > eb->len);
4269 WARN_ON(start + len > eb->start + eb->len);
4270
4271 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
4272
d397712b 4273 while (len > 0) {
d1310b2e
CM
4274 page = extent_buffer_page(eb, i);
4275 WARN_ON(!PageUptodate(page));
4276
4277 cur = min(len, PAGE_CACHE_SIZE - offset);
a6591715 4278 kaddr = page_address(page);
d1310b2e 4279 memset(kaddr + offset, c, cur);
d1310b2e
CM
4280
4281 len -= cur;
4282 offset = 0;
4283 i++;
4284 }
4285}
d1310b2e
CM
4286
4287void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
4288 unsigned long dst_offset, unsigned long src_offset,
4289 unsigned long len)
4290{
4291 u64 dst_len = dst->len;
4292 size_t cur;
4293 size_t offset;
4294 struct page *page;
4295 char *kaddr;
4296 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
4297 unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
4298
4299 WARN_ON(src->len != dst_len);
4300
4301 offset = (start_offset + dst_offset) &
4302 ((unsigned long)PAGE_CACHE_SIZE - 1);
4303
d397712b 4304 while (len > 0) {
d1310b2e
CM
4305 page = extent_buffer_page(dst, i);
4306 WARN_ON(!PageUptodate(page));
4307
4308 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
4309
a6591715 4310 kaddr = page_address(page);
d1310b2e 4311 read_extent_buffer(src, kaddr + offset, src_offset, cur);
d1310b2e
CM
4312
4313 src_offset += cur;
4314 len -= cur;
4315 offset = 0;
4316 i++;
4317 }
4318}
d1310b2e
CM
4319
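/*
 * move_pages() copies backwards so it is safe for overlapping ranges
 * within the same page; copy_pages() below uses a plain memcpy and
 * must never be called on overlapping ranges.
 */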
4320static void move_pages(struct page *dst_page, struct page *src_page,
4321 unsigned long dst_off, unsigned long src_off,
4322 unsigned long len)
4323{
a6591715 4324 char *dst_kaddr = page_address(dst_page);
d1310b2e
CM
4325 if (dst_page == src_page) {
4326 memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
4327 } else {
a6591715 4328 char *src_kaddr = page_address(src_page);
d1310b2e
CM
4329 char *p = dst_kaddr + dst_off + len;
4330 char *s = src_kaddr + src_off + len;
4331
4332 while (len--)
4333 *--p = *--s;
d1310b2e 4334 }
d1310b2e
CM
4335}
4336
3387206f
ST
4337static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
4338{
4339 unsigned long distance = (src > dst) ? src - dst : dst - src;
4340 return distance < len;
4341}
4342
d1310b2e
CM
4343static void copy_pages(struct page *dst_page, struct page *src_page,
4344 unsigned long dst_off, unsigned long src_off,
4345 unsigned long len)
4346{
a6591715 4347 char *dst_kaddr = page_address(dst_page);
d1310b2e
CM
4348 char *src_kaddr;
4349
3387206f 4350 if (dst_page != src_page) {
a6591715 4351 src_kaddr = page_address(src_page);
3387206f 4352 } else {
d1310b2e 4353 src_kaddr = dst_kaddr;
3387206f
ST
4354 BUG_ON(areas_overlap(src_off, dst_off, len));
4355 }
d1310b2e
CM
4356
4357 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
d1310b2e
CM
4358}
4359
4360void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
4361 unsigned long src_offset, unsigned long len)
4362{
4363 size_t cur;
4364 size_t dst_off_in_page;
4365 size_t src_off_in_page;
4366 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
4367 unsigned long dst_i;
4368 unsigned long src_i;
4369
4370 if (src_offset + len > dst->len) {
d397712b
CM
4371 printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
4372 "len %lu dst len %lu\n", src_offset, len, dst->len);
d1310b2e
CM
4373 BUG_ON(1);
4374 }
4375 if (dst_offset + len > dst->len) {
d397712b
CM
4376 printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
4377 "len %lu dst len %lu\n", dst_offset, len, dst->len);
d1310b2e
CM
4378 BUG_ON(1);
4379 }
4380
d397712b 4381 while (len > 0) {
d1310b2e
CM
4382 dst_off_in_page = (start_offset + dst_offset) &
4383 ((unsigned long)PAGE_CACHE_SIZE - 1);
4384 src_off_in_page = (start_offset + src_offset) &
4385 ((unsigned long)PAGE_CACHE_SIZE - 1);
4386
4387 dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
4388 src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
4389
4390 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
4391 src_off_in_page));
4392 cur = min_t(unsigned long, cur,
4393 (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
4394
4395 copy_pages(extent_buffer_page(dst, dst_i),
4396 extent_buffer_page(dst, src_i),
4397 dst_off_in_page, src_off_in_page, cur);
4398
4399 src_offset += cur;
4400 dst_offset += cur;
4401 len -= cur;
4402 }
4403}
d1310b2e
CM
4404
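/*
 * memmove-style copy within one buffer: overlapping ranges are copied
 * from the end backwards via move_pages() so source bytes are not
 * overwritten before they are read; non-overlapping ranges fall back
 * to memcpy_extent_buffer().
 */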
4405void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
4406 unsigned long src_offset, unsigned long len)
4407{
4408 size_t cur;
4409 size_t dst_off_in_page;
4410 size_t src_off_in_page;
4411 unsigned long dst_end = dst_offset + len - 1;
4412 unsigned long src_end = src_offset + len - 1;
4413 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
4414 unsigned long dst_i;
4415 unsigned long src_i;
4416
4417 if (src_offset + len > dst->len) {
d397712b
CM
4418 printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
4419 "len %lu len %lu\n", src_offset, len, dst->len);
d1310b2e
CM
4420 BUG_ON(1);
4421 }
4422 if (dst_offset + len > dst->len) {
d397712b
CM
4423 printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
4424 "len %lu len %lu\n", dst_offset, len, dst->len);
d1310b2e
CM
4425 BUG_ON(1);
4426 }
3387206f 4427 if (!areas_overlap(src_offset, dst_offset, len)) {
d1310b2e
CM
4428 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
4429 return;
4430 }
d397712b 4431 while (len > 0) {
d1310b2e
CM
4432 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
4433 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
4434
4435 dst_off_in_page = (start_offset + dst_end) &
4436 ((unsigned long)PAGE_CACHE_SIZE - 1);
4437 src_off_in_page = (start_offset + src_end) &
4438 ((unsigned long)PAGE_CACHE_SIZE - 1);
4439
4440 cur = min_t(unsigned long, len, src_off_in_page + 1);
4441 cur = min(cur, dst_off_in_page + 1);
4442 move_pages(extent_buffer_page(dst, dst_i),
4443 extent_buffer_page(dst, src_i),
4444 dst_off_in_page - cur + 1,
4445 src_off_in_page - cur + 1, cur);
4446
4447 dst_end -= cur;
4448 src_end -= cur;
4449 len -= cur;
4450 }
4451}
6af118ce 4452
19fe0a8b
MX
4453static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
4454{
4455 struct extent_buffer *eb =
4456 container_of(head, struct extent_buffer, rcu_head);
4457
4458 btrfs_release_extent_buffer(eb);
4459}
4460
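/*
 * called when the VM wants to release @page: drop the tree's reference
 * to the buffer starting at @page if it is not dirty and nobody else
 * holds a reference; the buffer itself is freed after an RCU grace
 * period.
 */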
6af118ce
CM
4461int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
4462{
4463 u64 start = page_offset(page);
4464 struct extent_buffer *eb;
4465 int ret = 1;
6af118ce
CM
4466
4467 spin_lock(&tree->buffer_lock);
19fe0a8b 4468 eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
45f49bce
CM
4469 if (!eb) {
4470 spin_unlock(&tree->buffer_lock);
4471 return ret;
4472 }
6af118ce 4473
19fe0a8b 4474 if (test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
6af118ce
CM
4475 ret = 0;
4476 goto out;
4477 }
19fe0a8b
MX
4478
4479 /*
 4480 * set @eb->refs to 0 if it is already 1, and then release the @eb.
 4481 * Otherwise someone else still holds a reference, so leave it alone.
4482 */
4483 if (atomic_cmpxchg(&eb->refs, 1, 0) != 1) {
b9473439
CM
4484 ret = 0;
4485 goto out;
4486 }
897ca6e9 4487
19fe0a8b 4488 radix_tree_delete(&tree->buffer, start >> PAGE_CACHE_SHIFT);
6af118ce
CM
4489out:
4490 spin_unlock(&tree->buffer_lock);
19fe0a8b
MX
4491
4492 /* at this point we can safely release the extent buffer */
4493 if (atomic_read(&eb->refs) == 0)
4494 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
6af118ce
CM
4495 return ret;
4496}