Btrfs: Remove superfluous casts from u64 to unsigned long long
[linux-2.6-block.git] / fs / btrfs / extent_io.c
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/prefetch.h>
#include <linux/cleancache.h>
#include "extent_io.h"
#include "extent_map.h"
#include "compat.h"
#include "ctree.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "check-integrity.h"
#include "locking.h"
#include "rcu-string.h"

static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;
static struct bio_set *btrfs_bioset;

#ifdef CONFIG_BTRFS_DEBUG
static LIST_HEAD(buffers);
static LIST_HEAD(states);

static DEFINE_SPINLOCK(leak_lock);

static inline
void btrfs_leak_debug_add(struct list_head *new, struct list_head *head)
{
	unsigned long flags;

	spin_lock_irqsave(&leak_lock, flags);
	list_add(new, head);
	spin_unlock_irqrestore(&leak_lock, flags);
}

static inline
void btrfs_leak_debug_del(struct list_head *entry)
{
	unsigned long flags;

	spin_lock_irqsave(&leak_lock, flags);
	list_del(entry);
	spin_unlock_irqrestore(&leak_lock, flags);
}

static inline
void btrfs_leak_debug_check(void)
{
	struct extent_state *state;
	struct extent_buffer *eb;

	while (!list_empty(&states)) {
		state = list_entry(states.next, struct extent_state, leak_list);
		printk(KERN_ERR "btrfs state leak: start %llu end %llu "
		       "state %lu in tree %p refs %d\n",
		       state->start, state->end, state->state, state->tree,
		       atomic_read(&state->refs));
		list_del(&state->leak_list);
		kmem_cache_free(extent_state_cache, state);
	}

	while (!list_empty(&buffers)) {
		eb = list_entry(buffers.next, struct extent_buffer, leak_list);
		printk(KERN_ERR "btrfs buffer leak start %llu len %lu "
		       "refs %d\n",
		       eb->start, eb->len, atomic_read(&eb->refs));
		list_del(&eb->leak_list);
		kmem_cache_free(extent_buffer_cache, eb);
	}
}

#define btrfs_debug_check_extent_io_range(inode, start, end)		\
	__btrfs_debug_check_extent_io_range(__func__, (inode), (start), (end))
static inline void __btrfs_debug_check_extent_io_range(const char *caller,
		struct inode *inode, u64 start, u64 end)
{
	u64 isize = i_size_read(inode);

	if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) {
		printk_ratelimited(KERN_DEBUG
		    "btrfs: %s: ino %llu isize %llu odd range [%llu,%llu]\n",
				caller, btrfs_ino(inode), isize, start, end);
	}
}
#else
#define btrfs_leak_debug_add(new, head)	do {} while (0)
#define btrfs_leak_debug_del(entry)	do {} while (0)
#define btrfs_leak_debug_check()	do {} while (0)
#define btrfs_debug_check_extent_io_range(c, s, e)	do {} while (0)
#endif

#define BUFFER_LRU_MAX 64

struct tree_entry {
	u64 start;
	u64 end;
	struct rb_node rb_node;
};

struct extent_page_data {
	struct bio *bio;
	struct extent_io_tree *tree;
	get_extent_t *get_extent;
	unsigned long bio_flags;

	/* tells writepage not to lock the state bits for this range
	 * it still does the unlocking
	 */
	unsigned int extent_locked:1;

	/* tells the submit_bio code to use a WRITE_SYNC */
	unsigned int sync_io:1;
};

static noinline void flush_write_bio(void *data);
static inline struct btrfs_fs_info *
tree_fs_info(struct extent_io_tree *tree)
{
	return btrfs_sb(tree->mapping->host->i_sb);
}

int __init extent_io_init(void)
{
	extent_state_cache = kmem_cache_create("btrfs_extent_state",
			sizeof(struct extent_state), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!extent_state_cache)
		return -ENOMEM;

	extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
			sizeof(struct extent_buffer), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!extent_buffer_cache)
		goto free_state_cache;

	btrfs_bioset = bioset_create(BIO_POOL_SIZE,
				     offsetof(struct btrfs_io_bio, bio));
	if (!btrfs_bioset)
		goto free_buffer_cache;
	return 0;

free_buffer_cache:
	kmem_cache_destroy(extent_buffer_cache);
	extent_buffer_cache = NULL;

free_state_cache:
	kmem_cache_destroy(extent_state_cache);
	extent_state_cache = NULL;
	return -ENOMEM;
}

void extent_io_exit(void)
{
	btrfs_leak_debug_check();

	/*
	 * Make sure all delayed rcu free are flushed before we
	 * destroy caches.
	 */
	rcu_barrier();
	if (extent_state_cache)
		kmem_cache_destroy(extent_state_cache);
	if (extent_buffer_cache)
		kmem_cache_destroy(extent_buffer_cache);
	if (btrfs_bioset)
		bioset_free(btrfs_bioset);
}

void extent_io_tree_init(struct extent_io_tree *tree,
			 struct address_space *mapping)
{
	tree->state = RB_ROOT;
	INIT_RADIX_TREE(&tree->buffer, GFP_ATOMIC);
	tree->ops = NULL;
	tree->dirty_bytes = 0;
	spin_lock_init(&tree->lock);
	spin_lock_init(&tree->buffer_lock);
	tree->mapping = mapping;
}
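
/*
 * Example (illustrative sketch, not a caller from this file): an owner
 * typically embeds the tree in its inode and initializes it once the
 * mapping exists, roughly:
 *
 *	extent_io_tree_init(&BTRFS_I(inode)->io_tree, inode->i_mapping);
 *
 * After this the tree holds no state records; they are created lazily
 * by the set/lock helpers below.
 */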

static struct extent_state *alloc_extent_state(gfp_t mask)
{
	struct extent_state *state;

	state = kmem_cache_alloc(extent_state_cache, mask);
	if (!state)
		return state;
	state->state = 0;
	state->private = 0;
	state->tree = NULL;
	btrfs_leak_debug_add(&state->leak_list, &states);
	atomic_set(&state->refs, 1);
	init_waitqueue_head(&state->wq);
	trace_alloc_extent_state(state, mask, _RET_IP_);
	return state;
}

void free_extent_state(struct extent_state *state)
{
	if (!state)
		return;
	if (atomic_dec_and_test(&state->refs)) {
		WARN_ON(state->tree);
		btrfs_leak_debug_del(&state->leak_list);
		trace_free_extent_state(state, _RET_IP_);
		kmem_cache_free(extent_state_cache, state);
	}
}

static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset > entry->end)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
				      struct rb_node **prev_ret,
				      struct rb_node **next_ret)
{
	struct rb_root *root = &tree->state;
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct tree_entry *entry;
	struct tree_entry *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct tree_entry, rb_node);
		prev = n;
		prev_entry = entry;

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset > entry->end)
			n = n->rb_right;
		else
			return n;
	}

	if (prev_ret) {
		orig_prev = prev;
		while (prev && offset > prev_entry->end) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*prev_ret = prev;
		prev = orig_prev;
	}

	if (next_ret) {
		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*next_ret = prev;
	}
	return NULL;
}

static inline struct rb_node *tree_search(struct extent_io_tree *tree,
					  u64 offset)
{
	struct rb_node *prev = NULL;
	struct rb_node *ret;

	ret = __etree_search(tree, offset, &prev, NULL);
	if (!ret)
		return prev;
	return ret;
}
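
/*
 * Usage note: tree_search() returns either the node containing 'offset'
 * or, failing that, the first node beginning after it, so callers must
 * still range-check the result.  A typical pattern (sketch):
 *
 *	node = tree_search(tree, start);
 *	if (!node)
 *		goto out;
 *	state = rb_entry(node, struct extent_state, rb_node);
 *	if (state->start > end)
 *		goto out;
 */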

static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
		     struct extent_state *other)
{
	if (tree->ops && tree->ops->merge_extent_hook)
		tree->ops->merge_extent_hook(tree->mapping->host, new,
					     other);
}

/*
 * utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree.  Extents with EXTENT_IO in their state field
 * are not merged because the end_io handlers need to be able to do
 * operations on them without sleeping (or doing allocations/splits).
 *
 * This should be called with the tree lock held.
 */
static void merge_state(struct extent_io_tree *tree,
			struct extent_state *state)
{
	struct extent_state *other;
	struct rb_node *other_node;

	if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
		return;

	other_node = rb_prev(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->end == state->start - 1 &&
		    other->state == state->state) {
			merge_cb(tree, state, other);
			state->start = other->start;
			other->tree = NULL;
			rb_erase(&other->rb_node, &tree->state);
			free_extent_state(other);
		}
	}
	other_node = rb_next(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->start == state->end + 1 &&
		    other->state == state->state) {
			merge_cb(tree, state, other);
			state->end = other->end;
			other->tree = NULL;
			rb_erase(&other->rb_node, &tree->state);
			free_extent_state(other);
		}
	}
}
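
/*
 * Example: if the tree holds [0, 4095] and [4096, 8191] and both records
 * carry exactly EXTENT_DELALLOC, merge_state() on either record collapses
 * them into a single [0, 8191] record and frees the leftover.  Records
 * holding EXTENT_IOBITS or EXTENT_BOUNDARY are never merged, as checked
 * above.
 */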

static void set_state_cb(struct extent_io_tree *tree,
			 struct extent_state *state, unsigned long *bits)
{
	if (tree->ops && tree->ops->set_bit_hook)
		tree->ops->set_bit_hook(tree->mapping->host, state, bits);
}

static void clear_state_cb(struct extent_io_tree *tree,
			   struct extent_state *state, unsigned long *bits)
{
	if (tree->ops && tree->ops->clear_bit_hook)
		tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
}

static void set_state_bits(struct extent_io_tree *tree,
			   struct extent_state *state, unsigned long *bits);

/*
 * insert an extent_state struct into the tree.  'bits' are set on the
 * struct before it is inserted.
 *
 * This may return -EEXIST if the extent is already there, in which case the
 * state struct is freed.
 *
 * The tree lock is not taken internally.  This is a utility function and
 * probably isn't what you want to call (see set/clear_extent_bit).
 */
static int insert_state(struct extent_io_tree *tree,
			struct extent_state *state, u64 start, u64 end,
			unsigned long *bits)
{
	struct rb_node *node;

	if (end < start)
		WARN(1, KERN_ERR "btrfs end < start %llu %llu\n",
		       end, start);
	state->start = start;
	state->end = end;

	set_state_bits(tree, state, bits);

	node = tree_insert(&tree->state, end, &state->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk(KERN_ERR "btrfs found node %llu %llu on insert of "
		       "%llu %llu\n",
		       found->start, found->end, start, end);
		return -EEXIST;
	}
	state->tree = tree;
	merge_state(tree, state);
	return 0;
}

static void split_cb(struct extent_io_tree *tree, struct extent_state *orig,
		     u64 split)
{
	if (tree->ops && tree->ops->split_extent_hook)
		tree->ops->split_extent_hook(tree->mapping->host, orig, split);
}

/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created second half.  'split' indicates an
 * offset inside 'orig' where it should be split.
 *
 * Before calling,
 * the tree has 'orig' at [orig->start, orig->end].  After calling, there
 * are two extent state structs in the tree:
 * prealloc: [orig->start, split - 1]
 * orig: [ split, orig->end ]
 *
 * The tree locks are not taken by this function. They need to be held
 * by the caller.
 */
static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
		       struct extent_state *prealloc, u64 split)
{
	struct rb_node *node;

	split_cb(tree, orig, split);

	prealloc->start = orig->start;
	prealloc->end = split - 1;
	prealloc->state = orig->state;
	orig->start = split;

	node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
	if (node) {
		free_extent_state(prealloc);
		return -EEXIST;
	}
	prealloc->tree = tree;
	return 0;
}

static struct extent_state *next_state(struct extent_state *state)
{
	struct rb_node *next = rb_next(&state->rb_node);
	if (next)
		return rb_entry(next, struct extent_state, rb_node);
	else
		return NULL;
}

/*
 * utility function to clear some bits in an extent state struct.
 * it will optionally wake up any one waiting on this state (wake == 1).
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree
 */
static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
					    struct extent_state *state,
					    unsigned long *bits, int wake)
{
	struct extent_state *next;
	unsigned long bits_to_clear = *bits & ~EXTENT_CTLBITS;

	if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		WARN_ON(range > tree->dirty_bytes);
		tree->dirty_bytes -= range;
	}
	clear_state_cb(tree, state, bits);
	state->state &= ~bits_to_clear;
	if (wake)
		wake_up(&state->wq);
	if (state->state == 0) {
		next = next_state(state);
		if (state->tree) {
			rb_erase(&state->rb_node, &tree->state);
			state->tree = NULL;
			free_extent_state(state);
		} else {
			WARN_ON(1);
		}
	} else {
		merge_state(tree, state);
		next = next_state(state);
	}
	return next;
}

static struct extent_state *
alloc_extent_state_atomic(struct extent_state *prealloc)
{
	if (!prealloc)
		prealloc = alloc_extent_state(GFP_ATOMIC);

	return prealloc;
}

static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
{
	btrfs_panic(tree_fs_info(tree), err, "Locking error: "
		    "Extent tree was modified by another "
		    "thread while locked.");
}

/*
 * clear some bits on a range in the tree.  This may require splitting
 * or inserting elements in the tree, so the gfp mask is used to
 * indicate which allocations or sleeping are allowed.
 *
 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 * the given range from the tree regardless of state (ie for truncate).
 *
 * the range [start, end] is inclusive.
 *
 * This takes the tree lock, and returns 0 on success and < 0 on error.
 */
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		     unsigned long bits, int wake, int delete,
		     struct extent_state **cached_state,
		     gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *cached;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	u64 last_end;
	int err;
	int clear = 0;

	btrfs_debug_check_extent_io_range(tree->mapping->host, start, end);

	if (bits & EXTENT_DELALLOC)
		bits |= EXTENT_NORESERVE;

	if (delete)
		bits |= ~EXTENT_CTLBITS;
	bits |= EXTENT_FIRST_DELALLOC;

	if (bits & (EXTENT_IOBITS | EXTENT_BOUNDARY))
		clear = 1;
again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	spin_lock(&tree->lock);
	if (cached_state) {
		cached = *cached_state;

		if (clear) {
			*cached_state = NULL;
			cached_state = NULL;
		}

		if (cached && cached->tree && cached->start <= start &&
		    cached->end > start) {
			if (clear)
				atomic_dec(&cached->refs);
			state = cached;
			goto hit_next;
		}
		if (clear)
			free_extent_state(cached);
	}
	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	if (state->start > end)
		goto out;
	WARN_ON(state->end < start);
	last_end = state->end;

	/* the state doesn't have the wanted bits, go ahead */
	if (!(state->state & bits)) {
		state = next_state(state);
		goto next;
	}

	/*
	 *     | ---- desired range ---- |
	 *  | state | or
	 *  | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip
	 * bits on second half.
	 *
	 * If the extent we found extends past our range, we
	 * just split and search again.  It'll get split again
	 * the next time though.
	 *
	 * If the extent we found is inside our range, we clear
	 * the desired bit on it.
	 */

	if (state->start < start) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, start);
		if (err)
			extent_io_tree_panic(tree, err);

		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			state = clear_state_bit(tree, state, &bits, wake);
			goto next;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and clear the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, end + 1);
		if (err)
			extent_io_tree_panic(tree, err);

		if (wake)
			wake_up(&state->wq);

		clear_state_bit(tree, prealloc, &bits, wake);

		prealloc = NULL;
		goto out;
	}

	state = clear_state_bit(tree, state, &bits, wake);
next:
	if (last_end == (u64)-1)
		goto out;
	start = last_end + 1;
	if (start <= end && state && !need_resched())
		goto hit_next;
	goto search_again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return 0;

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
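
/*
 * Usage sketch (illustrative, not a caller from this file): a
 * truncate-style caller passes wake == 1 and delete == 1 to drop every
 * record in the range no matter which bits it carries:
 *
 *	clear_extent_bit(tree, start, end, EXTENT_DIRTY, 1, 1,
 *			 &cached, GFP_NOFS);
 *
 * With delete == 1 the bit argument is effectively moot: 'bits' is
 * widened to everything outside EXTENT_CTLBITS before clearing.
 */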

static void wait_on_state(struct extent_io_tree *tree,
			  struct extent_state *state)
		__releases(tree->lock)
		__acquires(tree->lock)
{
	DEFINE_WAIT(wait);
	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
	spin_unlock(&tree->lock);
	schedule();
	spin_lock(&tree->lock);
	finish_wait(&state->wq, &wait);
}

/*
 * waits for one or more bits to clear on a range in the state tree.
 * The range [start, end] is inclusive.
 * The tree lock is taken by this function
 */
static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
			    unsigned long bits)
{
	struct extent_state *state;
	struct rb_node *node;

	btrfs_debug_check_extent_io_range(tree->mapping->host, start, end);

	spin_lock(&tree->lock);
again:
	while (1) {
		/*
		 * this search will find all the extents that end after
		 * our range starts
		 */
		node = tree_search(tree, start);
		if (!node)
			break;

		state = rb_entry(node, struct extent_state, rb_node);

		if (state->start > end)
			goto out;

		if (state->state & bits) {
			start = state->start;
			atomic_inc(&state->refs);
			wait_on_state(tree, state);
			free_extent_state(state);
			goto again;
		}
		start = state->end + 1;

		if (start > end)
			break;

		cond_resched_lock(&tree->lock);
	}
out:
	spin_unlock(&tree->lock);
}

static void set_state_bits(struct extent_io_tree *tree,
			   struct extent_state *state,
			   unsigned long *bits)
{
	unsigned long bits_to_set = *bits & ~EXTENT_CTLBITS;

	set_state_cb(tree, state, bits);
	if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		tree->dirty_bytes += range;
	}
	state->state |= bits_to_set;
}

static void cache_state(struct extent_state *state,
			struct extent_state **cached_ptr)
{
	if (cached_ptr && !(*cached_ptr)) {
		if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY)) {
			*cached_ptr = state;
			atomic_inc(&state->refs);
		}
	}
}

/*
 * set some bits on a range in the tree.  This may require allocations or
 * sleeping, so the gfp mask is used to indicate what is allowed.
 *
 * If any of the exclusive bits are set, this will fail with -EEXIST if some
 * part of the range already has the desired bits set.  The start of the
 * existing range is returned in failed_start in this case.
 *
 * [start, end] is inclusive.  This takes the tree lock.
 */

static int __must_check
__set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		 unsigned long bits, unsigned long exclusive_bits,
		 u64 *failed_start, struct extent_state **cached_state,
		 gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	int err = 0;
	u64 last_start;
	u64 last_end;

	btrfs_debug_check_extent_io_range(tree->mapping->host, start, end);

	bits |= EXTENT_FIRST_DELALLOC;
again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		BUG_ON(!prealloc);
	}

	spin_lock(&tree->lock);
	if (cached_state && *cached_state) {
		state = *cached_state;
		if (state->start <= start && state->end > start &&
		    state->tree) {
			node = &state->rb_node;
			goto hit_next;
		}
	}
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = insert_state(tree, prealloc, start, end, &bits);
		if (err)
			extent_io_tree_panic(tree, err);

		prealloc = NULL;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		if (state->state & exclusive_bits) {
			*failed_start = state->start;
			err = -EEXIST;
			goto out;
		}

		set_state_bits(tree, state, &bits);
		cache_state(state, cached_state);
		merge_state(tree, state);
		if (last_end == (u64)-1)
			goto out;
		start = last_end + 1;
		state = next_state(state);
		if (start < end && state && state->start == start &&
		    !need_resched())
			goto hit_next;
		goto search_again;
	}

	/*
	 *     | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		if (state->state & exclusive_bits) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, start);
		if (err)
			extent_io_tree_panic(tree, err);

		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set_state_bits(tree, state, &bits);
			cache_state(state, cached_state);
			merge_state(tree, state);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
			state = next_state(state);
			if (start < end && state && state->start == start &&
			    !need_resched())
				goto hit_next;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or               | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);

		/*
		 * Avoid to free 'prealloc' if it can be merged with
		 * the later extent.
		 */
		err = insert_state(tree, prealloc, start, this_end,
				   &bits);
		if (err)
			extent_io_tree_panic(tree, err);

		cache_state(prealloc, cached_state);
		prealloc = NULL;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		if (state->state & exclusive_bits) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, end + 1);
		if (err)
			extent_io_tree_panic(tree, err);

		set_state_bits(tree, prealloc, &bits);
		cache_state(prealloc, cached_state);
		merge_state(tree, prealloc);
		prealloc = NULL;
		goto out;
	}

	goto search_again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return err;

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}

int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   unsigned long bits, u64 *failed_start,
		   struct extent_state **cached_state, gfp_t mask)
{
	return __set_extent_bit(tree, start, end, bits, 0, failed_start,
				cached_state, mask);
}

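/*
 * Note: set_extent_bit() passes no exclusive bits to __set_extent_bit(),
 * so it cannot fail with -EEXIST; callers that need exclusion go through
 * lock_extent_bits() below.  A minimal non-exclusive caller (sketch):
 *
 *	set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL, NULL,
 *		       GFP_NOFS);
 */
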
/**
 * convert_extent_bit - convert all bits in a given range from one bit to
 * 			another
 * @tree:	the io tree to search
 * @start:	the start offset in bytes
 * @end:	the end offset in bytes (inclusive)
 * @bits:	the bits to set in this range
 * @clear_bits:	the bits to clear in this range
 * @cached_state:	state that we're going to cache
 * @mask:	the allocation mask
 *
 * This will go through and set bits for the given range.  If any states exist
 * already in this range they are set with the given bit and cleared of the
 * clear_bits.  This is only meant to be used by things that are mergeable, ie
 * converting from say DELALLOC to DIRTY.  This is not meant to be used with
 * boundary bits like LOCK.
 */
int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       unsigned long bits, unsigned long clear_bits,
		       struct extent_state **cached_state, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	int err = 0;
	u64 last_start;
	u64 last_end;

	btrfs_debug_check_extent_io_range(tree->mapping->host, start, end);

again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	spin_lock(&tree->lock);
	if (cached_state && *cached_state) {
		state = *cached_state;
		if (state->start <= start && state->end > start &&
		    state->tree) {
			node = &state->rb_node;
			goto hit_next;
		}
	}

	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node) {
		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}
		err = insert_state(tree, prealloc, start, end, &bits);
		prealloc = NULL;
		if (err)
			extent_io_tree_panic(tree, err);
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		set_state_bits(tree, state, &bits);
		cache_state(state, cached_state);
		state = clear_state_bit(tree, state, &clear_bits, 0);
		if (last_end == (u64)-1)
			goto out;
		start = last_end + 1;
		if (start < end && state && state->start == start &&
		    !need_resched())
			goto hit_next;
		goto search_again;
	}

	/*
	 *     | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}
		err = split_state(tree, state, prealloc, start);
		if (err)
			extent_io_tree_panic(tree, err);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set_state_bits(tree, state, &bits);
			cache_state(state, cached_state);
			state = clear_state_bit(tree, state, &clear_bits, 0);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
			if (start < end && state && state->start == start &&
			    !need_resched())
				goto hit_next;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or               | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;

		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}

		/*
		 * Avoid to free 'prealloc' if it can be merged with
		 * the later extent.
		 */
		err = insert_state(tree, prealloc, start, this_end,
				   &bits);
		if (err)
			extent_io_tree_panic(tree, err);
		cache_state(prealloc, cached_state);
		prealloc = NULL;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}

		err = split_state(tree, state, prealloc, end + 1);
		if (err)
			extent_io_tree_panic(tree, err);

		set_state_bits(tree, prealloc, &bits);
		cache_state(prealloc, cached_state);
		clear_state_bit(tree, prealloc, &clear_bits, 0);
		prealloc = NULL;
		goto out;
	}

	goto search_again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return err;

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}

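/*
 * Example (hypothetical caller, for illustration only): converting a
 * range from one mergeable bit to another in a single locked pass,
 * instead of a separate clear followed by a set:
 *
 *	convert_extent_bit(tree, start, end, EXTENT_NEW, EXTENT_DIRTY,
 *			   &cached, GFP_NOFS);
 */
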
/* wrappers around set/clear extent bit */
int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL,
			      NULL, mask);
}

int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		    unsigned long bits, gfp_t mask)
{
	return set_extent_bit(tree, start, end, bits, NULL,
			      NULL, mask);
}

int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		      unsigned long bits, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask);
}

int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
			struct extent_state **cached_state, gfp_t mask)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_UPTODATE,
			      NULL, cached_state, mask);
}

int set_extent_defrag(struct extent_io_tree *tree, u64 start, u64 end,
		      struct extent_state **cached_state, gfp_t mask)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG,
			      NULL, cached_state, mask);
}

int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
		       gfp_t mask)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC |
				EXTENT_DO_ACCOUNTING, 0, 0, NULL, mask);
}

int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
		   gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, NULL,
			      NULL, mask);
}

int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
			struct extent_state **cached_state, gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, NULL,
			      cached_state, mask);
}

int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
			  struct extent_state **cached_state, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
				cached_state, mask);
}

/*
 * either insert or lock state struct between start and end.  Use 'bits'
 * to tell us if waiting is desired.
 */
int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		     unsigned long bits, struct extent_state **cached_state)
{
	int err;
	u64 failed_start;
	while (1) {
		err = __set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
				       EXTENT_LOCKED, &failed_start,
				       cached_state, GFP_NOFS);
		if (err == -EEXIST) {
			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
			start = failed_start;
		} else
			break;
		WARN_ON(start > end);
	}
	return err;
}

int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	return lock_extent_bits(tree, start, end, 0, NULL);
}

int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	int err;
	u64 failed_start;

	err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
			       &failed_start, NULL, GFP_NOFS);
	if (err == -EEXIST) {
		if (failed_start > start)
			clear_extent_bit(tree, start, failed_start - 1,
					 EXTENT_LOCKED, 1, 0, NULL, GFP_NOFS);
		return 0;
	}
	return 1;
}

int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
			 struct extent_state **cached, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
				mask);
}

int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
				GFP_NOFS);
}

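/*
 * Usage sketch (illustrative): the canonical pairing around a range
 * operation, threading the cached state through so the unlock avoids a
 * second tree search:
 *
 *	struct extent_state *cached = NULL;
 *
 *	lock_extent_bits(tree, start, end, 0, &cached);
 *	... operate on [start, end] ...
 *	unlock_extent_cached(tree, start, end, &cached, GFP_NOFS);
 */
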
int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(inode->i_mapping, index);
		BUG_ON(!page); /* Pages should be in the extent_io_tree */
		clear_page_dirty_for_io(page);
		page_cache_release(page);
		index++;
	}
	return 0;
}

int extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(inode->i_mapping, index);
		BUG_ON(!page); /* Pages should be in the extent_io_tree */
		account_page_redirty(page);
		__set_page_dirty_nobuffers(page);
		page_cache_release(page);
		index++;
	}
	return 0;
}

/*
 * helper function to set both pages and extents in the tree writeback
 */
static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page); /* Pages should be in the extent_io_tree */
		set_page_writeback(page);
		page_cache_release(page);
		index++;
	}
	return 0;
}

/* find the first state struct with 'bits' set after 'start', and
 * return it.  tree->lock must be held.  NULL will be returned if
 * nothing was found after 'start'
 */
static struct extent_state *
find_first_extent_bit_state(struct extent_io_tree *tree,
			    u64 start, unsigned long bits)
{
	struct rb_node *node;
	struct extent_state *state;

	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->end >= start && (state->state & bits))
			return state;

		node = rb_next(node);
		if (!node)
			break;
	}
out:
	return NULL;
}

/*
 * find the first offset in the io tree with 'bits' set. zero is
 * returned if we find something, and *start_ret and *end_ret are
 * set to reflect the state struct that was found.
 *
 * If nothing was found, 1 is returned. If found something, return 0.
 */
int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, unsigned long bits,
			  struct extent_state **cached_state)
{
	struct extent_state *state;
	struct rb_node *n;
	int ret = 1;

	spin_lock(&tree->lock);
	if (cached_state && *cached_state) {
		state = *cached_state;
		if (state->end == start - 1 && state->tree) {
			n = rb_next(&state->rb_node);
			while (n) {
				state = rb_entry(n, struct extent_state,
						 rb_node);
				if (state->state & bits)
					goto got_it;
				n = rb_next(n);
			}
			free_extent_state(*cached_state);
			*cached_state = NULL;
			goto out;
		}
		free_extent_state(*cached_state);
		*cached_state = NULL;
	}

	state = find_first_extent_bit_state(tree, start, bits);
got_it:
	if (state) {
		cache_state(state, cached_state);
		*start_ret = state->start;
		*end_ret = state->end;
		ret = 0;
	}
out:
	spin_unlock(&tree->lock);
	return ret;
}

/*
 * find a contiguous range of bytes in the file marked as delalloc, not
 * more than 'max_bytes'.  start and end are used to return the range.
 *
 * 1 is returned if we find something, 0 if nothing was in the tree
 */
static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
					u64 *start, u64 *end, u64 max_bytes,
					struct extent_state **cached_state)
{
	struct rb_node *node;
	struct extent_state *state;
	u64 cur_start = *start;
	u64 found = 0;
	u64 total_bytes = 0;

	spin_lock(&tree->lock);

	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, cur_start);
	if (!node) {
		if (!found)
			*end = (u64)-1;
		goto out;
	}

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (found && (state->start != cur_start ||
			      (state->state & EXTENT_BOUNDARY))) {
			goto out;
		}
		if (!(state->state & EXTENT_DELALLOC)) {
			if (!found)
				*end = state->end;
			goto out;
		}
		if (!found) {
			*start = state->start;
			*cached_state = state;
			atomic_inc(&state->refs);
		}
		found++;
		*end = state->end;
		cur_start = state->end + 1;
		node = rb_next(node);
		if (!node)
			break;
		total_bytes += state->end - state->start + 1;
		if (total_bytes >= max_bytes)
			break;
	}
out:
	spin_unlock(&tree->lock);
	return found;
}

static noinline void __unlock_for_delalloc(struct inode *inode,
					   struct page *locked_page,
					   u64 start, u64 end)
{
	int ret;
	struct page *pages[16];
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	unsigned long nr_pages = end_index - index + 1;
	int i;

	if (index == locked_page->index && end_index == index)
		return;

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long, nr_pages,
				     ARRAY_SIZE(pages)), pages);
		for (i = 0; i < ret; i++) {
			if (pages[i] != locked_page)
				unlock_page(pages[i]);
			page_cache_release(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
		cond_resched();
	}
}

static noinline int lock_delalloc_pages(struct inode *inode,
					struct page *locked_page,
					u64 delalloc_start,
					u64 delalloc_end)
{
	unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT;
	unsigned long start_index = index;
	unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT;
	unsigned long pages_locked = 0;
	struct page *pages[16];
	unsigned long nrpages;
	int ret;
	int i;

	/* the caller is responsible for locking the start index */
	if (index == locked_page->index && index == end_index)
		return 0;

	/* skip the page at the start index */
	nrpages = end_index - index + 1;
	while (nrpages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long,
				     nrpages, ARRAY_SIZE(pages)), pages);
		if (ret == 0) {
			ret = -EAGAIN;
			goto done;
		}
		/* now we have an array of pages, lock them all */
		for (i = 0; i < ret; i++) {
			/*
			 * the caller is taking responsibility for
			 * locked_page
			 */
			if (pages[i] != locked_page) {
				lock_page(pages[i]);
				if (!PageDirty(pages[i]) ||
				    pages[i]->mapping != inode->i_mapping) {
					ret = -EAGAIN;
					unlock_page(pages[i]);
					page_cache_release(pages[i]);
					goto done;
				}
			}
			page_cache_release(pages[i]);
			pages_locked++;
		}
		nrpages -= ret;
		index += ret;
		cond_resched();
	}
	ret = 0;
done:
	if (ret && pages_locked) {
		__unlock_for_delalloc(inode, locked_page,
			      delalloc_start,
			      ((u64)(start_index + pages_locked - 1)) <<
			      PAGE_CACHE_SHIFT);
	}
	return ret;
}

/*
 * find a contiguous range of bytes in the file marked as delalloc, not
 * more than 'max_bytes'.  start and end are used to return the range.
 *
 * 1 is returned if we find something, 0 if nothing was in the tree
 */
static noinline u64 find_lock_delalloc_range(struct inode *inode,
					     struct extent_io_tree *tree,
					     struct page *locked_page,
					     u64 *start, u64 *end,
					     u64 max_bytes)
{
	u64 delalloc_start;
	u64 delalloc_end;
	u64 found;
	struct extent_state *cached_state = NULL;
	int ret;
	int loops = 0;

again:
	/* step one, find a bunch of delalloc bytes starting at start */
	delalloc_start = *start;
	delalloc_end = 0;
	found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
				    max_bytes, &cached_state);
	if (!found || delalloc_end <= *start) {
		*start = delalloc_start;
		*end = delalloc_end;
		free_extent_state(cached_state);
		return found;
	}

	/*
	 * start comes from the offset of locked_page.  We have to lock
	 * pages in order, so we can't process delalloc bytes before
	 * locked_page
	 */
	if (delalloc_start < *start)
		delalloc_start = *start;

	/*
	 * make sure to limit the number of pages we try to lock down
	 * if we're looping.
	 */
	if (delalloc_end + 1 - delalloc_start > max_bytes && loops)
		delalloc_end = delalloc_start + PAGE_CACHE_SIZE - 1;

	/* step two, lock all the pages after the page that has start */
	ret = lock_delalloc_pages(inode, locked_page,
				  delalloc_start, delalloc_end);
	if (ret == -EAGAIN) {
		/* some of the pages are gone, lets avoid looping by
		 * shortening the size of the delalloc range we're searching
		 */
		free_extent_state(cached_state);
		if (!loops) {
			unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1);
			max_bytes = PAGE_CACHE_SIZE - offset;
			loops = 1;
			goto again;
		} else {
			found = 0;
			goto out_failed;
		}
	}
	BUG_ON(ret);	/* Only valid values are 0 and -EAGAIN */

	/* step three, lock the state bits for the whole range */
	lock_extent_bits(tree, delalloc_start, delalloc_end, 0, &cached_state);

	/* then test to make sure it is all still delalloc */
	ret = test_range_bit(tree, delalloc_start, delalloc_end,
			     EXTENT_DELALLOC, 1, cached_state);
	if (!ret) {
		unlock_extent_cached(tree, delalloc_start, delalloc_end,
				     &cached_state, GFP_NOFS);
		__unlock_for_delalloc(inode, locked_page,
			      delalloc_start, delalloc_end);
		cond_resched();
		goto again;
	}
	free_extent_state(cached_state);
	*start = delalloc_start;
	*end = delalloc_end;
out_failed:
	return found;
}

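/*
 * Note: on the -EAGAIN path above, the search window shrinks to the
 * single page around *start before retrying; if even that retry hits
 * -EAGAIN, the function gives up and returns found == 0.
 */
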
int extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
				 struct page *locked_page,
				 unsigned long clear_bits,
				 unsigned long page_ops)
{
	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
	int ret;
	struct page *pages[16];
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	unsigned long nr_pages = end_index - index + 1;
	int i;

	clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS);
	if (page_ops == 0)
		return 0;

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long,
				     nr_pages, ARRAY_SIZE(pages)), pages);
		for (i = 0; i < ret; i++) {

			if (page_ops & PAGE_SET_PRIVATE2)
				SetPagePrivate2(pages[i]);

			if (pages[i] == locked_page) {
				page_cache_release(pages[i]);
				continue;
			}
			if (page_ops & PAGE_CLEAR_DIRTY)
				clear_page_dirty_for_io(pages[i]);
			if (page_ops & PAGE_SET_WRITEBACK)
				set_page_writeback(pages[i]);
			if (page_ops & PAGE_END_WRITEBACK)
				end_page_writeback(pages[i]);
			if (page_ops & PAGE_UNLOCK)
				unlock_page(pages[i]);
			page_cache_release(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
		cond_resched();
	}
	return 0;
}

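/*
 * Usage sketch (the flag combination is illustrative, not taken from a
 * caller in this file): an error path in the writeout code can drop the
 * delalloc accounting and release every page of the range in one call:
 *
 *	extent_clear_unlock_delalloc(inode, start, end, locked_page,
 *				     EXTENT_LOCKED | EXTENT_DELALLOC,
 *				     PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK |
 *				     PAGE_END_WRITEBACK | PAGE_UNLOCK);
 */
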
/*
 * count the number of bytes in the tree that have a given bit(s)
 * set.  This can be fairly slow, except for EXTENT_DIRTY which is
 * cached.  The total number found is returned.
 */
u64 count_range_bits(struct extent_io_tree *tree,
		     u64 *start, u64 search_end, u64 max_bytes,
		     unsigned long bits, int contig)
{
	struct rb_node *node;
	struct extent_state *state;
	u64 cur_start = *start;
	u64 total_bytes = 0;
	u64 last = 0;
	int found = 0;

	if (search_end <= cur_start) {
		WARN_ON(1);
		return 0;
	}

	spin_lock(&tree->lock);
	if (cur_start == 0 && bits == EXTENT_DIRTY) {
		total_bytes = tree->dirty_bytes;
		goto out;
	}
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, cur_start);
	if (!node)
		goto out;

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->start > search_end)
			break;
		if (contig && found && state->start > last + 1)
			break;
		if (state->end >= cur_start && (state->state & bits) == bits) {
			total_bytes += min(search_end, state->end) + 1 -
				       max(cur_start, state->start);
			if (total_bytes >= max_bytes)
				break;
			if (!found) {
				*start = max(cur_start, state->start);
				found = 1;
			}
			last = state->end;
		} else if (contig && found) {
			break;
		}
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	spin_unlock(&tree->lock);
	return total_bytes;
}

/*
 * set the private field for a given byte offset in the tree.  If there isn't
 * an extent_state there already, this does nothing.
 */
static int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	spin_lock(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	state->private = private;
out:
	spin_unlock(&tree->lock);
	return ret;
}

1814int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
1815{
1816 struct rb_node *node;
1817 struct extent_state *state;
1818 int ret = 0;
1819
cad321ad 1820 spin_lock(&tree->lock);
d1310b2e
CM
1821 /*
1822 * this search will find all the extents that end after
1823 * our range starts.
1824 */
80ea96b1 1825 node = tree_search(tree, start);
2b114d1d 1826 if (!node) {
d1310b2e
CM
1827 ret = -ENOENT;
1828 goto out;
1829 }
1830 state = rb_entry(node, struct extent_state, rb_node);
1831 if (state->start != start) {
1832 ret = -ENOENT;
1833 goto out;
1834 }
1835 *private = state->private;
1836out:
cad321ad 1837 spin_unlock(&tree->lock);
d1310b2e
CM
1838 return ret;
1839}
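/*
 * Hypothetical sketch (example_private_roundtrip is not a btrfs
 * function): the private slot is a raw u64 keyed by the exact start
 * offset of an extent_state; both helpers fail with -ENOENT when no
 * state begins at 'start'.  The failure-handling code below stashes a
 * pointer to an io_failure_record in this slot.
 */
static int example_private_roundtrip(struct extent_io_tree *tree, u64 start)
{
	u64 val = 0;
	int ret;

	ret = set_state_private(tree, start, 42);
	if (ret)
		return ret;	/* no extent_state starts at 'start' */
	ret = get_state_private(tree, start, &val);
	WARN_ON(!ret && val != 42);
	return ret;
}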
1840
1841/*
1842 * searches a range in the state tree for a given mask.
70dec807 1843 * If 'filled' == 1, this returns 1 only if every extent in the range
d1310b2e
CM
 1844 * has the bits set. Otherwise, 1 is returned if any bit in the
1845 * range is found set.
1846 */
1847int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
41074888 1848 unsigned long bits, int filled, struct extent_state *cached)
d1310b2e
CM
1849{
1850 struct extent_state *state = NULL;
1851 struct rb_node *node;
1852 int bitset = 0;
d1310b2e 1853
cad321ad 1854 spin_lock(&tree->lock);
df98b6e2
JB
1855 if (cached && cached->tree && cached->start <= start &&
1856 cached->end > start)
9655d298
CM
1857 node = &cached->rb_node;
1858 else
1859 node = tree_search(tree, start);
d1310b2e
CM
1860 while (node && start <= end) {
1861 state = rb_entry(node, struct extent_state, rb_node);
1862
1863 if (filled && state->start > start) {
1864 bitset = 0;
1865 break;
1866 }
1867
1868 if (state->start > end)
1869 break;
1870
1871 if (state->state & bits) {
1872 bitset = 1;
1873 if (!filled)
1874 break;
1875 } else if (filled) {
1876 bitset = 0;
1877 break;
1878 }
46562cec
CM
1879
1880 if (state->end == (u64)-1)
1881 break;
1882
d1310b2e
CM
1883 start = state->end + 1;
1884 if (start > end)
1885 break;
1886 node = rb_next(node);
1887 if (!node) {
1888 if (filled)
1889 bitset = 0;
1890 break;
1891 }
1892 }
cad321ad 1893 spin_unlock(&tree->lock);
d1310b2e
CM
1894 return bitset;
1895}
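/*
 * Hypothetical sketches of the two 'filled' modes (neither helper exists
 * in btrfs): with filled == 1 every state overlapping [start, end] must
 * carry the bit, which is what check_page_uptodate() below relies on;
 * with filled == 0 one matching state anywhere in the range suffices.
 */
static int example_whole_range_uptodate(struct extent_io_tree *tree,
					u64 start, u64 end)
{
	return test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL);
}

static int example_any_byte_locked(struct extent_io_tree *tree,
				   u64 start, u64 end)
{
	return test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL);
}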
d1310b2e
CM
1896
1897/*
1898 * helper function to set a given page up to date if all the
1899 * extents in the tree for that page are up to date
1900 */
143bede5 1901static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
d1310b2e 1902{
4eee4fa4 1903 u64 start = page_offset(page);
d1310b2e 1904 u64 end = start + PAGE_CACHE_SIZE - 1;
9655d298 1905 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
d1310b2e 1906 SetPageUptodate(page);
d1310b2e
CM
1907}
1908
4a54c8c1
JS
1909/*
 1910 * When IO fails, either with EIO or because csum verification fails, we
1911 * try other mirrors that might have a good copy of the data. This
1912 * io_failure_record is used to record state as we go through all the
1913 * mirrors. If another mirror has good data, the page is set up to date
1914 * and things continue. If a good mirror can't be found, the original
1915 * bio end_io callback is called to indicate things have failed.
1916 */
1917struct io_failure_record {
1918 struct page *page;
1919 u64 start;
1920 u64 len;
1921 u64 logical;
1922 unsigned long bio_flags;
1923 int this_mirror;
1924 int failed_mirror;
1925 int in_validation;
1926};
1927
1928static int free_io_failure(struct inode *inode, struct io_failure_record *rec,
1929 int did_repair)
1930{
1931 int ret;
1932 int err = 0;
1933 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
1934
1935 set_state_private(failure_tree, rec->start, 0);
1936 ret = clear_extent_bits(failure_tree, rec->start,
1937 rec->start + rec->len - 1,
1938 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
1939 if (ret)
1940 err = ret;
1941
53b381b3
DW
1942 ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start,
1943 rec->start + rec->len - 1,
1944 EXTENT_DAMAGED, GFP_NOFS);
1945 if (ret && !err)
1946 err = ret;
4a54c8c1
JS
1947
1948 kfree(rec);
1949 return err;
1950}
1951
1952static void repair_io_failure_callback(struct bio *bio, int err)
1953{
1954 complete(bio->bi_private);
1955}
1956
1957/*
1958 * this bypasses the standard btrfs submit functions deliberately, as
1959 * the standard behavior is to write all copies in a raid setup. here we only
1960 * want to write the one bad copy. so we do the mapping for ourselves and issue
1961 * submit_bio directly.
3ec706c8 1962 * to avoid any synchronization issues, wait for the data after writing, which
4a54c8c1
JS
1963 * actually prevents the read that triggered the error from finishing.
1964 * currently, there can be no more than two copies of every data bit. thus,
1965 * exactly one rewrite is required.
1966 */
3ec706c8 1967int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
4a54c8c1
JS
1968 u64 length, u64 logical, struct page *page,
1969 int mirror_num)
1970{
1971 struct bio *bio;
1972 struct btrfs_device *dev;
1973 DECLARE_COMPLETION_ONSTACK(compl);
1974 u64 map_length = 0;
1975 u64 sector;
1976 struct btrfs_bio *bbio = NULL;
53b381b3 1977 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
4a54c8c1
JS
1978 int ret;
1979
1980 BUG_ON(!mirror_num);
1981
53b381b3
DW
1982 /* we can't repair anything in raid56 yet */
1983 if (btrfs_is_parity_mirror(map_tree, logical, length, mirror_num))
1984 return 0;
1985
9be3395b 1986 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
4a54c8c1
JS
1987 if (!bio)
1988 return -EIO;
1989 bio->bi_private = &compl;
1990 bio->bi_end_io = repair_io_failure_callback;
1991 bio->bi_size = 0;
1992 map_length = length;
1993
3ec706c8 1994 ret = btrfs_map_block(fs_info, WRITE, logical,
4a54c8c1
JS
1995 &map_length, &bbio, mirror_num);
1996 if (ret) {
1997 bio_put(bio);
1998 return -EIO;
1999 }
2000 BUG_ON(mirror_num != bbio->mirror_num);
2001 sector = bbio->stripes[mirror_num-1].physical >> 9;
2002 bio->bi_sector = sector;
2003 dev = bbio->stripes[mirror_num-1].dev;
2004 kfree(bbio);
2005 if (!dev || !dev->bdev || !dev->writeable) {
2006 bio_put(bio);
2007 return -EIO;
2008 }
2009 bio->bi_bdev = dev->bdev;
4eee4fa4 2010 bio_add_page(bio, page, length, start - page_offset(page));
21adbd5c 2011 btrfsic_submit_bio(WRITE_SYNC, bio);
4a54c8c1
JS
2012 wait_for_completion(&compl);
2013
2014 if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
2015 /* try to remap that extent elsewhere? */
2016 bio_put(bio);
442a4f63 2017 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
4a54c8c1
JS
2018 return -EIO;
2019 }
2020
d5b025d5 2021 printk_ratelimited_in_rcu(KERN_INFO "btrfs read error corrected: ino %lu off %llu "
606686ee
JB
2022 "(dev %s sector %llu)\n", page->mapping->host->i_ino,
2023 start, rcu_str_deref(dev->name), sector);
4a54c8c1
JS
2024
2025 bio_put(bio);
2026 return 0;
2027}
2028
ea466794
JB
2029int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
2030 int mirror_num)
2031{
ea466794
JB
2032 u64 start = eb->start;
2033 unsigned long i, num_pages = num_extent_pages(eb->start, eb->len);
d95603b2 2034 int ret = 0;
ea466794
JB
2035
2036 for (i = 0; i < num_pages; i++) {
2037 struct page *p = extent_buffer_page(eb, i);
3ec706c8 2038 ret = repair_io_failure(root->fs_info, start, PAGE_CACHE_SIZE,
ea466794
JB
2039 start, p, mirror_num);
2040 if (ret)
2041 break;
2042 start += PAGE_CACHE_SIZE;
2043 }
2044
2045 return ret;
2046}
2047
4a54c8c1
JS
2048/*
2049 * each time an IO finishes, we do a fast check in the IO failure tree
2050 * to see if we need to process or clean up an io_failure_record
2051 */
2052static int clean_io_failure(u64 start, struct page *page)
2053{
2054 u64 private;
2055 u64 private_failure;
2056 struct io_failure_record *failrec;
3ec706c8 2057 struct btrfs_fs_info *fs_info;
4a54c8c1
JS
2058 struct extent_state *state;
2059 int num_copies;
2060 int did_repair = 0;
2061 int ret;
2062 struct inode *inode = page->mapping->host;
2063
2064 private = 0;
2065 ret = count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
2066 (u64)-1, 1, EXTENT_DIRTY, 0);
2067 if (!ret)
2068 return 0;
2069
2070 ret = get_state_private(&BTRFS_I(inode)->io_failure_tree, start,
2071 &private_failure);
2072 if (ret)
2073 return 0;
2074
2075 failrec = (struct io_failure_record *)(unsigned long) private_failure;
2076 BUG_ON(!failrec->this_mirror);
2077
2078 if (failrec->in_validation) {
2079 /* there was no real error, just free the record */
2080 pr_debug("clean_io_failure: freeing dummy error at %llu\n",
2081 failrec->start);
2082 did_repair = 1;
2083 goto out;
2084 }
2085
2086 spin_lock(&BTRFS_I(inode)->io_tree.lock);
2087 state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
2088 failrec->start,
2089 EXTENT_LOCKED);
2090 spin_unlock(&BTRFS_I(inode)->io_tree.lock);
2091
883d0de4
MX
2092 if (state && state->start <= failrec->start &&
2093 state->end >= failrec->start + failrec->len - 1) {
3ec706c8
SB
2094 fs_info = BTRFS_I(inode)->root->fs_info;
2095 num_copies = btrfs_num_copies(fs_info, failrec->logical,
2096 failrec->len);
4a54c8c1 2097 if (num_copies > 1) {
3ec706c8 2098 ret = repair_io_failure(fs_info, start, failrec->len,
4a54c8c1
JS
2099 failrec->logical, page,
2100 failrec->failed_mirror);
2101 did_repair = !ret;
2102 }
53b381b3 2103 ret = 0;
4a54c8c1
JS
2104 }
2105
2106out:
2107 if (!ret)
2108 ret = free_io_failure(inode, failrec, did_repair);
2109
2110 return ret;
2111}
2112
2113/*
2114 * this is a generic handler for readpage errors (default
2115 * readpage_io_failed_hook). if other copies exist, read those and write back
 2116 * good data to the failed position. does not attempt to remap the
2117 * failed extent elsewhere, hoping the device will be smart enough to do this as
2118 * needed
2119 */
2120
facc8a22
MX
2121static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
2122 struct page *page, u64 start, u64 end,
2123 int failed_mirror)
4a54c8c1
JS
2124{
2125 struct io_failure_record *failrec = NULL;
2126 u64 private;
2127 struct extent_map *em;
2128 struct inode *inode = page->mapping->host;
2129 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2130 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2131 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
2132 struct bio *bio;
facc8a22
MX
2133 struct btrfs_io_bio *btrfs_failed_bio;
2134 struct btrfs_io_bio *btrfs_bio;
4a54c8c1
JS
2135 int num_copies;
2136 int ret;
2137 int read_mode;
2138 u64 logical;
2139
2140 BUG_ON(failed_bio->bi_rw & REQ_WRITE);
2141
2142 ret = get_state_private(failure_tree, start, &private);
2143 if (ret) {
2144 failrec = kzalloc(sizeof(*failrec), GFP_NOFS);
2145 if (!failrec)
2146 return -ENOMEM;
2147 failrec->start = start;
2148 failrec->len = end - start + 1;
2149 failrec->this_mirror = 0;
2150 failrec->bio_flags = 0;
2151 failrec->in_validation = 0;
2152
2153 read_lock(&em_tree->lock);
2154 em = lookup_extent_mapping(em_tree, start, failrec->len);
2155 if (!em) {
2156 read_unlock(&em_tree->lock);
2157 kfree(failrec);
2158 return -EIO;
2159 }
2160
2161 if (em->start > start || em->start + em->len < start) {
2162 free_extent_map(em);
2163 em = NULL;
2164 }
2165 read_unlock(&em_tree->lock);
2166
7a2d6a64 2167 if (!em) {
4a54c8c1
JS
2168 kfree(failrec);
2169 return -EIO;
2170 }
2171 logical = start - em->start;
2172 logical = em->block_start + logical;
2173 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2174 logical = em->block_start;
2175 failrec->bio_flags = EXTENT_BIO_COMPRESSED;
2176 extent_set_compress_type(&failrec->bio_flags,
2177 em->compress_type);
2178 }
2179 pr_debug("bio_readpage_error: (new) logical=%llu, start=%llu, "
2180 "len=%llu\n", logical, start, failrec->len);
2181 failrec->logical = logical;
2182 free_extent_map(em);
2183
2184 /* set the bits in the private failure tree */
2185 ret = set_extent_bits(failure_tree, start, end,
2186 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
2187 if (ret >= 0)
2188 ret = set_state_private(failure_tree, start,
2189 (u64)(unsigned long)failrec);
2190 /* set the bits in the inode's tree */
2191 if (ret >= 0)
2192 ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED,
2193 GFP_NOFS);
2194 if (ret < 0) {
2195 kfree(failrec);
2196 return ret;
2197 }
2198 } else {
2199 failrec = (struct io_failure_record *)(unsigned long)private;
2200 pr_debug("bio_readpage_error: (found) logical=%llu, "
2201 "start=%llu, len=%llu, validation=%d\n",
2202 failrec->logical, failrec->start, failrec->len,
2203 failrec->in_validation);
2204 /*
2205 * when data can be on disk more than twice, add to failrec here
2206 * (e.g. with a list for failed_mirror) to make
2207 * clean_io_failure() clean all those errors at once.
2208 */
2209 }
5d964051
SB
2210 num_copies = btrfs_num_copies(BTRFS_I(inode)->root->fs_info,
2211 failrec->logical, failrec->len);
4a54c8c1
JS
2212 if (num_copies == 1) {
2213 /*
2214 * we only have a single copy of the data, so don't bother with
2215 * all the retry and error correction code that follows. no
2216 * matter what the error is, it is very likely to persist.
2217 */
09a7f7a2
MX
2218 pr_debug("bio_readpage_error: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d\n",
2219 num_copies, failrec->this_mirror, failed_mirror);
4a54c8c1
JS
2220 free_io_failure(inode, failrec, 0);
2221 return -EIO;
2222 }
2223
4a54c8c1
JS
2224 /*
 2225 * there are two goals:
2226 * a) deliver good data to the caller
2227 * b) correct the bad sectors on disk
2228 */
2229 if (failed_bio->bi_vcnt > 1) {
2230 /*
2231 * to fulfill b), we need to know the exact failing sectors, as
2232 * we don't want to rewrite any more than the failed ones. thus,
2233 * we need separate read requests for the failed bio
2234 *
2235 * if the following BUG_ON triggers, our validation request got
2236 * merged. we need separate requests for our algorithm to work.
2237 */
2238 BUG_ON(failrec->in_validation);
2239 failrec->in_validation = 1;
2240 failrec->this_mirror = failed_mirror;
2241 read_mode = READ_SYNC | REQ_FAILFAST_DEV;
2242 } else {
2243 /*
 2244 * we're ready to fulfill a) and b) at the same time. get a good
 2245 * copy of the failed sector and, if we succeed, we have set up
2246 * everything for repair_io_failure to do the rest for us.
2247 */
2248 if (failrec->in_validation) {
2249 BUG_ON(failrec->this_mirror != failed_mirror);
2250 failrec->in_validation = 0;
2251 failrec->this_mirror = 0;
2252 }
2253 failrec->failed_mirror = failed_mirror;
2254 failrec->this_mirror++;
2255 if (failrec->this_mirror == failed_mirror)
2256 failrec->this_mirror++;
2257 read_mode = READ_SYNC;
2258 }
2259
facc8a22
MX
2260 if (failrec->this_mirror > num_copies) {
2261 pr_debug("bio_readpage_error: (fail) num_copies=%d, next_mirror %d, failed_mirror %d\n",
4a54c8c1
JS
2262 num_copies, failrec->this_mirror, failed_mirror);
2263 free_io_failure(inode, failrec, 0);
2264 return -EIO;
2265 }
2266
9be3395b 2267 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
e627ee7b
TI
2268 if (!bio) {
2269 free_io_failure(inode, failrec, 0);
2270 return -EIO;
2271 }
4a54c8c1
JS
2272 bio->bi_end_io = failed_bio->bi_end_io;
2273 bio->bi_sector = failrec->logical >> 9;
2274 bio->bi_bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
2275 bio->bi_size = 0;
2276
facc8a22
MX
2277 btrfs_failed_bio = btrfs_io_bio(failed_bio);
2278 if (btrfs_failed_bio->csum) {
2279 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
2280 u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
2281
2282 btrfs_bio = btrfs_io_bio(bio);
2283 btrfs_bio->csum = btrfs_bio->csum_inline;
2284 phy_offset >>= inode->i_sb->s_blocksize_bits;
2285 phy_offset *= csum_size;
2286 memcpy(btrfs_bio->csum, btrfs_failed_bio->csum + phy_offset,
2287 csum_size);
2288 }
2289
4a54c8c1
JS
2290 bio_add_page(bio, page, failrec->len, start - page_offset(page));
2291
2292 pr_debug("bio_readpage_error: submitting new read[%#x] to "
2293 "this_mirror=%d, num_copies=%d, in_validation=%d\n", read_mode,
2294 failrec->this_mirror, num_copies, failrec->in_validation);
2295
013bd4c3
TI
2296 ret = tree->ops->submit_bio_hook(inode, read_mode, bio,
2297 failrec->this_mirror,
2298 failrec->bio_flags, 0);
2299 return ret;
4a54c8c1
JS
2300}
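/*
 * Hypothetical distillation of the mirror rotation above
 * (example_next_mirror is not a btrfs function): this_mirror walks
 * through 1..num_copies, skipping the mirror that produced the error;
 * once every copy has been tried the retry machinery gives up with -EIO.
 */
static int example_next_mirror(int this_mirror, int failed_mirror,
			       int num_copies)
{
	int next = this_mirror + 1;

	if (next == failed_mirror)	/* never re-read the bad copy */
		next++;
	return next > num_copies ? -EIO : next;
}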
2301
d1310b2e
CM
2302/* lots and lots of room for performance fixes in the end_bio funcs */
2303
87826df0
JM
2304int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
2305{
2306 int uptodate = (err == 0);
2307 struct extent_io_tree *tree;
2308 int ret;
2309
2310 tree = &BTRFS_I(page->mapping->host)->io_tree;
2311
2312 if (tree->ops && tree->ops->writepage_end_io_hook) {
2313 ret = tree->ops->writepage_end_io_hook(page, start,
2314 end, NULL, uptodate);
2315 if (ret)
2316 uptodate = 0;
2317 }
2318
87826df0 2319 if (!uptodate) {
87826df0
JM
2320 ClearPageUptodate(page);
2321 SetPageError(page);
2322 }
2323 return 0;
2324}
2325
d1310b2e
CM
2326/*
2327 * after a writepage IO is done, we need to:
2328 * clear the uptodate bits on error
2329 * clear the writeback bits in the extent tree for this IO
2330 * end_page_writeback if the page has no more pending IO
2331 *
2332 * Scheduling is not allowed, so the extent state tree is expected
2333 * to have one and only one object corresponding to this IO.
2334 */
d1310b2e 2335static void end_bio_extent_writepage(struct bio *bio, int err)
d1310b2e 2336{
d1310b2e 2337 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
902b22f3 2338 struct extent_io_tree *tree;
d1310b2e
CM
2339 u64 start;
2340 u64 end;
d1310b2e 2341
d1310b2e
CM
2342 do {
2343 struct page *page = bvec->bv_page;
902b22f3
DW
2344 tree = &BTRFS_I(page->mapping->host)->io_tree;
2345
17a5adcc
AO
 2346 /* We always issue full-page writes, but if some block
 2347 * in a page fails to write, blk_update_request() will
2348 * advance bv_offset and adjust bv_len to compensate.
2349 * Print a warning for nonzero offsets, and an error
2350 * if they don't add up to a full page. */
2351 if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE)
2352 printk("%s page write in btrfs with offset %u and length %u\n",
2353 bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE
2354 ? KERN_ERR "partial" : KERN_INFO "incomplete",
2355 bvec->bv_offset, bvec->bv_len);
d1310b2e 2356
17a5adcc
AO
2357 start = page_offset(page);
2358 end = start + bvec->bv_offset + bvec->bv_len - 1;
d1310b2e
CM
2359
2360 if (--bvec >= bio->bi_io_vec)
2361 prefetchw(&bvec->bv_page->flags);
1259ab75 2362
87826df0
JM
2363 if (end_extent_writepage(page, err, start, end))
2364 continue;
70dec807 2365
17a5adcc 2366 end_page_writeback(page);
d1310b2e 2367 } while (bvec >= bio->bi_io_vec);
2b1f55b0 2368
d1310b2e 2369 bio_put(bio);
d1310b2e
CM
2370}
2371
883d0de4
MX
2372static void
2373endio_readpage_release_extent(struct extent_io_tree *tree, u64 start, u64 len,
2374 int uptodate)
2375{
2376 struct extent_state *cached = NULL;
2377 u64 end = start + len - 1;
2378
2379 if (uptodate && tree->track_uptodate)
2380 set_extent_uptodate(tree, start, end, &cached, GFP_ATOMIC);
2381 unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
2382}
2383
d1310b2e
CM
2384/*
2385 * after a readpage IO is done, we need to:
2386 * clear the uptodate bits on error
2387 * set the uptodate bits if things worked
2388 * set the page up to date if all extents in the tree are uptodate
2389 * clear the lock bit in the extent tree
2390 * unlock the page if there are no other extents locked for it
2391 *
2392 * Scheduling is not allowed, so the extent state tree is expected
2393 * to have one and only one object corresponding to this IO.
2394 */
d1310b2e 2395static void end_bio_extent_readpage(struct bio *bio, int err)
d1310b2e
CM
2396{
2397 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
4125bf76
CM
2398 struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
2399 struct bio_vec *bvec = bio->bi_io_vec;
facc8a22 2400 struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
902b22f3 2401 struct extent_io_tree *tree;
facc8a22 2402 u64 offset = 0;
d1310b2e
CM
2403 u64 start;
2404 u64 end;
facc8a22 2405 u64 len;
883d0de4
MX
2406 u64 extent_start = 0;
2407 u64 extent_len = 0;
5cf1ab56 2408 int mirror;
d1310b2e
CM
2409 int ret;
2410
d20f7043
CM
2411 if (err)
2412 uptodate = 0;
2413
d1310b2e
CM
2414 do {
2415 struct page *page = bvec->bv_page;
a71754fc 2416 struct inode *inode = page->mapping->host;
507903b8 2417
be3940c0 2418 pr_debug("end_bio_extent_readpage: bi_sector=%llu, err=%d, "
9be3395b
CM
2419 "mirror=%lu\n", (u64)bio->bi_sector, err,
2420 io_bio->mirror_num);
a71754fc 2421 tree = &BTRFS_I(inode)->io_tree;
902b22f3 2422
17a5adcc
AO
2423 /* We always issue full-page reads, but if some block
2424 * in a page fails to read, blk_update_request() will
2425 * advance bv_offset and adjust bv_len to compensate.
2426 * Print a warning for nonzero offsets, and an error
2427 * if they don't add up to a full page. */
2428 if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE)
2429 printk("%s page read in btrfs with offset %u and length %u\n",
2430 bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE
2431 ? KERN_ERR "partial" : KERN_INFO "incomplete",
2432 bvec->bv_offset, bvec->bv_len);
d1310b2e 2433
17a5adcc
AO
2434 start = page_offset(page);
2435 end = start + bvec->bv_offset + bvec->bv_len - 1;
facc8a22 2436 len = bvec->bv_len;
d1310b2e 2437
4125bf76 2438 if (++bvec <= bvec_end)
d1310b2e
CM
2439 prefetchw(&bvec->bv_page->flags);
2440
9be3395b 2441 mirror = io_bio->mirror_num;
f2a09da9
MX
2442 if (likely(uptodate && tree->ops &&
2443 tree->ops->readpage_end_io_hook)) {
facc8a22
MX
2444 ret = tree->ops->readpage_end_io_hook(io_bio, offset,
2445 page, start, end,
2446 mirror);
5ee0844d 2447 if (ret)
d1310b2e 2448 uptodate = 0;
5ee0844d 2449 else
4a54c8c1 2450 clean_io_failure(start, page);
d1310b2e 2451 }
ea466794 2452
f2a09da9
MX
2453 if (likely(uptodate))
2454 goto readpage_ok;
2455
2456 if (tree->ops && tree->ops->readpage_io_failed_hook) {
5cf1ab56 2457 ret = tree->ops->readpage_io_failed_hook(page, mirror);
ea466794
JB
2458 if (!ret && !err &&
2459 test_bit(BIO_UPTODATE, &bio->bi_flags))
2460 uptodate = 1;
f2a09da9 2461 } else {
f4a8e656
JS
2462 /*
2463 * The generic bio_readpage_error handles errors the
2464 * following way: If possible, new read requests are
2465 * created and submitted and will end up in
2466 * end_bio_extent_readpage as well (if we're lucky, not
2467 * in the !uptodate case). In that case it returns 0 and
2468 * we just go on with the next page in our bio. If it
2469 * can't handle the error it will return -EIO and we
2470 * remain responsible for that page.
2471 */
facc8a22
MX
2472 ret = bio_readpage_error(bio, offset, page, start, end,
2473 mirror);
7e38326f 2474 if (ret == 0) {
3b951516
CM
2475 uptodate =
2476 test_bit(BIO_UPTODATE, &bio->bi_flags);
d20f7043
CM
2477 if (err)
2478 uptodate = 0;
7e38326f
CM
2479 continue;
2480 }
2481 }
f2a09da9 2482readpage_ok:
883d0de4 2483 if (likely(uptodate)) {
a71754fc
JB
2484 loff_t i_size = i_size_read(inode);
2485 pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2486 unsigned offset;
2487
2488 /* Zero out the end if this page straddles i_size */
2489 offset = i_size & (PAGE_CACHE_SIZE-1);
2490 if (page->index == end_index && offset)
2491 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
17a5adcc 2492 SetPageUptodate(page);
70dec807 2493 } else {
17a5adcc
AO
2494 ClearPageUptodate(page);
2495 SetPageError(page);
70dec807 2496 }
17a5adcc 2497 unlock_page(page);
facc8a22 2498 offset += len;
883d0de4
MX
2499
2500 if (unlikely(!uptodate)) {
2501 if (extent_len) {
2502 endio_readpage_release_extent(tree,
2503 extent_start,
2504 extent_len, 1);
2505 extent_start = 0;
2506 extent_len = 0;
2507 }
2508 endio_readpage_release_extent(tree, start,
2509 end - start + 1, 0);
2510 } else if (!extent_len) {
2511 extent_start = start;
2512 extent_len = end + 1 - start;
2513 } else if (extent_start + extent_len == start) {
2514 extent_len += end + 1 - start;
2515 } else {
2516 endio_readpage_release_extent(tree, extent_start,
2517 extent_len, uptodate);
2518 extent_start = start;
2519 extent_len = end + 1 - start;
2520 }
4125bf76 2521 } while (bvec <= bvec_end);
d1310b2e 2522
883d0de4
MX
2523 if (extent_len)
2524 endio_readpage_release_extent(tree, extent_start, extent_len,
2525 uptodate);
facc8a22
MX
2526 if (io_bio->end_io)
2527 io_bio->end_io(io_bio, err);
d1310b2e 2528 bio_put(bio);
d1310b2e
CM
2529}
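/*
 * Hypothetical distillation of the batching above (example_coalesce is
 * not a btrfs function): completed ranges from adjacent bvecs are merged
 * so endio_readpage_release_extent() runs once per contiguous run rather
 * than once per page.  The uptodate case is shown; failures in the real
 * loop flush immediately with uptodate == 0.
 */
static void example_coalesce(struct extent_io_tree *tree,
			     u64 *ext_start, u64 *ext_len,
			     u64 start, u64 len)
{
	if (!*ext_len) {				/* first range */
		*ext_start = start;
		*ext_len = len;
	} else if (*ext_start + *ext_len == start) {	/* adjacent: extend */
		*ext_len += len;
	} else {					/* gap: flush, restart */
		endio_readpage_release_extent(tree, *ext_start, *ext_len, 1);
		*ext_start = start;
		*ext_len = len;
	}
}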
2530
9be3395b
CM
2531/*
2532 * this allocates from the btrfs_bioset. We're returning a bio right now
2533 * but you can call btrfs_io_bio for the appropriate container_of magic
2534 */
88f794ed
MX
2535struct bio *
2536btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
2537 gfp_t gfp_flags)
d1310b2e 2538{
facc8a22 2539 struct btrfs_io_bio *btrfs_bio;
d1310b2e
CM
2540 struct bio *bio;
2541
9be3395b 2542 bio = bio_alloc_bioset(gfp_flags, nr_vecs, btrfs_bioset);
d1310b2e
CM
2543
2544 if (bio == NULL && (current->flags & PF_MEMALLOC)) {
9be3395b
CM
2545 while (!bio && (nr_vecs /= 2)) {
2546 bio = bio_alloc_bioset(gfp_flags,
2547 nr_vecs, btrfs_bioset);
2548 }
d1310b2e
CM
2549 }
2550
2551 if (bio) {
e1c4b745 2552 bio->bi_size = 0;
d1310b2e
CM
2553 bio->bi_bdev = bdev;
2554 bio->bi_sector = first_sector;
facc8a22
MX
2555 btrfs_bio = btrfs_io_bio(bio);
2556 btrfs_bio->csum = NULL;
2557 btrfs_bio->csum_allocated = NULL;
2558 btrfs_bio->end_io = NULL;
d1310b2e
CM
2559 }
2560 return bio;
2561}
2562
9be3395b
CM
2563struct bio *btrfs_bio_clone(struct bio *bio, gfp_t gfp_mask)
2564{
2565 return bio_clone_bioset(bio, gfp_mask, btrfs_bioset);
2566}
2567
2568
2569/* this also allocates from the btrfs_bioset */
2570struct bio *btrfs_io_bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
2571{
facc8a22
MX
2572 struct btrfs_io_bio *btrfs_bio;
2573 struct bio *bio;
2574
2575 bio = bio_alloc_bioset(gfp_mask, nr_iovecs, btrfs_bioset);
2576 if (bio) {
2577 btrfs_bio = btrfs_io_bio(bio);
2578 btrfs_bio->csum = NULL;
2579 btrfs_bio->csum_allocated = NULL;
2580 btrfs_bio->end_io = NULL;
2581 }
2582 return bio;
9be3395b
CM
2583}
2584
2585
355808c2
JM
2586static int __must_check submit_one_bio(int rw, struct bio *bio,
2587 int mirror_num, unsigned long bio_flags)
d1310b2e 2588{
d1310b2e 2589 int ret = 0;
70dec807
CM
2590 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
2591 struct page *page = bvec->bv_page;
2592 struct extent_io_tree *tree = bio->bi_private;
70dec807 2593 u64 start;
70dec807 2594
4eee4fa4 2595 start = page_offset(page) + bvec->bv_offset;
70dec807 2596
902b22f3 2597 bio->bi_private = NULL;
d1310b2e
CM
2598
2599 bio_get(bio);
2600
065631f6 2601 if (tree->ops && tree->ops->submit_bio_hook)
6b82ce8d 2602 ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
eaf25d93 2603 mirror_num, bio_flags, start);
0b86a832 2604 else
21adbd5c 2605 btrfsic_submit_bio(rw, bio);
4a54c8c1 2606
d1310b2e
CM
2607 if (bio_flagged(bio, BIO_EOPNOTSUPP))
2608 ret = -EOPNOTSUPP;
2609 bio_put(bio);
2610 return ret;
2611}
2612
64a16701 2613static int merge_bio(int rw, struct extent_io_tree *tree, struct page *page,
3444a972
JM
2614 unsigned long offset, size_t size, struct bio *bio,
2615 unsigned long bio_flags)
2616{
2617 int ret = 0;
2618 if (tree->ops && tree->ops->merge_bio_hook)
64a16701 2619 ret = tree->ops->merge_bio_hook(rw, page, offset, size, bio,
3444a972
JM
2620 bio_flags);
2621 BUG_ON(ret < 0);
2622 return ret;
2623
2624}
2625
d1310b2e
CM
2626static int submit_extent_page(int rw, struct extent_io_tree *tree,
2627 struct page *page, sector_t sector,
2628 size_t size, unsigned long offset,
2629 struct block_device *bdev,
2630 struct bio **bio_ret,
2631 unsigned long max_pages,
f188591e 2632 bio_end_io_t end_io_func,
c8b97818
CM
2633 int mirror_num,
2634 unsigned long prev_bio_flags,
2635 unsigned long bio_flags)
d1310b2e
CM
2636{
2637 int ret = 0;
2638 struct bio *bio;
2639 int nr;
c8b97818
CM
2640 int contig = 0;
2641 int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED;
2642 int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
5b050f04 2643 size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE);
d1310b2e
CM
2644
2645 if (bio_ret && *bio_ret) {
2646 bio = *bio_ret;
c8b97818
CM
2647 if (old_compressed)
2648 contig = bio->bi_sector == sector;
2649 else
f73a1c7d 2650 contig = bio_end_sector(bio) == sector;
c8b97818
CM
2651
2652 if (prev_bio_flags != bio_flags || !contig ||
64a16701 2653 merge_bio(rw, tree, page, offset, page_size, bio, bio_flags) ||
c8b97818
CM
2654 bio_add_page(bio, page, page_size, offset) < page_size) {
2655 ret = submit_one_bio(rw, bio, mirror_num,
2656 prev_bio_flags);
79787eaa
JM
2657 if (ret < 0)
2658 return ret;
d1310b2e
CM
2659 bio = NULL;
2660 } else {
2661 return 0;
2662 }
2663 }
c8b97818
CM
2664 if (this_compressed)
2665 nr = BIO_MAX_PAGES;
2666 else
2667 nr = bio_get_nr_vecs(bdev);
2668
88f794ed 2669 bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
5df67083
TI
2670 if (!bio)
2671 return -ENOMEM;
70dec807 2672
c8b97818 2673 bio_add_page(bio, page, page_size, offset);
d1310b2e
CM
2674 bio->bi_end_io = end_io_func;
2675 bio->bi_private = tree;
70dec807 2676
d397712b 2677 if (bio_ret)
d1310b2e 2678 *bio_ret = bio;
d397712b 2679 else
c8b97818 2680 ret = submit_one_bio(rw, bio, mirror_num, bio_flags);
d1310b2e
CM
2681
2682 return ret;
2683}
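/*
 * Hypothetical distillation of the merge test above (example_can_append
 * is not a btrfs function): a page may join the bio being built only if
 * the compression flags match and the new sector continues where the bio
 * ends; for compressed bios every page targets the bio's start sector
 * instead, since the compressed extent is read as one unit.
 */
static int example_can_append(struct bio *bio, sector_t sector,
			      unsigned long prev_bio_flags,
			      unsigned long bio_flags)
{
	if (prev_bio_flags != bio_flags)
		return 0;
	if (prev_bio_flags & EXTENT_BIO_COMPRESSED)
		return bio->bi_sector == sector;
	return bio_end_sector(bio) == sector;
}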
2684
48a3b636
ES
2685static void attach_extent_buffer_page(struct extent_buffer *eb,
2686 struct page *page)
d1310b2e
CM
2687{
2688 if (!PagePrivate(page)) {
2689 SetPagePrivate(page);
d1310b2e 2690 page_cache_get(page);
4f2de97a
JB
2691 set_page_private(page, (unsigned long)eb);
2692 } else {
2693 WARN_ON(page->private != (unsigned long)eb);
d1310b2e
CM
2694 }
2695}
2696
4f2de97a 2697void set_page_extent_mapped(struct page *page)
d1310b2e 2698{
4f2de97a
JB
2699 if (!PagePrivate(page)) {
2700 SetPagePrivate(page);
2701 page_cache_get(page);
2702 set_page_private(page, EXTENT_PAGE_PRIVATE);
2703 }
d1310b2e
CM
2704}
2705
125bac01
MX
2706static struct extent_map *
2707__get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
2708 u64 start, u64 len, get_extent_t *get_extent,
2709 struct extent_map **em_cached)
2710{
2711 struct extent_map *em;
2712
2713 if (em_cached && *em_cached) {
2714 em = *em_cached;
2715 if (em->in_tree && start >= em->start &&
2716 start < extent_map_end(em)) {
2717 atomic_inc(&em->refs);
2718 return em;
2719 }
2720
2721 free_extent_map(em);
2722 *em_cached = NULL;
2723 }
2724
2725 em = get_extent(inode, page, pg_offset, start, len, 0);
2726 if (em_cached && !IS_ERR_OR_NULL(em)) {
2727 BUG_ON(*em_cached);
2728 atomic_inc(&em->refs);
2729 *em_cached = em;
2730 }
2731 return em;
2732}
d1310b2e
CM
2733/*
 2734 * basic readpage implementation. Locked extent state structs are inserted
 2735 * into the tree and removed when the IO is done (by the end_io
2736 * handlers)
79787eaa 2737 * XXX JDM: This needs looking at to ensure proper page locking
d1310b2e 2738 */
9974090b
MX
2739static int __do_readpage(struct extent_io_tree *tree,
2740 struct page *page,
2741 get_extent_t *get_extent,
125bac01 2742 struct extent_map **em_cached,
9974090b
MX
2743 struct bio **bio, int mirror_num,
2744 unsigned long *bio_flags, int rw)
d1310b2e
CM
2745{
2746 struct inode *inode = page->mapping->host;
4eee4fa4 2747 u64 start = page_offset(page);
d1310b2e
CM
2748 u64 page_end = start + PAGE_CACHE_SIZE - 1;
2749 u64 end;
2750 u64 cur = start;
2751 u64 extent_offset;
2752 u64 last_byte = i_size_read(inode);
2753 u64 block_start;
2754 u64 cur_end;
2755 sector_t sector;
2756 struct extent_map *em;
2757 struct block_device *bdev;
2758 int ret;
2759 int nr = 0;
4b384318 2760 int parent_locked = *bio_flags & EXTENT_BIO_PARENT_LOCKED;
306e16ce 2761 size_t pg_offset = 0;
d1310b2e 2762 size_t iosize;
c8b97818 2763 size_t disk_io_size;
d1310b2e 2764 size_t blocksize = inode->i_sb->s_blocksize;
4b384318 2765 unsigned long this_bio_flag = *bio_flags & EXTENT_BIO_PARENT_LOCKED;
d1310b2e
CM
2766
2767 set_page_extent_mapped(page);
2768
9974090b 2769 end = page_end;
90a887c9
DM
2770 if (!PageUptodate(page)) {
2771 if (cleancache_get_page(page) == 0) {
2772 BUG_ON(blocksize != PAGE_SIZE);
9974090b 2773 unlock_extent(tree, start, end);
90a887c9
DM
2774 goto out;
2775 }
2776 }
2777
c8b97818
CM
2778 if (page->index == last_byte >> PAGE_CACHE_SHIFT) {
2779 char *userpage;
2780 size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1);
2781
2782 if (zero_offset) {
2783 iosize = PAGE_CACHE_SIZE - zero_offset;
7ac687d9 2784 userpage = kmap_atomic(page);
c8b97818
CM
2785 memset(userpage + zero_offset, 0, iosize);
2786 flush_dcache_page(page);
7ac687d9 2787 kunmap_atomic(userpage);
c8b97818
CM
2788 }
2789 }
d1310b2e 2790 while (cur <= end) {
c8f2f24b
JB
2791 unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
2792
d1310b2e
CM
2793 if (cur >= last_byte) {
2794 char *userpage;
507903b8
AJ
2795 struct extent_state *cached = NULL;
2796
306e16ce 2797 iosize = PAGE_CACHE_SIZE - pg_offset;
7ac687d9 2798 userpage = kmap_atomic(page);
306e16ce 2799 memset(userpage + pg_offset, 0, iosize);
d1310b2e 2800 flush_dcache_page(page);
7ac687d9 2801 kunmap_atomic(userpage);
d1310b2e 2802 set_extent_uptodate(tree, cur, cur + iosize - 1,
507903b8 2803 &cached, GFP_NOFS);
4b384318
MF
2804 if (!parent_locked)
2805 unlock_extent_cached(tree, cur,
2806 cur + iosize - 1,
2807 &cached, GFP_NOFS);
d1310b2e
CM
2808 break;
2809 }
125bac01
MX
2810 em = __get_extent_map(inode, page, pg_offset, cur,
2811 end - cur + 1, get_extent, em_cached);
c704005d 2812 if (IS_ERR_OR_NULL(em)) {
d1310b2e 2813 SetPageError(page);
4b384318
MF
2814 if (!parent_locked)
2815 unlock_extent(tree, cur, end);
d1310b2e
CM
2816 break;
2817 }
d1310b2e
CM
2818 extent_offset = cur - em->start;
2819 BUG_ON(extent_map_end(em) <= cur);
2820 BUG_ON(end < cur);
2821
261507a0 2822 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
4b384318 2823 this_bio_flag |= EXTENT_BIO_COMPRESSED;
261507a0
LZ
2824 extent_set_compress_type(&this_bio_flag,
2825 em->compress_type);
2826 }
c8b97818 2827
d1310b2e
CM
2828 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2829 cur_end = min(extent_map_end(em) - 1, end);
fda2832f 2830 iosize = ALIGN(iosize, blocksize);
c8b97818
CM
2831 if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
2832 disk_io_size = em->block_len;
2833 sector = em->block_start >> 9;
2834 } else {
2835 sector = (em->block_start + extent_offset) >> 9;
2836 disk_io_size = iosize;
2837 }
d1310b2e
CM
2838 bdev = em->bdev;
2839 block_start = em->block_start;
d899e052
YZ
2840 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
2841 block_start = EXTENT_MAP_HOLE;
d1310b2e
CM
2842 free_extent_map(em);
2843 em = NULL;
2844
2845 /* we've found a hole, just zero and go on */
2846 if (block_start == EXTENT_MAP_HOLE) {
2847 char *userpage;
507903b8
AJ
2848 struct extent_state *cached = NULL;
2849
7ac687d9 2850 userpage = kmap_atomic(page);
306e16ce 2851 memset(userpage + pg_offset, 0, iosize);
d1310b2e 2852 flush_dcache_page(page);
7ac687d9 2853 kunmap_atomic(userpage);
d1310b2e
CM
2854
2855 set_extent_uptodate(tree, cur, cur + iosize - 1,
507903b8
AJ
2856 &cached, GFP_NOFS);
2857 unlock_extent_cached(tree, cur, cur + iosize - 1,
2858 &cached, GFP_NOFS);
d1310b2e 2859 cur = cur + iosize;
306e16ce 2860 pg_offset += iosize;
d1310b2e
CM
2861 continue;
2862 }
2863 /* the get_extent function already copied into the page */
9655d298
CM
2864 if (test_range_bit(tree, cur, cur_end,
2865 EXTENT_UPTODATE, 1, NULL)) {
a1b32a59 2866 check_page_uptodate(tree, page);
4b384318
MF
2867 if (!parent_locked)
2868 unlock_extent(tree, cur, cur + iosize - 1);
d1310b2e 2869 cur = cur + iosize;
306e16ce 2870 pg_offset += iosize;
d1310b2e
CM
2871 continue;
2872 }
70dec807
CM
2873 /* we have an inline extent but it didn't get marked up
2874 * to date. Error out
2875 */
2876 if (block_start == EXTENT_MAP_INLINE) {
2877 SetPageError(page);
4b384318
MF
2878 if (!parent_locked)
2879 unlock_extent(tree, cur, cur + iosize - 1);
70dec807 2880 cur = cur + iosize;
306e16ce 2881 pg_offset += iosize;
70dec807
CM
2882 continue;
2883 }
d1310b2e 2884
c8f2f24b 2885 pnr -= page->index;
d4c7ca86 2886 ret = submit_extent_page(rw, tree, page,
306e16ce 2887 sector, disk_io_size, pg_offset,
89642229 2888 bdev, bio, pnr,
c8b97818
CM
2889 end_bio_extent_readpage, mirror_num,
2890 *bio_flags,
2891 this_bio_flag);
c8f2f24b
JB
2892 if (!ret) {
2893 nr++;
2894 *bio_flags = this_bio_flag;
2895 } else {
d1310b2e 2896 SetPageError(page);
4b384318
MF
2897 if (!parent_locked)
2898 unlock_extent(tree, cur, cur + iosize - 1);
edd33c99 2899 }
d1310b2e 2900 cur = cur + iosize;
306e16ce 2901 pg_offset += iosize;
d1310b2e 2902 }
90a887c9 2903out:
d1310b2e
CM
2904 if (!nr) {
2905 if (!PageError(page))
2906 SetPageUptodate(page);
2907 unlock_page(page);
2908 }
2909 return 0;
2910}
2911
9974090b
MX
2912static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
2913 struct page *pages[], int nr_pages,
2914 u64 start, u64 end,
2915 get_extent_t *get_extent,
125bac01 2916 struct extent_map **em_cached,
9974090b
MX
2917 struct bio **bio, int mirror_num,
2918 unsigned long *bio_flags, int rw)
2919{
2920 struct inode *inode;
2921 struct btrfs_ordered_extent *ordered;
2922 int index;
2923
2924 inode = pages[0]->mapping->host;
2925 while (1) {
2926 lock_extent(tree, start, end);
2927 ordered = btrfs_lookup_ordered_range(inode, start,
2928 end - start + 1);
2929 if (!ordered)
2930 break;
2931 unlock_extent(tree, start, end);
2932 btrfs_start_ordered_extent(inode, ordered, 1);
2933 btrfs_put_ordered_extent(ordered);
2934 }
2935
2936 for (index = 0; index < nr_pages; index++) {
125bac01
MX
2937 __do_readpage(tree, pages[index], get_extent, em_cached, bio,
2938 mirror_num, bio_flags, rw);
9974090b
MX
2939 page_cache_release(pages[index]);
2940 }
2941}
2942
2943static void __extent_readpages(struct extent_io_tree *tree,
2944 struct page *pages[],
2945 int nr_pages, get_extent_t *get_extent,
125bac01 2946 struct extent_map **em_cached,
9974090b
MX
2947 struct bio **bio, int mirror_num,
2948 unsigned long *bio_flags, int rw)
2949{
35a3621b 2950 u64 start = 0;
9974090b
MX
2951 u64 end = 0;
2952 u64 page_start;
2953 int index;
35a3621b 2954 int first_index = 0;
9974090b
MX
2955
2956 for (index = 0; index < nr_pages; index++) {
2957 page_start = page_offset(pages[index]);
2958 if (!end) {
2959 start = page_start;
2960 end = start + PAGE_CACHE_SIZE - 1;
2961 first_index = index;
2962 } else if (end + 1 == page_start) {
2963 end += PAGE_CACHE_SIZE;
2964 } else {
2965 __do_contiguous_readpages(tree, &pages[first_index],
2966 index - first_index, start,
125bac01
MX
2967 end, get_extent, em_cached,
2968 bio, mirror_num, bio_flags,
2969 rw);
9974090b
MX
2970 start = page_start;
2971 end = start + PAGE_CACHE_SIZE - 1;
2972 first_index = index;
2973 }
2974 }
2975
2976 if (end)
2977 __do_contiguous_readpages(tree, &pages[first_index],
2978 index - first_index, start,
125bac01 2979 end, get_extent, em_cached, bio,
9974090b
MX
2980 mirror_num, bio_flags, rw);
2981}
2982
2983static int __extent_read_full_page(struct extent_io_tree *tree,
2984 struct page *page,
2985 get_extent_t *get_extent,
2986 struct bio **bio, int mirror_num,
2987 unsigned long *bio_flags, int rw)
2988{
2989 struct inode *inode = page->mapping->host;
2990 struct btrfs_ordered_extent *ordered;
2991 u64 start = page_offset(page);
2992 u64 end = start + PAGE_CACHE_SIZE - 1;
2993 int ret;
2994
2995 while (1) {
2996 lock_extent(tree, start, end);
2997 ordered = btrfs_lookup_ordered_extent(inode, start);
2998 if (!ordered)
2999 break;
3000 unlock_extent(tree, start, end);
3001 btrfs_start_ordered_extent(inode, ordered, 1);
3002 btrfs_put_ordered_extent(ordered);
3003 }
3004
125bac01
MX
3005 ret = __do_readpage(tree, page, get_extent, NULL, bio, mirror_num,
3006 bio_flags, rw);
9974090b
MX
3007 return ret;
3008}
3009
d1310b2e 3010int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
8ddc7d9c 3011 get_extent_t *get_extent, int mirror_num)
d1310b2e
CM
3012{
3013 struct bio *bio = NULL;
c8b97818 3014 unsigned long bio_flags = 0;
d1310b2e
CM
3015 int ret;
3016
8ddc7d9c 3017 ret = __extent_read_full_page(tree, page, get_extent, &bio, mirror_num,
d4c7ca86 3018 &bio_flags, READ);
d1310b2e 3019 if (bio)
8ddc7d9c 3020 ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
d1310b2e
CM
3021 return ret;
3022}
d1310b2e 3023
4b384318
MF
3024int extent_read_full_page_nolock(struct extent_io_tree *tree, struct page *page,
3025 get_extent_t *get_extent, int mirror_num)
3026{
3027 struct bio *bio = NULL;
3028 unsigned long bio_flags = EXTENT_BIO_PARENT_LOCKED;
3029 int ret;
3030
3031 ret = __do_readpage(tree, page, get_extent, NULL, &bio, mirror_num,
3032 &bio_flags, READ);
3033 if (bio)
3034 ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
3035 return ret;
3036}
3037
11c8349b
CM
3038static noinline void update_nr_written(struct page *page,
3039 struct writeback_control *wbc,
3040 unsigned long nr_written)
3041{
3042 wbc->nr_to_write -= nr_written;
3043 if (wbc->range_cyclic || (wbc->nr_to_write > 0 &&
3044 wbc->range_start == 0 && wbc->range_end == LLONG_MAX))
3045 page->mapping->writeback_index = page->index + nr_written;
3046}
3047
d1310b2e
CM
3048/*
3049 * the writepage semantics are similar to regular writepage. extent
3050 * records are inserted to lock ranges in the tree, and as dirty areas
3051 * are found, they are marked writeback. Then the lock bits are removed
3052 * and the end_io handler clears the writeback ranges
3053 */
3054static int __extent_writepage(struct page *page, struct writeback_control *wbc,
3055 void *data)
3056{
3057 struct inode *inode = page->mapping->host;
3058 struct extent_page_data *epd = data;
3059 struct extent_io_tree *tree = epd->tree;
4eee4fa4 3060 u64 start = page_offset(page);
d1310b2e
CM
3061 u64 delalloc_start;
3062 u64 page_end = start + PAGE_CACHE_SIZE - 1;
3063 u64 end;
3064 u64 cur = start;
3065 u64 extent_offset;
3066 u64 last_byte = i_size_read(inode);
3067 u64 block_start;
3068 u64 iosize;
3069 sector_t sector;
2c64c53d 3070 struct extent_state *cached_state = NULL;
d1310b2e
CM
3071 struct extent_map *em;
3072 struct block_device *bdev;
3073 int ret;
3074 int nr = 0;
7f3c74fb 3075 size_t pg_offset = 0;
d1310b2e
CM
3076 size_t blocksize;
3077 loff_t i_size = i_size_read(inode);
3078 unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
3079 u64 nr_delalloc;
3080 u64 delalloc_end;
c8b97818
CM
3081 int page_started;
3082 int compressed;
ffbd517d 3083 int write_flags;
771ed689 3084 unsigned long nr_written = 0;
9e487107 3085 bool fill_delalloc = true;
d1310b2e 3086
ffbd517d 3087 if (wbc->sync_mode == WB_SYNC_ALL)
721a9602 3088 write_flags = WRITE_SYNC;
ffbd517d
CM
3089 else
3090 write_flags = WRITE;
3091
1abe9b8a 3092 trace___extent_writepage(page, inode, wbc);
3093
d1310b2e 3094 WARN_ON(!PageLocked(page));
bf0da8c1
CM
3095
3096 ClearPageError(page);
3097
7f3c74fb 3098 pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
211c17f5 3099 if (page->index > end_index ||
7f3c74fb 3100 (page->index == end_index && !pg_offset)) {
d47992f8 3101 page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE);
d1310b2e
CM
3102 unlock_page(page);
3103 return 0;
3104 }
3105
3106 if (page->index == end_index) {
3107 char *userpage;
3108
7ac687d9 3109 userpage = kmap_atomic(page);
7f3c74fb
CM
3110 memset(userpage + pg_offset, 0,
3111 PAGE_CACHE_SIZE - pg_offset);
7ac687d9 3112 kunmap_atomic(userpage);
211c17f5 3113 flush_dcache_page(page);
d1310b2e 3114 }
7f3c74fb 3115 pg_offset = 0;
d1310b2e
CM
3116
3117 set_page_extent_mapped(page);
3118
9e487107
JB
3119 if (!tree->ops || !tree->ops->fill_delalloc)
3120 fill_delalloc = false;
3121
d1310b2e
CM
3122 delalloc_start = start;
3123 delalloc_end = 0;
c8b97818 3124 page_started = 0;
9e487107 3125 if (!epd->extent_locked && fill_delalloc) {
f85d7d6c 3126 u64 delalloc_to_write = 0;
11c8349b
CM
3127 /*
3128 * make sure the wbc mapping index is at least updated
3129 * to this page.
3130 */
3131 update_nr_written(page, wbc, 0);
3132
d397712b 3133 while (delalloc_end < page_end) {
771ed689 3134 nr_delalloc = find_lock_delalloc_range(inode, tree,
c8b97818
CM
3135 page,
3136 &delalloc_start,
d1310b2e
CM
3137 &delalloc_end,
3138 128 * 1024 * 1024);
771ed689
CM
3139 if (nr_delalloc == 0) {
3140 delalloc_start = delalloc_end + 1;
3141 continue;
3142 }
013bd4c3
TI
3143 ret = tree->ops->fill_delalloc(inode, page,
3144 delalloc_start,
3145 delalloc_end,
3146 &page_started,
3147 &nr_written);
79787eaa
JM
3148 /* File system has been set read-only */
3149 if (ret) {
3150 SetPageError(page);
3151 goto done;
3152 }
f85d7d6c
CM
3153 /*
3154 * delalloc_end is already one less than the total
3155 * length, so we don't subtract one from
3156 * PAGE_CACHE_SIZE
3157 */
3158 delalloc_to_write += (delalloc_end - delalloc_start +
3159 PAGE_CACHE_SIZE) >>
3160 PAGE_CACHE_SHIFT;
d1310b2e 3161 delalloc_start = delalloc_end + 1;
d1310b2e 3162 }
f85d7d6c
CM
3163 if (wbc->nr_to_write < delalloc_to_write) {
3164 int thresh = 8192;
3165
3166 if (delalloc_to_write < thresh * 2)
3167 thresh = delalloc_to_write;
3168 wbc->nr_to_write = min_t(u64, delalloc_to_write,
3169 thresh);
3170 }
c8b97818 3171
771ed689
CM
3172 /* did the fill delalloc function already unlock and start
3173 * the IO?
3174 */
3175 if (page_started) {
3176 ret = 0;
11c8349b
CM
3177 /*
3178 * we've unlocked the page, so we can't update
3179 * the mapping's writeback index, just update
3180 * nr_to_write.
3181 */
3182 wbc->nr_to_write -= nr_written;
3183 goto done_unlocked;
771ed689 3184 }
c8b97818 3185 }
247e743c 3186 if (tree->ops && tree->ops->writepage_start_hook) {
c8b97818
CM
3187 ret = tree->ops->writepage_start_hook(page, start,
3188 page_end);
87826df0
JM
3189 if (ret) {
3190 /* Fixup worker will requeue */
3191 if (ret == -EBUSY)
3192 wbc->pages_skipped++;
3193 else
3194 redirty_page_for_writepage(wbc, page);
11c8349b 3195 update_nr_written(page, wbc, nr_written);
247e743c 3196 unlock_page(page);
771ed689 3197 ret = 0;
11c8349b 3198 goto done_unlocked;
247e743c
CM
3199 }
3200 }
3201
11c8349b
CM
3202 /*
3203 * we don't want to touch the inode after unlocking the page,
3204 * so we update the mapping writeback index now
3205 */
3206 update_nr_written(page, wbc, nr_written + 1);
771ed689 3207
d1310b2e 3208 end = page_end;
d1310b2e 3209 if (last_byte <= start) {
e6dcd2dc
CM
3210 if (tree->ops && tree->ops->writepage_end_io_hook)
3211 tree->ops->writepage_end_io_hook(page, start,
3212 page_end, NULL, 1);
d1310b2e
CM
3213 goto done;
3214 }
3215
d1310b2e
CM
3216 blocksize = inode->i_sb->s_blocksize;
3217
3218 while (cur <= end) {
3219 if (cur >= last_byte) {
e6dcd2dc
CM
3220 if (tree->ops && tree->ops->writepage_end_io_hook)
3221 tree->ops->writepage_end_io_hook(page, cur,
3222 page_end, NULL, 1);
d1310b2e
CM
3223 break;
3224 }
7f3c74fb 3225 em = epd->get_extent(inode, page, pg_offset, cur,
d1310b2e 3226 end - cur + 1, 1);
c704005d 3227 if (IS_ERR_OR_NULL(em)) {
d1310b2e
CM
3228 SetPageError(page);
3229 break;
3230 }
3231
3232 extent_offset = cur - em->start;
3233 BUG_ON(extent_map_end(em) <= cur);
3234 BUG_ON(end < cur);
3235 iosize = min(extent_map_end(em) - cur, end - cur + 1);
fda2832f 3236 iosize = ALIGN(iosize, blocksize);
d1310b2e
CM
3237 sector = (em->block_start + extent_offset) >> 9;
3238 bdev = em->bdev;
3239 block_start = em->block_start;
c8b97818 3240 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
d1310b2e
CM
3241 free_extent_map(em);
3242 em = NULL;
3243
c8b97818
CM
3244 /*
3245 * compressed and inline extents are written through other
3246 * paths in the FS
3247 */
3248 if (compressed || block_start == EXTENT_MAP_HOLE ||
d1310b2e 3249 block_start == EXTENT_MAP_INLINE) {
c8b97818
CM
3250 /*
3251 * end_io notification does not happen here for
3252 * compressed extents
3253 */
3254 if (!compressed && tree->ops &&
3255 tree->ops->writepage_end_io_hook)
e6dcd2dc
CM
3256 tree->ops->writepage_end_io_hook(page, cur,
3257 cur + iosize - 1,
3258 NULL, 1);
c8b97818
CM
3259 else if (compressed) {
3260 /* we don't want to end_page_writeback on
3261 * a compressed extent. this happens
3262 * elsewhere
3263 */
3264 nr++;
3265 }
3266
3267 cur += iosize;
7f3c74fb 3268 pg_offset += iosize;
d1310b2e
CM
3269 continue;
3270 }
d1310b2e
CM
3271 /* leave this out until we have a page_mkwrite call */
3272 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
9655d298 3273 EXTENT_DIRTY, 0, NULL)) {
d1310b2e 3274 cur = cur + iosize;
7f3c74fb 3275 pg_offset += iosize;
d1310b2e
CM
3276 continue;
3277 }
c8b97818 3278
d1310b2e
CM
3279 if (tree->ops && tree->ops->writepage_io_hook) {
3280 ret = tree->ops->writepage_io_hook(page, cur,
3281 cur + iosize - 1);
3282 } else {
3283 ret = 0;
3284 }
1259ab75 3285 if (ret) {
d1310b2e 3286 SetPageError(page);
1259ab75 3287 } else {
d1310b2e 3288 unsigned long max_nr = end_index + 1;
7f3c74fb 3289
d1310b2e
CM
3290 set_range_writeback(tree, cur, cur + iosize - 1);
3291 if (!PageWriteback(page)) {
d397712b
CM
3292 printk(KERN_ERR "btrfs warning page %lu not "
3293 "writeback, cur %llu end %llu\n",
c1c9ff7c 3294 page->index, cur, end);
d1310b2e
CM
3295 }
3296
ffbd517d
CM
3297 ret = submit_extent_page(write_flags, tree, page,
3298 sector, iosize, pg_offset,
3299 bdev, &epd->bio, max_nr,
c8b97818
CM
3300 end_bio_extent_writepage,
3301 0, 0, 0);
d1310b2e
CM
3302 if (ret)
3303 SetPageError(page);
3304 }
3305 cur = cur + iosize;
7f3c74fb 3306 pg_offset += iosize;
d1310b2e
CM
3307 nr++;
3308 }
3309done:
3310 if (nr == 0) {
3311 /* make sure the mapping tag for page dirty gets cleared */
3312 set_page_writeback(page);
3313 end_page_writeback(page);
3314 }
d1310b2e 3315 unlock_page(page);
771ed689 3316
11c8349b
CM
3317done_unlocked:
3318
2c64c53d
CM
3319 /* drop our reference on any cached states */
3320 free_extent_state(cached_state);
d1310b2e
CM
3321 return 0;
3322}
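/*
 * Hypothetical distillation of the delalloc accounting above
 * (example_delalloc_pages is not a btrfs function): delalloc_end is an
 * inclusive byte offset, so adding a full PAGE_CACHE_SIZE (rather than
 * PAGE_CACHE_SIZE - 1) before shifting rounds the span up to whole
 * pages.
 */
static u64 example_delalloc_pages(u64 delalloc_start, u64 delalloc_end)
{
	return (delalloc_end - delalloc_start + PAGE_CACHE_SIZE) >>
		PAGE_CACHE_SHIFT;
}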
3323
0b32f4bb
JB
3324static int eb_wait(void *word)
3325{
3326 io_schedule();
3327 return 0;
3328}
3329
fd8b2b61 3330void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
0b32f4bb
JB
3331{
3332 wait_on_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK, eb_wait,
3333 TASK_UNINTERRUPTIBLE);
3334}
3335
3336static int lock_extent_buffer_for_io(struct extent_buffer *eb,
3337 struct btrfs_fs_info *fs_info,
3338 struct extent_page_data *epd)
3339{
3340 unsigned long i, num_pages;
3341 int flush = 0;
3342 int ret = 0;
3343
3344 if (!btrfs_try_tree_write_lock(eb)) {
3345 flush = 1;
3346 flush_write_bio(epd);
3347 btrfs_tree_lock(eb);
3348 }
3349
3350 if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
3351 btrfs_tree_unlock(eb);
3352 if (!epd->sync_io)
3353 return 0;
3354 if (!flush) {
3355 flush_write_bio(epd);
3356 flush = 1;
3357 }
a098d8e8
CM
3358 while (1) {
3359 wait_on_extent_buffer_writeback(eb);
3360 btrfs_tree_lock(eb);
3361 if (!test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags))
3362 break;
0b32f4bb 3363 btrfs_tree_unlock(eb);
0b32f4bb
JB
3364 }
3365 }
3366
51561ffe
JB
3367 /*
3368 * We need to do this to prevent races in people who check if the eb is
3369 * under IO since we can end up having no IO bits set for a short period
3370 * of time.
3371 */
3372 spin_lock(&eb->refs_lock);
0b32f4bb
JB
3373 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
3374 set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
51561ffe 3375 spin_unlock(&eb->refs_lock);
0b32f4bb 3376 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
e2d84521
MX
3377 __percpu_counter_add(&fs_info->dirty_metadata_bytes,
3378 -eb->len,
3379 fs_info->dirty_metadata_batch);
0b32f4bb 3380 ret = 1;
51561ffe
JB
3381 } else {
3382 spin_unlock(&eb->refs_lock);
0b32f4bb
JB
3383 }
3384
3385 btrfs_tree_unlock(eb);
3386
3387 if (!ret)
3388 return ret;
3389
3390 num_pages = num_extent_pages(eb->start, eb->len);
3391 for (i = 0; i < num_pages; i++) {
3392 struct page *p = extent_buffer_page(eb, i);
3393
3394 if (!trylock_page(p)) {
3395 if (!flush) {
3396 flush_write_bio(epd);
3397 flush = 1;
3398 }
3399 lock_page(p);
3400 }
3401 }
3402
3403 return ret;
3404}
3405
3406static void end_extent_buffer_writeback(struct extent_buffer *eb)
3407{
3408 clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3409 smp_mb__after_clear_bit();
3410 wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
3411}
3412
3413static void end_bio_extent_buffer_writepage(struct bio *bio, int err)
3414{
3415 int uptodate = err == 0;
3416 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
3417 struct extent_buffer *eb;
3418 int done;
3419
3420 do {
3421 struct page *page = bvec->bv_page;
3422
3423 bvec--;
3424 eb = (struct extent_buffer *)page->private;
3425 BUG_ON(!eb);
3426 done = atomic_dec_and_test(&eb->io_pages);
3427
3428 if (!uptodate || test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
3429 set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3430 ClearPageUptodate(page);
3431 SetPageError(page);
3432 }
3433
3434 end_page_writeback(page);
3435
3436 if (!done)
3437 continue;
3438
3439 end_extent_buffer_writeback(eb);
3440 } while (bvec >= bio->bi_io_vec);
3441
3442 bio_put(bio);
3443
3444}
3445
3446static int write_one_eb(struct extent_buffer *eb,
3447 struct btrfs_fs_info *fs_info,
3448 struct writeback_control *wbc,
3449 struct extent_page_data *epd)
3450{
3451 struct block_device *bdev = fs_info->fs_devices->latest_bdev;
3452 u64 offset = eb->start;
3453 unsigned long i, num_pages;
de0022b9 3454 unsigned long bio_flags = 0;
d4c7ca86 3455 int rw = (epd->sync_io ? WRITE_SYNC : WRITE) | REQ_META;
d7dbe9e7 3456 int ret = 0;
0b32f4bb
JB
3457
3458 clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3459 num_pages = num_extent_pages(eb->start, eb->len);
3460 atomic_set(&eb->io_pages, num_pages);
de0022b9
JB
3461 if (btrfs_header_owner(eb) == BTRFS_TREE_LOG_OBJECTID)
3462 bio_flags = EXTENT_BIO_TREE_LOG;
3463
0b32f4bb
JB
3464 for (i = 0; i < num_pages; i++) {
3465 struct page *p = extent_buffer_page(eb, i);
3466
3467 clear_page_dirty_for_io(p);
3468 set_page_writeback(p);
3469 ret = submit_extent_page(rw, eb->tree, p, offset >> 9,
3470 PAGE_CACHE_SIZE, 0, bdev, &epd->bio,
3471 -1, end_bio_extent_buffer_writepage,
de0022b9
JB
3472 0, epd->bio_flags, bio_flags);
3473 epd->bio_flags = bio_flags;
0b32f4bb
JB
3474 if (ret) {
3475 set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3476 SetPageError(p);
3477 if (atomic_sub_and_test(num_pages - i, &eb->io_pages))
3478 end_extent_buffer_writeback(eb);
3479 ret = -EIO;
3480 break;
3481 }
3482 offset += PAGE_CACHE_SIZE;
3483 update_nr_written(p, wbc, 1);
3484 unlock_page(p);
3485 }
3486
3487 if (unlikely(ret)) {
3488 for (; i < num_pages; i++) {
3489 struct page *p = extent_buffer_page(eb, i);
3490 unlock_page(p);
3491 }
3492 }
3493
3494 return ret;
3495}
3496
3497int btree_write_cache_pages(struct address_space *mapping,
3498 struct writeback_control *wbc)
3499{
3500 struct extent_io_tree *tree = &BTRFS_I(mapping->host)->io_tree;
3501 struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
3502 struct extent_buffer *eb, *prev_eb = NULL;
3503 struct extent_page_data epd = {
3504 .bio = NULL,
3505 .tree = tree,
3506 .extent_locked = 0,
3507 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
de0022b9 3508 .bio_flags = 0,
0b32f4bb
JB
3509 };
3510 int ret = 0;
3511 int done = 0;
3512 int nr_to_write_done = 0;
3513 struct pagevec pvec;
3514 int nr_pages;
3515 pgoff_t index;
3516 pgoff_t end; /* Inclusive */
3517 int scanned = 0;
3518 int tag;
3519
3520 pagevec_init(&pvec, 0);
3521 if (wbc->range_cyclic) {
3522 index = mapping->writeback_index; /* Start from prev offset */
3523 end = -1;
3524 } else {
3525 index = wbc->range_start >> PAGE_CACHE_SHIFT;
3526 end = wbc->range_end >> PAGE_CACHE_SHIFT;
3527 scanned = 1;
3528 }
3529 if (wbc->sync_mode == WB_SYNC_ALL)
3530 tag = PAGECACHE_TAG_TOWRITE;
3531 else
3532 tag = PAGECACHE_TAG_DIRTY;
3533retry:
3534 if (wbc->sync_mode == WB_SYNC_ALL)
3535 tag_pages_for_writeback(mapping, index, end);
3536 while (!done && !nr_to_write_done && (index <= end) &&
3537 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
3538 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
3539 unsigned i;
3540
3541 scanned = 1;
3542 for (i = 0; i < nr_pages; i++) {
3543 struct page *page = pvec.pages[i];
3544
3545 if (!PagePrivate(page))
3546 continue;
3547
3548 if (!wbc->range_cyclic && page->index > end) {
3549 done = 1;
3550 break;
3551 }
3552
b5bae261
JB
3553 spin_lock(&mapping->private_lock);
3554 if (!PagePrivate(page)) {
3555 spin_unlock(&mapping->private_lock);
3556 continue;
3557 }
3558
0b32f4bb 3559 eb = (struct extent_buffer *)page->private;
b5bae261
JB
3560
3561 /*
3562 * Shouldn't happen and normally this would be a BUG_ON
 3563 * but no sense in crashing the user's box for something
3564 * we can survive anyway.
3565 */
0b32f4bb 3566 if (!eb) {
b5bae261 3567 spin_unlock(&mapping->private_lock);
0b32f4bb
JB
3568 WARN_ON(1);
3569 continue;
3570 }
3571
b5bae261
JB
3572 if (eb == prev_eb) {
3573 spin_unlock(&mapping->private_lock);
0b32f4bb 3574 continue;
b5bae261 3575 }
0b32f4bb 3576
b5bae261
JB
3577 ret = atomic_inc_not_zero(&eb->refs);
3578 spin_unlock(&mapping->private_lock);
3579 if (!ret)
0b32f4bb 3580 continue;
0b32f4bb
JB
3581
3582 prev_eb = eb;
3583 ret = lock_extent_buffer_for_io(eb, fs_info, &epd);
3584 if (!ret) {
3585 free_extent_buffer(eb);
3586 continue;
3587 }
3588
3589 ret = write_one_eb(eb, fs_info, wbc, &epd);
3590 if (ret) {
3591 done = 1;
3592 free_extent_buffer(eb);
3593 break;
3594 }
3595 free_extent_buffer(eb);
3596
3597 /*
3598 * the filesystem may choose to bump up nr_to_write.
3599 * We have to make sure to honor the new nr_to_write
3600 * at any time
3601 */
3602 nr_to_write_done = wbc->nr_to_write <= 0;
3603 }
3604 pagevec_release(&pvec);
3605 cond_resched();
3606 }
3607 if (!scanned && !done) {
3608 /*
3609 * We hit the last page and there is more work to be done: wrap
3610 * back to the start of the file
3611 */
3612 scanned = 1;
3613 index = 0;
3614 goto retry;
3615 }
3616 flush_write_bio(&epd);
3617 return ret;
3618}

/**
 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 * @writepage: function called for each page
 * @data: data passed to writepage function
 *
 * If a page is already under I/O, write_cache_pages() skips it, even
 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
 * and msync() need to guarantee that all the data which was dirty at the time
 * the call was made get new I/O started against them.  If wbc->sync_mode is
 * WB_SYNC_ALL then we were called for data integrity and we must wait for
 * existing IO to complete.
 */
static int extent_write_cache_pages(struct extent_io_tree *tree,
			     struct address_space *mapping,
			     struct writeback_control *wbc,
			     writepage_t writepage, void *data,
			     void (*flush_fn)(void *))
{
	struct inode *inode = mapping->host;
	int ret = 0;
	int done = 0;
	int nr_to_write_done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	int scanned = 0;
	int tag;

	/*
	 * We have to hold onto the inode so that ordered extents can do their
	 * work when the IO finishes. The alternative to this is failing to add
	 * an ordered extent if the igrab() fails there and that is a huge pain
	 * to deal with, so instead just hold onto the inode throughout the
	 * writepages operation. If it fails here we are freeing up the inode
	 * anyway and we'd rather not waste our time writing out stuff that is
	 * going to be truncated anyway.
	 */
	if (!igrab(inode))
		return 0;

	pagevec_init(&pvec, 0);
	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		scanned = 1;
	}
	if (wbc->sync_mode == WB_SYNC_ALL)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
retry:
	if (wbc->sync_mode == WB_SYNC_ALL)
		tag_pages_for_writeback(mapping, index, end);
	while (!done && !nr_to_write_done && (index <= end) &&
	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
		unsigned i;

		scanned = 1;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * At this point we hold neither mapping->tree_lock nor
			 * lock on the page itself: the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or even
			 * swizzled back from swapper_space to tmpfs file
			 * mapping
			 */
			if (!trylock_page(page)) {
				flush_fn(data);
				lock_page(page);
			}

			if (unlikely(page->mapping != mapping)) {
				unlock_page(page);
				continue;
			}

			if (!wbc->range_cyclic && page->index > end) {
				done = 1;
				unlock_page(page);
				continue;
			}

			if (wbc->sync_mode != WB_SYNC_NONE) {
				if (PageWriteback(page))
					flush_fn(data);
				wait_on_page_writeback(page);
			}

			if (PageWriteback(page) ||
			    !clear_page_dirty_for_io(page)) {
				unlock_page(page);
				continue;
			}

			ret = (*writepage)(page, wbc, data);

			if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
				unlock_page(page);
				ret = 0;
			}
			if (ret)
				done = 1;

			/*
			 * the filesystem may choose to bump up nr_to_write.
			 * We have to make sure to honor the new nr_to_write
			 * at any time
			 */
			nr_to_write_done = wbc->nr_to_write <= 0;
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = 1;
		index = 0;
		goto retry;
	}
	btrfs_add_delayed_iput(inode);
	return ret;
}

static void flush_epd_write_bio(struct extent_page_data *epd)
{
	if (epd->bio) {
		int rw = WRITE;
		int ret;

		if (epd->sync_io)
			rw = WRITE_SYNC;

		ret = submit_one_bio(rw, epd->bio, 0, epd->bio_flags);
		BUG_ON(ret < 0); /* -ENOMEM */
		epd->bio = NULL;
	}
}

static noinline void flush_write_bio(void *data)
{
	struct extent_page_data *epd = data;
	flush_epd_write_bio(epd);
}

int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
			  get_extent_t *get_extent,
			  struct writeback_control *wbc)
{
	int ret;
	struct extent_page_data epd = {
		.bio = NULL,
		.tree = tree,
		.get_extent = get_extent,
		.extent_locked = 0,
		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
		.bio_flags = 0,
	};

	ret = __extent_writepage(page, wbc, &epd);

	flush_epd_write_bio(&epd);
	return ret;
}

int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
			      u64 start, u64 end, get_extent_t *get_extent,
			      int mode)
{
	int ret = 0;
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >>
		PAGE_CACHE_SHIFT;

	struct extent_page_data epd = {
		.bio = NULL,
		.tree = tree,
		.get_extent = get_extent,
		.extent_locked = 1,
		.sync_io = mode == WB_SYNC_ALL,
		.bio_flags = 0,
	};
	struct writeback_control wbc_writepages = {
		.sync_mode	= mode,
		.nr_to_write	= nr_pages * 2,
		.range_start	= start,
		.range_end	= end + 1,
	};

	while (start <= end) {
		page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
		if (clear_page_dirty_for_io(page))
			ret = __extent_writepage(page, &wbc_writepages, &epd);
		else {
			if (tree->ops && tree->ops->writepage_end_io_hook)
				tree->ops->writepage_end_io_hook(page, start,
						 start + PAGE_CACHE_SIZE - 1,
						 NULL, 1);
			unlock_page(page);
		}
		page_cache_release(page);
		start += PAGE_CACHE_SIZE;
	}

	flush_epd_write_bio(&epd);
	return ret;
}

int extent_writepages(struct extent_io_tree *tree,
		      struct address_space *mapping,
		      get_extent_t *get_extent,
		      struct writeback_control *wbc)
{
	int ret = 0;
	struct extent_page_data epd = {
		.bio = NULL,
		.tree = tree,
		.get_extent = get_extent,
		.extent_locked = 0,
		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
		.bio_flags = 0,
	};

	ret = extent_write_cache_pages(tree, mapping, wbc,
				       __extent_writepage, &epd,
				       flush_write_bio);
	flush_epd_write_bio(&epd);
	return ret;
}

int extent_readpages(struct extent_io_tree *tree,
		     struct address_space *mapping,
		     struct list_head *pages, unsigned nr_pages,
		     get_extent_t get_extent)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	unsigned long bio_flags = 0;
	struct page *pagepool[16];
	struct page *page;
	struct extent_map *em_cached = NULL;
	int nr = 0;

	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		page = list_entry(pages->prev, struct page, lru);

		prefetchw(&page->flags);
		list_del(&page->lru);
		if (add_to_page_cache_lru(page, mapping,
					page->index, GFP_NOFS)) {
			page_cache_release(page);
			continue;
		}

		pagepool[nr++] = page;
		if (nr < ARRAY_SIZE(pagepool))
			continue;
		__extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
				   &bio, 0, &bio_flags, READ);
		nr = 0;
	}
	if (nr)
		__extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
				   &bio, 0, &bio_flags, READ);

	if (em_cached)
		free_extent_map(em_cached);

	BUG_ON(!list_empty(pages));
	if (bio)
		return submit_one_bio(READ, bio, 0, bio_flags);
	return 0;
}

/*
 * basic invalidatepage code, this waits on any locked or writeback
 * ranges corresponding to the page, and then deletes any extent state
 * records from the tree
 */
int extent_invalidatepage(struct extent_io_tree *tree,
			  struct page *page, unsigned long offset)
{
	struct extent_state *cached_state = NULL;
	u64 start = page_offset(page);
	u64 end = start + PAGE_CACHE_SIZE - 1;
	size_t blocksize = page->mapping->host->i_sb->s_blocksize;

	start += ALIGN(offset, blocksize);
	if (start > end)
		return 0;

	lock_extent_bits(tree, start, end, 0, &cached_state);
	wait_on_page_writeback(page);
	clear_extent_bit(tree, start, end,
			 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
			 EXTENT_DO_ACCOUNTING,
			 1, 1, &cached_state, GFP_NOFS);
	return 0;
}

/*
 * a helper for releasepage, this tests for areas of the page that
 * are locked or under IO and drops the related state bits if it is safe
 * to drop the page.
 */
static int try_release_extent_state(struct extent_map_tree *map,
				    struct extent_io_tree *tree,
				    struct page *page, gfp_t mask)
{
	u64 start = page_offset(page);
	u64 end = start + PAGE_CACHE_SIZE - 1;
	int ret = 1;

	if (test_range_bit(tree, start, end,
			   EXTENT_IOBITS, 0, NULL))
		ret = 0;
	else {
		if ((mask & GFP_NOFS) == GFP_NOFS)
			mask = GFP_NOFS;
		/*
		 * at this point we can safely clear everything except the
		 * locked bit and the nodatasum bit
		 */
		ret = clear_extent_bit(tree, start, end,
				 ~(EXTENT_LOCKED | EXTENT_NODATASUM),
				 0, 0, NULL, mask);

		/* if clear_extent_bit failed for enomem reasons,
		 * we can't allow the release to continue.
		 */
		if (ret < 0)
			ret = 0;
		else
			ret = 1;
	}
	return ret;
}

/*
 * a helper for releasepage.  As long as there are no locked extents
 * in the range corresponding to the page, both state records and extent
 * map records are removed
 */
int try_release_extent_mapping(struct extent_map_tree *map,
			       struct extent_io_tree *tree, struct page *page,
			       gfp_t mask)
{
	struct extent_map *em;
	u64 start = page_offset(page);
	u64 end = start + PAGE_CACHE_SIZE - 1;

	if ((mask & __GFP_WAIT) &&
	    page->mapping->host->i_size > 16 * 1024 * 1024) {
		u64 len;
		while (start <= end) {
			len = end - start + 1;
			write_lock(&map->lock);
			em = lookup_extent_mapping(map, start, len);
			if (!em) {
				write_unlock(&map->lock);
				break;
			}
			if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
			    em->start != start) {
				write_unlock(&map->lock);
				free_extent_map(em);
				break;
			}
			if (!test_range_bit(tree, em->start,
					    extent_map_end(em) - 1,
					    EXTENT_LOCKED | EXTENT_WRITEBACK,
					    0, NULL)) {
				remove_extent_mapping(map, em);
				/* once for the rb tree */
				free_extent_map(em);
			}
			start = extent_map_end(em);
			write_unlock(&map->lock);

			/* once for us */
			free_extent_map(em);
		}
	}
	return try_release_extent_state(map, tree, page, mask);
}
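
/*
 * Sketch of a ->releasepage hook built on the helper above (illustrative
 * only; the real btrfs hooks also drop the page-private state when this
 * returns 1):
 *
 *	static int example_releasepage(struct page *page, gfp_t gfp_flags)
 *	{
 *		struct inode *inode = page->mapping->host;
 *
 *		return try_release_extent_mapping(&BTRFS_I(inode)->extent_tree,
 *						  &BTRFS_I(inode)->io_tree,
 *						  page, gfp_flags);
 *	}
 */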

/*
 * helper function for fiemap, which doesn't want to see any holes.
 * This maps until we find something past 'last'
 */
static struct extent_map *get_extent_skip_holes(struct inode *inode,
						u64 offset,
						u64 last,
						get_extent_t *get_extent)
{
	u64 sectorsize = BTRFS_I(inode)->root->sectorsize;
	struct extent_map *em;
	u64 len;

	if (offset >= last)
		return NULL;

	while (1) {
		len = last - offset;
		if (len == 0)
			break;
		len = ALIGN(len, sectorsize);
		em = get_extent(inode, NULL, 0, offset, len, 0);
		if (IS_ERR_OR_NULL(em))
			return em;

		/* if this isn't a hole return it */
		if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags) &&
		    em->block_start != EXTENT_MAP_HOLE) {
			return em;
		}

		/* this is a hole, advance to the next extent */
		offset = extent_map_end(em);
		free_extent_map(em);
		if (offset >= last)
			break;
	}
	return NULL;
}

int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		__u64 start, __u64 len, get_extent_t *get_extent)
{
	int ret = 0;
	u64 off = start;
	u64 max = start + len;
	u32 flags = 0;
	u32 found_type;
	u64 last;
	u64 last_for_get_extent = 0;
	u64 disko = 0;
	u64 isize = i_size_read(inode);
	struct btrfs_key found_key;
	struct extent_map *em = NULL;
	struct extent_state *cached_state = NULL;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *item;
	int end = 0;
	u64 em_start = 0;
	u64 em_len = 0;
	u64 em_end = 0;
	unsigned long emflags;

	if (len == 0)
		return -EINVAL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->leave_spinning = 1;

	start = ALIGN(start, BTRFS_I(inode)->root->sectorsize);
	len = ALIGN(len, BTRFS_I(inode)->root->sectorsize);

	/*
	 * lookup the last file extent.  We're not using i_size here
	 * because there might be preallocation past i_size
	 */
	ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root,
				       path, btrfs_ino(inode), -1, 0);
	if (ret < 0) {
		btrfs_free_path(path);
		return ret;
	}
	WARN_ON(!ret);
	path->slots[0]--;
	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_file_extent_item);
	btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
	found_type = btrfs_key_type(&found_key);

	/* No extents, but there might be delalloc bits */
	if (found_key.objectid != btrfs_ino(inode) ||
	    found_type != BTRFS_EXTENT_DATA_KEY) {
		/* have to trust i_size as the end */
		last = (u64)-1;
		last_for_get_extent = isize;
	} else {
		/*
		 * remember the start of the last extent.  There are a
		 * bunch of different factors that go into the length of the
		 * extent, so it's much less complex to remember where it started
		 */
		last = found_key.offset;
		last_for_get_extent = last + 1;
	}
	btrfs_free_path(path);

	/*
	 * we might have some extents allocated but more delalloc past those
	 * extents.  so, we trust isize unless the start of the last extent is
	 * beyond isize
	 */
	if (last < isize) {
		last = (u64)-1;
		last_for_get_extent = isize;
	}

	lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len - 1, 0,
			 &cached_state);

	em = get_extent_skip_holes(inode, start, last_for_get_extent,
				   get_extent);
	if (!em)
		goto out;
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto out;
	}

	while (!end) {
		u64 offset_in_extent = 0;

		/* break if the extent we found is outside the range */
		if (em->start >= max || extent_map_end(em) < off)
			break;

		/*
		 * get_extent may return an extent that starts before our
		 * requested range.  We have to make sure the ranges
		 * we return to fiemap always move forward and don't
		 * overlap, so adjust the offsets here
		 */
		em_start = max(em->start, off);

		/*
		 * record the offset from the start of the extent
		 * for adjusting the disk offset below.  Only do this if the
		 * extent isn't compressed since our in ram offset may be past
		 * what we have actually allocated on disk.
		 */
		if (!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
			offset_in_extent = em_start - em->start;
		em_end = extent_map_end(em);
		em_len = em_end - em_start;
		emflags = em->flags;
		disko = 0;
		flags = 0;

		/*
		 * bump off for our next call to get_extent
		 */
		off = extent_map_end(em);
		if (off >= max)
			end = 1;

		if (em->block_start == EXTENT_MAP_LAST_BYTE) {
			end = 1;
			flags |= FIEMAP_EXTENT_LAST;
		} else if (em->block_start == EXTENT_MAP_INLINE) {
			flags |= (FIEMAP_EXTENT_DATA_INLINE |
				  FIEMAP_EXTENT_NOT_ALIGNED);
		} else if (em->block_start == EXTENT_MAP_DELALLOC) {
			flags |= (FIEMAP_EXTENT_DELALLOC |
				  FIEMAP_EXTENT_UNKNOWN);
		} else {
			disko = em->block_start + offset_in_extent;
		}
		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
			flags |= FIEMAP_EXTENT_ENCODED;

		free_extent_map(em);
		em = NULL;
		if ((em_start >= last) || em_len == (u64)-1 ||
		   (last == (u64)-1 && isize <= em_end)) {
			flags |= FIEMAP_EXTENT_LAST;
			end = 1;
		}

		/* now scan forward to see if this is really the last extent. */
		em = get_extent_skip_holes(inode, off, last_for_get_extent,
					   get_extent);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out;
		}
		if (!em) {
			flags |= FIEMAP_EXTENT_LAST;
			end = 1;
		}
		ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
					      em_len, flags);
		if (ret)
			goto out_free;
	}
out_free:
	free_extent_map(em);
out:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len - 1,
			     &cached_state, GFP_NOFS);
	return ret;
}

static void __free_extent_buffer(struct extent_buffer *eb)
{
	btrfs_leak_debug_del(&eb->leak_list);
	kmem_cache_free(extent_buffer_cache, eb);
}

static int extent_buffer_under_io(struct extent_buffer *eb)
{
	return (atomic_read(&eb->io_pages) ||
		test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
		test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
}

/*
 * Helper for releasing extent buffer page.
 */
static void btrfs_release_extent_buffer_page(struct extent_buffer *eb,
						unsigned long start_idx)
{
	unsigned long index;
	unsigned long num_pages;
	struct page *page;
	int mapped = !test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);

	BUG_ON(extent_buffer_under_io(eb));

	num_pages = num_extent_pages(eb->start, eb->len);
	index = start_idx + num_pages;
	if (start_idx >= index)
		return;

	do {
		index--;
		page = extent_buffer_page(eb, index);
		if (page && mapped) {
			spin_lock(&page->mapping->private_lock);
			/*
			 * We do this since we'll remove the pages after we've
			 * removed the eb from the radix tree, so we could race
			 * and have this page now attached to the new eb.  So
			 * only clear page_private if it's still connected to
			 * this eb.
			 */
			if (PagePrivate(page) &&
			    page->private == (unsigned long)eb) {
				BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
				BUG_ON(PageDirty(page));
				BUG_ON(PageWriteback(page));
				/*
				 * We need to make sure we haven't been attached
				 * to a new eb.
				 */
				ClearPagePrivate(page);
				set_page_private(page, 0);
				/* One for the page private */
				page_cache_release(page);
			}
			spin_unlock(&page->mapping->private_lock);

		}
		if (page) {
			/* One for when we allocated the page */
			page_cache_release(page);
		}
	} while (index != start_idx);
}

/*
 * Helper for releasing the extent buffer.
 */
static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
{
	btrfs_release_extent_buffer_page(eb, 0);
	__free_extent_buffer(eb);
}

static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
						   u64 start,
						   unsigned long len,
						   gfp_t mask)
{
	struct extent_buffer *eb = NULL;

	eb = kmem_cache_zalloc(extent_buffer_cache, mask);
	if (eb == NULL)
		return NULL;
	eb->start = start;
	eb->len = len;
	eb->tree = tree;
	eb->bflags = 0;
	rwlock_init(&eb->lock);
	atomic_set(&eb->write_locks, 0);
	atomic_set(&eb->read_locks, 0);
	atomic_set(&eb->blocking_readers, 0);
	atomic_set(&eb->blocking_writers, 0);
	atomic_set(&eb->spinning_readers, 0);
	atomic_set(&eb->spinning_writers, 0);
	eb->lock_nested = 0;
	init_waitqueue_head(&eb->write_lock_wq);
	init_waitqueue_head(&eb->read_lock_wq);

	btrfs_leak_debug_add(&eb->leak_list, &buffers);

	spin_lock_init(&eb->refs_lock);
	atomic_set(&eb->refs, 1);
	atomic_set(&eb->io_pages, 0);

	/*
	 * Sanity checks, currently the maximum is 64k covered by 16x 4k pages
	 */
	BUILD_BUG_ON(BTRFS_MAX_METADATA_BLOCKSIZE
		> MAX_INLINE_EXTENT_BUFFER_SIZE);
	BUG_ON(len > MAX_INLINE_EXTENT_BUFFER_SIZE);

	return eb;
}

struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
{
	unsigned long i;
	struct page *p;
	struct extent_buffer *new;
	unsigned long num_pages = num_extent_pages(src->start, src->len);

	new = __alloc_extent_buffer(NULL, src->start, src->len, GFP_NOFS);
	if (new == NULL)
		return NULL;

	for (i = 0; i < num_pages; i++) {
		p = alloc_page(GFP_NOFS);
		if (!p) {
			btrfs_release_extent_buffer(new);
			return NULL;
		}
		attach_extent_buffer_page(new, p);
		WARN_ON(PageDirty(p));
		SetPageUptodate(p);
		new->pages[i] = p;
	}

	copy_extent_buffer(new, src, 0, 0, src->len);
	set_bit(EXTENT_BUFFER_UPTODATE, &new->bflags);
	set_bit(EXTENT_BUFFER_DUMMY, &new->bflags);

	return new;
}

struct extent_buffer *alloc_dummy_extent_buffer(u64 start, unsigned long len)
{
	struct extent_buffer *eb;
	unsigned long num_pages = num_extent_pages(0, len);
	unsigned long i;

	eb = __alloc_extent_buffer(NULL, start, len, GFP_NOFS);
	if (!eb)
		return NULL;

	for (i = 0; i < num_pages; i++) {
		eb->pages[i] = alloc_page(GFP_NOFS);
		if (!eb->pages[i])
			goto err;
	}
	set_extent_buffer_uptodate(eb);
	btrfs_set_header_nritems(eb, 0);
	set_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);

	return eb;
err:
	for (; i > 0; i--)
		__free_page(eb->pages[i - 1]);
	__free_extent_buffer(eb);
	return NULL;
}

static void check_buffer_tree_ref(struct extent_buffer *eb)
{
	int refs;
	/* the ref bit is tricky.  We have to make sure it is set
	 * if we have the buffer dirty.  Otherwise the
	 * code to free a buffer can end up dropping a dirty
	 * page
	 *
	 * Once the ref bit is set, it won't go away while the
	 * buffer is dirty or in writeback, and it also won't
	 * go away while we have the reference count on the
	 * eb bumped.
	 *
	 * We can't just set the ref bit without bumping the
	 * ref on the eb because free_extent_buffer might
	 * see the ref bit and try to clear it.  If this happens
	 * free_extent_buffer might end up dropping our original
	 * ref by mistake and freeing the page before we are able
	 * to add one more ref.
	 *
	 * So bump the ref count first, then set the bit.  If someone
	 * beat us to it, drop the ref we added.
	 */
	refs = atomic_read(&eb->refs);
	if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
		return;

	spin_lock(&eb->refs_lock);
	if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
		atomic_inc(&eb->refs);
	spin_unlock(&eb->refs_lock);
}

static void mark_extent_buffer_accessed(struct extent_buffer *eb)
{
	unsigned long num_pages, i;

	check_buffer_tree_ref(eb);

	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = 0; i < num_pages; i++) {
		struct page *p = extent_buffer_page(eb, i);
		mark_page_accessed(p);
	}
}

struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
					  u64 start, unsigned long len)
{
	unsigned long num_pages = num_extent_pages(start, len);
	unsigned long i;
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	struct extent_buffer *eb;
	struct extent_buffer *exists = NULL;
	struct page *p;
	struct address_space *mapping = tree->mapping;
	int uptodate = 1;
	int ret;

	rcu_read_lock();
	eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
	if (eb && atomic_inc_not_zero(&eb->refs)) {
		rcu_read_unlock();
		mark_extent_buffer_accessed(eb);
		return eb;
	}
	rcu_read_unlock();

	eb = __alloc_extent_buffer(tree, start, len, GFP_NOFS);
	if (!eb)
		return NULL;

	for (i = 0; i < num_pages; i++, index++) {
		p = find_or_create_page(mapping, index, GFP_NOFS);
		if (!p)
			goto free_eb;

		spin_lock(&mapping->private_lock);
		if (PagePrivate(p)) {
			/*
			 * We could have already allocated an eb for this page
			 * and attached one so let's see if we can get a ref on
			 * the existing eb, and if we can we know it's good and
			 * we can just return that one, else we know we can just
			 * overwrite page->private.
			 */
			exists = (struct extent_buffer *)p->private;
			if (atomic_inc_not_zero(&exists->refs)) {
				spin_unlock(&mapping->private_lock);
				unlock_page(p);
				page_cache_release(p);
				mark_extent_buffer_accessed(exists);
				goto free_eb;
			}

			/*
			 * Do this so attach doesn't complain and we need to
			 * drop the ref the old guy had.
			 */
			ClearPagePrivate(p);
			WARN_ON(PageDirty(p));
			page_cache_release(p);
		}
		attach_extent_buffer_page(eb, p);
		spin_unlock(&mapping->private_lock);
		WARN_ON(PageDirty(p));
		mark_page_accessed(p);
		eb->pages[i] = p;
		if (!PageUptodate(p))
			uptodate = 0;

		/*
		 * see below about how we avoid a nasty race with release page
		 * and why we unlock later
		 */
	}
	if (uptodate)
		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
again:
	ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
	if (ret)
		goto free_eb;

	spin_lock(&tree->buffer_lock);
	ret = radix_tree_insert(&tree->buffer, start >> PAGE_CACHE_SHIFT, eb);
	if (ret == -EEXIST) {
		exists = radix_tree_lookup(&tree->buffer,
						start >> PAGE_CACHE_SHIFT);
		if (!atomic_inc_not_zero(&exists->refs)) {
			spin_unlock(&tree->buffer_lock);
			radix_tree_preload_end();
			exists = NULL;
			goto again;
		}
		spin_unlock(&tree->buffer_lock);
		radix_tree_preload_end();
		mark_extent_buffer_accessed(exists);
		goto free_eb;
	}
	/* add one reference for the tree */
	check_buffer_tree_ref(eb);
	spin_unlock(&tree->buffer_lock);
	radix_tree_preload_end();

	/*
	 * there is a race where release page may have
	 * tried to find this extent buffer in the radix
	 * but failed.  It will tell the VM it is safe to
	 * reclaim the page, and it will clear the page private bit.
	 * We must make sure to set the page private bit properly
	 * after the extent buffer is in the radix tree so
	 * it doesn't get lost
	 */
	SetPageChecked(eb->pages[0]);
	for (i = 1; i < num_pages; i++) {
		p = extent_buffer_page(eb, i);
		ClearPageChecked(p);
		unlock_page(p);
	}
	unlock_page(eb->pages[0]);
	return eb;

free_eb:
	for (i = 0; i < num_pages; i++) {
		if (eb->pages[i])
			unlock_page(eb->pages[i]);
	}

	WARN_ON(!atomic_dec_and_test(&eb->refs));
	btrfs_release_extent_buffer(eb);
	return exists;
}

struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
					 u64 start, unsigned long len)
{
	struct extent_buffer *eb;

	rcu_read_lock();
	eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
	if (eb && atomic_inc_not_zero(&eb->refs)) {
		rcu_read_unlock();
		mark_extent_buffer_accessed(eb);
		return eb;
	}
	rcu_read_unlock();

	return NULL;
}

static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
{
	struct extent_buffer *eb =
			container_of(head, struct extent_buffer, rcu_head);

	__free_extent_buffer(eb);
}

/* Expects to have eb->refs_lock already held */
static int release_extent_buffer(struct extent_buffer *eb)
{
	WARN_ON(atomic_read(&eb->refs) == 0);
	if (atomic_dec_and_test(&eb->refs)) {
		if (test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags)) {
			spin_unlock(&eb->refs_lock);
		} else {
			struct extent_io_tree *tree = eb->tree;

			spin_unlock(&eb->refs_lock);

			spin_lock(&tree->buffer_lock);
			radix_tree_delete(&tree->buffer,
					  eb->start >> PAGE_CACHE_SHIFT);
			spin_unlock(&tree->buffer_lock);
		}

		/* Should be safe to release our pages at this point */
		btrfs_release_extent_buffer_page(eb, 0);
		call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
		return 1;
	}
	spin_unlock(&eb->refs_lock);

	return 0;
}

void free_extent_buffer(struct extent_buffer *eb)
{
	int refs;
	int old;
	if (!eb)
		return;

	while (1) {
		refs = atomic_read(&eb->refs);
		if (refs <= 3)
			break;
		old = atomic_cmpxchg(&eb->refs, refs, refs - 1);
		if (old == refs)
			return;
	}

	spin_lock(&eb->refs_lock);
	if (atomic_read(&eb->refs) == 2 &&
	    test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags))
		atomic_dec(&eb->refs);

	if (atomic_read(&eb->refs) == 2 &&
	    test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
	    !extent_buffer_under_io(eb) &&
	    test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
		atomic_dec(&eb->refs);

	/*
	 * I know this is terrible, but it's temporary until we stop tracking
	 * the uptodate bits and such for the extent buffers.
	 */
	release_extent_buffer(eb);
}

void free_extent_buffer_stale(struct extent_buffer *eb)
{
	if (!eb)
		return;

	spin_lock(&eb->refs_lock);
	set_bit(EXTENT_BUFFER_STALE, &eb->bflags);

	if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
	    test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
		atomic_dec(&eb->refs);
	release_extent_buffer(eb);
}

void clear_extent_buffer_dirty(struct extent_buffer *eb)
{
	unsigned long i;
	unsigned long num_pages;
	struct page *page;

	num_pages = num_extent_pages(eb->start, eb->len);

	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if (!PageDirty(page))
			continue;

		lock_page(page);
		WARN_ON(!PagePrivate(page));

		clear_page_dirty_for_io(page);
		spin_lock_irq(&page->mapping->tree_lock);
		if (!PageDirty(page)) {
			radix_tree_tag_clear(&page->mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_DIRTY);
		}
		spin_unlock_irq(&page->mapping->tree_lock);
		ClearPageError(page);
		unlock_page(page);
	}
	WARN_ON(atomic_read(&eb->refs) == 0);
}

int set_extent_buffer_dirty(struct extent_buffer *eb)
{
	unsigned long i;
	unsigned long num_pages;
	int was_dirty = 0;

	check_buffer_tree_ref(eb);

	was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);

	num_pages = num_extent_pages(eb->start, eb->len);
	WARN_ON(atomic_read(&eb->refs) == 0);
	WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));

	for (i = 0; i < num_pages; i++)
		set_page_dirty(extent_buffer_page(eb, i));
	return was_dirty;
}

int clear_extent_buffer_uptodate(struct extent_buffer *eb)
{
	unsigned long i;
	struct page *page;
	unsigned long num_pages;

	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if (page)
			ClearPageUptodate(page);
	}
	return 0;
}

int set_extent_buffer_uptodate(struct extent_buffer *eb)
{
	unsigned long i;
	struct page *page;
	unsigned long num_pages;

	set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		SetPageUptodate(page);
	}
	return 0;
}

int extent_buffer_uptodate(struct extent_buffer *eb)
{
	return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
}

int read_extent_buffer_pages(struct extent_io_tree *tree,
			     struct extent_buffer *eb, u64 start, int wait,
			     get_extent_t *get_extent, int mirror_num)
{
	unsigned long i;
	unsigned long start_i;
	struct page *page;
	int err;
	int ret = 0;
	int locked_pages = 0;
	int all_uptodate = 1;
	unsigned long num_pages;
	unsigned long num_reads = 0;
	struct bio *bio = NULL;
	unsigned long bio_flags = 0;

	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
		return 0;

	if (start) {
		WARN_ON(start < eb->start);
		start_i = (start >> PAGE_CACHE_SHIFT) -
			(eb->start >> PAGE_CACHE_SHIFT);
	} else {
		start_i = 0;
	}

	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = start_i; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if (wait == WAIT_NONE) {
			if (!trylock_page(page))
				goto unlock_exit;
		} else {
			lock_page(page);
		}
		locked_pages++;
		if (!PageUptodate(page)) {
			num_reads++;
			all_uptodate = 0;
		}
	}
	if (all_uptodate) {
		if (start_i == 0)
			set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
		goto unlock_exit;
	}

	clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
	eb->read_mirror = 0;
	atomic_set(&eb->io_pages, num_reads);
	for (i = start_i; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if (!PageUptodate(page)) {
			ClearPageError(page);
			err = __extent_read_full_page(tree, page,
						      get_extent, &bio,
						      mirror_num, &bio_flags,
						      READ | REQ_META);
			if (err)
				ret = err;
		} else {
			unlock_page(page);
		}
	}

	if (bio) {
		err = submit_one_bio(READ | REQ_META, bio, mirror_num,
				     bio_flags);
		if (err)
			return err;
	}

	if (ret || wait != WAIT_COMPLETE)
		return ret;

	for (i = start_i; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		wait_on_page_locked(page);
		if (!PageUptodate(page))
			ret = -EIO;
	}

	return ret;

unlock_exit:
	i = start_i;
	while (locked_pages > 0) {
		page = extent_buffer_page(eb, i);
		i++;
		unlock_page(page);
		locked_pages--;
	}
	return ret;
}
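
/*
 * Typical metadata read pattern built from the two halves above (a sketch
 * only, loosely following read_tree_block() in disk-io.c; btree_get_extent
 * and the exact error handling are assumptions):
 *
 *	eb = alloc_extent_buffer(tree, bytenr, blocksize);
 *	if (!eb)
 *		return NULL;
 *	ret = read_extent_buffer_pages(tree, eb, 0, WAIT_COMPLETE,
 *				       btree_get_extent, mirror_num);
 *	if (ret) {
 *		free_extent_buffer(eb);
 *		return NULL;
 *	}
 */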

void read_extent_buffer(struct extent_buffer *eb, void *dstv,
			unsigned long start,
			unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *dst = (char *)dstv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);

		cur = min(len, (PAGE_CACHE_SIZE - offset));
		kaddr = page_address(page);
		memcpy(dst, kaddr + offset, cur);

		dst += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}

int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
			      unsigned long min_len, char **map,
			      unsigned long *map_start,
			      unsigned long *map_len)
{
	size_t offset = start & (PAGE_CACHE_SIZE - 1);
	char *kaddr;
	struct page *p;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
	unsigned long end_i = (start_offset + start + min_len - 1) >>
		PAGE_CACHE_SHIFT;

	if (i != end_i)
		return -EINVAL;

	if (i == 0) {
		offset = start_offset;
		*map_start = 0;
	} else {
		offset = 0;
		*map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
	}

	if (start + min_len > eb->len) {
		WARN(1, KERN_ERR "btrfs bad mapping eb start %llu len %lu, "
		       "wanted %lu %lu\n",
		       eb->start, eb->len, start, min_len);
		return -EINVAL;
	}

	p = extent_buffer_page(eb, i);
	kaddr = page_address(p);
	*map = kaddr + offset;
	*map_len = PAGE_CACHE_SIZE - offset;
	return 0;
}

int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
			  unsigned long start,
			  unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *ptr = (char *)ptrv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
	int ret = 0;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);

		cur = min(len, (PAGE_CACHE_SIZE - offset));

		kaddr = page_address(page);
		ret = memcmp(ptr, kaddr + offset, cur);
		if (ret)
			break;

		ptr += cur;
		len -= cur;
		offset = 0;
		i++;
	}
	return ret;
}

void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
			 unsigned long start, unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *src = (char *)srcv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, PAGE_CACHE_SIZE - offset);
		kaddr = page_address(page);
		memcpy(kaddr + offset, src, cur);

		src += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}

void memset_extent_buffer(struct extent_buffer *eb, char c,
			  unsigned long start, unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, PAGE_CACHE_SIZE - offset);
		kaddr = page_address(page);
		memset(kaddr + offset, c, cur);

		len -= cur;
		offset = 0;
		i++;
	}
}

void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
			unsigned long dst_offset, unsigned long src_offset,
			unsigned long len)
{
	u64 dst_len = dst->len;
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;

	WARN_ON(src->len != dst_len);

	offset = (start_offset + dst_offset) &
		((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(dst, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));

		kaddr = page_address(page);
		read_extent_buffer(src, kaddr + offset, src_offset, cur);

		src_offset += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}

static void move_pages(struct page *dst_page, struct page *src_page,
		       unsigned long dst_off, unsigned long src_off,
		       unsigned long len)
{
	char *dst_kaddr = page_address(dst_page);
	if (dst_page == src_page) {
		memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
	} else {
		char *src_kaddr = page_address(src_page);
		char *p = dst_kaddr + dst_off + len;
		char *s = src_kaddr + src_off + len;

		while (len--)
			*--p = *--s;
	}
}

static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
{
	unsigned long distance = (src > dst) ? src - dst : dst - src;
	return distance < len;
}

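/*
 * Worked example for areas_overlap(): with src == 100, dst == 120 and
 * len == 30 the distance is 20, which is less than len, so the two 30-byte
 * regions overlap and copy_pages() below must fall back to memmove().
 * With len == 15 the same distance of 20 is >= len, so memcpy() is safe.
 */
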
static void copy_pages(struct page *dst_page, struct page *src_page,
		       unsigned long dst_off, unsigned long src_off,
		       unsigned long len)
{
	char *dst_kaddr = page_address(dst_page);
	char *src_kaddr;
	int must_memmove = 0;

	if (dst_page != src_page) {
		src_kaddr = page_address(src_page);
	} else {
		src_kaddr = dst_kaddr;
		if (areas_overlap(src_off, dst_off, len))
			must_memmove = 1;
	}

	if (must_memmove)
		memmove(dst_kaddr + dst_off, src_kaddr + src_off, len);
	else
		memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
}

void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len)
{
	size_t cur;
	size_t dst_off_in_page;
	size_t src_off_in_page;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long dst_i;
	unsigned long src_i;

	if (src_offset + len > dst->len) {
		printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
		       "len %lu dst len %lu\n", src_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset + len > dst->len) {
		printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
		       "len %lu dst len %lu\n", dst_offset, len, dst->len);
		BUG_ON(1);
	}

	while (len > 0) {
		dst_off_in_page = (start_offset + dst_offset) &
			((unsigned long)PAGE_CACHE_SIZE - 1);
		src_off_in_page = (start_offset + src_offset) &
			((unsigned long)PAGE_CACHE_SIZE - 1);

		dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
		src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;

		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
					       src_off_in_page));
		cur = min_t(unsigned long, cur,
			(unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));

		copy_pages(extent_buffer_page(dst, dst_i),
			   extent_buffer_page(dst, src_i),
			   dst_off_in_page, src_off_in_page, cur);

		src_offset += cur;
		dst_offset += cur;
		len -= cur;
	}
}

void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len)
{
	size_t cur;
	size_t dst_off_in_page;
	size_t src_off_in_page;
	unsigned long dst_end = dst_offset + len - 1;
	unsigned long src_end = src_offset + len - 1;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long dst_i;
	unsigned long src_i;

	if (src_offset + len > dst->len) {
		printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
		       "len %lu len %lu\n", src_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset + len > dst->len) {
		printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
		       "len %lu len %lu\n", dst_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset < src_offset) {
		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
		return;
	}
	while (len > 0) {
		dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
		src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;

		dst_off_in_page = (start_offset + dst_end) &
			((unsigned long)PAGE_CACHE_SIZE - 1);
		src_off_in_page = (start_offset + src_end) &
			((unsigned long)PAGE_CACHE_SIZE - 1);

		cur = min_t(unsigned long, len, src_off_in_page + 1);
		cur = min(cur, dst_off_in_page + 1);
		move_pages(extent_buffer_page(dst, dst_i),
			   extent_buffer_page(dst, src_i),
			   dst_off_in_page - cur + 1,
			   src_off_in_page - cur + 1, cur);

		dst_end -= cur;
		src_end -= cur;
		len -= cur;
	}
}

int try_release_extent_buffer(struct page *page)
{
	struct extent_buffer *eb;

	/*
	 * We need to make sure nobody is attaching this page to an eb right
	 * now.
	 */
	spin_lock(&page->mapping->private_lock);
	if (!PagePrivate(page)) {
		spin_unlock(&page->mapping->private_lock);
		return 1;
	}

	eb = (struct extent_buffer *)page->private;
	BUG_ON(!eb);

	/*
	 * This is a little awful but should be ok, we need to make sure that
	 * the eb doesn't disappear out from under us while we're looking at
	 * this page.
	 */
	spin_lock(&eb->refs_lock);
	if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
		spin_unlock(&eb->refs_lock);
		spin_unlock(&page->mapping->private_lock);
		return 0;
	}
	spin_unlock(&page->mapping->private_lock);

	/*
	 * If tree ref isn't set then we know the ref on this eb is a real ref,
	 * so just return, this page will likely be freed soon anyway.
	 */
	if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
		spin_unlock(&eb->refs_lock);
		return 0;
	}

	return release_extent_buffer(eb);
}
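
/*
 * Sketch of the btree ->releasepage hook this supports (illustrative only,
 * modelled on btree_releasepage() in disk-io.c):
 *
 *	static int example_btree_releasepage(struct page *page,
 *					     gfp_t gfp_flags)
 *	{
 *		if (PageWriteback(page) || PageDirty(page))
 *			return 0;
 *
 *		return try_release_extent_buffer(page);
 *	}
 */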