/* fs/btrfs/extent_io.c */
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/version.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "extent_io.h"
#include "extent_map.h"
#include "compat.h"
#include "ctree.h"
#include "btrfs_inode.h"

/* temporary define until extent_map moves out of btrfs */
struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
				      unsigned long extra_flags,
				      void (*ctor)(void *, struct kmem_cache *,
						   unsigned long));

static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;

static LIST_HEAD(buffers);
static LIST_HEAD(states);

#ifdef LEAK_DEBUG
static spinlock_t leak_lock = SPIN_LOCK_UNLOCKED;
#endif

#define BUFFER_LRU_MAX 64

struct tree_entry {
	u64 start;
	u64 end;
	struct rb_node rb_node;
};

struct extent_page_data {
	struct bio *bio;
	struct extent_io_tree *tree;
	get_extent_t *get_extent;
};

int __init extent_io_init(void)
{
	extent_state_cache = btrfs_cache_create("extent_state",
						sizeof(struct extent_state), 0,
						NULL);
	if (!extent_state_cache)
		return -ENOMEM;

	extent_buffer_cache = btrfs_cache_create("extent_buffers",
						 sizeof(struct extent_buffer), 0,
						 NULL);
	if (!extent_buffer_cache)
		goto free_state_cache;
	return 0;

free_state_cache:
	kmem_cache_destroy(extent_state_cache);
	return -ENOMEM;
}

void extent_io_exit(void)
{
	struct extent_state *state;
	struct extent_buffer *eb;

	while (!list_empty(&states)) {
		state = list_entry(states.next, struct extent_state, leak_list);
		printk("state leak: start %Lu end %Lu state %lu in tree %p refs %d\n",
		       state->start, state->end, state->state, state->tree,
		       atomic_read(&state->refs));
		list_del(&state->leak_list);
		kmem_cache_free(extent_state_cache, state);
	}

	while (!list_empty(&buffers)) {
		eb = list_entry(buffers.next, struct extent_buffer, leak_list);
		printk("buffer leak start %Lu len %lu refs %d\n",
		       eb->start, eb->len, atomic_read(&eb->refs));
		list_del(&eb->leak_list);
		kmem_cache_free(extent_buffer_cache, eb);
	}
	if (extent_state_cache)
		kmem_cache_destroy(extent_state_cache);
	if (extent_buffer_cache)
		kmem_cache_destroy(extent_buffer_cache);
}

void extent_io_tree_init(struct extent_io_tree *tree,
			 struct address_space *mapping, gfp_t mask)
{
	tree->state.rb_node = NULL;
	tree->buffer.rb_node = NULL;
	tree->ops = NULL;
	tree->dirty_bytes = 0;
	spin_lock_init(&tree->lock);
	spin_lock_init(&tree->buffer_lock);
	tree->mapping = mapping;
}
EXPORT_SYMBOL(extent_io_tree_init);

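/*
 * A minimal usage sketch (illustrative only): the tree is normally
 * embedded in a larger per-inode structure and initialized once.  The
 * ops table name below is hypothetical; callers supply whatever
 * extent_io_ops they implement:
 *
 *	struct extent_io_tree io_tree;
 *
 *	extent_io_tree_init(&io_tree, inode->i_mapping, GFP_NOFS);
 *	io_tree.ops = &my_extent_io_ops;
 */
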
struct extent_state *alloc_extent_state(gfp_t mask)
{
	struct extent_state *state;
#ifdef LEAK_DEBUG
	unsigned long flags;
#endif

	state = kmem_cache_alloc(extent_state_cache, mask);
	if (!state)
		return state;
	state->state = 0;
	state->private = 0;
	state->tree = NULL;
#ifdef LEAK_DEBUG
	spin_lock_irqsave(&leak_lock, flags);
	list_add(&state->leak_list, &states);
	spin_unlock_irqrestore(&leak_lock, flags);
#endif
	atomic_set(&state->refs, 1);
	init_waitqueue_head(&state->wq);
	return state;
}
EXPORT_SYMBOL(alloc_extent_state);

void free_extent_state(struct extent_state *state)
{
	if (!state)
		return;
	if (atomic_dec_and_test(&state->refs)) {
#ifdef LEAK_DEBUG
		unsigned long flags;
#endif
		WARN_ON(state->tree);
#ifdef LEAK_DEBUG
		spin_lock_irqsave(&leak_lock, flags);
		list_del(&state->leak_list);
		spin_unlock_irqrestore(&leak_lock, flags);
#endif
		kmem_cache_free(extent_state_cache, state);
	}
}
EXPORT_SYMBOL(free_extent_state);

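/*
 * Pinning sketch (illustrative only): extent_state structs are reference
 * counted, so a caller that must survive a lock drop takes a reference
 * first, mirroring the pattern wait_extent_bit() uses below:
 *
 *	atomic_inc(&state->refs);
 *	wait_on_state(tree, state);	(may drop and retake tree->lock)
 *	free_extent_state(state);	(drops the pin again)
 */
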
static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset > entry->end)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	entry = rb_entry(node, struct tree_entry, rb_node);
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
				      struct rb_node **prev_ret,
				      struct rb_node **next_ret)
{
	struct rb_root *root = &tree->state;
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct tree_entry *entry;
	struct tree_entry *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct tree_entry, rb_node);
		prev = n;
		prev_entry = entry;

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset > entry->end)
			n = n->rb_right;
		else
			return n;
	}

	if (prev_ret) {
		orig_prev = prev;
		while (prev && offset > prev_entry->end) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*prev_ret = prev;
		prev = orig_prev;
	}

	if (next_ret) {
		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*next_ret = prev;
	}
	return NULL;
}

static inline struct rb_node *tree_search(struct extent_io_tree *tree,
					  u64 offset)
{
	struct rb_node *prev = NULL;
	struct rb_node *ret;

	ret = __etree_search(tree, offset, &prev, NULL);
	if (!ret)
		return prev;
	return ret;
}

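/*
 * Search semantics, by example: tree_search() returns the state whose
 * range contains 'offset', or else the first state beginning after it.
 * With states [0,4] and [10,14] in the tree, a search for 6 returns
 * [10,14], so callers working on [5,7] must still notice that
 * state->start > end and stop.
 */
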
static struct extent_buffer *buffer_tree_insert(struct extent_io_tree *tree,
						u64 offset, struct rb_node *node)
{
	struct rb_root *root = &tree->buffer;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct extent_buffer *eb;

	while (*p) {
		parent = *p;
		eb = rb_entry(parent, struct extent_buffer, rb_node);

		if (offset < eb->start)
			p = &(*p)->rb_left;
		else if (offset > eb->start)
			p = &(*p)->rb_right;
		else
			return eb;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

static struct extent_buffer *buffer_search(struct extent_io_tree *tree,
					   u64 offset)
{
	struct rb_root *root = &tree->buffer;
	struct rb_node *n = root->rb_node;
	struct extent_buffer *eb;

	while (n) {
		eb = rb_entry(n, struct extent_buffer, rb_node);
		if (offset < eb->start)
			n = n->rb_left;
		else if (offset > eb->start)
			n = n->rb_right;
		else
			return eb;
	}
	return NULL;
}

/*
 * utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree.  Extents with EXTENT_IOBITS set in their state field
 * are not merged because the end_io handlers need to be able to do
 * operations on them without sleeping (or doing allocations/splits).
 *
 * This should be called with the tree lock held.
 */
static int merge_state(struct extent_io_tree *tree,
		       struct extent_state *state)
{
	struct extent_state *other;
	struct rb_node *other_node;

	if (state->state & EXTENT_IOBITS)
		return 0;

	other_node = rb_prev(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->end == state->start - 1 &&
		    other->state == state->state) {
			state->start = other->start;
			other->tree = NULL;
			rb_erase(&other->rb_node, &tree->state);
			free_extent_state(other);
		}
	}
	other_node = rb_next(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->start == state->end + 1 &&
		    other->state == state->state) {
			other->start = state->start;
			state->tree = NULL;
			rb_erase(&state->rb_node, &tree->state);
			free_extent_state(state);
		}
	}
	return 0;
}

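/*
 * Merge example: if [0,4095] and [4096,8191] both hold exactly
 * EXTENT_DIRTY, merge_state() collapses them into one [0,8191] state and
 * frees the absorbed struct.  If either state carries any EXTENT_IOBITS
 * bit, the early return above leaves them separate so end_io handlers
 * can keep operating on them without allocations or splits.
 */
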
static void set_state_cb(struct extent_io_tree *tree,
			 struct extent_state *state,
			 unsigned long bits)
{
	if (tree->ops && tree->ops->set_bit_hook) {
		tree->ops->set_bit_hook(tree->mapping->host, state->start,
					state->end, state->state, bits);
	}
}

static void clear_state_cb(struct extent_io_tree *tree,
			   struct extent_state *state,
			   unsigned long bits)
{
	/* test the hook we are about to call, not set_bit_hook */
	if (tree->ops && tree->ops->clear_bit_hook) {
		tree->ops->clear_bit_hook(tree->mapping->host, state->start,
					  state->end, state->state, bits);
	}
}

/*
 * insert an extent_state struct into the tree.  'bits' are set on the
 * struct before it is inserted.
 *
 * This may return -EEXIST if the extent is already there, in which case the
 * state struct is freed.
 *
 * The tree lock is not taken internally.  This is a utility function and
 * probably isn't what you want to call (see set/clear_extent_bit).
 */
static int insert_state(struct extent_io_tree *tree,
			struct extent_state *state, u64 start, u64 end,
			int bits)
{
	struct rb_node *node;

	if (end < start) {
		printk("end < start %Lu %Lu\n", end, start);
		WARN_ON(1);
	}
	if (bits & EXTENT_DIRTY)
		tree->dirty_bytes += end - start + 1;
	set_state_cb(tree, state, bits);
	state->state |= bits;
	state->start = start;
	state->end = end;
	node = tree_insert(&tree->state, end, &state->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk("found node %Lu %Lu on insert of %Lu %Lu\n",
		       found->start, found->end, start, end);
		free_extent_state(state);
		return -EEXIST;
	}
	state->tree = tree;
	merge_state(tree, state);
	return 0;
}

/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created second half.  'split' indicates an
 * offset inside 'orig' where it should be split.
 *
 * Before calling,
 * the tree has 'orig' at [orig->start, orig->end].  After calling, there
 * are two extent state structs in the tree:
 * prealloc: [orig->start, split - 1]
 * orig: [split, orig->end]
 *
 * The tree locks are not taken by this function.  They need to be held
 * by the caller.
 */
static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
		       struct extent_state *prealloc, u64 split)
{
	struct rb_node *node;

	prealloc->start = orig->start;
	prealloc->end = split - 1;
	prealloc->state = orig->state;
	orig->start = split;

	node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk("found node %Lu %Lu on insert of %Lu %Lu\n",
		       found->start, found->end, prealloc->start, prealloc->end);
		free_extent_state(prealloc);
		return -EEXIST;
	}
	prealloc->tree = tree;
	return 0;
}

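/*
 * Worked example: splitting a state [0,8191] at split == 4096 leaves the
 * tree holding prealloc as [0,4095] and orig as [4096,8191].  Both halves
 * keep the original state bits, so a later clear on [0,4095] touches only
 * the prealloc half.
 */
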
/*
 * utility function to clear some bits in an extent state struct.
 * it will optionally wake up any one waiting on this state (wake == 1), or
 * forcibly remove the state from the tree (delete == 1).
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree
 */
static int clear_state_bit(struct extent_io_tree *tree,
			   struct extent_state *state, int bits, int wake,
			   int delete)
{
	int ret = state->state & bits;

	if ((bits & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		WARN_ON(range > tree->dirty_bytes);
		tree->dirty_bytes -= range;
	}
	clear_state_cb(tree, state, bits);
	state->state &= ~bits;
	if (wake)
		wake_up(&state->wq);
	if (delete || state->state == 0) {
		if (state->tree) {
			clear_state_cb(tree, state, state->state);
			rb_erase(&state->rb_node, &tree->state);
			state->tree = NULL;
			free_extent_state(state);
		} else {
			WARN_ON(1);
		}
	} else {
		merge_state(tree, state);
	}
	return ret;
}

/*
 * clear some bits on a range in the tree.  This may require splitting
 * or inserting elements in the tree, so the gfp mask is used to
 * indicate which allocations or sleeping are allowed.
 *
 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 * the given range from the tree regardless of state (ie for truncate).
 *
 * the range [start, end] is inclusive.
 *
 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
 * bits were already set, or zero if none of the bits were already set.
 */
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		     int bits, int wake, int delete, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	unsigned long flags;
	int err;
	int set = 0;

again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	spin_lock_irqsave(&tree->lock, flags);
	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start > end)
		goto out;
	WARN_ON(state->end < start);

	/*
	 * | ---- desired range ---- |
	 *  | state | or
	 *  | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip
	 * bits on second half.
	 *
	 * If the extent we found extends past our range, we
	 * just split and search again.  It'll get split again
	 * the next time though.
	 *
	 * If the extent we found is inside our range, we clear
	 * the desired bit on it.
	 */

	if (state->start < start) {
		if (!prealloc)
			prealloc = alloc_extent_state(GFP_ATOMIC);
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			start = state->end + 1;
			set |= clear_state_bit(tree, state, bits,
					       wake, delete);
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and clear the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		if (!prealloc)
			prealloc = alloc_extent_state(GFP_ATOMIC);
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		if (wake)
			wake_up(&state->wq);
		set |= clear_state_bit(tree, prealloc, bits,
				       wake, delete);
		prealloc = NULL;
		goto out;
	}

	start = state->end + 1;
	set |= clear_state_bit(tree, state, bits, wake, delete);
	goto search_again;

out:
	spin_unlock_irqrestore(&tree->lock, flags);
	if (prealloc)
		free_extent_state(prealloc);

	return set;

search_again:
	if (start > end)
		goto out;
	spin_unlock_irqrestore(&tree->lock, flags);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
EXPORT_SYMBOL(clear_extent_bit);

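/*
 * Worked example: clearing EXTENT_DIRTY over [4096,8191] when the tree
 * holds a single dirty state [0,12287] splits that state twice, leaving
 * [0,4095] and [8192,12287] dirty, subtracting 4096 from dirty_bytes,
 * and returning > 0 because the bit was set somewhere in the range.
 */
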
static int wait_on_state(struct extent_io_tree *tree,
			 struct extent_state *state)
{
	DEFINE_WAIT(wait);
	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
	spin_unlock_irq(&tree->lock);
	schedule();
	spin_lock_irq(&tree->lock);
	finish_wait(&state->wq, &wait);
	return 0;
}

/*
 * waits for one or more bits to clear on a range in the state tree.
 * The range [start, end] is inclusive.
 * The tree lock is taken by this function
 */
int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
{
	struct extent_state *state;
	struct rb_node *node;

	spin_lock_irq(&tree->lock);
again:
	while (1) {
		/*
		 * this search will find all the extents that end after
		 * our range starts
		 */
		node = tree_search(tree, start);
		if (!node)
			break;

		state = rb_entry(node, struct extent_state, rb_node);

		if (state->start > end)
			goto out;

		if (state->state & bits) {
			start = state->start;
			atomic_inc(&state->refs);
			wait_on_state(tree, state);
			free_extent_state(state);
			goto again;
		}
		start = state->end + 1;

		if (start > end)
			break;

		if (need_resched()) {
			spin_unlock_irq(&tree->lock);
			cond_resched();
			spin_lock_irq(&tree->lock);
		}
	}
out:
	spin_unlock_irq(&tree->lock);
	return 0;
}
EXPORT_SYMBOL(wait_extent_bit);

static void set_state_bits(struct extent_io_tree *tree,
			   struct extent_state *state,
			   int bits)
{
	if ((bits & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		tree->dirty_bytes += range;
	}
	set_state_cb(tree, state, bits);
	state->state |= bits;
}

/*
 * set some bits on a range in the tree.  This may require allocations
 * or sleeping, so the gfp mask is used to indicate what is allowed.
 *
 * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
 * range already has the desired bits set.  The start of the existing
 * range is returned in failed_start in this case.
 *
 * [start, end] is inclusive
 * This takes the tree lock.
 */
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits,
		   int exclusive, u64 *failed_start, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	unsigned long flags;
	int err = 0;
	int set;
	u64 last_start;
	u64 last_end;

again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	spin_lock_irqsave(&tree->lock, flags);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node) {
		err = insert_state(tree, prealloc, start, end, bits);
		prealloc = NULL;
		BUG_ON(err == -EEXIST);
		goto out;
	}

	state = rb_entry(node, struct extent_state, rb_node);
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		set = state->state & bits;
		if (set && exclusive) {
			*failed_start = state->start;
			err = -EEXIST;
			goto out;
		}
		set_state_bits(tree, state, bits);
		start = state->end + 1;
		merge_state(tree, state);
		goto search_again;
	}

	/*
	 * | ---- desired range ---- |
	 *  | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		set = state->state & bits;
		if (exclusive && set) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set_state_bits(tree, state, bits);
			start = state->end + 1;
			merge_state(tree, state);
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or               | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;
		err = insert_state(tree, prealloc, start, this_end,
				   bits);
		prealloc = NULL;
		BUG_ON(err == -EEXIST);
		if (err)
			goto out;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		set = state->state & bits;
		if (exclusive && set) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		set_state_bits(tree, prealloc, bits);
		merge_state(tree, prealloc);
		prealloc = NULL;
		goto out;
	}

	goto search_again;

out:
	spin_unlock_irqrestore(&tree->lock, flags);
	if (prealloc)
		free_extent_state(prealloc);

	return err;

search_again:
	if (start > end)
		goto out;
	spin_unlock_irqrestore(&tree->lock, flags);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
EXPORT_SYMBOL(set_extent_bit);

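/*
 * Worked example: setting EXTENT_DIRTY over [0,8191] when only
 * [4096,6143] exists first hits the 'hole' case and inserts a dirty
 * [0,4095], then sets the bit on the existing state (merging the two),
 * and finally inserts [6144,8191] on the next pass, ending with one
 * merged dirty state covering [0,8191].
 */
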
/* wrappers around set/clear extent bit */
int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_dirty);

int set_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
		       gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_ORDERED, 0, NULL, mask);
}
EXPORT_SYMBOL(set_extent_ordered);

int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		    int bits, gfp_t mask)
{
	return set_extent_bit(tree, start, end, bits, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_bits);

int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		      int bits, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_bits);

int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
			gfp_t mask)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_DIRTY,
			      0, NULL, mask);
}
EXPORT_SYMBOL(set_extent_delalloc);

int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
		       gfp_t mask)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_dirty);

int clear_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
			 gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_ORDERED, 1, 0, mask);
}
EXPORT_SYMBOL(clear_extent_ordered);

int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
		   gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_new);

int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_new);

int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
			gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_uptodate);

int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
			  gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_uptodate);

int set_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
			 gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
			      0, NULL, mask);
}
EXPORT_SYMBOL(set_extent_writeback);

int clear_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
			   gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
}
EXPORT_SYMBOL(clear_extent_writeback);

int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end)
{
	return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
}
EXPORT_SYMBOL(wait_on_extent_writeback);

int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
{
	int err;
	u64 failed_start;

	while (1) {
		err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
				     &failed_start, mask);
		if (err == -EEXIST && (mask & __GFP_WAIT)) {
			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
			start = failed_start;
		} else {
			break;
		}
		WARN_ON(start > end);
	}
	return err;
}
EXPORT_SYMBOL(lock_extent);

int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
		  gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
}
EXPORT_SYMBOL(unlock_extent);

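/*
 * Usage sketch (illustrative only): the usual pattern brackets an I/O on
 * a byte range with a lock/unlock pair, exactly as
 * __extent_read_full_page() does below for one page:
 *
 *	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
 *	u64 end = start + PAGE_CACHE_SIZE - 1;
 *
 *	lock_extent(tree, start, end, GFP_NOFS);
 *	(... fill or write the range ...)
 *	unlock_extent(tree, start, end, GFP_NOFS);
 */
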
/*
 * helper function to set pages and extents in the tree dirty
 */
int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		__set_page_dirty_nobuffers(page);
		page_cache_release(page);
		index++;
	}
	set_extent_dirty(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(set_range_dirty);

/*
 * helper function to set both pages and extents in the tree writeback
 */
int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		set_page_writeback(page);
		page_cache_release(page);
		index++;
	}
	set_extent_writeback(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(set_range_writeback);

int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, int bits)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 1;

	spin_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->end >= start && (state->state & bits)) {
			*start_ret = state->start;
			*end_ret = state->end;
			ret = 0;
			break;
		}
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	spin_unlock_irq(&tree->lock);
	return ret;
}
EXPORT_SYMBOL(find_first_extent_bit);

struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
						 u64 start, int bits)
{
	struct rb_node *node;
	struct extent_state *state;

	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->end >= start && (state->state & bits))
			return state;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	return NULL;
}
EXPORT_SYMBOL(find_first_extent_bit_state);

u64 find_lock_delalloc_range(struct extent_io_tree *tree,
			     u64 *start, u64 *end, u64 max_bytes)
{
	struct rb_node *node;
	struct extent_state *state;
	u64 cur_start = *start;
	u64 found = 0;
	u64 total_bytes = 0;

	spin_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
search_again:
	node = tree_search(tree, cur_start);
	if (!node) {
		if (!found)
			*end = (u64)-1;
		goto out;
	}

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (found && state->start != cur_start)
			goto out;
		if (!(state->state & EXTENT_DELALLOC)) {
			if (!found)
				*end = state->end;
			goto out;
		}
		if (!found) {
			struct extent_state *prev_state;
			struct rb_node *prev_node = node;
			while (1) {
				prev_node = rb_prev(prev_node);
				if (!prev_node)
					break;
				prev_state = rb_entry(prev_node,
						      struct extent_state,
						      rb_node);
				if (!(prev_state->state & EXTENT_DELALLOC))
					break;
				state = prev_state;
				node = prev_node;
			}
		}
		if (state->state & EXTENT_LOCKED) {
			DEFINE_WAIT(wait);
			atomic_inc(&state->refs);
			prepare_to_wait(&state->wq, &wait,
					TASK_UNINTERRUPTIBLE);
			spin_unlock_irq(&tree->lock);
			schedule();
			spin_lock_irq(&tree->lock);
			finish_wait(&state->wq, &wait);
			free_extent_state(state);
			goto search_again;
		}
		set_state_cb(tree, state, EXTENT_LOCKED);
		state->state |= EXTENT_LOCKED;
		if (!found)
			*start = state->start;
		found++;
		*end = state->end;
		cur_start = state->end + 1;
		node = rb_next(node);
		if (!node)
			break;
		total_bytes += state->end - state->start + 1;
		if (total_bytes >= max_bytes)
			break;
	}
out:
	spin_unlock_irq(&tree->lock);
	return found;
}

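/*
 * Behavior note: because of the rb_prev() walk above, the returned range
 * may begin before the *start the caller passed in; writepage relies on
 * this to pick up delalloc bytes from earlier pages that belong to the
 * same run.  Every state in the run is left EXTENT_LOCKED on return, so
 * callers must clear that bit when they finish.
 */
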
u64 count_range_bits(struct extent_io_tree *tree,
		     u64 *start, u64 search_end, u64 max_bytes,
		     unsigned long bits)
{
	struct rb_node *node;
	struct extent_state *state;
	u64 cur_start = *start;
	u64 total_bytes = 0;
	int found = 0;

	if (search_end <= cur_start) {
		printk("search_end %Lu start %Lu\n", search_end, cur_start);
		WARN_ON(1);
		return 0;
	}

	spin_lock_irq(&tree->lock);
	if (cur_start == 0 && bits == EXTENT_DIRTY) {
		total_bytes = tree->dirty_bytes;
		goto out;
	}
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, cur_start);
	if (!node)
		goto out;

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->start > search_end)
			break;
		if (state->end >= cur_start && (state->state & bits)) {
			total_bytes += min(search_end, state->end) + 1 -
				       max(cur_start, state->start);
			if (total_bytes >= max_bytes)
				break;
			if (!found) {
				*start = state->start;
				found = 1;
			}
		}
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	spin_unlock_irq(&tree->lock);
	return total_bytes;
}

/*
 * helper function to lock both pages and extents in the tree.
 * pages must be locked first.
 */
int lock_range(struct extent_io_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;
	int err;

	while (index <= end_index) {
		page = grab_cache_page(tree->mapping, index);
		if (!page) {
			err = -ENOMEM;
			goto failed;
		}
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto failed;
		}
		index++;
	}
	lock_extent(tree, start, end, GFP_NOFS);
	return 0;

failed:
	/*
	 * we failed above in getting the page at 'index', so we undo here
	 * up to but not including the page at 'index'
	 */
	end_index = index;
	index = start >> PAGE_CACHE_SHIFT;
	while (index < end_index) {
		page = find_get_page(tree->mapping, index);
		unlock_page(page);
		page_cache_release(page);
		index++;
	}
	return err;
}
EXPORT_SYMBOL(lock_range);

/*
 * helper function to unlock both pages and extents in the tree.
 */
int unlock_range(struct extent_io_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		unlock_page(page);
		page_cache_release(page);
		index++;
	}
	unlock_extent(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(unlock_range);

int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	spin_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	state->private = private;
out:
	spin_unlock_irq(&tree->lock);
	return ret;
}

int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	spin_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	*private = state->private;
out:
	spin_unlock_irq(&tree->lock);
	return ret;
}

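/*
 * Usage note: 'start' must exactly match an existing state's start, so
 * the private value is tied to a state created at a known boundary.  In
 * btrfs this pair is used to stash a per-block checksum for the read
 * end_io path (set at submit time, fetched for verification); that
 * caller lives in inode.c, not here.
 */
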
/*
 * searches a range in the state tree for a given mask.
 * If 'filled' == 1, this returns 1 only if every extent in the range
 * has the bits set.  Otherwise, 1 is returned if any bit in the
 * range is found set.
 */
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   int bits, int filled)
{
	struct extent_state *state = NULL;
	struct rb_node *node;
	int bitset = 0;
	unsigned long flags;

	spin_lock_irqsave(&tree->lock, flags);
	node = tree_search(tree, start);
	while (node && start <= end) {
		state = rb_entry(node, struct extent_state, rb_node);

		if (filled && state->start > start) {
			bitset = 0;
			break;
		}

		if (state->start > end)
			break;

		if (state->state & bits) {
			bitset = 1;
			if (!filled)
				break;
		} else if (filled) {
			bitset = 0;
			break;
		}
		start = state->end + 1;
		if (start > end)
			break;
		node = rb_next(node);
		if (!node) {
			if (filled)
				bitset = 0;
			break;
		}
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return bitset;
}
EXPORT_SYMBOL(test_range_bit);

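/*
 * Worked example: with dirty states [0,4095] and [8192,12287] in the
 * tree, test_range_bit(tree, 0, 12287, EXTENT_DIRTY, 1) returns 0
 * because of the gap at [4096,8191], while the same call with
 * filled == 0 returns 1 as soon as the first dirty state is seen.
 */
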
/*
 * helper function to set a given page up to date if all the
 * extents in the tree for that page are up to date
 */
static int check_page_uptodate(struct extent_io_tree *tree,
			       struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
		SetPageUptodate(page);
	return 0;
}

/*
 * helper function to unlock a page if all the extents in the tree
 * for that page are unlocked
 */
static int check_page_locked(struct extent_io_tree *tree,
			     struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
		unlock_page(page);
	return 0;
}

/*
 * helper function to end page writeback if all the extents
 * in the tree for that page are done with writeback
 */
static int check_page_writeback(struct extent_io_tree *tree,
				struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
		end_page_writeback(page);
	return 0;
}

/* lots and lots of room for performance fixes in the end_bio funcs */

/*
 * after a writepage IO is done, we need to:
 * clear the uptodate bits on error
 * clear the writeback bits in the extent tree for this IO
 * end_page_writeback if the page has no more pending IO
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
static void end_bio_extent_writepage(struct bio *bio, int err)
{
	int uptodate = err == 0;
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_io_tree *tree;
	u64 start;
	u64 end;
	int whole_page;
	int ret;

	do {
		struct page *page = bvec->bv_page;
		tree = &BTRFS_I(page->mapping->host)->io_tree;

		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			 bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
			whole_page = 1;
		else
			whole_page = 0;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);
		if (tree->ops && tree->ops->writepage_end_io_hook) {
			ret = tree->ops->writepage_end_io_hook(page, start,
							       end, NULL,
							       uptodate);
			if (ret)
				uptodate = 0;
		}

		if (!uptodate && tree->ops &&
		    tree->ops->writepage_io_failed_hook) {
			ret = tree->ops->writepage_io_failed_hook(bio, page,
								  start, end,
								  NULL);
			if (ret == 0) {
				uptodate = (err == 0);
				continue;
			}
		}

		if (!uptodate) {
			clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
			ClearPageUptodate(page);
			SetPageError(page);
		}

		clear_extent_writeback(tree, start, end, GFP_ATOMIC);

		if (whole_page)
			end_page_writeback(page);
		else
			check_page_writeback(tree, page);
	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
}

/*
 * after a readpage IO is done, we need to:
 * clear the uptodate bits on error
 * set the uptodate bits if things worked
 * set the page up to date if all extents in the tree are uptodate
 * clear the lock bit in the extent tree
 * unlock the page if there are no other extents locked for it
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
static void end_bio_extent_readpage(struct bio *bio, int err)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_io_tree *tree;
	u64 start;
	u64 end;
	int whole_page;
	int ret;

	do {
		struct page *page = bvec->bv_page;
		tree = &BTRFS_I(page->mapping->host)->io_tree;

		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			 bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
			whole_page = 1;
		else
			whole_page = 0;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
			ret = tree->ops->readpage_end_io_hook(page, start, end,
							      NULL);
			if (ret)
				uptodate = 0;
		}
		if (!uptodate && tree->ops &&
		    tree->ops->readpage_io_failed_hook) {
			ret = tree->ops->readpage_io_failed_hook(bio, page,
								 start, end,
								 NULL);
			if (ret == 0) {
				uptodate =
					test_bit(BIO_UPTODATE, &bio->bi_flags);
				continue;
			}
		}

		if (uptodate)
			set_extent_uptodate(tree, start, end,
					    GFP_ATOMIC);
		unlock_extent(tree, start, end, GFP_ATOMIC);

		if (whole_page) {
			if (uptodate) {
				SetPageUptodate(page);
			} else {
				ClearPageUptodate(page);
				SetPageError(page);
			}
			unlock_page(page);
		} else {
			if (uptodate) {
				check_page_uptodate(tree, page);
			} else {
				ClearPageUptodate(page);
				SetPageError(page);
			}
			check_page_locked(tree, page);
		}
	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
}

/*
 * IO done from prepare_write is pretty simple, we just unlock
 * the structs in the extent tree when done, and set the uptodate bits
 * as appropriate.
 */
static void end_bio_extent_preparewrite(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_io_tree *tree;
	u64 start;
	u64 end;

	do {
		struct page *page = bvec->bv_page;
		tree = &BTRFS_I(page->mapping->host)->io_tree;

		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			 bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate) {
			set_extent_uptodate(tree, start, end, GFP_ATOMIC);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}

		unlock_extent(tree, start, end, GFP_ATOMIC);

	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
}

static struct bio *
extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
		 gfp_t gfp_flags)
{
	struct bio *bio;

	bio = bio_alloc(gfp_flags, nr_vecs);

	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
		while (!bio && (nr_vecs /= 2))
			bio = bio_alloc(gfp_flags, nr_vecs);
	}

	if (bio) {
		bio->bi_size = 0;
		bio->bi_bdev = bdev;
		bio->bi_sector = first_sector;
	}
	return bio;
}

static int submit_one_bio(int rw, struct bio *bio, int mirror_num)
{
	int ret = 0;
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct page *page = bvec->bv_page;
	struct extent_io_tree *tree = bio->bi_private;
	struct rb_node *node;
	struct extent_state *state;
	u64 start;
	u64 end;

	start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
	end = start + bvec->bv_len - 1;

	spin_lock_irq(&tree->lock);
	node = __etree_search(tree, start, NULL, NULL);
	BUG_ON(!node);
	state = rb_entry(node, struct extent_state, rb_node);
	while (state->end < end) {
		node = rb_next(node);
		state = rb_entry(node, struct extent_state, rb_node);
	}
	BUG_ON(state->end != end);
	spin_unlock_irq(&tree->lock);

	bio->bi_private = NULL;

	bio_get(bio);

	if (tree->ops && tree->ops->submit_bio_hook)
		tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
					   mirror_num);
	else
		submit_bio(rw, bio);
	if (bio_flagged(bio, BIO_EOPNOTSUPP))
		ret = -EOPNOTSUPP;
	bio_put(bio);
	return ret;
}

static int submit_extent_page(int rw, struct extent_io_tree *tree,
			      struct page *page, sector_t sector,
			      size_t size, unsigned long offset,
			      struct block_device *bdev,
			      struct bio **bio_ret,
			      unsigned long max_pages,
			      bio_end_io_t end_io_func,
			      int mirror_num)
{
	int ret = 0;
	struct bio *bio;
	int nr;

	if (bio_ret && *bio_ret) {
		bio = *bio_ret;
		if (bio->bi_sector + (bio->bi_size >> 9) != sector ||
		    (tree->ops && tree->ops->merge_bio_hook &&
		     tree->ops->merge_bio_hook(page, offset, size, bio)) ||
		    bio_add_page(bio, page, size, offset) < size) {
			ret = submit_one_bio(rw, bio, mirror_num);
			bio = NULL;
		} else {
			return 0;
		}
	}
	nr = bio_get_nr_vecs(bdev);
	bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
	if (!bio) {
		printk("failed to allocate bio nr %d\n", nr);
		/* bail out instead of dereferencing a NULL bio below */
		return -ENOMEM;
	}

	bio_add_page(bio, page, size, offset);
	bio->bi_end_io = end_io_func;
	bio->bi_private = tree;

	if (bio_ret) {
		*bio_ret = bio;
	} else {
		ret = submit_one_bio(rw, bio, mirror_num);
	}

	return ret;
}

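/*
 * Batching sketch: callers hand in a struct bio ** so physically
 * contiguous pages accumulate into one bio; submission only happens when
 * the next page is not contiguous, the merge hook refuses it, or
 * bio_add_page() cannot take the full length.  The read path drives it
 * like this:
 *
 *	struct bio *bio = NULL;
 *
 *	__extent_read_full_page(tree, page, get_extent, &bio, 0);
 *	if (bio)
 *		submit_one_bio(READ, bio, 0);
 */
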
void set_page_extent_mapped(struct page *page)
{
	if (!PagePrivate(page)) {
		SetPagePrivate(page);
		page_cache_get(page);
		set_page_private(page, EXTENT_PAGE_PRIVATE);
	}
}

void set_page_extent_head(struct page *page, unsigned long len)
{
	set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
}

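/*
 * Encoding note: page->private doubles as a tag.  Ordinary data pages
 * get EXTENT_PAGE_PRIVATE; the first page of an extent buffer instead
 * stores EXTENT_PAGE_PRIVATE_FIRST_PAGE or'd with the buffer length
 * shifted left by two, keeping the low flag bits clear of the length.
 */
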
/*
 * basic readpage implementation.  Locked extent state structs are inserted
 * into the tree that are removed when the IO is done (by the end_io
 * handlers)
 */
static int __extent_read_full_page(struct extent_io_tree *tree,
				   struct page *page,
				   get_extent_t *get_extent,
				   struct bio **bio, int mirror_num)
{
	struct inode *inode = page->mapping->host;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 page_end = start + PAGE_CACHE_SIZE - 1;
	u64 end;
	u64 cur = start;
	u64 extent_offset;
	u64 last_byte = i_size_read(inode);
	u64 block_start;
	u64 cur_end;
	sector_t sector;
	struct extent_map *em;
	struct block_device *bdev;
	int ret;
	int nr = 0;
	size_t page_offset = 0;
	size_t iosize;
	size_t blocksize = inode->i_sb->s_blocksize;

	set_page_extent_mapped(page);

	end = page_end;
	lock_extent(tree, start, end, GFP_NOFS);

	while (cur <= end) {
		if (cur >= last_byte) {
			char *userpage;
			iosize = PAGE_CACHE_SIZE - page_offset;
			userpage = kmap_atomic(page, KM_USER0);
			memset(userpage + page_offset, 0, iosize);
			flush_dcache_page(page);
			kunmap_atomic(userpage, KM_USER0);
			set_extent_uptodate(tree, cur, cur + iosize - 1,
					    GFP_NOFS);
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			break;
		}
		em = get_extent(inode, page, page_offset, cur,
				end - cur + 1, 0);
		if (IS_ERR(em) || !em) {
			SetPageError(page);
			unlock_extent(tree, cur, end, GFP_NOFS);
			break;
		}
		extent_offset = cur - em->start;
		if (extent_map_end(em) <= cur) {
			printk("bad mapping em [%Lu %Lu] cur %Lu\n",
			       em->start, extent_map_end(em), cur);
		}
		BUG_ON(extent_map_end(em) <= cur);
		if (end < cur) {
			printk("2bad mapping end %Lu cur %Lu\n", end, cur);
		}
		BUG_ON(end < cur);

		iosize = min(extent_map_end(em) - cur, end - cur + 1);
		cur_end = min(extent_map_end(em) - 1, end);
		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
		sector = (em->block_start + extent_offset) >> 9;
		bdev = em->bdev;
		block_start = em->block_start;
		free_extent_map(em);
		em = NULL;

		/* we've found a hole, just zero and go on */
		if (block_start == EXTENT_MAP_HOLE) {
			char *userpage;
			userpage = kmap_atomic(page, KM_USER0);
			memset(userpage + page_offset, 0, iosize);
			flush_dcache_page(page);
			kunmap_atomic(userpage, KM_USER0);

			set_extent_uptodate(tree, cur, cur + iosize - 1,
					    GFP_NOFS);
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}
		/* the get_extent function already copied into the page */
		if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
			check_page_uptodate(tree, page);
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}
		/* we have an inline extent but it didn't get marked up
		 * to date.  Error out
		 */
		if (block_start == EXTENT_MAP_INLINE) {
			SetPageError(page);
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}

		ret = 0;
		if (tree->ops && tree->ops->readpage_io_hook) {
			ret = tree->ops->readpage_io_hook(page, cur,
							  cur + iosize - 1);
		}
		if (!ret) {
			unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
			pnr -= page->index;
			ret = submit_extent_page(READ, tree, page,
						 sector, iosize, page_offset,
						 bdev, bio, pnr,
						 end_bio_extent_readpage,
						 mirror_num);
			nr++;
		}
		if (ret)
			SetPageError(page);
		cur = cur + iosize;
		page_offset += iosize;
	}
	if (!nr) {
		if (!PageError(page))
			SetPageUptodate(page);
		unlock_page(page);
	}
	return 0;
}

int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
			  get_extent_t *get_extent)
{
	struct bio *bio = NULL;
	int ret;

	ret = __extent_read_full_page(tree, page, get_extent, &bio, 0);
	if (bio)
		submit_one_bio(READ, bio, 0);
	return ret;
}
EXPORT_SYMBOL(extent_read_full_page);

1844/*
1845 * the writepage semantics are similar to regular writepage. extent
1846 * records are inserted to lock ranges in the tree, and as dirty areas
1847 * are found, they are marked writeback. Then the lock bits are removed
1848 * and the end_io handler clears the writeback ranges
1849 */
1850static int __extent_writepage(struct page *page, struct writeback_control *wbc,
1851 void *data)
1852{
1853 struct inode *inode = page->mapping->host;
1854 struct extent_page_data *epd = data;
	struct extent_io_tree *tree = epd->tree;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 delalloc_start;
	u64 page_end = start + PAGE_CACHE_SIZE - 1;
	u64 end;
	u64 cur = start;
	u64 extent_offset;
	u64 last_byte = i_size_read(inode);
	u64 block_start;
	u64 iosize;
	u64 unlock_start;
	sector_t sector;
	struct extent_map *em;
	struct block_device *bdev;
	int ret;
	int nr = 0;
	size_t pg_offset = 0;
	size_t blocksize;
	loff_t i_size = i_size_read(inode);
	unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
	u64 nr_delalloc;
	u64 delalloc_end;

	WARN_ON(!PageLocked(page));
	pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
	if (page->index > end_index ||
	    (page->index == end_index && !pg_offset)) {
		page->mapping->a_ops->invalidatepage(page, 0);
		unlock_page(page);
		return 0;
	}

	if (page->index == end_index) {
		char *userpage;

		userpage = kmap_atomic(page, KM_USER0);
		memset(userpage + pg_offset, 0,
		       PAGE_CACHE_SIZE - pg_offset);
		kunmap_atomic(userpage, KM_USER0);
		flush_dcache_page(page);
	}
	pg_offset = 0;

	set_page_extent_mapped(page);

	delalloc_start = start;
	delalloc_end = 0;
	while (delalloc_end < page_end) {
		nr_delalloc = find_lock_delalloc_range(tree, &delalloc_start,
						       &delalloc_end,
						       128 * 1024 * 1024);
		if (nr_delalloc == 0) {
			delalloc_start = delalloc_end + 1;
			continue;
		}
		tree->ops->fill_delalloc(inode, delalloc_start,
					 delalloc_end);
		clear_extent_bit(tree, delalloc_start,
				 delalloc_end,
				 EXTENT_LOCKED | EXTENT_DELALLOC,
				 1, 0, GFP_NOFS);
		delalloc_start = delalloc_end + 1;
	}
	lock_extent(tree, start, page_end, GFP_NOFS);
	unlock_start = start;

	if (tree->ops && tree->ops->writepage_start_hook) {
		ret = tree->ops->writepage_start_hook(page, start, page_end);
		if (ret == -EAGAIN) {
			unlock_extent(tree, start, page_end, GFP_NOFS);
			redirty_page_for_writepage(wbc, page);
			unlock_page(page);
			return 0;
		}
	}

	end = page_end;
	if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
		printk(KERN_ERR "found delalloc bits after lock_extent\n");
	}

	if (last_byte <= start) {
		clear_extent_dirty(tree, start, page_end, GFP_NOFS);
		unlock_extent(tree, start, page_end, GFP_NOFS);
		if (tree->ops && tree->ops->writepage_end_io_hook)
			tree->ops->writepage_end_io_hook(page, start,
							 page_end, NULL, 1);
		unlock_start = page_end + 1;
		goto done;
	}

	set_extent_uptodate(tree, start, page_end, GFP_NOFS);
	blocksize = inode->i_sb->s_blocksize;

	while (cur <= end) {
		if (cur >= last_byte) {
			clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
			unlock_extent(tree, unlock_start, page_end, GFP_NOFS);
			if (tree->ops && tree->ops->writepage_end_io_hook)
				tree->ops->writepage_end_io_hook(page, cur,
							 page_end, NULL, 1);
			unlock_start = page_end + 1;
			break;
		}
		em = epd->get_extent(inode, page, pg_offset, cur,
				     end - cur + 1, 1);
		if (IS_ERR(em) || !em) {
			SetPageError(page);
			break;
		}

		extent_offset = cur - em->start;
		BUG_ON(extent_map_end(em) <= cur);
		BUG_ON(end < cur);
		iosize = min(extent_map_end(em) - cur, end - cur + 1);
		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
		sector = (em->block_start + extent_offset) >> 9;
		bdev = em->bdev;
		block_start = em->block_start;
		free_extent_map(em);
		em = NULL;

		if (block_start == EXTENT_MAP_HOLE ||
		    block_start == EXTENT_MAP_INLINE) {
			clear_extent_dirty(tree, cur,
					   cur + iosize - 1, GFP_NOFS);

			unlock_extent(tree, unlock_start, cur + iosize - 1,
				      GFP_NOFS);

			if (tree->ops && tree->ops->writepage_end_io_hook)
				tree->ops->writepage_end_io_hook(page, cur,
							 cur + iosize - 1,
							 NULL, 1);
			cur = cur + iosize;
			pg_offset += iosize;
			unlock_start = cur;
			continue;
		}

		/* leave this out until we have a page_mkwrite call */
		if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
					 EXTENT_DIRTY, 0)) {
			cur = cur + iosize;
			pg_offset += iosize;
			continue;
		}
		clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
		if (tree->ops && tree->ops->writepage_io_hook) {
			ret = tree->ops->writepage_io_hook(page, cur,
							   cur + iosize - 1);
		} else {
			ret = 0;
		}
		if (ret) {
			SetPageError(page);
		} else {
			unsigned long max_nr = end_index + 1;

			set_range_writeback(tree, cur, cur + iosize - 1);
			if (!PageWriteback(page)) {
				printk(KERN_ERR "warning page %lu not "
				       "writeback, cur %llu end %llu\n",
				       page->index,
				       (unsigned long long)cur,
				       (unsigned long long)end);
			}

			ret = submit_extent_page(WRITE, tree, page, sector,
						 iosize, pg_offset, bdev,
						 &epd->bio, max_nr,
						 end_bio_extent_writepage, 0);
			if (ret)
				SetPageError(page);
		}
		cur = cur + iosize;
		pg_offset += iosize;
		nr++;
	}
done:
	if (nr == 0) {
		/* make sure the mapping tag for page dirty gets cleared */
		set_page_writeback(page);
		end_page_writeback(page);
	}
	if (unlock_start <= page_end)
		unlock_extent(tree, unlock_start, page_end, GFP_NOFS);
	unlock_page(page);
	return 0;
}

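/*
 * Illustration (not part of the original file): the page <-> byte-range
 * arithmetic used above, worked through for 4K pages.  For
 * page->index == 3, start = 3 << 12 = 12288 and
 * page_end = 12288 + 4096 - 1 = 16383, so [start, page_end] covers
 * exactly one page inclusively.  end_index = i_size >> 12 names the
 * last page that can hold file data, and i_size & (PAGE_CACHE_SIZE - 1)
 * is how many bytes of that page are valid (0 meaning the size is
 * page aligned, so end_index itself is past EOF).
 */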
/**
 * extent_write_cache_pages - walk the list of dirty pages of the given
 * address space and write all of them.
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 * @writepage: function called for each page
 * @data: data passed to writepage function
 *
 * If a page is already under I/O, extent_write_cache_pages() skips it,
 * even if it's dirty.  This is desirable behaviour for memory-cleaning
 * writeback, but it is INCORRECT for data-integrity system calls such
 * as fsync().  fsync() and msync() need to guarantee that all the data
 * which was dirty at the time the call was made gets new I/O started
 * against it.  If wbc->sync_mode is WB_SYNC_ALL then we were called
 * for data integrity and we must wait for existing IO to complete.
 */
int extent_write_cache_pages(struct extent_io_tree *tree,
			     struct address_space *mapping,
			     struct writeback_control *wbc,
			     writepage_t writepage, void *data)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	int scanned = 0;
	int range_whole = 0;

	if (wbc->nonblocking && bdi_write_congested(bdi)) {
		wbc->encountered_congestion = 1;
		return 0;
	}

	pagevec_init(&pvec, 0);
	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		scanned = 1;
	}
retry:
	while (!done && (index <= end) &&
	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
					      PAGECACHE_TAG_DIRTY,
					      min(end - index,
						  (pgoff_t)PAGEVEC_SIZE - 1) + 1))) {
		unsigned i;

		scanned = 1;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * At this point we hold neither mapping->tree_lock nor
			 * lock on the page itself: the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or even
			 * swizzled back from swapper_space to tmpfs file
			 * mapping
			 */
			if (tree->ops && tree->ops->write_cache_pages_lock_hook)
				tree->ops->write_cache_pages_lock_hook(page);
			else
				lock_page(page);

			if (unlikely(page->mapping != mapping)) {
				unlock_page(page);
				continue;
			}

			if (!wbc->range_cyclic && page->index > end) {
				done = 1;
				unlock_page(page);
				continue;
			}

			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);

			if (PageWriteback(page) ||
			    !clear_page_dirty_for_io(page)) {
				unlock_page(page);
				continue;
			}

			ret = (*writepage)(page, wbc, data);

			if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
				unlock_page(page);
				ret = 0;
			}
			if (ret || (--(wbc->nr_to_write) <= 0))
				done = 1;
			if (wbc->nonblocking && bdi_write_congested(bdi)) {
				wbc->encountered_congestion = 1;
				done = 1;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = 1;
		index = 0;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

	/* cast avoids overflowing the page-index shift on 32-bit */
	if (wbc->range_cont)
		wbc->range_start = (loff_t)index << PAGE_CACHE_SHIFT;
	return ret;
}
EXPORT_SYMBOL(extent_write_cache_pages);

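/*
 * Illustration (not part of the original file): a minimal sketch of
 * driving extent_write_cache_pages() directly for a data-integrity
 * style flush of a whole mapping.  The writepage callback and opaque
 * data are whatever the caller normally hands to extent_writepages();
 * only writeback_control fields the walk above actually reads are
 * filled in, and the function name is made up.
 */
#if 0	/* example only */
static int example_flush_mapping(struct extent_io_tree *tree,
				 struct address_space *mapping,
				 writepage_t writepage, void *data)
{
	struct writeback_control wbc = {
		.bdi = mapping->backing_dev_info,
		.sync_mode = WB_SYNC_ALL,	/* wait on pages under IO */
		.older_than_this = NULL,
		.nr_to_write = LONG_MAX,
		.range_start = 0,
		.range_end = LLONG_MAX,		/* the range_whole case */
	};

	return extent_write_cache_pages(tree, mapping, &wbc,
					writepage, data);
}
#endif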
int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
			   get_extent_t *get_extent,
			   struct writeback_control *wbc)
{
	int ret;
	struct address_space *mapping = page->mapping;
	struct extent_page_data epd = {
		.bio = NULL,
		.tree = tree,
		.get_extent = get_extent,
	};
	struct writeback_control wbc_writepages = {
		.bdi = wbc->bdi,
		.sync_mode = WB_SYNC_NONE,
		.older_than_this = NULL,
		.nr_to_write = 64,
		.range_start = page_offset(page) + PAGE_CACHE_SIZE,
		.range_end = (loff_t)-1,
	};

	ret = __extent_writepage(page, wbc, &epd);

	extent_write_cache_pages(tree, mapping, &wbc_writepages,
				 __extent_writepage, &epd);
	if (epd.bio)
		submit_one_bio(WRITE, epd.bio, 0);
	return ret;
}
EXPORT_SYMBOL(extent_write_full_page);

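/*
 * Illustration (not part of the original file): roughly how a
 * filesystem wires extent_write_full_page() into its
 * address_space_operations ->writepage.  btrfs does this from inode.c
 * with btrfs_get_extent as the get_extent_t callback; the function
 * name below is otherwise made up.
 */
#if 0	/* example only */
static int example_writepage(struct page *page, struct writeback_control *wbc)
{
	struct extent_io_tree *tree = &BTRFS_I(page->mapping->host)->io_tree;

	return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
}
#endif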

int extent_writepages(struct extent_io_tree *tree,
		      struct address_space *mapping,
		      get_extent_t *get_extent,
		      struct writeback_control *wbc)
{
	int ret = 0;
	struct extent_page_data epd = {
		.bio = NULL,
		.tree = tree,
		.get_extent = get_extent,
	};

	ret = extent_write_cache_pages(tree, mapping, wbc,
				       __extent_writepage, &epd);
	if (epd.bio)
		submit_one_bio(WRITE, epd.bio, 0);
	return ret;
}
EXPORT_SYMBOL(extent_writepages);

int extent_readpages(struct extent_io_tree *tree,
		     struct address_space *mapping,
		     struct list_head *pages, unsigned nr_pages,
		     get_extent_t get_extent)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	struct pagevec pvec;

	pagevec_init(&pvec, 0);
	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = list_entry(pages->prev, struct page, lru);

		prefetchw(&page->flags);
		list_del(&page->lru);
		/*
		 * what we want to do here is call add_to_page_cache_lru,
		 * but that isn't exported, so we reproduce it here
		 */
		if (!add_to_page_cache(page, mapping,
				       page->index, GFP_KERNEL)) {

			/* open coding of lru_cache_add, also not exported */
			page_cache_get(page);
			if (!pagevec_add(&pvec, page))
				__pagevec_lru_add(&pvec);
			__extent_read_full_page(tree, page, get_extent,
						&bio, 0);
		}
		page_cache_release(page);
	}
	if (pagevec_count(&pvec))
		__pagevec_lru_add(&pvec);
	BUG_ON(!list_empty(pages));
	if (bio)
		submit_one_bio(READ, bio, 0);
	return 0;
}
EXPORT_SYMBOL(extent_readpages);

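/*
 * Illustration (not part of the original file): the matching
 * ->readpages hook is a thin wrapper too, forwarding the readahead
 * list to extent_readpages() with the filesystem's get_extent_t
 * callback.  The names below are illustrative.
 */
#if 0	/* example only */
static int example_readpages(struct file *file, struct address_space *mapping,
			     struct list_head *pages, unsigned nr_pages)
{
	struct extent_io_tree *tree = &BTRFS_I(mapping->host)->io_tree;

	return extent_readpages(tree, mapping, pages, nr_pages,
				btrfs_get_extent);
}
#endif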
/*
 * basic invalidatepage code, this waits on any locked or writeback
 * ranges corresponding to the page, and then deletes any extent state
 * records from the tree
 */
int extent_invalidatepage(struct extent_io_tree *tree,
			  struct page *page, unsigned long offset)
{
	u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
	u64 end = start + PAGE_CACHE_SIZE - 1;
	size_t blocksize = page->mapping->host->i_sb->s_blocksize;

	start += (offset + blocksize - 1) & ~(blocksize - 1);
	if (start > end)
		return 0;

	lock_extent(tree, start, end, GFP_NOFS);
	wait_on_extent_writeback(tree, start, end);
	clear_extent_bit(tree, start, end,
			 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
			 1, 1, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(extent_invalidatepage);

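/*
 * Illustration (not part of the original file): the round-up above
 * keeps partially invalidated blocks alive.  With blocksize 4096 and
 * offset 1000, start advances by (1000 + 4095) & ~4095 = 4096, so the
 * block containing byte 1000 keeps its extent state and only the
 * fully invalidated blocks after it have their records cleared.
 */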
/*
 * simple commit_write call, set_range_dirty is used to mark both
 * the pages and the extent records as dirty
 */
int extent_commit_write(struct extent_io_tree *tree,
			struct inode *inode, struct page *page,
			unsigned from, unsigned to)
{
	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;

	set_page_extent_mapped(page);
	set_page_dirty(page);

	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		mark_inode_dirty(inode);
	}
	return 0;
}
EXPORT_SYMBOL(extent_commit_write);

int extent_prepare_write(struct extent_io_tree *tree,
			 struct inode *inode, struct page *page,
			 unsigned from, unsigned to, get_extent_t *get_extent)
{
	u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
	u64 block_start;
	u64 orig_block_start;
	u64 block_end;
	u64 cur_end;
	struct extent_map *em;
	unsigned blocksize = 1 << inode->i_blkbits;
	size_t page_offset = 0;
	size_t block_off_start;
	size_t block_off_end;
	int err = 0;
	int iocount = 0;
	int ret = 0;
	int isnew;

	set_page_extent_mapped(page);

	block_start = (page_start + from) & ~((u64)blocksize - 1);
	block_end = (page_start + to - 1) | (blocksize - 1);
	orig_block_start = block_start;

	lock_extent(tree, page_start, page_end, GFP_NOFS);
	while (block_start <= block_end) {
		em = get_extent(inode, page, page_offset, block_start,
				block_end - block_start + 1, 1);
		if (IS_ERR(em) || !em) {
			/* keep the error so callers see the failure */
			err = IS_ERR(em) ? PTR_ERR(em) : -EIO;
			goto err;
		}
		cur_end = min(block_end, extent_map_end(em) - 1);
		block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
		block_off_end = block_off_start + blocksize;
		isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);

		if (!PageUptodate(page) && isnew &&
		    (block_off_end > to || block_off_start < from)) {
			void *kaddr;

			kaddr = kmap_atomic(page, KM_USER0);
			if (block_off_end > to)
				memset(kaddr + to, 0, block_off_end - to);
			if (block_off_start < from)
				memset(kaddr + block_off_start, 0,
				       from - block_off_start);
			flush_dcache_page(page);
			kunmap_atomic(kaddr, KM_USER0);
		}
		if ((em->block_start != EXTENT_MAP_HOLE &&
		     em->block_start != EXTENT_MAP_INLINE) &&
		    !isnew && !PageUptodate(page) &&
		    (block_off_end > to || block_off_start < from) &&
		    !test_range_bit(tree, block_start, cur_end,
				    EXTENT_UPTODATE, 1)) {
			u64 sector;
			u64 extent_offset = block_start - em->start;
			size_t iosize;
			sector = (em->block_start + extent_offset) >> 9;
			iosize = (cur_end - block_start + blocksize) &
				~((u64)blocksize - 1);
			/*
			 * we've already got the extent locked, but we
			 * need to split the state such that our end_bio
			 * handler can clear the lock.
			 */
			set_extent_bit(tree, block_start,
				       block_start + iosize - 1,
				       EXTENT_LOCKED, 0, NULL, GFP_NOFS);
			ret = submit_extent_page(READ, tree, page,
						 sector, iosize, page_offset,
						 em->bdev, NULL, 1,
						 end_bio_extent_preparewrite, 0);
			iocount++;
			block_start = block_start + iosize;
		} else {
			set_extent_uptodate(tree, block_start, cur_end,
					    GFP_NOFS);
			unlock_extent(tree, block_start, cur_end, GFP_NOFS);
			block_start = cur_end + 1;
		}
		page_offset = block_start & (PAGE_CACHE_SIZE - 1);
		free_extent_map(em);
	}
	if (iocount) {
		wait_extent_bit(tree, orig_block_start,
				block_end, EXTENT_LOCKED);
	}
	check_page_uptodate(tree, page);
err:
	/* FIXME, zero out newly allocated blocks on error */
	return err;
}
EXPORT_SYMBOL(extent_prepare_write);

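/*
 * Illustration (not part of the original file): for blocksize 4096 and
 * a write of bytes 100..199 of a brand new block (from == 100,
 * to == 200), block_off_start == 0 and block_off_end == 4096, so the
 * memset above zeros bytes 0..99 and 200..4095 and no read is issued.
 * For an old, not-uptodate block covering the same range, the block is
 * instead read in via submit_extent_page() and the caller is held up
 * by the EXTENT_LOCKED wait until the end_io handler clears it.
 */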
/*
 * a helper for releasepage, this tests for areas of the page that
 * are locked or under IO and drops the related state bits if it is safe
 * to drop the page.
 */
int try_release_extent_state(struct extent_map_tree *map,
			     struct extent_io_tree *tree, struct page *page,
			     gfp_t mask)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	int ret = 1;

	if (test_range_bit(tree, start, end,
			   EXTENT_IOBITS | EXTENT_ORDERED, 0))
		ret = 0;
	else {
		if ((mask & GFP_NOFS) == GFP_NOFS)
			mask = GFP_NOFS;
		clear_extent_bit(tree, start, end, EXTENT_UPTODATE,
				 1, 1, mask);
	}
	return ret;
}
EXPORT_SYMBOL(try_release_extent_state);

/*
 * a helper for releasepage.  As long as there are no locked extents
 * in the range corresponding to the page, both state records and extent
 * map records are removed
 */
int try_release_extent_mapping(struct extent_map_tree *map,
			       struct extent_io_tree *tree, struct page *page,
			       gfp_t mask)
{
	struct extent_map *em;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;

	if ((mask & __GFP_WAIT) &&
	    page->mapping->host->i_size > 16 * 1024 * 1024) {
		u64 len;
		while (start <= end) {
			len = end - start + 1;
			spin_lock(&map->lock);
			em = lookup_extent_mapping(map, start, len);
			if (!em || IS_ERR(em)) {
				spin_unlock(&map->lock);
				break;
			}
			if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
			    em->start != start) {
				spin_unlock(&map->lock);
				free_extent_map(em);
				break;
			}
			if (!test_range_bit(tree, em->start,
					    extent_map_end(em) - 1,
					    EXTENT_LOCKED, 0)) {
				remove_extent_mapping(map, em);
				/* once for the rb tree */
				free_extent_map(em);
			}
			start = extent_map_end(em);
			spin_unlock(&map->lock);

			/* once for us */
			free_extent_map(em);
		}
	}
	return try_release_extent_state(map, tree, page, mask);
}
EXPORT_SYMBOL(try_release_extent_mapping);

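/*
 * Illustration (not part of the original file): a sketch of the
 * ->releasepage wiring.  The gfp mask gates how aggressive the helper
 * may be: without __GFP_WAIT it only drops state bits and never walks
 * extent maps.  The function name is made up; the io_tree/extent_tree
 * lookup matches how btrfs stores them in struct btrfs_inode.
 */
#if 0	/* example only */
static int example_releasepage(struct page *page, gfp_t gfp_flags)
{
	struct inode *inode = page->mapping->host;
	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
	struct extent_map_tree *map = &BTRFS_I(inode)->extent_tree;

	return try_release_extent_mapping(map, tree, page, gfp_flags);
}
#endif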
sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
		     get_extent_t *get_extent)
{
	struct inode *inode = mapping->host;
	/* cast: sector_t may be 32 bits without CONFIG_LBD */
	u64 start = (u64)iblock << inode->i_blkbits;
	sector_t sector = 0;
	struct extent_map *em;

	em = get_extent(inode, NULL, 0, start, (1 << inode->i_blkbits), 0);
	if (!em || IS_ERR(em))
		return 0;

	if (em->block_start == EXTENT_MAP_INLINE ||
	    em->block_start == EXTENT_MAP_HOLE)
		goto out;

	sector = (em->block_start + start - em->start) >> inode->i_blkbits;
out:
	free_extent_map(em);
	return sector;
}

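/*
 * Illustration (not part of the original file): despite the sector_t
 * return type, extent_bmap() speaks in filesystem blocks.  With
 * i_blkbits == 12, asking for iblock 10 looks up byte 40960; for an
 * extent starting at file byte 32768 that lives at disk byte 1048576,
 * the answer is (1048576 + 40960 - 32768) >> 12 == 258.  Holes and
 * inline extents report 0, the conventional "unmapped" bmap answer.
 */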
static inline struct page *extent_buffer_page(struct extent_buffer *eb,
					      unsigned long i)
{
	struct page *p;
	struct address_space *mapping;

	if (i == 0)
		return eb->first_page;
	i += eb->start >> PAGE_CACHE_SHIFT;
	mapping = eb->first_page->mapping;
	if (!mapping)
		return NULL;

	/*
	 * extent_buffer_page is only called after pinning the page
	 * by increasing the reference count.  So we know the page must
	 * be in the radix tree.
	 */
	rcu_read_lock();
	p = radix_tree_lookup(&mapping->page_tree, i);
	rcu_read_unlock();

	return p;
}

static inline unsigned long num_extent_pages(u64 start, u64 len)
{
	return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
		(start >> PAGE_CACHE_SHIFT);
}

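/*
 * Illustration (not part of the original file): num_extent_pages()
 * counts every page the buffer touches.  With 4K pages, start == 12288
 * and len == 16384 span bytes 12288..28671, i.e. pages 3..6, and indeed
 * ((12288 + 16384 + 4095) >> 12) - (12288 >> 12) == 7 - 3 == 4 pages.
 */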
static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
						   u64 start,
						   unsigned long len,
						   gfp_t mask)
{
	struct extent_buffer *eb = NULL;
#ifdef LEAK_DEBUG
	unsigned long flags;
#endif

	eb = kmem_cache_zalloc(extent_buffer_cache, mask);
	if (!eb)
		return NULL;
	eb->start = start;
	eb->len = len;
	mutex_init(&eb->mutex);
#ifdef LEAK_DEBUG
	spin_lock_irqsave(&leak_lock, flags);
	list_add(&eb->leak_list, &buffers);
	spin_unlock_irqrestore(&leak_lock, flags);
#endif
	atomic_set(&eb->refs, 1);

	return eb;
}

static void __free_extent_buffer(struct extent_buffer *eb)
{
#ifdef LEAK_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&leak_lock, flags);
	list_del(&eb->leak_list);
	spin_unlock_irqrestore(&leak_lock, flags);
#endif
	kmem_cache_free(extent_buffer_cache, eb);
}

struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
					  u64 start, unsigned long len,
					  struct page *page0,
					  gfp_t mask)
{
	unsigned long num_pages = num_extent_pages(start, len);
	unsigned long i;
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	struct extent_buffer *eb;
	struct extent_buffer *exists = NULL;
	struct page *p;
	struct address_space *mapping = tree->mapping;
	int uptodate = 1;

	spin_lock(&tree->buffer_lock);
	eb = buffer_search(tree, start);
	if (eb) {
		atomic_inc(&eb->refs);
		spin_unlock(&tree->buffer_lock);
		mark_page_accessed(eb->first_page);
		return eb;
	}
	spin_unlock(&tree->buffer_lock);

	eb = __alloc_extent_buffer(tree, start, len, mask);
	if (!eb)
		return NULL;

	if (page0) {
		eb->first_page = page0;
		i = 1;
		index++;
		page_cache_get(page0);
		mark_page_accessed(page0);
		set_page_extent_mapped(page0);
		set_page_extent_head(page0, len);
		uptodate = PageUptodate(page0);
	} else {
		i = 0;
	}
	for (; i < num_pages; i++, index++) {
		p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
		if (!p) {
			WARN_ON(1);
			goto free_eb;
		}
		set_page_extent_mapped(p);
		mark_page_accessed(p);
		if (i == 0) {
			eb->first_page = p;
			set_page_extent_head(p, len);
		} else {
			set_page_private(p, EXTENT_PAGE_PRIVATE);
		}
		if (!PageUptodate(p))
			uptodate = 0;
		unlock_page(p);
	}
	if (uptodate)
		eb->flags |= EXTENT_UPTODATE;
	eb->flags |= EXTENT_BUFFER_FILLED;

	spin_lock(&tree->buffer_lock);
	exists = buffer_tree_insert(tree, start, &eb->rb_node);
	if (exists) {
		/* add one reference for the caller */
		atomic_inc(&exists->refs);
		spin_unlock(&tree->buffer_lock);
		goto free_eb;
	}
	spin_unlock(&tree->buffer_lock);

	/* add one reference for the tree */
	atomic_inc(&eb->refs);
	return eb;

free_eb:
	if (!atomic_dec_and_test(&eb->refs))
		return exists;
	for (index = 1; index < i; index++)
		page_cache_release(extent_buffer_page(eb, index));
	page_cache_release(extent_buffer_page(eb, 0));
	__free_extent_buffer(eb);
	return exists;
}
EXPORT_SYMBOL(alloc_extent_buffer);

struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
					 u64 start, unsigned long len,
					 gfp_t mask)
{
	struct extent_buffer *eb;

	spin_lock(&tree->buffer_lock);
	eb = buffer_search(tree, start);
	if (eb)
		atomic_inc(&eb->refs);
	spin_unlock(&tree->buffer_lock);

	if (eb)
		mark_page_accessed(eb->first_page);

	return eb;
}
EXPORT_SYMBOL(find_extent_buffer);

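/*
 * Illustration (not part of the original file): the usual lookup
 * pattern built from the helpers above - try the cache first, allocate
 * on a miss, then make sure the pages are read.  The function name is
 * made up; get_extent is whatever callback the caller normally uses.
 */
#if 0	/* example only */
static struct extent_buffer *example_read_buffer(struct extent_io_tree *tree,
						 u64 start, unsigned long len,
						 get_extent_t *get_extent)
{
	struct extent_buffer *eb;
	int ret;

	eb = find_extent_buffer(tree, start, len, GFP_NOFS);
	if (!eb)
		eb = alloc_extent_buffer(tree, start, len, NULL, GFP_NOFS);
	if (!eb)
		return NULL;

	/* wait == 1: block until all pages are uptodate or erred */
	ret = read_extent_buffer_pages(tree, eb, 0, 1, get_extent, 0);
	if (ret) {
		free_extent_buffer(eb);	/* drop only our reference */
		return NULL;
	}
	return eb;
}
#endif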
void free_extent_buffer(struct extent_buffer *eb)
{
	if (!eb)
		return;

	if (!atomic_dec_and_test(&eb->refs))
		return;

	WARN_ON(1);
}
EXPORT_SYMBOL(free_extent_buffer);

int clear_extent_buffer_dirty(struct extent_io_tree *tree,
			      struct extent_buffer *eb)
{
	int set;
	unsigned long i;
	unsigned long num_pages;
	struct page *page;

	u64 start = eb->start;
	u64 end = start + eb->len - 1;

	set = clear_extent_dirty(tree, start, end, GFP_NOFS);
	num_pages = num_extent_pages(eb->start, eb->len);

	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		lock_page(page);
		if (i == 0)
			set_page_extent_head(page, eb->len);
		else
			set_page_private(page, EXTENT_PAGE_PRIVATE);

		/*
		 * if we're on the last page or the first page and the
		 * block isn't aligned on a page boundary, do extra checks
		 * to make sure we don't clean page that is partially dirty
		 */
		if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
		    ((i == num_pages - 1) &&
		     ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
			start = (u64)page->index << PAGE_CACHE_SHIFT;
			end = start + PAGE_CACHE_SIZE - 1;
			if (test_range_bit(tree, start, end,
					   EXTENT_DIRTY, 0)) {
				unlock_page(page);
				continue;
			}
		}
		clear_page_dirty_for_io(page);
		spin_lock_irq(&page->mapping->tree_lock);
		if (!PageDirty(page)) {
			radix_tree_tag_clear(&page->mapping->page_tree,
					     page_index(page),
					     PAGECACHE_TAG_DIRTY);
		}
		spin_unlock_irq(&page->mapping->tree_lock);
		unlock_page(page);
	}
	return 0;
}
EXPORT_SYMBOL(clear_extent_buffer_dirty);

int wait_on_extent_buffer_writeback(struct extent_io_tree *tree,
				    struct extent_buffer *eb)
{
	return wait_on_extent_writeback(tree, eb->start,
					eb->start + eb->len - 1);
}
EXPORT_SYMBOL(wait_on_extent_buffer_writeback);

int set_extent_buffer_dirty(struct extent_io_tree *tree,
			    struct extent_buffer *eb)
{
	unsigned long i;
	unsigned long num_pages;

	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = 0; i < num_pages; i++) {
		struct page *page = extent_buffer_page(eb, i);
		/* writepage may need to do something special for the
		 * first page, we have to make sure page->private is
		 * properly set.  releasepage may drop page->private
		 * on us if the page isn't already dirty.
		 */
		lock_page(page);
		if (i == 0) {
			set_page_extent_head(page, eb->len);
		} else if (PagePrivate(page) &&
			   page->private != EXTENT_PAGE_PRIVATE) {
			set_page_extent_mapped(page);
		}
		__set_page_dirty_nobuffers(extent_buffer_page(eb, i));
		set_extent_dirty(tree, page_offset(page),
				 page_offset(page) + PAGE_CACHE_SIZE - 1,
				 GFP_NOFS);
		unlock_page(page);
	}
	return 0;
}
EXPORT_SYMBOL(set_extent_buffer_dirty);

int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
				 struct extent_buffer *eb)
{
	unsigned long i;
	struct page *page;
	unsigned long num_pages;

	num_pages = num_extent_pages(eb->start, eb->len);
	eb->flags &= ~EXTENT_UPTODATE;

	clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
			      GFP_NOFS);
	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if (page)
			ClearPageUptodate(page);
	}
	return 0;
}

int set_extent_buffer_uptodate(struct extent_io_tree *tree,
			       struct extent_buffer *eb)
{
	unsigned long i;
	struct page *page;
	unsigned long num_pages;

	num_pages = num_extent_pages(eb->start, eb->len);

	set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
			    GFP_NOFS);
	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
		    ((i == num_pages - 1) &&
		     ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
			check_page_uptodate(tree, page);
			continue;
		}
		SetPageUptodate(page);
	}
	return 0;
}
EXPORT_SYMBOL(set_extent_buffer_uptodate);

int extent_range_uptodate(struct extent_io_tree *tree,
			  u64 start, u64 end)
{
	struct page *page;
	int ret;
	int pg_uptodate = 1;
	int uptodate;
	unsigned long index;

	ret = test_range_bit(tree, start, end, EXTENT_UPTODATE, 1);
	if (ret)
		return 1;
	while (start <= end) {
		index = start >> PAGE_CACHE_SHIFT;
		page = find_get_page(tree->mapping, index);
		if (!page) {
			/* guard against pages reclaimed out from under us */
			pg_uptodate = 0;
			break;
		}
		uptodate = PageUptodate(page);
		page_cache_release(page);
		if (!uptodate) {
			pg_uptodate = 0;
			break;
		}
		start += PAGE_CACHE_SIZE;
	}
	return pg_uptodate;
}

int extent_buffer_uptodate(struct extent_io_tree *tree,
			   struct extent_buffer *eb)
{
	int ret = 0;
	unsigned long num_pages;
	unsigned long i;
	struct page *page;
	int pg_uptodate = 1;

	if (eb->flags & EXTENT_UPTODATE)
		return 1;

	ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
			     EXTENT_UPTODATE, 1);
	if (ret)
		return ret;

	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if (!PageUptodate(page)) {
			pg_uptodate = 0;
			break;
		}
	}
	return pg_uptodate;
}
EXPORT_SYMBOL(extent_buffer_uptodate);

int read_extent_buffer_pages(struct extent_io_tree *tree,
			     struct extent_buffer *eb,
			     u64 start, int wait,
			     get_extent_t *get_extent, int mirror_num)
{
	unsigned long i;
	unsigned long start_i;
	struct page *page;
	int err;
	int ret = 0;
	int locked_pages = 0;
	int all_uptodate = 1;
	int inc_all_pages = 0;
	unsigned long num_pages;
	struct bio *bio = NULL;

	if (eb->flags & EXTENT_UPTODATE)
		return 0;

	if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
			   EXTENT_UPTODATE, 1)) {
		return 0;
	}

	if (start) {
		WARN_ON(start < eb->start);
		start_i = (start >> PAGE_CACHE_SHIFT) -
			(eb->start >> PAGE_CACHE_SHIFT);
	} else {
		start_i = 0;
	}

	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = start_i; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if (!wait) {
			if (!trylock_page(page))
				goto unlock_exit;
		} else {
			lock_page(page);
		}
		locked_pages++;
		if (!PageUptodate(page)) {
			all_uptodate = 0;
		}
	}
	if (all_uptodate) {
		if (start_i == 0)
			eb->flags |= EXTENT_UPTODATE;
		if (ret) {
			printk(KERN_ERR "all up to date but ret is %d\n", ret);
		}
		goto unlock_exit;
	}

	for (i = start_i; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if (inc_all_pages)
			page_cache_get(page);
		if (!PageUptodate(page)) {
			if (start_i == 0)
				inc_all_pages = 1;
			ClearPageError(page);
			err = __extent_read_full_page(tree, page,
						      get_extent, &bio,
						      mirror_num);
			if (err) {
				ret = err;
				printk(KERN_ERR "err %d from "
				       "__extent_read_full_page\n", ret);
			}
		} else {
			unlock_page(page);
		}
	}

	if (bio)
		submit_one_bio(READ, bio, mirror_num);

	if (ret || !wait) {
		if (ret)
			printk(KERN_ERR "ret %d wait %d returning\n",
			       ret, wait);
		return ret;
	}
	for (i = start_i; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		wait_on_page_locked(page);
		if (!PageUptodate(page)) {
			printk(KERN_ERR "page not uptodate after "
			       "wait_on_page_locked\n");
			ret = -EIO;
		}
	}
	if (!ret)
		eb->flags |= EXTENT_UPTODATE;
	return ret;

unlock_exit:
	i = start_i;
	while (locked_pages > 0) {
		page = extent_buffer_page(eb, i);
		i++;
		unlock_page(page);
		locked_pages--;
	}
	return ret;
}
EXPORT_SYMBOL(read_extent_buffer_pages);

void read_extent_buffer(struct extent_buffer *eb, void *dstv,
			unsigned long start,
			unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *dst = (char *)dstv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);

		cur = min(len, (PAGE_CACHE_SIZE - offset));
		kaddr = kmap_atomic(page, KM_USER1);
		memcpy(dst, kaddr + offset, cur);
		kunmap_atomic(kaddr, KM_USER1);

		dst += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(read_extent_buffer);

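/*
 * Illustration (not part of the original file): read_extent_buffer()
 * is how on-disk structures are pulled out of a buffer's pages, e.g.
 * copying a struct btrfs_disk_key out at some byte offset.  The
 * function name and offset are placeholders.
 */
#if 0	/* example only */
static void example_read_key(struct extent_buffer *eb, unsigned long offset,
			     struct btrfs_disk_key *key)
{
	read_extent_buffer(eb, key, offset, sizeof(*key));
}
#endif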
int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
			      unsigned long min_len, char **token, char **map,
			      unsigned long *map_start,
			      unsigned long *map_len, int km)
{
	size_t offset = start & (PAGE_CACHE_SIZE - 1);
	char *kaddr;
	struct page *p;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
	unsigned long end_i = (start_offset + start + min_len - 1) >>
		PAGE_CACHE_SHIFT;

	if (i != end_i)
		return -EINVAL;

	if (i == 0) {
		offset = start_offset;
		*map_start = 0;
	} else {
		offset = 0;
		*map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
	}
	if (start + min_len > eb->len) {
		printk(KERN_ERR "bad mapping eb start %Lu len %lu, "
		       "wanted %lu %lu\n", eb->start, eb->len, start, min_len);
		WARN_ON(1);
	}

	p = extent_buffer_page(eb, i);
	kaddr = kmap_atomic(p, km);
	*token = kaddr;
	*map = kaddr + offset;
	*map_len = PAGE_CACHE_SIZE - offset;
	return 0;
}
EXPORT_SYMBOL(map_private_extent_buffer);

int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
		      unsigned long min_len,
		      char **token, char **map,
		      unsigned long *map_start,
		      unsigned long *map_len, int km)
{
	int err;
	int save = 0;

	if (eb->map_token) {
		unmap_extent_buffer(eb, eb->map_token, km);
		eb->map_token = NULL;
		save = 1;
	}
	err = map_private_extent_buffer(eb, start, min_len, token, map,
					map_start, map_len, km);
	if (!err && save) {
		eb->map_token = *token;
		eb->kaddr = *map;
		eb->map_start = *map_start;
		eb->map_len = *map_len;
	}
	return err;
}
EXPORT_SYMBOL(map_extent_buffer);

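/*
 * Illustration (not part of the original file): the map/unmap pair
 * gives callers a flat kernel pointer into one page of the buffer;
 * min_len must not cross a page boundary or -EINVAL comes back.  The
 * function name is made up and the use of get_unaligned() is an
 * assumption about how a caller would read a raw u64.
 */
#if 0	/* example only */
static u64 example_read_u64(struct extent_buffer *eb, unsigned long start)
{
	char *token;
	char *kaddr;
	unsigned long map_start;
	unsigned long map_len;
	u64 val = 0;

	if (!map_extent_buffer(eb, start, sizeof(u64), &token, &kaddr,
			       &map_start, &map_len, KM_USER1)) {
		/* kaddr now points at byte "start" of the buffer */
		val = get_unaligned((u64 *)kaddr);
		unmap_extent_buffer(eb, token, KM_USER1);
	}
	return val;
}
#endif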
void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
{
	kunmap_atomic(token, km);
}
EXPORT_SYMBOL(unmap_extent_buffer);

int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
			 unsigned long start,
			 unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *ptr = (char *)ptrv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
	int ret = 0;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);

		cur = min(len, (PAGE_CACHE_SIZE - offset));

		kaddr = kmap_atomic(page, KM_USER0);
		ret = memcmp(ptr, kaddr + offset, cur);
		kunmap_atomic(kaddr, KM_USER0);
		if (ret)
			break;

		ptr += cur;
		len -= cur;
		offset = 0;
		i++;
	}
	return ret;
}
EXPORT_SYMBOL(memcmp_extent_buffer);

void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
			 unsigned long start, unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *src = (char *)srcv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, PAGE_CACHE_SIZE - offset);
		kaddr = kmap_atomic(page, KM_USER1);
		memcpy(kaddr + offset, src, cur);
		kunmap_atomic(kaddr, KM_USER1);

		src += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(write_extent_buffer);

void memset_extent_buffer(struct extent_buffer *eb, char c,
			  unsigned long start, unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, PAGE_CACHE_SIZE - offset);
		kaddr = kmap_atomic(page, KM_USER0);
		memset(kaddr + offset, c, cur);
		kunmap_atomic(kaddr, KM_USER0);

		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(memset_extent_buffer);

void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
			unsigned long dst_offset, unsigned long src_offset,
			unsigned long len)
{
	u64 dst_len = dst->len;
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;

	WARN_ON(src->len != dst_len);

	offset = (start_offset + dst_offset) &
		((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(dst, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));

		kaddr = kmap_atomic(page, KM_USER0);
		read_extent_buffer(src, kaddr + offset, src_offset, cur);
		kunmap_atomic(kaddr, KM_USER0);

		src_offset += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(copy_extent_buffer);

static void move_pages(struct page *dst_page, struct page *src_page,
		       unsigned long dst_off, unsigned long src_off,
		       unsigned long len)
{
	char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
	if (dst_page == src_page) {
		memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
	} else {
		char *src_kaddr = kmap_atomic(src_page, KM_USER1);
		char *p = dst_kaddr + dst_off + len;
		char *s = src_kaddr + src_off + len;

		while (len--)
			*--p = *--s;

		kunmap_atomic(src_kaddr, KM_USER1);
	}
	kunmap_atomic(dst_kaddr, KM_USER0);
}

static void copy_pages(struct page *dst_page, struct page *src_page,
		       unsigned long dst_off, unsigned long src_off,
		       unsigned long len)
{
	char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
	char *src_kaddr;

	if (dst_page != src_page)
		src_kaddr = kmap_atomic(src_page, KM_USER1);
	else
		src_kaddr = dst_kaddr;

	memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
	kunmap_atomic(dst_kaddr, KM_USER0);
	if (dst_page != src_page)
		kunmap_atomic(src_kaddr, KM_USER1);
}

void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			  unsigned long src_offset, unsigned long len)
{
	size_t cur;
	size_t dst_off_in_page;
	size_t src_off_in_page;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long dst_i;
	unsigned long src_i;

	if (src_offset + len > dst->len) {
		printk(KERN_ERR "memcpy bogus src_offset %lu move len %lu "
		       "len %lu\n", src_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset + len > dst->len) {
		printk(KERN_ERR "memcpy bogus dst_offset %lu move len %lu "
		       "len %lu\n", dst_offset, len, dst->len);
		BUG_ON(1);
	}

	while (len > 0) {
		dst_off_in_page = (start_offset + dst_offset) &
			((unsigned long)PAGE_CACHE_SIZE - 1);
		src_off_in_page = (start_offset + src_offset) &
			((unsigned long)PAGE_CACHE_SIZE - 1);

		dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
		src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;

		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
					       src_off_in_page));
		cur = min_t(unsigned long, cur,
			    (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));

		copy_pages(extent_buffer_page(dst, dst_i),
			   extent_buffer_page(dst, src_i),
			   dst_off_in_page, src_off_in_page, cur);

		src_offset += cur;
		dst_offset += cur;
		len -= cur;
	}
}
EXPORT_SYMBOL(memcpy_extent_buffer);

void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len)
{
	size_t cur;
	size_t dst_off_in_page;
	size_t src_off_in_page;
	unsigned long dst_end = dst_offset + len - 1;
	unsigned long src_end = src_offset + len - 1;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long dst_i;
	unsigned long src_i;

	if (src_offset + len > dst->len) {
		printk(KERN_ERR "memmove bogus src_offset %lu move len %lu "
		       "len %lu\n", src_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset + len > dst->len) {
		printk(KERN_ERR "memmove bogus dst_offset %lu move len %lu "
		       "len %lu\n", dst_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset < src_offset) {
		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
		return;
	}
	while (len > 0) {
		dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
		src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;

		dst_off_in_page = (start_offset + dst_end) &
			((unsigned long)PAGE_CACHE_SIZE - 1);
		src_off_in_page = (start_offset + src_end) &
			((unsigned long)PAGE_CACHE_SIZE - 1);

		cur = min_t(unsigned long, len, src_off_in_page + 1);
		cur = min(cur, dst_off_in_page + 1);
		move_pages(extent_buffer_page(dst, dst_i),
			   extent_buffer_page(dst, src_i),
			   dst_off_in_page - cur + 1,
			   src_off_in_page - cur + 1, cur);

		dst_end -= cur;
		src_end -= cur;
		len -= cur;
	}
}
EXPORT_SYMBOL(memmove_extent_buffer);

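/*
 * Illustration (not part of the original file): memmove_extent_buffer()
 * exists for the overlapping case.  Shifting bytes 0..99 up to 10..109
 * inside one buffer must copy backwards (move_pages() walks from the
 * tail, as above) or the head would overwrite bytes it still needs;
 * memcpy_extent_buffer() is only safe when dst_offset <= src_offset or
 * the ranges do not overlap.
 */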
int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
{
	u64 start = page_offset(page);
	struct extent_buffer *eb;
	int ret = 1;
	unsigned long i;
	unsigned long num_pages;

	spin_lock(&tree->buffer_lock);
	eb = buffer_search(tree, start);
	if (!eb)
		goto out;

	if (atomic_read(&eb->refs) > 1) {
		ret = 0;
		goto out;
	}
	/* at this point we can safely release the extent buffer */
	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = 0; i < num_pages; i++)
		page_cache_release(extent_buffer_page(eb, i));
	rb_erase(&eb->rb_node, &tree->buffer);
	__free_extent_buffer(eb);
out:
	spin_unlock(&tree->buffer_lock);
	return ret;
}
EXPORT_SYMBOL(try_release_extent_buffer);