Btrfs: optimize btrget/set/removexattr
fs/btrfs/extent_io.c  [linux-2.6-block.git]
1#include <linux/bitops.h>
2#include <linux/slab.h>
3#include <linux/bio.h>
4#include <linux/mm.h>
5#include <linux/gfp.h>
6#include <linux/pagemap.h>
7#include <linux/page-flags.h>
8#include <linux/module.h>
9#include <linux/spinlock.h>
10#include <linux/blkdev.h>
11#include <linux/swap.h>
12#include <linux/version.h>
13#include <linux/writeback.h>
14#include <linux/pagevec.h>
15#include "extent_io.h"
16#include "extent_map.h"
2db04966 17#include "compat.h"
18#include "ctree.h"
19#include "btrfs_inode.h"
20
21/* temporary define until extent_map moves out of btrfs */
22struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
23 unsigned long extra_flags,
24 void (*ctor)(void *, struct kmem_cache *,
25 unsigned long));
26
27static struct kmem_cache *extent_state_cache;
28static struct kmem_cache *extent_buffer_cache;
29
30static LIST_HEAD(buffers);
31static LIST_HEAD(states);
2d2ae547 32static spinlock_t leak_lock = SPIN_LOCK_UNLOCKED;
34#define BUFFER_LRU_MAX 64
35
36struct tree_entry {
37 u64 start;
38 u64 end;
39 struct rb_node rb_node;
40};
41
42struct extent_page_data {
43 struct bio *bio;
44 struct extent_io_tree *tree;
45 get_extent_t *get_extent;
46};
47
48int __init extent_io_init(void)
49{
50 extent_state_cache = btrfs_cache_create("extent_state",
51 sizeof(struct extent_state), 0,
52 NULL);
53 if (!extent_state_cache)
54 return -ENOMEM;
55
56 extent_buffer_cache = btrfs_cache_create("extent_buffers",
57 sizeof(struct extent_buffer), 0,
58 NULL);
59 if (!extent_buffer_cache)
60 goto free_state_cache;
61 return 0;
62
63free_state_cache:
64 kmem_cache_destroy(extent_state_cache);
65 return -ENOMEM;
66}
67
68void extent_io_exit(void)
69{
70 struct extent_state *state;
2d2ae547 71 struct extent_buffer *eb;
72
73 while (!list_empty(&states)) {
2d2ae547 74 state = list_entry(states.next, struct extent_state, leak_list);
70dec807 75 printk("state leak: start %Lu end %Lu state %lu in tree %p refs %d\n", state->start, state->end, state->state, state->tree, atomic_read(&state->refs));
2d2ae547 76 list_del(&state->leak_list);
77 kmem_cache_free(extent_state_cache, state);
78
79 }
80
81 while (!list_empty(&buffers)) {
82 eb = list_entry(buffers.next, struct extent_buffer, leak_list);
83 printk("buffer leak start %Lu len %lu refs %d\n", eb->start, eb->len, atomic_read(&eb->refs));
84 list_del(&eb->leak_list);
85 kmem_cache_free(extent_buffer_cache, eb);
86 }
87 if (extent_state_cache)
88 kmem_cache_destroy(extent_state_cache);
89 if (extent_buffer_cache)
90 kmem_cache_destroy(extent_buffer_cache);
91}
92
93void extent_io_tree_init(struct extent_io_tree *tree,
94 struct address_space *mapping, gfp_t mask)
95{
96 tree->state.rb_node = NULL;
6af118ce 97 tree->buffer.rb_node = NULL;
98 tree->ops = NULL;
99 tree->dirty_bytes = 0;
70dec807 100 spin_lock_init(&tree->lock);
6af118ce 101 spin_lock_init(&tree->buffer_lock);
d1310b2e 102 tree->mapping = mapping;
103}
104EXPORT_SYMBOL(extent_io_tree_init);
105
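/*
 * Illustrative sketch (not part of the original file): typical setup of a
 * per-inode tree.  The my_extent_io_ops hook table named below is
 * hypothetical; btrfs wires in its own struct extent_io_ops here.
 *
 *	struct extent_io_tree io_tree;
 *
 *	extent_io_tree_init(&io_tree, inode->i_mapping, GFP_NOFS);
 *	io_tree.ops = &my_extent_io_ops;
 */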
106struct extent_state *alloc_extent_state(gfp_t mask)
107{
108 struct extent_state *state;
2d2ae547 109 unsigned long flags;
110
111 state = kmem_cache_alloc(extent_state_cache, mask);
2b114d1d 112 if (!state)
113 return state;
114 state->state = 0;
d1310b2e 115 state->private = 0;
70dec807 116 state->tree = NULL;
117 spin_lock_irqsave(&leak_lock, flags);
118 list_add(&state->leak_list, &states);
119 spin_unlock_irqrestore(&leak_lock, flags);
120
121 atomic_set(&state->refs, 1);
122 init_waitqueue_head(&state->wq);
123 return state;
124}
125EXPORT_SYMBOL(alloc_extent_state);
126
127void free_extent_state(struct extent_state *state)
128{
129 if (!state)
130 return;
131 if (atomic_dec_and_test(&state->refs)) {
2d2ae547 132 unsigned long flags;
70dec807 133 WARN_ON(state->tree);
134 spin_lock_irqsave(&leak_lock, flags);
135 list_del(&state->leak_list);
136 spin_unlock_irqrestore(&leak_lock, flags);
137 kmem_cache_free(extent_state_cache, state);
138 }
139}
140EXPORT_SYMBOL(free_extent_state);
141
142static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
143 struct rb_node *node)
144{
145 struct rb_node ** p = &root->rb_node;
146 struct rb_node * parent = NULL;
147 struct tree_entry *entry;
148
149 while(*p) {
150 parent = *p;
151 entry = rb_entry(parent, struct tree_entry, rb_node);
152
153 if (offset < entry->start)
154 p = &(*p)->rb_left;
155 else if (offset > entry->end)
156 p = &(*p)->rb_right;
157 else
158 return parent;
159 }
160
161 entry = rb_entry(node, struct tree_entry, rb_node);
162 rb_link_node(node, parent, p);
163 rb_insert_color(node, root);
164 return NULL;
165}
166
80ea96b1 167static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
168 struct rb_node **prev_ret,
169 struct rb_node **next_ret)
170{
80ea96b1 171 struct rb_root *root = &tree->state;
172 struct rb_node * n = root->rb_node;
173 struct rb_node *prev = NULL;
174 struct rb_node *orig_prev = NULL;
175 struct tree_entry *entry;
176 struct tree_entry *prev_entry = NULL;
177
178 while(n) {
179 entry = rb_entry(n, struct tree_entry, rb_node);
180 prev = n;
181 prev_entry = entry;
182
183 if (offset < entry->start)
184 n = n->rb_left;
185 else if (offset > entry->end)
186 n = n->rb_right;
80ea96b1 187 else {
d1310b2e 188 return n;
80ea96b1 189 }
190 }
191
192 if (prev_ret) {
193 orig_prev = prev;
194 while(prev && offset > prev_entry->end) {
195 prev = rb_next(prev);
196 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
197 }
198 *prev_ret = prev;
199 prev = orig_prev;
200 }
201
202 if (next_ret) {
203 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
204 while(prev && offset < prev_entry->start) {
205 prev = rb_prev(prev);
206 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
207 }
208 *next_ret = prev;
209 }
210 return NULL;
211}
212
213static inline struct rb_node *tree_search(struct extent_io_tree *tree,
214 u64 offset)
d1310b2e 215{
70dec807 216 struct rb_node *prev = NULL;
d1310b2e 217 struct rb_node *ret;
70dec807 218
219 ret = __etree_search(tree, offset, &prev, NULL);
220 if (!ret) {
d1310b2e 221 return prev;
80ea96b1 222 }
223 return ret;
224}
225
226static struct extent_buffer *buffer_tree_insert(struct extent_io_tree *tree,
227 u64 offset, struct rb_node *node)
228{
229 struct rb_root *root = &tree->buffer;
230 struct rb_node ** p = &root->rb_node;
231 struct rb_node * parent = NULL;
232 struct extent_buffer *eb;
233
234 while(*p) {
235 parent = *p;
236 eb = rb_entry(parent, struct extent_buffer, rb_node);
237
238 if (offset < eb->start)
239 p = &(*p)->rb_left;
240 else if (offset > eb->start)
241 p = &(*p)->rb_right;
242 else
243 return eb;
244 }
245
246 rb_link_node(node, parent, p);
247 rb_insert_color(node, root);
248 return NULL;
249}
250
251static struct extent_buffer *buffer_search(struct extent_io_tree *tree,
252 u64 offset)
253{
254 struct rb_root *root = &tree->buffer;
255 struct rb_node * n = root->rb_node;
256 struct extent_buffer *eb;
257
258 while(n) {
259 eb = rb_entry(n, struct extent_buffer, rb_node);
260 if (offset < eb->start)
261 n = n->rb_left;
262 else if (offset > eb->start)
263 n = n->rb_right;
264 else
265 return eb;
266 }
267 return NULL;
268}
269
270/*
271 * utility function to look for merge candidates inside a given range.
272 * Any extents with matching state are merged together into a single
273 * extent in the tree. Extents with EXTENT_IOBITS set in their state field
274 * are not merged because the end_io handlers need to be able to do
275 * operations on them without sleeping (or doing allocations/splits).
276 *
277 * This should be called with the tree lock held.
278 */
279static int merge_state(struct extent_io_tree *tree,
280 struct extent_state *state)
281{
282 struct extent_state *other;
283 struct rb_node *other_node;
284
285 if (state->state & EXTENT_IOBITS)
286 return 0;
287
288 other_node = rb_prev(&state->rb_node);
289 if (other_node) {
290 other = rb_entry(other_node, struct extent_state, rb_node);
291 if (other->end == state->start - 1 &&
292 other->state == state->state) {
293 state->start = other->start;
70dec807 294 other->tree = NULL;
295 rb_erase(&other->rb_node, &tree->state);
296 free_extent_state(other);
297 }
298 }
299 other_node = rb_next(&state->rb_node);
300 if (other_node) {
301 other = rb_entry(other_node, struct extent_state, rb_node);
302 if (other->start == state->end + 1 &&
303 other->state == state->state) {
304 other->start = state->start;
70dec807 305 state->tree = NULL;
306 rb_erase(&state->rb_node, &tree->state);
307 free_extent_state(state);
308 }
309 }
310 return 0;
311}
312
313static void set_state_cb(struct extent_io_tree *tree,
314 struct extent_state *state,
315 unsigned long bits)
316{
317 if (tree->ops && tree->ops->set_bit_hook) {
318 tree->ops->set_bit_hook(tree->mapping->host, state->start,
b0c68f8b 319 state->end, state->state, bits);
320 }
321}
322
323static void clear_state_cb(struct extent_io_tree *tree,
324 struct extent_state *state,
325 unsigned long bits)
326{
327 if (tree->ops && tree->ops->clear_bit_hook) {
328 tree->ops->clear_bit_hook(tree->mapping->host, state->start,
b0c68f8b 329 state->end, state->state, bits);
330 }
331}
332
333/*
334 * insert an extent_state struct into the tree. 'bits' are set on the
335 * struct before it is inserted.
336 *
337 * This may return -EEXIST if the extent is already there, in which case the
338 * state struct is freed.
339 *
340 * The tree lock is not taken internally. This is a utility function and
341 * probably isn't what you want to call (see set/clear_extent_bit).
342 */
343static int insert_state(struct extent_io_tree *tree,
344 struct extent_state *state, u64 start, u64 end,
345 int bits)
346{
347 struct rb_node *node;
348
349 if (end < start) {
350 printk("end < start %Lu %Lu\n", end, start);
351 WARN_ON(1);
352 }
353 if (bits & EXTENT_DIRTY)
354 tree->dirty_bytes += end - start + 1;
b0c68f8b 355 set_state_cb(tree, state, bits);
356 state->state |= bits;
357 state->start = start;
358 state->end = end;
359 node = tree_insert(&tree->state, end, &state->rb_node);
360 if (node) {
361 struct extent_state *found;
362 found = rb_entry(node, struct extent_state, rb_node);
363 printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, start, end);
364 free_extent_state(state);
365 return -EEXIST;
366 }
70dec807 367 state->tree = tree;
368 merge_state(tree, state);
369 return 0;
370}
371
372/*
373 * split a given extent state struct in two, inserting the preallocated
374 * struct 'prealloc' as the newly created second half. 'split' indicates an
375 * offset inside 'orig' where it should be split.
376 *
377 * Before calling,
378 * the tree has 'orig' at [orig->start, orig->end]. After calling, there
379 * are two extent state structs in the tree:
380 * prealloc: [orig->start, split - 1]
381 * orig: [ split, orig->end ]
382 *
383 * The tree locks are not taken by this function. They need to be held
384 * by the caller.
385 */
386static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
387 struct extent_state *prealloc, u64 split)
388{
389 struct rb_node *node;
390 prealloc->start = orig->start;
391 prealloc->end = split - 1;
392 prealloc->state = orig->state;
393 orig->start = split;
394
395 node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
396 if (node) {
397 struct extent_state *found;
398 found = rb_entry(node, struct extent_state, rb_node);
399 printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, prealloc->start, prealloc->end);
400 free_extent_state(prealloc);
401 return -EEXIST;
402 }
70dec807 403 prealloc->tree = tree;
404 return 0;
405}
406
407/*
408 * utility function to clear some bits in an extent state struct.
409 * it will optionally wake up anyone waiting on this state (wake == 1), or
410 * forcibly remove the state from the tree (delete == 1).
411 *
412 * If no bits are set on the state struct after clearing things, the
413 * struct is freed and removed from the tree
414 */
415static int clear_state_bit(struct extent_io_tree *tree,
416 struct extent_state *state, int bits, int wake,
417 int delete)
418{
419 int ret = state->state & bits;
420
421 if ((bits & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
422 u64 range = state->end - state->start + 1;
423 WARN_ON(range > tree->dirty_bytes);
424 tree->dirty_bytes -= range;
425 }
291d673e 426 clear_state_cb(tree, state, bits);
b0c68f8b 427 state->state &= ~bits;
428 if (wake)
429 wake_up(&state->wq);
430 if (delete || state->state == 0) {
70dec807 431 if (state->tree) {
ae9d1285 432 clear_state_cb(tree, state, state->state);
d1310b2e 433 rb_erase(&state->rb_node, &tree->state);
70dec807 434 state->tree = NULL;
435 free_extent_state(state);
436 } else {
437 WARN_ON(1);
438 }
439 } else {
440 merge_state(tree, state);
441 }
442 return ret;
443}
444
445/*
446 * clear some bits on a range in the tree. This may require splitting
447 * or inserting elements in the tree, so the gfp mask is used to
448 * indicate which allocations or sleeping are allowed.
449 *
450 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
451 * the given range from the tree regardless of state (ie for truncate).
452 *
453 * the range [start, end] is inclusive.
454 *
455 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
456 * bits were already set, or zero if none of the bits were already set.
457 */
458int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
459 int bits, int wake, int delete, gfp_t mask)
460{
461 struct extent_state *state;
462 struct extent_state *prealloc = NULL;
463 struct rb_node *node;
464 unsigned long flags;
465 int err;
466 int set = 0;
467
468again:
469 if (!prealloc && (mask & __GFP_WAIT)) {
470 prealloc = alloc_extent_state(mask);
471 if (!prealloc)
472 return -ENOMEM;
473 }
474
70dec807 475 spin_lock_irqsave(&tree->lock, flags);
476 /*
477 * this search will find the extents that end after
478 * our range starts
479 */
80ea96b1 480 node = tree_search(tree, start);
481 if (!node)
482 goto out;
483 state = rb_entry(node, struct extent_state, rb_node);
484 if (state->start > end)
485 goto out;
486 WARN_ON(state->end < start);
487
488 /*
489 * | ---- desired range ---- |
490 * | state | or
491 * | ------------- state -------------- |
492 *
493 * We need to split the extent we found, and may flip
494 * bits on second half.
495 *
496 * If the extent we found extends past our range, we
497 * just split and search again. It'll get split again
498 * the next time though.
499 *
500 * If the extent we found is inside our range, we clear
501 * the desired bit on it.
502 */
503
504 if (state->start < start) {
505 if (!prealloc)
506 prealloc = alloc_extent_state(GFP_ATOMIC);
507 err = split_state(tree, state, prealloc, start);
508 BUG_ON(err == -EEXIST);
509 prealloc = NULL;
510 if (err)
511 goto out;
512 if (state->end <= end) {
513 start = state->end + 1;
514 set |= clear_state_bit(tree, state, bits,
515 wake, delete);
516 } else {
517 start = state->start;
518 }
519 goto search_again;
520 }
521 /*
522 * | ---- desired range ---- |
523 * | state |
524 * We need to split the extent, and clear the bit
525 * on the first half
526 */
527 if (state->start <= end && state->end > end) {
528 if (!prealloc)
529 prealloc = alloc_extent_state(GFP_ATOMIC);
530 err = split_state(tree, state, prealloc, end + 1);
531 BUG_ON(err == -EEXIST);
532
533 if (wake)
534 wake_up(&state->wq);
535 set |= clear_state_bit(tree, prealloc, bits,
536 wake, delete);
537 prealloc = NULL;
538 goto out;
539 }
540
541 start = state->end + 1;
542 set |= clear_state_bit(tree, state, bits, wake, delete);
543 goto search_again;
544
545out:
70dec807 546 spin_unlock_irqrestore(&tree->lock, flags);
547 if (prealloc)
548 free_extent_state(prealloc);
549
550 return set;
551
552search_again:
553 if (start > end)
554 goto out;
70dec807 555 spin_unlock_irqrestore(&tree->lock, flags);
556 if (mask & __GFP_WAIT)
557 cond_resched();
558 goto again;
559}
560EXPORT_SYMBOL(clear_extent_bit);
561
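/*
 * Illustrative sketch (not part of the original file): the positive return
 * value doubles as a "were any of these bits actually set?" answer, so a
 * caller can test-and-clear a range in one pass.  The helper named below
 * is a made-up placeholder:
 *
 *	int was_dirty;
 *
 *	was_dirty = clear_extent_bit(tree, start, end,
 *				     EXTENT_DIRTY | EXTENT_DELALLOC,
 *				     1, 0, GFP_NOFS);
 *	if (was_dirty > 0)
 *		handle_previously_dirty_range(start, end);
 */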
562static int wait_on_state(struct extent_io_tree *tree,
563 struct extent_state *state)
564{
565 DEFINE_WAIT(wait);
566 prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
70dec807 567 spin_unlock_irq(&tree->lock);
d1310b2e 568 schedule();
70dec807 569 spin_lock_irq(&tree->lock);
570 finish_wait(&state->wq, &wait);
571 return 0;
572}
573
574/*
575 * waits for one or more bits to clear on a range in the state tree.
576 * The range [start, end] is inclusive.
577 * The tree lock is taken by this function
578 */
579int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
580{
581 struct extent_state *state;
582 struct rb_node *node;
583
70dec807 584 spin_lock_irq(&tree->lock);
585again:
586 while (1) {
587 /*
588 * this search will find all the extents that end after
589 * our range starts
590 */
80ea96b1 591 node = tree_search(tree, start);
592 if (!node)
593 break;
594
595 state = rb_entry(node, struct extent_state, rb_node);
596
597 if (state->start > end)
598 goto out;
599
600 if (state->state & bits) {
601 start = state->start;
602 atomic_inc(&state->refs);
603 wait_on_state(tree, state);
604 free_extent_state(state);
605 goto again;
606 }
607 start = state->end + 1;
608
609 if (start > end)
610 break;
611
612 if (need_resched()) {
70dec807 613 spin_unlock_irq(&tree->lock);
d1310b2e 614 cond_resched();
70dec807 615 spin_lock_irq(&tree->lock);
616 }
617 }
618out:
70dec807 619 spin_unlock_irq(&tree->lock);
620 return 0;
621}
622EXPORT_SYMBOL(wait_extent_bit);
623
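/*
 * Illustrative sketch (not part of the original file): block until no part
 * of an inclusive byte range still has EXTENT_LOCKED set:
 *
 *	wait_extent_bit(tree, start, end, EXTENT_LOCKED);
 */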
624static void set_state_bits(struct extent_io_tree *tree,
625 struct extent_state *state,
626 int bits)
627{
628 if ((bits & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
629 u64 range = state->end - state->start + 1;
630 tree->dirty_bytes += range;
631 }
291d673e 632 set_state_cb(tree, state, bits);
b0c68f8b 633 state->state |= bits;
634}
635
636/*
637 * set some bits on a range in the tree. This may require allocations
638 * or sleeping, so the gfp mask is used to indicate what is allowed.
639 *
640 * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
641 * range already has the desired bits set. The start of the existing
642 * range is returned in failed_start in this case.
643 *
644 * [start, end] is inclusive
645 * This takes the tree lock.
646 */
647int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits,
648 int exclusive, u64 *failed_start, gfp_t mask)
649{
650 struct extent_state *state;
651 struct extent_state *prealloc = NULL;
652 struct rb_node *node;
653 unsigned long flags;
654 int err = 0;
655 int set;
656 u64 last_start;
657 u64 last_end;
658again:
659 if (!prealloc && (mask & __GFP_WAIT)) {
660 prealloc = alloc_extent_state(mask);
661 if (!prealloc)
662 return -ENOMEM;
663 }
664
70dec807 665 spin_lock_irqsave(&tree->lock, flags);
666 /*
667 * this search will find all the extents that end after
668 * our range starts.
669 */
80ea96b1 670 node = tree_search(tree, start);
671 if (!node) {
672 err = insert_state(tree, prealloc, start, end, bits);
673 prealloc = NULL;
674 BUG_ON(err == -EEXIST);
675 goto out;
676 }
677
678 state = rb_entry(node, struct extent_state, rb_node);
679 last_start = state->start;
680 last_end = state->end;
681
682 /*
683 * | ---- desired range ---- |
684 * | state |
685 *
686 * Just lock what we found and keep going
687 */
688 if (state->start == start && state->end <= end) {
689 set = state->state & bits;
690 if (set && exclusive) {
691 *failed_start = state->start;
692 err = -EEXIST;
693 goto out;
694 }
695 set_state_bits(tree, state, bits);
696 start = state->end + 1;
697 merge_state(tree, state);
698 goto search_again;
699 }
700
701 /*
702 * | ---- desired range ---- |
703 * | state |
704 * or
705 * | ------------- state -------------- |
706 *
707 * We need to split the extent we found, and may flip bits on
708 * second half.
709 *
710 * If the extent we found extends past our
711 * range, we just split and search again. It'll get split
712 * again the next time though.
713 *
714 * If the extent we found is inside our range, we set the
715 * desired bit on it.
716 */
717 if (state->start < start) {
718 set = state->state & bits;
719 if (exclusive && set) {
720 *failed_start = start;
721 err = -EEXIST;
722 goto out;
723 }
724 err = split_state(tree, state, prealloc, start);
725 BUG_ON(err == -EEXIST);
726 prealloc = NULL;
727 if (err)
728 goto out;
729 if (state->end <= end) {
730 set_state_bits(tree, state, bits);
731 start = state->end + 1;
732 merge_state(tree, state);
733 } else {
734 start = state->start;
735 }
736 goto search_again;
737 }
738 /*
739 * | ---- desired range ---- |
740 * | state | or | state |
741 *
742 * There's a hole, we need to insert something in it and
743 * ignore the extent we found.
744 */
745 if (state->start > start) {
746 u64 this_end;
747 if (end < last_start)
748 this_end = end;
749 else
750 this_end = last_start - 1;
751 err = insert_state(tree, prealloc, start, this_end,
752 bits);
753 prealloc = NULL;
754 BUG_ON(err == -EEXIST);
755 if (err)
756 goto out;
757 start = this_end + 1;
758 goto search_again;
759 }
760 /*
761 * | ---- desired range ---- |
762 * | state |
763 * We need to split the extent, and set the bit
764 * on the first half
765 */
766 if (state->start <= end && state->end > end) {
767 set = state->state & bits;
768 if (exclusive && set) {
769 *failed_start = start;
770 err = -EEXIST;
771 goto out;
772 }
773 err = split_state(tree, state, prealloc, end + 1);
774 BUG_ON(err == -EEXIST);
775
776 set_state_bits(tree, prealloc, bits);
777 merge_state(tree, prealloc);
778 prealloc = NULL;
779 goto out;
780 }
781
782 goto search_again;
783
784out:
70dec807 785 spin_unlock_irqrestore(&tree->lock, flags);
786 if (prealloc)
787 free_extent_state(prealloc);
788
789 return err;
790
791search_again:
792 if (start > end)
793 goto out;
70dec807 794 spin_unlock_irqrestore(&tree->lock, flags);
795 if (mask & __GFP_WAIT)
796 cond_resched();
797 goto again;
798}
799EXPORT_SYMBOL(set_extent_bit);
800
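/*
 * Illustrative sketch (not part of the original file): with exclusive == 1
 * a collision reports where the existing range starts, which is exactly
 * how lock_extent() below retries:
 *
 *	u64 failed_start;
 *	int err;
 *
 *	err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
 *			     &failed_start, GFP_NOFS);
 *	if (err == -EEXIST)
 *		wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
 */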
801/* wrappers around set/clear extent bit */
802int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
803 gfp_t mask)
804{
805 return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
806 mask);
807}
808EXPORT_SYMBOL(set_extent_dirty);
809
810int set_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
811 gfp_t mask)
812{
813 return set_extent_bit(tree, start, end, EXTENT_ORDERED, 0, NULL, mask);
814}
815EXPORT_SYMBOL(set_extent_ordered);
816
817int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
818 int bits, gfp_t mask)
819{
820 return set_extent_bit(tree, start, end, bits, 0, NULL,
821 mask);
822}
823EXPORT_SYMBOL(set_extent_bits);
824
825int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
826 int bits, gfp_t mask)
827{
828 return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
829}
830EXPORT_SYMBOL(clear_extent_bits);
831
832int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
833 gfp_t mask)
834{
835 return set_extent_bit(tree, start, end,
836 EXTENT_DELALLOC | EXTENT_DIRTY,
837 0, NULL, mask);
838}
839EXPORT_SYMBOL(set_extent_delalloc);
840
841int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
842 gfp_t mask)
843{
844 return clear_extent_bit(tree, start, end,
845 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
846}
847EXPORT_SYMBOL(clear_extent_dirty);
848
849int clear_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
850 gfp_t mask)
851{
852 return clear_extent_bit(tree, start, end, EXTENT_ORDERED, 1, 0, mask);
853}
854EXPORT_SYMBOL(clear_extent_ordered);
855
856int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
857 gfp_t mask)
858{
859 return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
860 mask);
861}
862EXPORT_SYMBOL(set_extent_new);
863
864int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
865 gfp_t mask)
866{
867 return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
868}
869EXPORT_SYMBOL(clear_extent_new);
870
871int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
872 gfp_t mask)
873{
874 return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
875 mask);
876}
877EXPORT_SYMBOL(set_extent_uptodate);
878
879int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
880 gfp_t mask)
881{
882 return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
883}
884EXPORT_SYMBOL(clear_extent_uptodate);
885
886int set_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
887 gfp_t mask)
888{
889 return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
890 0, NULL, mask);
891}
892EXPORT_SYMBOL(set_extent_writeback);
893
894int clear_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
895 gfp_t mask)
896{
897 return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
898}
899EXPORT_SYMBOL(clear_extent_writeback);
900
901int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end)
902{
903 return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
904}
905EXPORT_SYMBOL(wait_on_extent_writeback);
906
907int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
908{
909 int err;
910 u64 failed_start;
911 while (1) {
912 err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
913 &failed_start, mask);
914 if (err == -EEXIST && (mask & __GFP_WAIT)) {
915 wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
916 start = failed_start;
917 } else {
918 break;
919 }
920 WARN_ON(start > end);
921 }
922 return err;
923}
924EXPORT_SYMBOL(lock_extent);
925
926int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
927 gfp_t mask)
928{
929 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
930}
931EXPORT_SYMBOL(unlock_extent);
932
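/*
 * Illustrative sketch (not part of the original file): both ends of the
 * range are inclusive, so locking the range backing one page looks like:
 *
 *	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
 *	u64 end = start + PAGE_CACHE_SIZE - 1;
 *
 *	lock_extent(tree, start, end, GFP_NOFS);
 *	... read or update the extent state for the page ...
 *	unlock_extent(tree, start, end, GFP_NOFS);
 */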
933/*
934 * helper function to set pages and extents in the tree dirty
935 */
936int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end)
937{
938 unsigned long index = start >> PAGE_CACHE_SHIFT;
939 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
940 struct page *page;
941
942 while (index <= end_index) {
943 page = find_get_page(tree->mapping, index);
944 BUG_ON(!page);
945 __set_page_dirty_nobuffers(page);
946 page_cache_release(page);
947 index++;
948 }
949 set_extent_dirty(tree, start, end, GFP_NOFS);
950 return 0;
951}
952EXPORT_SYMBOL(set_range_dirty);
953
954/*
955 * helper function to set both pages and extents in the tree writeback
956 */
957int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
958{
959 unsigned long index = start >> PAGE_CACHE_SHIFT;
960 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
961 struct page *page;
962
963 while (index <= end_index) {
964 page = find_get_page(tree->mapping, index);
965 BUG_ON(!page);
966 set_page_writeback(page);
967 page_cache_release(page);
968 index++;
969 }
970 set_extent_writeback(tree, start, end, GFP_NOFS);
971 return 0;
972}
973EXPORT_SYMBOL(set_range_writeback);
974
975int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
976 u64 *start_ret, u64 *end_ret, int bits)
977{
978 struct rb_node *node;
979 struct extent_state *state;
980 int ret = 1;
981
70dec807 982 spin_lock_irq(&tree->lock);
983 /*
984 * this search will find all the extents that end after
985 * our range starts.
986 */
80ea96b1 987 node = tree_search(tree, start);
2b114d1d 988 if (!node) {
989 goto out;
990 }
991
992 while(1) {
993 state = rb_entry(node, struct extent_state, rb_node);
994 if (state->end >= start && (state->state & bits)) {
995 *start_ret = state->start;
996 *end_ret = state->end;
997 ret = 0;
998 break;
999 }
1000 node = rb_next(node);
1001 if (!node)
1002 break;
1003 }
1004out:
70dec807 1005 spin_unlock_irq(&tree->lock);
1006 return ret;
1007}
1008EXPORT_SYMBOL(find_first_extent_bit);
1009
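/*
 * Illustrative sketch (not part of the original file): walking every range
 * that has a bit set, starting from offset 0 (0 is returned on a hit, 1
 * when nothing more is found).  process_range() is a made-up placeholder:
 *
 *	u64 found_start, found_end, cur = 0;
 *
 *	while (!find_first_extent_bit(tree, cur, &found_start,
 *				      &found_end, EXTENT_DIRTY)) {
 *		process_range(found_start, found_end);
 *		cur = found_end + 1;
 *	}
 */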
1010struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
1011 u64 start, int bits)
1012{
1013 struct rb_node *node;
1014 struct extent_state *state;
1015
1016 /*
1017 * this search will find all the extents that end after
1018 * our range starts.
1019 */
1020 node = tree_search(tree, start);
2b114d1d 1021 if (!node) {
1022 goto out;
1023 }
1024
1025 while(1) {
1026 state = rb_entry(node, struct extent_state, rb_node);
1027 if (state->end >= start && (state->state & bits)) {
1028 return state;
1029 }
1030 node = rb_next(node);
1031 if (!node)
1032 break;
1033 }
1034out:
1035 return NULL;
1036}
1037EXPORT_SYMBOL(find_first_extent_bit_state);
1038
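/*
 * Illustrative sketch (not part of the original file): unlike
 * find_first_extent_bit() above, this variant takes no locks itself, so it
 * appears to rely on the caller already holding tree->lock:
 *
 *	struct extent_state *state;
 *
 *	spin_lock_irq(&tree->lock);
 *	state = find_first_extent_bit_state(tree, start, EXTENT_DIRTY);
 *	if (state)
 *		... use state->start and state->end under the lock ...
 *	spin_unlock_irq(&tree->lock);
 */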
1039u64 find_lock_delalloc_range(struct extent_io_tree *tree,
1040 u64 *start, u64 *end, u64 max_bytes)
1041{
1042 struct rb_node *node;
1043 struct extent_state *state;
1044 u64 cur_start = *start;
1045 u64 found = 0;
1046 u64 total_bytes = 0;
1047
70dec807 1048 spin_lock_irq(&tree->lock);
1049 /*
1050 * this search will find all the extents that end after
1051 * our range starts.
1052 */
1053search_again:
80ea96b1 1054 node = tree_search(tree, cur_start);
2b114d1d 1055 if (!node) {
1056 if (!found)
1057 *end = (u64)-1;
1058 goto out;
1059 }
1060
1061 while(1) {
1062 state = rb_entry(node, struct extent_state, rb_node);
1063 if (found && state->start != cur_start) {
1064 goto out;
1065 }
1066 if (!(state->state & EXTENT_DELALLOC)) {
1067 if (!found)
1068 *end = state->end;
1069 goto out;
1070 }
1071 if (!found) {
1072 struct extent_state *prev_state;
1073 struct rb_node *prev_node = node;
1074 while(1) {
1075 prev_node = rb_prev(prev_node);
1076 if (!prev_node)
1077 break;
1078 prev_state = rb_entry(prev_node,
1079 struct extent_state,
1080 rb_node);
1081 if (!(prev_state->state & EXTENT_DELALLOC))
1082 break;
1083 state = prev_state;
1084 node = prev_node;
1085 }
1086 }
1087 if (state->state & EXTENT_LOCKED) {
1088 DEFINE_WAIT(wait);
1089 atomic_inc(&state->refs);
1090 prepare_to_wait(&state->wq, &wait,
1091 TASK_UNINTERRUPTIBLE);
70dec807 1092 spin_unlock_irq(&tree->lock);
d1310b2e 1093 schedule();
70dec807 1094 spin_lock_irq(&tree->lock);
1095 finish_wait(&state->wq, &wait);
1096 free_extent_state(state);
1097 goto search_again;
1098 }
291d673e 1099 set_state_cb(tree, state, EXTENT_LOCKED);
b0c68f8b 1100 state->state |= EXTENT_LOCKED;
1101 if (!found)
1102 *start = state->start;
1103 found++;
1104 *end = state->end;
1105 cur_start = state->end + 1;
1106 node = rb_next(node);
1107 if (!node)
1108 break;
1109 total_bytes += state->end - state->start + 1;
1110 if (total_bytes >= max_bytes)
1111 break;
1112 }
1113out:
70dec807 1114 spin_unlock_irq(&tree->lock);
1115 return found;
1116}
1117
1118u64 count_range_bits(struct extent_io_tree *tree,
1119 u64 *start, u64 search_end, u64 max_bytes,
1120 unsigned long bits)
1121{
1122 struct rb_node *node;
1123 struct extent_state *state;
1124 u64 cur_start = *start;
1125 u64 total_bytes = 0;
1126 int found = 0;
1127
1128 if (search_end <= cur_start) {
1129 printk("search_end %Lu start %Lu\n", search_end, cur_start);
1130 WARN_ON(1);
1131 return 0;
1132 }
1133
70dec807 1134 spin_lock_irq(&tree->lock);
1135 if (cur_start == 0 && bits == EXTENT_DIRTY) {
1136 total_bytes = tree->dirty_bytes;
1137 goto out;
1138 }
1139 /*
1140 * this search will find all the extents that end after
1141 * our range starts.
1142 */
80ea96b1 1143 node = tree_search(tree, cur_start);
2b114d1d 1144 if (!node) {
1145 goto out;
1146 }
1147
1148 while(1) {
1149 state = rb_entry(node, struct extent_state, rb_node);
1150 if (state->start > search_end)
1151 break;
1152 if (state->end >= cur_start && (state->state & bits)) {
1153 total_bytes += min(search_end, state->end) + 1 -
1154 max(cur_start, state->start);
1155 if (total_bytes >= max_bytes)
1156 break;
1157 if (!found) {
1158 *start = state->start;
1159 found = 1;
1160 }
1161 }
1162 node = rb_next(node);
1163 if (!node)
1164 break;
1165 }
1166out:
70dec807 1167 spin_unlock_irq(&tree->lock);
1168 return total_bytes;
1169}
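/*
 * Illustrative sketch (not part of the original file): counting dirty bytes
 * at or after an offset, capped at 64k; when something is found, *start is
 * moved to the beginning of the first matching range:
 *
 *	u64 start = offset;
 *	u64 dirty;
 *
 *	dirty = count_range_bits(tree, &start, (u64)-1, 64 * 1024,
 *				 EXTENT_DIRTY);
 */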
1170/*
1171 * helper function to lock both pages and extents in the tree.
1172 * pages must be locked first.
1173 */
1174int lock_range(struct extent_io_tree *tree, u64 start, u64 end)
1175{
1176 unsigned long index = start >> PAGE_CACHE_SHIFT;
1177 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1178 struct page *page;
1179 int err;
1180
1181 while (index <= end_index) {
1182 page = grab_cache_page(tree->mapping, index);
1183 if (!page) {
1184 err = -ENOMEM;
1185 goto failed;
1186 }
1187 if (IS_ERR(page)) {
1188 err = PTR_ERR(page);
1189 goto failed;
1190 }
1191 index++;
1192 }
1193 lock_extent(tree, start, end, GFP_NOFS);
1194 return 0;
1195
1196failed:
1197 /*
1198 * we failed above in getting the page at 'index', so we undo here
1199 * up to but not including the page at 'index'
1200 */
1201 end_index = index;
1202 index = start >> PAGE_CACHE_SHIFT;
1203 while (index < end_index) {
1204 page = find_get_page(tree->mapping, index);
1205 unlock_page(page);
1206 page_cache_release(page);
1207 index++;
1208 }
1209 return err;
1210}
1211EXPORT_SYMBOL(lock_range);
1212
1213/*
1214 * helper function to unlock both pages and extents in the tree.
1215 */
1216int unlock_range(struct extent_io_tree *tree, u64 start, u64 end)
1217{
1218 unsigned long index = start >> PAGE_CACHE_SHIFT;
1219 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1220 struct page *page;
1221
1222 while (index <= end_index) {
1223 page = find_get_page(tree->mapping, index);
1224 unlock_page(page);
1225 page_cache_release(page);
1226 index++;
1227 }
1228 unlock_extent(tree, start, end, GFP_NOFS);
1229 return 0;
1230}
1231EXPORT_SYMBOL(unlock_range);
1232
1233int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
1234{
1235 struct rb_node *node;
1236 struct extent_state *state;
1237 int ret = 0;
1238
70dec807 1239 spin_lock_irq(&tree->lock);
1240 /*
1241 * this search will find all the extents that end after
1242 * our range starts.
1243 */
80ea96b1 1244 node = tree_search(tree, start);
2b114d1d 1245 if (!node) {
1246 ret = -ENOENT;
1247 goto out;
1248 }
1249 state = rb_entry(node, struct extent_state, rb_node);
1250 if (state->start != start) {
1251 ret = -ENOENT;
1252 goto out;
1253 }
1254 state->private = private;
1255out:
70dec807 1256 spin_unlock_irq(&tree->lock);
1257 return ret;
1258}
1259
1260int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
1261{
1262 struct rb_node *node;
1263 struct extent_state *state;
1264 int ret = 0;
1265
70dec807 1266 spin_lock_irq(&tree->lock);
1267 /*
1268 * this search will find all the extents that end after
1269 * our range starts.
1270 */
80ea96b1 1271 node = tree_search(tree, start);
2b114d1d 1272 if (!node) {
1273 ret = -ENOENT;
1274 goto out;
1275 }
1276 state = rb_entry(node, struct extent_state, rb_node);
1277 if (state->start != start) {
1278 ret = -ENOENT;
1279 goto out;
1280 }
1281 *private = state->private;
1282out:
70dec807 1283 spin_unlock_irq(&tree->lock);
1284 return ret;
1285}
1286
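/*
 * Illustrative sketch (not part of the original file): the private field is
 * an opaque u64 attached to the state record that starts exactly at
 * 'start' (btrfs stashes checksums here for the read path):
 *
 *	u64 value;
 *
 *	set_state_private(tree, start, csum);
 *	...
 *	if (get_state_private(tree, start, &value) == 0)
 *		... value now holds csum; -ENOENT means no state starts here ...
 */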
1287/*
1288 * searches a range in the state tree for a given mask.
70dec807 1289 * If 'filled' == 1, this returns 1 only if every extent in the tree
1290 * has the bits set. Otherwise, 1 is returned if any bit in the
1291 * range is found set.
1292 */
1293int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
1294 int bits, int filled)
1295{
1296 struct extent_state *state = NULL;
1297 struct rb_node *node;
1298 int bitset = 0;
1299 unsigned long flags;
1300
70dec807 1301 spin_lock_irqsave(&tree->lock, flags);
80ea96b1 1302 node = tree_search(tree, start);
1303 while (node && start <= end) {
1304 state = rb_entry(node, struct extent_state, rb_node);
1305
1306 if (filled && state->start > start) {
1307 bitset = 0;
1308 break;
1309 }
1310
1311 if (state->start > end)
1312 break;
1313
1314 if (state->state & bits) {
1315 bitset = 1;
1316 if (!filled)
1317 break;
1318 } else if (filled) {
1319 bitset = 0;
1320 break;
1321 }
1322 start = state->end + 1;
1323 if (start > end)
1324 break;
1325 node = rb_next(node);
1326 if (!node) {
1327 if (filled)
1328 bitset = 0;
1329 break;
1330 }
1331 }
70dec807 1332 spin_unlock_irqrestore(&tree->lock, flags);
1333 return bitset;
1334}
1335EXPORT_SYMBOL(test_range_bit);
1336
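/*
 * Illustrative sketch (not part of the original file): 'filled' switches
 * between "every byte has the bit" and "any byte has the bit", mirroring
 * the page helpers below:
 *
 *	if (test_range_bit(tree, page_start, page_end, EXTENT_UPTODATE, 1))
 *		SetPageUptodate(page);
 *
 *	if (!test_range_bit(tree, page_start, page_end, EXTENT_LOCKED, 0))
 *		unlock_page(page);
 */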
1337/*
1338 * helper function to set a given page up to date if all the
1339 * extents in the tree for that page are up to date
1340 */
1341static int check_page_uptodate(struct extent_io_tree *tree,
1342 struct page *page)
1343{
1344 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1345 u64 end = start + PAGE_CACHE_SIZE - 1;
1346 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
1347 SetPageUptodate(page);
1348 return 0;
1349}
1350
1351/*
1352 * helper function to unlock a page if all the extents in the tree
1353 * for that page are unlocked
1354 */
1355static int check_page_locked(struct extent_io_tree *tree,
1356 struct page *page)
1357{
1358 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1359 u64 end = start + PAGE_CACHE_SIZE - 1;
1360 if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
1361 unlock_page(page);
1362 return 0;
1363}
1364
1365/*
1366 * helper function to end page writeback if all the extents
1367 * in the tree for that page are done with writeback
1368 */
1369static int check_page_writeback(struct extent_io_tree *tree,
1370 struct page *page)
1371{
1372 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1373 u64 end = start + PAGE_CACHE_SIZE - 1;
1374 if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
1375 end_page_writeback(page);
1376 return 0;
1377}
1378
1379/* lots and lots of room for performance fixes in the end_bio funcs */
1380
1381/*
1382 * after a writepage IO is done, we need to:
1383 * clear the uptodate bits on error
1384 * clear the writeback bits in the extent tree for this IO
1385 * end_page_writeback if the page has no more pending IO
1386 *
1387 * Scheduling is not allowed, so the extent state tree is expected
1388 * to have one and only one object corresponding to this IO.
1389 */
1390#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1391static void end_bio_extent_writepage(struct bio *bio, int err)
1392#else
1393static int end_bio_extent_writepage(struct bio *bio,
1394 unsigned int bytes_done, int err)
1395#endif
1396{
1259ab75 1397 int uptodate = err == 0;
d1310b2e 1398 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
902b22f3 1399 struct extent_io_tree *tree;
1400 u64 start;
1401 u64 end;
1402 int whole_page;
1259ab75 1403 int ret;
1404
1405#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1406 if (bio->bi_size)
1407 return 1;
1408#endif
1409 do {
1410 struct page *page = bvec->bv_page;
1411 tree = &BTRFS_I(page->mapping->host)->io_tree;
1412
1413 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1414 bvec->bv_offset;
1415 end = start + bvec->bv_len - 1;
1416
1417 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1418 whole_page = 1;
1419 else
1420 whole_page = 0;
1421
1422 if (--bvec >= bio->bi_io_vec)
1423 prefetchw(&bvec->bv_page->flags);
1424 if (tree->ops && tree->ops->writepage_end_io_hook) {
1425 ret = tree->ops->writepage_end_io_hook(page, start,
902b22f3 1426 end, NULL, uptodate);
1427 if (ret)
1428 uptodate = 0;
1429 }
1430
1431 if (!uptodate && tree->ops &&
1432 tree->ops->writepage_io_failed_hook) {
1433 ret = tree->ops->writepage_io_failed_hook(bio, page,
902b22f3 1434 start, end, NULL);
1259ab75 1435 if (ret == 0) {
1436 uptodate = (err == 0);
1437 continue;
1438 }
1439 }
1440
1441 if (!uptodate) {
1442 clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
1443 ClearPageUptodate(page);
1444 SetPageError(page);
1445 }
70dec807 1446
902b22f3 1447 clear_extent_writeback(tree, start, end, GFP_ATOMIC);
1448
1449 if (whole_page)
1450 end_page_writeback(page);
1451 else
1452 check_page_writeback(tree, page);
d1310b2e 1453 } while (bvec >= bio->bi_io_vec);
1454 bio_put(bio);
1455#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1456 return 0;
1457#endif
1458}
1459
1460/*
1461 * after a readpage IO is done, we need to:
1462 * clear the uptodate bits on error
1463 * set the uptodate bits if things worked
1464 * set the page up to date if all extents in the tree are uptodate
1465 * clear the lock bit in the extent tree
1466 * unlock the page if there are no other extents locked for it
1467 *
1468 * Scheduling is not allowed, so the extent state tree is expected
1469 * to have one and only one object corresponding to this IO.
1470 */
1471#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1472static void end_bio_extent_readpage(struct bio *bio, int err)
1473#else
1474static int end_bio_extent_readpage(struct bio *bio,
1475 unsigned int bytes_done, int err)
1476#endif
1477{
1478 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1479 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
902b22f3 1480 struct extent_io_tree *tree;
1481 u64 start;
1482 u64 end;
1483 int whole_page;
1484 int ret;
1485
1486#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1487 if (bio->bi_size)
1488 return 1;
1489#endif
1490
1491 do {
1492 struct page *page = bvec->bv_page;
1493 tree = &BTRFS_I(page->mapping->host)->io_tree;
1494
1495 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1496 bvec->bv_offset;
1497 end = start + bvec->bv_len - 1;
1498
1499 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1500 whole_page = 1;
1501 else
1502 whole_page = 0;
1503
1504 if (--bvec >= bio->bi_io_vec)
1505 prefetchw(&bvec->bv_page->flags);
1506
1507 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
70dec807 1508 ret = tree->ops->readpage_end_io_hook(page, start, end,
902b22f3 1509 NULL);
1510 if (ret)
1511 uptodate = 0;
1512 }
1513 if (!uptodate && tree->ops &&
1514 tree->ops->readpage_io_failed_hook) {
1515 ret = tree->ops->readpage_io_failed_hook(bio, page,
902b22f3 1516 start, end, NULL);
7e38326f 1517 if (ret == 0) {
1518 uptodate =
1519 test_bit(BIO_UPTODATE, &bio->bi_flags);
1520 continue;
1521 }
1522 }
d1310b2e 1523
1524 if (uptodate)
1525 set_extent_uptodate(tree, start, end,
1526 GFP_ATOMIC);
1527 unlock_extent(tree, start, end, GFP_ATOMIC);
d1310b2e 1528
1529 if (whole_page) {
1530 if (uptodate) {
1531 SetPageUptodate(page);
1532 } else {
1533 ClearPageUptodate(page);
1534 SetPageError(page);
1535 }
d1310b2e 1536 unlock_page(page);
1537 } else {
1538 if (uptodate) {
1539 check_page_uptodate(tree, page);
1540 } else {
1541 ClearPageUptodate(page);
1542 SetPageError(page);
1543 }
d1310b2e 1544 check_page_locked(tree, page);
70dec807 1545 }
1546 } while (bvec >= bio->bi_io_vec);
1547
1548 bio_put(bio);
1549#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1550 return 0;
1551#endif
1552}
1553
1554/*
1555 * IO done from prepare_write is pretty simple, we just unlock
1556 * the structs in the extent tree when done, and set the uptodate bits
1557 * as appropriate.
1558 */
1559#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1560static void end_bio_extent_preparewrite(struct bio *bio, int err)
1561#else
1562static int end_bio_extent_preparewrite(struct bio *bio,
1563 unsigned int bytes_done, int err)
1564#endif
1565{
1566 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1567 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
902b22f3 1568 struct extent_io_tree *tree;
1569 u64 start;
1570 u64 end;
1571
1572#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1573 if (bio->bi_size)
1574 return 1;
1575#endif
1576
1577 do {
1578 struct page *page = bvec->bv_page;
1579 tree = &BTRFS_I(page->mapping->host)->io_tree;
1580
1581 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1582 bvec->bv_offset;
1583 end = start + bvec->bv_len - 1;
1584
1585 if (--bvec >= bio->bi_io_vec)
1586 prefetchw(&bvec->bv_page->flags);
1587
1588 if (uptodate) {
1589 set_extent_uptodate(tree, start, end, GFP_ATOMIC);
1590 } else {
1591 ClearPageUptodate(page);
1592 SetPageError(page);
1593 }
1594
1595 unlock_extent(tree, start, end, GFP_ATOMIC);
1596
1597 } while (bvec >= bio->bi_io_vec);
1598
1599 bio_put(bio);
1600#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1601 return 0;
1602#endif
1603}
1604
1605static struct bio *
1606extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
1607 gfp_t gfp_flags)
1608{
1609 struct bio *bio;
1610
1611 bio = bio_alloc(gfp_flags, nr_vecs);
1612
1613 if (bio == NULL && (current->flags & PF_MEMALLOC)) {
1614 while (!bio && (nr_vecs /= 2))
1615 bio = bio_alloc(gfp_flags, nr_vecs);
1616 }
1617
1618 if (bio) {
e1c4b745 1619 bio->bi_size = 0;
d1310b2e
CM
1620 bio->bi_bdev = bdev;
1621 bio->bi_sector = first_sector;
1622 }
1623 return bio;
1624}
1625
f188591e 1626static int submit_one_bio(int rw, struct bio *bio, int mirror_num)
d1310b2e 1627{
d1310b2e 1628 int ret = 0;
1629 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1630 struct page *page = bvec->bv_page;
1631 struct extent_io_tree *tree = bio->bi_private;
1632 struct rb_node *node;
1633 struct extent_state *state;
1634 u64 start;
1635 u64 end;
1636
1637 start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
1638 end = start + bvec->bv_len - 1;
1639
1640 spin_lock_irq(&tree->lock);
80ea96b1 1641 node = __etree_search(tree, start, NULL, NULL);
1642 BUG_ON(!node);
1643 state = rb_entry(node, struct extent_state, rb_node);
1644 while(state->end < end) {
1645 node = rb_next(node);
1646 state = rb_entry(node, struct extent_state, rb_node);
1647 }
1648 BUG_ON(state->end != end);
1649 spin_unlock_irq(&tree->lock);
1650
902b22f3 1651 bio->bi_private = NULL;
1652
1653 bio_get(bio);
1654
065631f6 1655 if (tree->ops && tree->ops->submit_bio_hook)
1656 tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
1657 mirror_num);
1658 else
1659 submit_bio(rw, bio);
1660 if (bio_flagged(bio, BIO_EOPNOTSUPP))
1661 ret = -EOPNOTSUPP;
1662 bio_put(bio);
1663 return ret;
1664}
1665
1666static int submit_extent_page(int rw, struct extent_io_tree *tree,
1667 struct page *page, sector_t sector,
1668 size_t size, unsigned long offset,
1669 struct block_device *bdev,
1670 struct bio **bio_ret,
1671 unsigned long max_pages,
1672 bio_end_io_t end_io_func,
1673 int mirror_num)
1674{
1675 int ret = 0;
1676 struct bio *bio;
1677 int nr;
1678
1679 if (bio_ret && *bio_ret) {
1680 bio = *bio_ret;
1681 if (bio->bi_sector + (bio->bi_size >> 9) != sector ||
1682 (tree->ops && tree->ops->merge_bio_hook &&
1683 tree->ops->merge_bio_hook(page, offset, size, bio)) ||
d1310b2e 1684 bio_add_page(bio, page, size, offset) < size) {
f188591e 1685 ret = submit_one_bio(rw, bio, mirror_num);
1686 bio = NULL;
1687 } else {
1688 return 0;
1689 }
1690 }
961d0232 1691 nr = bio_get_nr_vecs(bdev);
1692 bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
1693 if (!bio) {
1694 printk("failed to allocate bio nr %d\n", nr);
1695 }
1696
1697
1698 bio_add_page(bio, page, size, offset);
1699 bio->bi_end_io = end_io_func;
1700 bio->bi_private = tree;
70dec807 1701
1702 if (bio_ret) {
1703 *bio_ret = bio;
1704 } else {
f188591e 1705 ret = submit_one_bio(rw, bio, mirror_num);
1706 }
1707
1708 return ret;
1709}
1710
1711void set_page_extent_mapped(struct page *page)
1712{
1713 if (!PagePrivate(page)) {
1714 SetPagePrivate(page);
d1310b2e 1715 page_cache_get(page);
6af118ce 1716 set_page_private(page, EXTENT_PAGE_PRIVATE);
1717 }
1718}
1719
1720void set_page_extent_head(struct page *page, unsigned long len)
1721{
1722 set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
1723}
1724
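/*
 * Illustrative sketch (not part of the original file): pages are tagged via
 * page->private before IO so the release/invalidate paths can recognize
 * them; the first page of an extent_buffer additionally records the buffer
 * length (eb here is assumed to be a struct extent_buffer the caller owns):
 *
 *	set_page_extent_mapped(page);
 *	set_page_extent_head(page, eb->len);
 */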
1725/*
1726 * basic readpage implementation. Locked extent state structs are inserted
1727 * into the tree that are removed when the IO is done (by the end_io
1728 * handlers)
1729 */
1730static int __extent_read_full_page(struct extent_io_tree *tree,
1731 struct page *page,
1732 get_extent_t *get_extent,
f188591e 1733 struct bio **bio, int mirror_num)
1734{
1735 struct inode *inode = page->mapping->host;
1736 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1737 u64 page_end = start + PAGE_CACHE_SIZE - 1;
1738 u64 end;
1739 u64 cur = start;
1740 u64 extent_offset;
1741 u64 last_byte = i_size_read(inode);
1742 u64 block_start;
1743 u64 cur_end;
1744 sector_t sector;
1745 struct extent_map *em;
1746 struct block_device *bdev;
1747 int ret;
1748 int nr = 0;
1749 size_t page_offset = 0;
1750 size_t iosize;
1751 size_t blocksize = inode->i_sb->s_blocksize;
1752
1753 set_page_extent_mapped(page);
1754
1755 end = page_end;
1756 lock_extent(tree, start, end, GFP_NOFS);
1757
1758 while (cur <= end) {
1759 if (cur >= last_byte) {
1760 char *userpage;
1761 iosize = PAGE_CACHE_SIZE - page_offset;
1762 userpage = kmap_atomic(page, KM_USER0);
1763 memset(userpage + page_offset, 0, iosize);
1764 flush_dcache_page(page);
1765 kunmap_atomic(userpage, KM_USER0);
1766 set_extent_uptodate(tree, cur, cur + iosize - 1,
1767 GFP_NOFS);
1768 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1769 break;
1770 }
1771 em = get_extent(inode, page, page_offset, cur,
1772 end - cur + 1, 0);
1773 if (IS_ERR(em) || !em) {
1774 SetPageError(page);
1775 unlock_extent(tree, cur, end, GFP_NOFS);
1776 break;
1777 }
d1310b2e 1778 extent_offset = cur - em->start;
1779 if (extent_map_end(em) <= cur) {
1780printk("bad mapping em [%Lu %Lu] cur %Lu\n", em->start, extent_map_end(em), cur);
1781 }
d1310b2e 1782 BUG_ON(extent_map_end(em) <= cur);
1783 if (end < cur) {
1784printk("2bad mapping end %Lu cur %Lu\n", end, cur);
1785 }
1786 BUG_ON(end < cur);
1787
1788 iosize = min(extent_map_end(em) - cur, end - cur + 1);
1789 cur_end = min(extent_map_end(em) - 1, end);
1790 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
1791 sector = (em->block_start + extent_offset) >> 9;
1792 bdev = em->bdev;
1793 block_start = em->block_start;
1794 free_extent_map(em);
1795 em = NULL;
1796
1797 /* we've found a hole, just zero and go on */
1798 if (block_start == EXTENT_MAP_HOLE) {
1799 char *userpage;
1800 userpage = kmap_atomic(page, KM_USER0);
1801 memset(userpage + page_offset, 0, iosize);
1802 flush_dcache_page(page);
1803 kunmap_atomic(userpage, KM_USER0);
1804
1805 set_extent_uptodate(tree, cur, cur + iosize - 1,
1806 GFP_NOFS);
1807 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1808 cur = cur + iosize;
1809 page_offset += iosize;
1810 continue;
1811 }
1812 /* the get_extent function already copied into the page */
1813 if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
1814 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1815 cur = cur + iosize;
1816 page_offset += iosize;
1817 continue;
1818 }
1819 /* we have an inline extent but it didn't get marked up
1820 * to date. Error out
1821 */
1822 if (block_start == EXTENT_MAP_INLINE) {
1823 SetPageError(page);
1824 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1825 cur = cur + iosize;
1826 page_offset += iosize;
1827 continue;
1828 }
1829
1830 ret = 0;
1831 if (tree->ops && tree->ops->readpage_io_hook) {
1832 ret = tree->ops->readpage_io_hook(page, cur,
1833 cur + iosize - 1);
1834 }
1835 if (!ret) {
1836 unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
1837 pnr -= page->index;
1838 ret = submit_extent_page(READ, tree, page,
1839 sector, iosize, page_offset,
89642229 1840 bdev, bio, pnr,
f188591e 1841 end_bio_extent_readpage, mirror_num);
89642229 1842 nr++;
1843 }
1844 if (ret)
1845 SetPageError(page);
1846 cur = cur + iosize;
1847 page_offset += iosize;
1848 }
1849 if (!nr) {
1850 if (!PageError(page))
1851 SetPageUptodate(page);
1852 unlock_page(page);
1853 }
1854 return 0;
1855}
1856
1857int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
1858 get_extent_t *get_extent)
1859{
1860 struct bio *bio = NULL;
1861 int ret;
1862
f188591e 1863 ret = __extent_read_full_page(tree, page, get_extent, &bio, 0);
d1310b2e 1864 if (bio)
f188591e 1865 submit_one_bio(READ, bio, 0);
1866 return ret;
1867}
1868EXPORT_SYMBOL(extent_read_full_page);
1869
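/*
 * Illustrative sketch (not part of the original file): a filesystem would
 * normally call this from its ->readpage() address_space op; the names
 * demo_readpage and demo_get_extent are hypothetical stand-ins for the
 * filesystem's own callbacks:
 *
 *	static int demo_readpage(struct file *file, struct page *page)
 *	{
 *		struct extent_io_tree *tree;
 *
 *		tree = &BTRFS_I(page->mapping->host)->io_tree;
 *		return extent_read_full_page(tree, page, demo_get_extent);
 *	}
 */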
1870/*
1871 * the writepage semantics are similar to regular writepage. extent
1872 * records are inserted to lock ranges in the tree, and as dirty areas
1873 * are found, they are marked writeback. Then the lock bits are removed
1874 * and the end_io handler clears the writeback ranges
1875 */
1876static int __extent_writepage(struct page *page, struct writeback_control *wbc,
1877 void *data)
1878{
1879 struct inode *inode = page->mapping->host;
1880 struct extent_page_data *epd = data;
1881 struct extent_io_tree *tree = epd->tree;
1882 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1883 u64 delalloc_start;
1884 u64 page_end = start + PAGE_CACHE_SIZE - 1;
1885 u64 end;
1886 u64 cur = start;
1887 u64 extent_offset;
1888 u64 last_byte = i_size_read(inode);
1889 u64 block_start;
1890 u64 iosize;
e6dcd2dc 1891 u64 unlock_start;
1892 sector_t sector;
1893 struct extent_map *em;
1894 struct block_device *bdev;
1895 int ret;
1896 int nr = 0;
7f3c74fb 1897 size_t pg_offset = 0;
1898 size_t blocksize;
1899 loff_t i_size = i_size_read(inode);
1900 unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
1901 u64 nr_delalloc;
1902 u64 delalloc_end;
1903
1904 WARN_ON(!PageLocked(page));
7f3c74fb 1905 pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
211c17f5 1906 if (page->index > end_index ||
7f3c74fb 1907 (page->index == end_index && !pg_offset)) {
211c17f5 1908 page->mapping->a_ops->invalidatepage(page, 0);
1909 unlock_page(page);
1910 return 0;
1911 }
1912
1913 if (page->index == end_index) {
1914 char *userpage;
1915
d1310b2e 1916 userpage = kmap_atomic(page, KM_USER0);
1917 memset(userpage + pg_offset, 0,
1918 PAGE_CACHE_SIZE - pg_offset);
d1310b2e 1919 kunmap_atomic(userpage, KM_USER0);
211c17f5 1920 flush_dcache_page(page);
d1310b2e 1921 }
7f3c74fb 1922 pg_offset = 0;
1923
1924 set_page_extent_mapped(page);
1925
1926 delalloc_start = start;
1927 delalloc_end = 0;
1928 while(delalloc_end < page_end) {
1929 nr_delalloc = find_lock_delalloc_range(tree, &delalloc_start,
1930 &delalloc_end,
1931 128 * 1024 * 1024);
1932 if (nr_delalloc == 0) {
1933 delalloc_start = delalloc_end + 1;
1934 continue;
1935 }
1936 tree->ops->fill_delalloc(inode, delalloc_start,
1937 delalloc_end);
1938 clear_extent_bit(tree, delalloc_start,
1939 delalloc_end,
1940 EXTENT_LOCKED | EXTENT_DELALLOC,
1941 1, 0, GFP_NOFS);
1942 delalloc_start = delalloc_end + 1;
1943 }
1944 lock_extent(tree, start, page_end, GFP_NOFS);
e6dcd2dc 1945 unlock_start = start;
d1310b2e 1946
1947 if (tree->ops && tree->ops->writepage_start_hook) {
1948 ret = tree->ops->writepage_start_hook(page, start, page_end);
1949 if (ret == -EAGAIN) {
1950 unlock_extent(tree, start, page_end, GFP_NOFS);
1951 redirty_page_for_writepage(wbc, page);
1952 unlock_page(page);
1953 return 0;
1954 }
1955 }
1956
d1310b2e
CM
1957 end = page_end;
1958 if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
1959 printk("found delalloc bits after lock_extent\n");
1960 }
1961
1962 if (last_byte <= start) {
1963 clear_extent_dirty(tree, start, page_end, GFP_NOFS);
1964 unlock_extent(tree, start, page_end, GFP_NOFS);
1965 if (tree->ops && tree->ops->writepage_end_io_hook)
1966 tree->ops->writepage_end_io_hook(page, start,
1967 page_end, NULL, 1);
1968 unlock_start = page_end + 1;
1969 goto done;
1970 }
1971
1972 set_extent_uptodate(tree, start, page_end, GFP_NOFS);
1973 blocksize = inode->i_sb->s_blocksize;
1974
1975 while (cur <= end) {
1976 if (cur >= last_byte) {
1977 clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
e6dcd2dc
CM
1978 unlock_extent(tree, unlock_start, page_end, GFP_NOFS);
1979 if (tree->ops && tree->ops->writepage_end_io_hook)
1980 tree->ops->writepage_end_io_hook(page, cur,
1981 page_end, NULL, 1);
1982 unlock_start = page_end + 1;
d1310b2e
CM
1983 break;
1984 }
7f3c74fb 1985 em = epd->get_extent(inode, page, pg_offset, cur,
d1310b2e
CM
1986 end - cur + 1, 1);
1987 if (IS_ERR(em) || !em) {
1988 SetPageError(page);
1989 break;
1990 }
1991
1992 extent_offset = cur - em->start;
1993 BUG_ON(extent_map_end(em) <= cur);
1994 BUG_ON(end < cur);
1995 iosize = min(extent_map_end(em) - cur, end - cur + 1);
1996 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
1997 sector = (em->block_start + extent_offset) >> 9;
1998 bdev = em->bdev;
1999 block_start = em->block_start;
2000 free_extent_map(em);
2001 em = NULL;
2002
2003 if (block_start == EXTENT_MAP_HOLE ||
2004 block_start == EXTENT_MAP_INLINE) {
2005 clear_extent_dirty(tree, cur,
2006 cur + iosize - 1, GFP_NOFS);
e6dcd2dc
CM
2007
2008 unlock_extent(tree, unlock_start, cur + iosize -1,
2009 GFP_NOFS);
7f3c74fb 2010
e6dcd2dc
CM
2011 if (tree->ops && tree->ops->writepage_end_io_hook)
2012 tree->ops->writepage_end_io_hook(page, cur,
2013 cur + iosize - 1,
2014 NULL, 1);
d1310b2e 2015 cur = cur + iosize;
7f3c74fb 2016 pg_offset += iosize;
e6dcd2dc 2017 unlock_start = cur;
d1310b2e
CM
2018 continue;
2019 }
2020
2021 /* leave this out until we have a page_mkwrite call */
2022 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
2023 EXTENT_DIRTY, 0)) {
2024 cur = cur + iosize;
7f3c74fb 2025 pg_offset += iosize;
d1310b2e
CM
2026 continue;
2027 }
2028 clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
2029 if (tree->ops && tree->ops->writepage_io_hook) {
2030 ret = tree->ops->writepage_io_hook(page, cur,
2031 cur + iosize - 1);
2032 } else {
2033 ret = 0;
2034 }
1259ab75 2035 if (ret) {
d1310b2e 2036 SetPageError(page);
1259ab75 2037 } else {
d1310b2e 2038 unsigned long max_nr = end_index + 1;
7f3c74fb 2039
d1310b2e
CM
2040 set_range_writeback(tree, cur, cur + iosize - 1);
2041 if (!PageWriteback(page)) {
2042 printk("warning page %lu not writeback, "
2043 "cur %llu end %llu\n", page->index,
2044 (unsigned long long)cur,
2045 (unsigned long long)end);
2046 }
2047
2048 ret = submit_extent_page(WRITE, tree, page, sector,
7f3c74fb 2049 iosize, pg_offset, bdev,
d1310b2e 2050 &epd->bio, max_nr,
f188591e 2051 end_bio_extent_writepage, 0);
d1310b2e
CM
2052 if (ret)
2053 SetPageError(page);
2054 }
2055 cur = cur + iosize;
7f3c74fb 2056 pg_offset += iosize;
d1310b2e
CM
2057 nr++;
2058 }
2059done:
2060 if (nr == 0) {
2061 /* make sure the mapping tag for page dirty gets cleared */
2062 set_page_writeback(page);
2063 end_page_writeback(page);
2064 }
e6dcd2dc
CM
2065 if (unlock_start <= page_end)
2066 unlock_extent(tree, unlock_start, page_end, GFP_NOFS);
d1310b2e
CM
2067 unlock_page(page);
2068 return 0;
2069}
2070
5e478dc9 2071#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
d1310b2e
CM
 2072/* Taken directly from 2.6.23 for the 2.6.18 backport */
2073typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
2074 void *data);
2075
2076/**
2077 * write_cache_pages - walk the list of dirty pages of the given address space
2078 * and write all of them.
2079 * @mapping: address space structure to write
2080 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2081 * @writepage: function called for each page
2082 * @data: data passed to writepage function
2083 *
2084 * If a page is already under I/O, write_cache_pages() skips it, even
2085 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
2086 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
2087 * and msync() need to guarantee that all the data which was dirty at the time
2088 * the call was made get new I/O started against them. If wbc->sync_mode is
2089 * WB_SYNC_ALL then we were called for data integrity and we must wait for
2090 * existing IO to complete.
2091 */
2092static int write_cache_pages(struct address_space *mapping,
2093 struct writeback_control *wbc, writepage_t writepage,
2094 void *data)
2095{
2096 struct backing_dev_info *bdi = mapping->backing_dev_info;
2097 int ret = 0;
2098 int done = 0;
2099 struct pagevec pvec;
2100 int nr_pages;
2101 pgoff_t index;
2102 pgoff_t end; /* Inclusive */
2103 int scanned = 0;
2104 int range_whole = 0;
2105
2106 if (wbc->nonblocking && bdi_write_congested(bdi)) {
2107 wbc->encountered_congestion = 1;
2108 return 0;
2109 }
2110
2111 pagevec_init(&pvec, 0);
2112 if (wbc->range_cyclic) {
2113 index = mapping->writeback_index; /* Start from prev offset */
2114 end = -1;
2115 } else {
2116 index = wbc->range_start >> PAGE_CACHE_SHIFT;
2117 end = wbc->range_end >> PAGE_CACHE_SHIFT;
2118 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2119 range_whole = 1;
2120 scanned = 1;
2121 }
2122retry:
2123 while (!done && (index <= end) &&
2124 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
2125 PAGECACHE_TAG_DIRTY,
2126 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
2127 unsigned i;
2128
2129 scanned = 1;
2130 for (i = 0; i < nr_pages; i++) {
2131 struct page *page = pvec.pages[i];
2132
2133 /*
2134 * At this point we hold neither mapping->tree_lock nor
2135 * lock on the page itself: the page may be truncated or
2136 * invalidated (changing page->mapping to NULL), or even
2137 * swizzled back from swapper_space to tmpfs file
2138 * mapping
2139 */
2140 lock_page(page);
2141
2142 if (unlikely(page->mapping != mapping)) {
2143 unlock_page(page);
2144 continue;
2145 }
2146
2147 if (!wbc->range_cyclic && page->index > end) {
2148 done = 1;
2149 unlock_page(page);
2150 continue;
2151 }
2152
2153 if (wbc->sync_mode != WB_SYNC_NONE)
2154 wait_on_page_writeback(page);
2155
2156 if (PageWriteback(page) ||
2157 !clear_page_dirty_for_io(page)) {
2158 unlock_page(page);
2159 continue;
2160 }
2161
2162 ret = (*writepage)(page, wbc, data);
2163
2164 if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
2165 unlock_page(page);
2166 ret = 0;
2167 }
2168 if (ret || (--(wbc->nr_to_write) <= 0))
2169 done = 1;
2170 if (wbc->nonblocking && bdi_write_congested(bdi)) {
2171 wbc->encountered_congestion = 1;
2172 done = 1;
2173 }
2174 }
2175 pagevec_release(&pvec);
2176 cond_resched();
2177 }
2178 if (!scanned && !done) {
2179 /*
2180 * We hit the last page and there is more work to be done: wrap
2181 * back to the start of the file
2182 */
2183 scanned = 1;
2184 index = 0;
2185 goto retry;
2186 }
2187 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2188 mapping->writeback_index = index;
2189 return ret;
2190}
2191#endif
2192
2193int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
2194 get_extent_t *get_extent,
2195 struct writeback_control *wbc)
2196{
2197 int ret;
2198 struct address_space *mapping = page->mapping;
2199 struct extent_page_data epd = {
2200 .bio = NULL,
2201 .tree = tree,
2202 .get_extent = get_extent,
2203 };
2204 struct writeback_control wbc_writepages = {
2205 .bdi = wbc->bdi,
2206 .sync_mode = WB_SYNC_NONE,
2207 .older_than_this = NULL,
2208 .nr_to_write = 64,
2209 .range_start = page_offset(page) + PAGE_CACHE_SIZE,
2210 .range_end = (loff_t)-1,
2211 };
2212
2213
2214 ret = __extent_writepage(page, wbc, &epd);
2215
2216 write_cache_pages(mapping, &wbc_writepages, __extent_writepage, &epd);
2217 if (epd.bio) {
f188591e 2218 submit_one_bio(WRITE, epd.bio, 0);
d1310b2e
CM
2219 }
2220 return ret;
2221}
2222EXPORT_SYMBOL(extent_write_full_page);
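/*
 * illustrative sketch (not taken from this file) of how an address_space
 * ->writepage implementation might call extent_write_full_page().  The io
 * tree lookup and the get_extent callback name here are assumptions; real
 * callers pass whatever tree and mapping callback they actually use.
 *
 *	static int example_writepage(struct page *page,
 *				     struct writeback_control *wbc)
 *	{
 *		struct extent_io_tree *tree = example_io_tree(page->mapping->host);
 *
 *		return extent_write_full_page(tree, page, example_get_extent, wbc);
 *	}
 */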
2223
2224
2225int extent_writepages(struct extent_io_tree *tree,
2226 struct address_space *mapping,
2227 get_extent_t *get_extent,
2228 struct writeback_control *wbc)
2229{
2230 int ret = 0;
2231 struct extent_page_data epd = {
2232 .bio = NULL,
2233 .tree = tree,
2234 .get_extent = get_extent,
2235 };
2236
2237 ret = write_cache_pages(mapping, wbc, __extent_writepage, &epd);
2238 if (epd.bio) {
f188591e 2239 submit_one_bio(WRITE, epd.bio, 0);
d1310b2e
CM
2240 }
2241 return ret;
2242}
2243EXPORT_SYMBOL(extent_writepages);
2244
2245int extent_readpages(struct extent_io_tree *tree,
2246 struct address_space *mapping,
2247 struct list_head *pages, unsigned nr_pages,
2248 get_extent_t get_extent)
2249{
2250 struct bio *bio = NULL;
2251 unsigned page_idx;
2252 struct pagevec pvec;
2253
2254 pagevec_init(&pvec, 0);
2255 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
2256 struct page *page = list_entry(pages->prev, struct page, lru);
2257
2258 prefetchw(&page->flags);
2259 list_del(&page->lru);
2260 /*
2261 * what we want to do here is call add_to_page_cache_lru,
2262 * but that isn't exported, so we reproduce it here
2263 */
2264 if (!add_to_page_cache(page, mapping,
2265 page->index, GFP_KERNEL)) {
2266
2267 /* open coding of lru_cache_add, also not exported */
2268 page_cache_get(page);
2269 if (!pagevec_add(&pvec, page))
2270 __pagevec_lru_add(&pvec);
f188591e
CM
2271 __extent_read_full_page(tree, page, get_extent,
2272 &bio, 0);
d1310b2e
CM
2273 }
2274 page_cache_release(page);
2275 }
2276 if (pagevec_count(&pvec))
2277 __pagevec_lru_add(&pvec);
2278 BUG_ON(!list_empty(pages));
2279 if (bio)
f188591e 2280 submit_one_bio(READ, bio, 0);
d1310b2e
CM
2281 return 0;
2282}
2283EXPORT_SYMBOL(extent_readpages);
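/*
 * illustrative sketch (assumption, not a real caller): wiring
 * extent_readpages() into an address_space ->readpages operation; the tree
 * lookup and get_extent callback names are placeholders.
 *
 *	static int example_readpages(struct file *file,
 *				     struct address_space *mapping,
 *				     struct list_head *pages, unsigned nr_pages)
 *	{
 *		struct extent_io_tree *tree = example_io_tree(mapping->host);
 *
 *		return extent_readpages(tree, mapping, pages, nr_pages,
 *					example_get_extent);
 *	}
 */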
2284
2285/*
 2286 * basic invalidatepage code: this waits on any locked or writeback
2287 * ranges corresponding to the page, and then deletes any extent state
2288 * records from the tree
2289 */
2290int extent_invalidatepage(struct extent_io_tree *tree,
2291 struct page *page, unsigned long offset)
2292{
2293 u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
2294 u64 end = start + PAGE_CACHE_SIZE - 1;
2295 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
2296
2297 start += (offset + blocksize -1) & ~(blocksize - 1);
2298 if (start > end)
2299 return 0;
2300
2301 lock_extent(tree, start, end, GFP_NOFS);
2302 wait_on_extent_writeback(tree, start, end);
2303 clear_extent_bit(tree, start, end,
2304 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
2305 1, 1, GFP_NOFS);
2306 return 0;
2307}
2308EXPORT_SYMBOL(extent_invalidatepage);
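/*
 * worked example of the rounding above (descriptive note, assuming 4K pages
 * and a 1K block size): invalidating from offset 100 rounds start up by
 * 1024, so the block that is only partially invalidated keeps its extent
 * state and only the fully invalidated tail of the page is cleared.
 */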
2309
2310/*
 2311 * simple commit_write call: mark the page extent mapped and dirty, and
 2312 * bump i_size if the write extends past the current file size
2313 */
2314int extent_commit_write(struct extent_io_tree *tree,
2315 struct inode *inode, struct page *page,
2316 unsigned from, unsigned to)
2317{
2318 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2319
2320 set_page_extent_mapped(page);
2321 set_page_dirty(page);
2322
2323 if (pos > inode->i_size) {
2324 i_size_write(inode, pos);
2325 mark_inode_dirty(inode);
2326 }
2327 return 0;
2328}
2329EXPORT_SYMBOL(extent_commit_write);
2330
2331int extent_prepare_write(struct extent_io_tree *tree,
2332 struct inode *inode, struct page *page,
2333 unsigned from, unsigned to, get_extent_t *get_extent)
2334{
2335 u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
2336 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
2337 u64 block_start;
2338 u64 orig_block_start;
2339 u64 block_end;
2340 u64 cur_end;
2341 struct extent_map *em;
2342 unsigned blocksize = 1 << inode->i_blkbits;
2343 size_t page_offset = 0;
2344 size_t block_off_start;
2345 size_t block_off_end;
2346 int err = 0;
2347 int iocount = 0;
2348 int ret = 0;
2349 int isnew;
2350
2351 set_page_extent_mapped(page);
2352
2353 block_start = (page_start + from) & ~((u64)blocksize - 1);
2354 block_end = (page_start + to - 1) | (blocksize - 1);
2355 orig_block_start = block_start;
2356
2357 lock_extent(tree, page_start, page_end, GFP_NOFS);
2358 while(block_start <= block_end) {
2359 em = get_extent(inode, page, page_offset, block_start,
2360 block_end - block_start + 1, 1);
2361 if (IS_ERR(em) || !em) {
2362 goto err;
2363 }
2364 cur_end = min(block_end, extent_map_end(em) - 1);
2365 block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
2366 block_off_end = block_off_start + blocksize;
2367 isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
2368
2369 if (!PageUptodate(page) && isnew &&
2370 (block_off_end > to || block_off_start < from)) {
2371 void *kaddr;
2372
2373 kaddr = kmap_atomic(page, KM_USER0);
2374 if (block_off_end > to)
2375 memset(kaddr + to, 0, block_off_end - to);
2376 if (block_off_start < from)
2377 memset(kaddr + block_off_start, 0,
2378 from - block_off_start);
2379 flush_dcache_page(page);
2380 kunmap_atomic(kaddr, KM_USER0);
2381 }
2382 if ((em->block_start != EXTENT_MAP_HOLE &&
2383 em->block_start != EXTENT_MAP_INLINE) &&
2384 !isnew && !PageUptodate(page) &&
2385 (block_off_end > to || block_off_start < from) &&
2386 !test_range_bit(tree, block_start, cur_end,
2387 EXTENT_UPTODATE, 1)) {
2388 u64 sector;
2389 u64 extent_offset = block_start - em->start;
2390 size_t iosize;
2391 sector = (em->block_start + extent_offset) >> 9;
2392 iosize = (cur_end - block_start + blocksize) &
2393 ~((u64)blocksize - 1);
2394 /*
2395 * we've already got the extent locked, but we
2396 * need to split the state such that our end_bio
2397 * handler can clear the lock.
2398 */
2399 set_extent_bit(tree, block_start,
2400 block_start + iosize - 1,
2401 EXTENT_LOCKED, 0, NULL, GFP_NOFS);
2402 ret = submit_extent_page(READ, tree, page,
2403 sector, iosize, page_offset, em->bdev,
2404 NULL, 1,
f188591e 2405 end_bio_extent_preparewrite, 0);
d1310b2e
CM
2406 iocount++;
2407 block_start = block_start + iosize;
2408 } else {
2409 set_extent_uptodate(tree, block_start, cur_end,
2410 GFP_NOFS);
2411 unlock_extent(tree, block_start, cur_end, GFP_NOFS);
2412 block_start = cur_end + 1;
2413 }
2414 page_offset = block_start & (PAGE_CACHE_SIZE - 1);
2415 free_extent_map(em);
2416 }
2417 if (iocount) {
2418 wait_extent_bit(tree, orig_block_start,
2419 block_end, EXTENT_LOCKED);
2420 }
2421 check_page_uptodate(tree, page);
2422err:
2423 /* FIXME, zero out newly allocated blocks on error */
2424 return err;
2425}
2426EXPORT_SYMBOL(extent_prepare_write);
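/*
 * illustrative sketch (assumption, not a real caller) of how prepare_write
 * and commit_write pair up for a buffered write; the caller context and the
 * get_extent callback are placeholders:
 *
 *	err = extent_prepare_write(tree, inode, page, from, to,
 *				   example_get_extent);
 *	if (!err) {
 *		copy the user data into the page between 'from' and 'to';
 *		err = extent_commit_write(tree, inode, page, from, to);
 *	}
 */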
2427
7b13b7b1
CM
2428/*
 2429 * a helper for releasepage: this tests for areas of the page that
2430 * are locked or under IO and drops the related state bits if it is safe
2431 * to drop the page.
2432 */
2433int try_release_extent_state(struct extent_map_tree *map,
2434 struct extent_io_tree *tree, struct page *page,
2435 gfp_t mask)
2436{
2437 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2438 u64 end = start + PAGE_CACHE_SIZE - 1;
2439 int ret = 1;
2440
211f90e6
CM
2441 if (test_range_bit(tree, start, end,
2442 EXTENT_IOBITS | EXTENT_ORDERED, 0))
7b13b7b1
CM
2443 ret = 0;
2444 else {
2445 if ((mask & GFP_NOFS) == GFP_NOFS)
2446 mask = GFP_NOFS;
2447 clear_extent_bit(tree, start, end, EXTENT_UPTODATE,
2448 1, 1, mask);
2449 }
2450 return ret;
2451}
2452EXPORT_SYMBOL(try_release_extent_state);
2453
d1310b2e
CM
2454/*
2455 * a helper for releasepage. As long as there are no locked extents
2456 * in the range corresponding to the page, both state records and extent
2457 * map records are removed
2458 */
2459int try_release_extent_mapping(struct extent_map_tree *map,
70dec807
CM
2460 struct extent_io_tree *tree, struct page *page,
2461 gfp_t mask)
d1310b2e
CM
2462{
2463 struct extent_map *em;
2464 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2465 u64 end = start + PAGE_CACHE_SIZE - 1;
7b13b7b1 2466
70dec807
CM
2467 if ((mask & __GFP_WAIT) &&
2468 page->mapping->host->i_size > 16 * 1024 * 1024) {
39b5637f 2469 u64 len;
70dec807 2470 while (start <= end) {
39b5637f 2471 len = end - start + 1;
70dec807 2472 spin_lock(&map->lock);
39b5637f 2473 em = lookup_extent_mapping(map, start, len);
70dec807
CM
2474 if (!em || IS_ERR(em)) {
2475 spin_unlock(&map->lock);
2476 break;
2477 }
7f3c74fb
CM
2478 if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
2479 em->start != start) {
70dec807
CM
2480 spin_unlock(&map->lock);
2481 free_extent_map(em);
2482 break;
2483 }
2484 if (!test_range_bit(tree, em->start,
2485 extent_map_end(em) - 1,
2486 EXTENT_LOCKED, 0)) {
2487 remove_extent_mapping(map, em);
2488 /* once for the rb tree */
2489 free_extent_map(em);
2490 }
2491 start = extent_map_end(em);
d1310b2e 2492 spin_unlock(&map->lock);
70dec807
CM
2493
2494 /* once for us */
d1310b2e
CM
2495 free_extent_map(em);
2496 }
d1310b2e 2497 }
7b13b7b1 2498 return try_release_extent_state(map, tree, page, mask);
d1310b2e
CM
2499}
2500EXPORT_SYMBOL(try_release_extent_mapping);
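/*
 * illustrative sketch (assumption): a ->releasepage implementation passing
 * both its extent_map_tree and extent_io_tree down to the helper above; the
 * tree lookups are placeholders.
 *
 *	static int example_releasepage(struct page *page, gfp_t gfp_flags)
 *	{
 *		struct extent_io_tree *tree = example_io_tree(page->mapping->host);
 *		struct extent_map_tree *map = example_map_tree(page->mapping->host);
 *
 *		return try_release_extent_mapping(map, tree, page, gfp_flags);
 *	}
 */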
2501
2502sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
2503 get_extent_t *get_extent)
2504{
2505 struct inode *inode = mapping->host;
2506 u64 start = iblock << inode->i_blkbits;
2507 sector_t sector = 0;
2508 struct extent_map *em;
2509
2510 em = get_extent(inode, NULL, 0, start, (1 << inode->i_blkbits), 0);
2511 if (!em || IS_ERR(em))
2512 return 0;
2513
2514 if (em->block_start == EXTENT_MAP_INLINE ||
2515 em->block_start == EXTENT_MAP_HOLE)
2516 goto out;
2517
2518 sector = (em->block_start + start - em->start) >> inode->i_blkbits;
d1310b2e
CM
2519out:
2520 free_extent_map(em);
2521 return sector;
2522}
2523
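/*
 * note: extent_bmap() is shaped for use from an address_space ->bmap
 * operation; a caller would typically just forward the mapping and block
 * number along with its own get_extent callback.
 */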
d1310b2e
CM
2524static inline struct page *extent_buffer_page(struct extent_buffer *eb,
2525 unsigned long i)
2526{
2527 struct page *p;
2528 struct address_space *mapping;
2529
2530 if (i == 0)
2531 return eb->first_page;
2532 i += eb->start >> PAGE_CACHE_SHIFT;
2533 mapping = eb->first_page->mapping;
33958dc6
CM
2534 if (!mapping)
2535 return NULL;
0ee0fda0
SW
2536
2537 /*
2538 * extent_buffer_page is only called after pinning the page
2539 * by increasing the reference count. So we know the page must
2540 * be in the radix tree.
2541 */
2542#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
2543 rcu_read_lock();
2544#else
d1310b2e 2545 read_lock_irq(&mapping->tree_lock);
0ee0fda0 2546#endif
d1310b2e 2547 p = radix_tree_lookup(&mapping->page_tree, i);
0ee0fda0
SW
2548
2549#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
2550 rcu_read_unlock();
2551#else
d1310b2e 2552 read_unlock_irq(&mapping->tree_lock);
0ee0fda0 2553#endif
d1310b2e
CM
2554 return p;
2555}
2556
6af118ce 2557static inline unsigned long num_extent_pages(u64 start, u64 len)
728131d8 2558{
6af118ce
CM
2559 return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
2560 (start >> PAGE_CACHE_SHIFT);
728131d8
CM
2561}
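/*
 * worked example (descriptive note, assuming 4K pages): a 4K buffer that
 * starts at offset 6144 spans two pages, and the formula above gives
 * ((6144 + 4096 + 4095) >> 12) - (6144 >> 12) = 3 - 1 = 2.
 */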
2562
d1310b2e
CM
2563static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
2564 u64 start,
2565 unsigned long len,
2566 gfp_t mask)
2567{
2568 struct extent_buffer *eb = NULL;
2d2ae547 2569 unsigned long flags;
d1310b2e 2570
d1310b2e 2571 eb = kmem_cache_zalloc(extent_buffer_cache, mask);
d1310b2e
CM
2572 eb->start = start;
2573 eb->len = len;
a61e6f29 2574 mutex_init(&eb->mutex);
2d2ae547
CM
2575 spin_lock_irqsave(&leak_lock, flags);
2576 list_add(&eb->leak_list, &buffers);
2577 spin_unlock_irqrestore(&leak_lock, flags);
d1310b2e
CM
2578 atomic_set(&eb->refs, 1);
2579
2580 return eb;
2581}
2582
2583static void __free_extent_buffer(struct extent_buffer *eb)
2584{
2d2ae547
CM
2585 unsigned long flags;
2586 spin_lock_irqsave(&leak_lock, flags);
2587 list_del(&eb->leak_list);
2588 spin_unlock_irqrestore(&leak_lock, flags);
d1310b2e
CM
2589 kmem_cache_free(extent_buffer_cache, eb);
2590}
2591
2592struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
2593 u64 start, unsigned long len,
2594 struct page *page0,
2595 gfp_t mask)
2596{
2597 unsigned long num_pages = num_extent_pages(start, len);
2598 unsigned long i;
2599 unsigned long index = start >> PAGE_CACHE_SHIFT;
2600 struct extent_buffer *eb;
6af118ce 2601 struct extent_buffer *exists = NULL;
d1310b2e
CM
2602 struct page *p;
2603 struct address_space *mapping = tree->mapping;
2604 int uptodate = 1;
2605
6af118ce
CM
2606 spin_lock(&tree->buffer_lock);
2607 eb = buffer_search(tree, start);
2608 if (eb) {
2609 atomic_inc(&eb->refs);
2610 spin_unlock(&tree->buffer_lock);
2611 return eb;
2612 }
2613 spin_unlock(&tree->buffer_lock);
2614
d1310b2e 2615 eb = __alloc_extent_buffer(tree, start, len, mask);
2b114d1d 2616 if (!eb)
d1310b2e
CM
2617 return NULL;
2618
d1310b2e
CM
2619 if (page0) {
2620 eb->first_page = page0;
2621 i = 1;
2622 index++;
2623 page_cache_get(page0);
2624 mark_page_accessed(page0);
2625 set_page_extent_mapped(page0);
d1310b2e 2626 set_page_extent_head(page0, len);
f188591e 2627 uptodate = PageUptodate(page0);
d1310b2e
CM
2628 } else {
2629 i = 0;
2630 }
2631 for (; i < num_pages; i++, index++) {
2632 p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
2633 if (!p) {
2634 WARN_ON(1);
6af118ce 2635 goto free_eb;
d1310b2e
CM
2636 }
2637 set_page_extent_mapped(p);
2638 mark_page_accessed(p);
2639 if (i == 0) {
2640 eb->first_page = p;
2641 set_page_extent_head(p, len);
2642 } else {
2643 set_page_private(p, EXTENT_PAGE_PRIVATE);
2644 }
2645 if (!PageUptodate(p))
2646 uptodate = 0;
2647 unlock_page(p);
2648 }
2649 if (uptodate)
2650 eb->flags |= EXTENT_UPTODATE;
2651 eb->flags |= EXTENT_BUFFER_FILLED;
2652
6af118ce
CM
2653 spin_lock(&tree->buffer_lock);
2654 exists = buffer_tree_insert(tree, start, &eb->rb_node);
2655 if (exists) {
2656 /* add one reference for the caller */
2657 atomic_inc(&exists->refs);
2658 spin_unlock(&tree->buffer_lock);
2659 goto free_eb;
2660 }
2661 spin_unlock(&tree->buffer_lock);
2662
2663 /* add one reference for the tree */
2664 atomic_inc(&eb->refs);
d1310b2e
CM
2665 return eb;
2666
6af118ce 2667free_eb:
d1310b2e 2668 if (!atomic_dec_and_test(&eb->refs))
6af118ce
CM
2669 return exists;
2670 for (index = 1; index < i; index++)
d1310b2e 2671 page_cache_release(extent_buffer_page(eb, index));
6af118ce 2672 page_cache_release(extent_buffer_page(eb, 0));
d1310b2e 2673 __free_extent_buffer(eb);
6af118ce 2674 return exists;
d1310b2e
CM
2675}
2676EXPORT_SYMBOL(alloc_extent_buffer);
2677
2678struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
2679 u64 start, unsigned long len,
2680 gfp_t mask)
2681{
d1310b2e 2682 struct extent_buffer *eb;
d1310b2e 2683
6af118ce
CM
2684 spin_lock(&tree->buffer_lock);
2685 eb = buffer_search(tree, start);
2686 if (eb)
2687 atomic_inc(&eb->refs);
2688 spin_unlock(&tree->buffer_lock);
d1310b2e 2689
d1310b2e 2690 return eb;
d1310b2e
CM
2691}
2692EXPORT_SYMBOL(find_extent_buffer);
2693
2694void free_extent_buffer(struct extent_buffer *eb)
2695{
d1310b2e
CM
2696 if (!eb)
2697 return;
2698
2699 if (!atomic_dec_and_test(&eb->refs))
2700 return;
2701
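	/*
	 * the tree holds its own reference (see alloc_extent_buffer), and
	 * the final free happens in try_release_extent_buffer(), so the
	 * count dropping to zero here means the references are unbalanced
	 */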
6af118ce 2702 WARN_ON(1);
d1310b2e
CM
2703}
2704EXPORT_SYMBOL(free_extent_buffer);
2705
2706int clear_extent_buffer_dirty(struct extent_io_tree *tree,
2707 struct extent_buffer *eb)
2708{
2709 int set;
2710 unsigned long i;
2711 unsigned long num_pages;
2712 struct page *page;
2713
2714 u64 start = eb->start;
2715 u64 end = start + eb->len - 1;
2716
2717 set = clear_extent_dirty(tree, start, end, GFP_NOFS);
2718 num_pages = num_extent_pages(eb->start, eb->len);
2719
2720 for (i = 0; i < num_pages; i++) {
2721 page = extent_buffer_page(eb, i);
a61e6f29 2722 lock_page(page);
d1310b2e
CM
2723 if (i == 0)
2724 set_page_extent_head(page, eb->len);
2725 else
2726 set_page_private(page, EXTENT_PAGE_PRIVATE);
2727
2728 /*
2729 * if we're on the last page or the first page and the
2730 * block isn't aligned on a page boundary, do extra checks
 2731 * to make sure we don't clean a page that is partially dirty
2732 */
2733 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2734 ((i == num_pages - 1) &&
2735 ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
2736 start = (u64)page->index << PAGE_CACHE_SHIFT;
2737 end = start + PAGE_CACHE_SIZE - 1;
2738 if (test_range_bit(tree, start, end,
2739 EXTENT_DIRTY, 0)) {
a61e6f29 2740 unlock_page(page);
d1310b2e
CM
2741 continue;
2742 }
2743 }
2744 clear_page_dirty_for_io(page);
0ee0fda0
SW
2745#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
2746 spin_lock_irq(&page->mapping->tree_lock);
2747#else
70dec807 2748 read_lock_irq(&page->mapping->tree_lock);
0ee0fda0 2749#endif
d1310b2e
CM
2750 if (!PageDirty(page)) {
2751 radix_tree_tag_clear(&page->mapping->page_tree,
2752 page_index(page),
2753 PAGECACHE_TAG_DIRTY);
2754 }
0ee0fda0
SW
2755#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
2756 spin_unlock_irq(&page->mapping->tree_lock);
2757#else
70dec807 2758 read_unlock_irq(&page->mapping->tree_lock);
0ee0fda0 2759#endif
a61e6f29 2760 unlock_page(page);
d1310b2e
CM
2761 }
2762 return 0;
2763}
2764EXPORT_SYMBOL(clear_extent_buffer_dirty);
2765
2766int wait_on_extent_buffer_writeback(struct extent_io_tree *tree,
2767 struct extent_buffer *eb)
2768{
2769 return wait_on_extent_writeback(tree, eb->start,
2770 eb->start + eb->len - 1);
2771}
2772EXPORT_SYMBOL(wait_on_extent_buffer_writeback);
2773
2774int set_extent_buffer_dirty(struct extent_io_tree *tree,
2775 struct extent_buffer *eb)
2776{
2777 unsigned long i;
2778 unsigned long num_pages;
2779
2780 num_pages = num_extent_pages(eb->start, eb->len);
2781 for (i = 0; i < num_pages; i++) {
2782 struct page *page = extent_buffer_page(eb, i);
2783 /* writepage may need to do something special for the
 2784 * first page, so we have to make sure page->private is
2785 * properly set. releasepage may drop page->private
2786 * on us if the page isn't already dirty.
2787 */
2788 if (i == 0) {
a61e6f29 2789 lock_page(page);
d1310b2e
CM
2790 set_page_extent_head(page, eb->len);
2791 } else if (PagePrivate(page) &&
2792 page->private != EXTENT_PAGE_PRIVATE) {
a61e6f29 2793 lock_page(page);
d1310b2e 2794 set_page_extent_mapped(page);
a61e6f29 2795 unlock_page(page);
d1310b2e
CM
2796 }
2797 __set_page_dirty_nobuffers(extent_buffer_page(eb, i));
a61e6f29
CM
2798 if (i == 0)
2799 unlock_page(page);
d1310b2e
CM
2800 }
2801 return set_extent_dirty(tree, eb->start,
2802 eb->start + eb->len - 1, GFP_NOFS);
2803}
2804EXPORT_SYMBOL(set_extent_buffer_dirty);
2805
1259ab75
CM
2806int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
2807 struct extent_buffer *eb)
2808{
2809 unsigned long i;
2810 struct page *page;
2811 unsigned long num_pages;
2812
2813 num_pages = num_extent_pages(eb->start, eb->len);
2814 eb->flags &= ~EXTENT_UPTODATE;
2815
2816 clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
2817 GFP_NOFS);
2818 for (i = 0; i < num_pages; i++) {
2819 page = extent_buffer_page(eb, i);
33958dc6
CM
2820 if (page)
2821 ClearPageUptodate(page);
1259ab75
CM
2822 }
2823 return 0;
2824}
2825
d1310b2e
CM
2826int set_extent_buffer_uptodate(struct extent_io_tree *tree,
2827 struct extent_buffer *eb)
2828{
2829 unsigned long i;
2830 struct page *page;
2831 unsigned long num_pages;
2832
2833 num_pages = num_extent_pages(eb->start, eb->len);
2834
2835 set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
2836 GFP_NOFS);
2837 for (i = 0; i < num_pages; i++) {
2838 page = extent_buffer_page(eb, i);
2839 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2840 ((i == num_pages - 1) &&
2841 ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
2842 check_page_uptodate(tree, page);
2843 continue;
2844 }
2845 SetPageUptodate(page);
2846 }
2847 return 0;
2848}
2849EXPORT_SYMBOL(set_extent_buffer_uptodate);
2850
ce9adaa5
CM
2851int extent_range_uptodate(struct extent_io_tree *tree,
2852 u64 start, u64 end)
2853{
2854 struct page *page;
2855 int ret;
2856 int pg_uptodate = 1;
2857 int uptodate;
2858 unsigned long index;
2859
2860 ret = test_range_bit(tree, start, end, EXTENT_UPTODATE, 1);
2861 if (ret)
2862 return 1;
2863 while(start <= end) {
2864 index = start >> PAGE_CACHE_SHIFT;
2865 page = find_get_page(tree->mapping, index);
2866 uptodate = PageUptodate(page);
2867 page_cache_release(page);
2868 if (!uptodate) {
2869 pg_uptodate = 0;
2870 break;
2871 }
2872 start += PAGE_CACHE_SIZE;
2873 }
2874 return pg_uptodate;
2875}
2876
d1310b2e 2877int extent_buffer_uptodate(struct extent_io_tree *tree,
ce9adaa5 2878 struct extent_buffer *eb)
d1310b2e 2879{
728131d8 2880 int ret = 0;
ce9adaa5
CM
2881 unsigned long num_pages;
2882 unsigned long i;
728131d8
CM
2883 struct page *page;
2884 int pg_uptodate = 1;
2885
d1310b2e 2886 if (eb->flags & EXTENT_UPTODATE)
4235298e 2887 return 1;
728131d8 2888
4235298e 2889 ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
d1310b2e 2890 EXTENT_UPTODATE, 1);
4235298e
CM
2891 if (ret)
2892 return ret;
728131d8
CM
2893
2894 num_pages = num_extent_pages(eb->start, eb->len);
2895 for (i = 0; i < num_pages; i++) {
2896 page = extent_buffer_page(eb, i);
2897 if (!PageUptodate(page)) {
2898 pg_uptodate = 0;
2899 break;
2900 }
2901 }
4235298e 2902 return pg_uptodate;
d1310b2e
CM
2903}
2904EXPORT_SYMBOL(extent_buffer_uptodate);
2905
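/*
 * read the pages backing an extent buffer (descriptive note): every page is
 * locked up front so the uptodate state can be checked as a group, reads
 * are issued only for pages that are not already uptodate, and when 'wait'
 * is set the caller blocks until all of the reads have completed.
 */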
2906int read_extent_buffer_pages(struct extent_io_tree *tree,
2907 struct extent_buffer *eb,
a86c12c7 2908 u64 start, int wait,
f188591e 2909 get_extent_t *get_extent, int mirror_num)
d1310b2e
CM
2910{
2911 unsigned long i;
2912 unsigned long start_i;
2913 struct page *page;
2914 int err;
2915 int ret = 0;
ce9adaa5
CM
2916 int locked_pages = 0;
2917 int all_uptodate = 1;
2918 int inc_all_pages = 0;
d1310b2e 2919 unsigned long num_pages;
a86c12c7
CM
2920 struct bio *bio = NULL;
2921
d1310b2e
CM
2922 if (eb->flags & EXTENT_UPTODATE)
2923 return 0;
2924
ce9adaa5 2925 if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
d1310b2e
CM
2926 EXTENT_UPTODATE, 1)) {
2927 return 0;
2928 }
2929
2930 if (start) {
2931 WARN_ON(start < eb->start);
2932 start_i = (start >> PAGE_CACHE_SHIFT) -
2933 (eb->start >> PAGE_CACHE_SHIFT);
2934 } else {
2935 start_i = 0;
2936 }
2937
2938 num_pages = num_extent_pages(eb->start, eb->len);
2939 for (i = start_i; i < num_pages; i++) {
2940 page = extent_buffer_page(eb, i);
d1310b2e 2941 if (!wait) {
2db04966 2942 if (!trylock_page(page))
ce9adaa5 2943 goto unlock_exit;
d1310b2e
CM
2944 } else {
2945 lock_page(page);
2946 }
ce9adaa5 2947 locked_pages++;
d1310b2e 2948 if (!PageUptodate(page)) {
ce9adaa5
CM
2949 all_uptodate = 0;
2950 }
2951 }
2952 if (all_uptodate) {
2953 if (start_i == 0)
2954 eb->flags |= EXTENT_UPTODATE;
2955 goto unlock_exit;
2956 }
2957
2958 for (i = start_i; i < num_pages; i++) {
2959 page = extent_buffer_page(eb, i);
2960 if (inc_all_pages)
2961 page_cache_get(page);
2962 if (!PageUptodate(page)) {
2963 if (start_i == 0)
2964 inc_all_pages = 1;
f188591e 2965 ClearPageError(page);
a86c12c7 2966 err = __extent_read_full_page(tree, page,
f188591e
CM
2967 get_extent, &bio,
2968 mirror_num);
d1310b2e
CM
2969 if (err) {
2970 ret = err;
2971 }
2972 } else {
2973 unlock_page(page);
2974 }
2975 }
2976
a86c12c7 2977 if (bio)
f188591e 2978 submit_one_bio(READ, bio, mirror_num);
a86c12c7 2979
d1310b2e
CM
2980 if (ret || !wait) {
2981 return ret;
2982 }
d1310b2e
CM
2983 for (i = start_i; i < num_pages; i++) {
2984 page = extent_buffer_page(eb, i);
2985 wait_on_page_locked(page);
2986 if (!PageUptodate(page)) {
2987 ret = -EIO;
2988 }
2989 }
2990 if (!ret)
2991 eb->flags |= EXTENT_UPTODATE;
2992 return ret;
ce9adaa5
CM
2993
2994unlock_exit:
2995 i = start_i;
2996 while(locked_pages > 0) {
2997 page = extent_buffer_page(eb, i);
2998 i++;
2999 unlock_page(page);
3000 locked_pages--;
3001 }
3002 return ret;
d1310b2e
CM
3003}
3004EXPORT_SYMBOL(read_extent_buffer_pages);
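/*
 * illustrative sketch (assumption, not a real caller) of the usual read
 * path for a metadata block: allocate or find the buffer, read its pages,
 * then copy data out; 'tree', 'start', 'blocksize', 'dst' and the
 * get_extent callback are placeholders supplied by the caller.
 *
 *	struct extent_buffer *eb;
 *
 *	eb = alloc_extent_buffer(tree, start, blocksize, NULL, GFP_NOFS);
 *	if (eb) {
 *		read_extent_buffer_pages(tree, eb, 0, 1, example_get_extent, 0);
 *		read_extent_buffer(eb, dst, 0, eb->len);
 *		free_extent_buffer(eb);
 *	}
 */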
3005
3006void read_extent_buffer(struct extent_buffer *eb, void *dstv,
3007 unsigned long start,
3008 unsigned long len)
3009{
3010 size_t cur;
3011 size_t offset;
3012 struct page *page;
3013 char *kaddr;
3014 char *dst = (char *)dstv;
3015 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3016 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
d1310b2e
CM
3017
3018 WARN_ON(start > eb->len);
3019 WARN_ON(start + len > eb->start + eb->len);
3020
3021 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3022
3023 while(len > 0) {
3024 page = extent_buffer_page(eb, i);
d1310b2e
CM
3025
3026 cur = min(len, (PAGE_CACHE_SIZE - offset));
3027 kaddr = kmap_atomic(page, KM_USER1);
3028 memcpy(dst, kaddr + offset, cur);
3029 kunmap_atomic(kaddr, KM_USER1);
3030
3031 dst += cur;
3032 len -= cur;
3033 offset = 0;
3034 i++;
3035 }
3036}
3037EXPORT_SYMBOL(read_extent_buffer);
3038
3039int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
3040 unsigned long min_len, char **token, char **map,
3041 unsigned long *map_start,
3042 unsigned long *map_len, int km)
3043{
3044 size_t offset = start & (PAGE_CACHE_SIZE - 1);
3045 char *kaddr;
3046 struct page *p;
3047 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3048 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3049 unsigned long end_i = (start_offset + start + min_len - 1) >>
3050 PAGE_CACHE_SHIFT;
3051
3052 if (i != end_i)
3053 return -EINVAL;
3054
3055 if (i == 0) {
3056 offset = start_offset;
3057 *map_start = 0;
3058 } else {
3059 offset = 0;
3060 *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
3061 }
3062 if (start + min_len > eb->len) {
3063printk("bad mapping eb start %Lu len %lu, wanted %lu %lu\n", eb->start, eb->len, start, min_len);
3064 WARN_ON(1);
3065 }
3066
3067 p = extent_buffer_page(eb, i);
d1310b2e
CM
3068 kaddr = kmap_atomic(p, km);
3069 *token = kaddr;
3070 *map = kaddr + offset;
3071 *map_len = PAGE_CACHE_SIZE - offset;
3072 return 0;
3073}
3074EXPORT_SYMBOL(map_private_extent_buffer);
3075
3076int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
3077 unsigned long min_len,
3078 char **token, char **map,
3079 unsigned long *map_start,
3080 unsigned long *map_len, int km)
3081{
3082 int err;
3083 int save = 0;
3084 if (eb->map_token) {
3085 unmap_extent_buffer(eb, eb->map_token, km);
3086 eb->map_token = NULL;
3087 save = 1;
3088 }
3089 err = map_private_extent_buffer(eb, start, min_len, token, map,
3090 map_start, map_len, km);
3091 if (!err && save) {
3092 eb->map_token = *token;
3093 eb->kaddr = *map;
3094 eb->map_start = *map_start;
3095 eb->map_len = *map_len;
3096 }
3097 return err;
3098}
3099EXPORT_SYMBOL(map_extent_buffer);
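/*
 * illustrative sketch (assumption, not a real caller): mapping a small,
 * page-contained region of an extent buffer for direct access and then
 * releasing it; 'eb' and 'offset' are placeholders.
 *
 *	char *token;
 *	char *kaddr;
 *	unsigned long map_start;
 *	unsigned long map_len;
 *	u32 val;
 *
 *	if (!map_extent_buffer(eb, offset, sizeof(u32), &token, &kaddr,
 *			       &map_start, &map_len, KM_USER0)) {
 *		val = le32_to_cpu(*(__le32 *)(kaddr + offset - map_start));
 *		unmap_extent_buffer(eb, token, KM_USER0);
 *	}
 */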
3100
3101void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
3102{
3103 kunmap_atomic(token, km);
3104}
3105EXPORT_SYMBOL(unmap_extent_buffer);
3106
3107int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
3108 unsigned long start,
3109 unsigned long len)
3110{
3111 size_t cur;
3112 size_t offset;
3113 struct page *page;
3114 char *kaddr;
3115 char *ptr = (char *)ptrv;
3116 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3117 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3118 int ret = 0;
3119
3120 WARN_ON(start > eb->len);
3121 WARN_ON(start + len > eb->start + eb->len);
3122
3123 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3124
3125 while(len > 0) {
3126 page = extent_buffer_page(eb, i);
d1310b2e
CM
3127
3128 cur = min(len, (PAGE_CACHE_SIZE - offset));
3129
3130 kaddr = kmap_atomic(page, KM_USER0);
3131 ret = memcmp(ptr, kaddr + offset, cur);
3132 kunmap_atomic(kaddr, KM_USER0);
3133 if (ret)
3134 break;
3135
3136 ptr += cur;
3137 len -= cur;
3138 offset = 0;
3139 i++;
3140 }
3141 return ret;
3142}
3143EXPORT_SYMBOL(memcmp_extent_buffer);
3144
3145void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
3146 unsigned long start, unsigned long len)
3147{
3148 size_t cur;
3149 size_t offset;
3150 struct page *page;
3151 char *kaddr;
3152 char *src = (char *)srcv;
3153 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3154 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3155
3156 WARN_ON(start > eb->len);
3157 WARN_ON(start + len > eb->start + eb->len);
3158
3159 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3160
3161 while(len > 0) {
3162 page = extent_buffer_page(eb, i);
3163 WARN_ON(!PageUptodate(page));
3164
3165 cur = min(len, PAGE_CACHE_SIZE - offset);
3166 kaddr = kmap_atomic(page, KM_USER1);
3167 memcpy(kaddr + offset, src, cur);
3168 kunmap_atomic(kaddr, KM_USER1);
3169
3170 src += cur;
3171 len -= cur;
3172 offset = 0;
3173 i++;
3174 }
3175}
3176EXPORT_SYMBOL(write_extent_buffer);
3177
3178void memset_extent_buffer(struct extent_buffer *eb, char c,
3179 unsigned long start, unsigned long len)
3180{
3181 size_t cur;
3182 size_t offset;
3183 struct page *page;
3184 char *kaddr;
3185 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3186 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3187
3188 WARN_ON(start > eb->len);
3189 WARN_ON(start + len > eb->start + eb->len);
3190
3191 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3192
3193 while(len > 0) {
3194 page = extent_buffer_page(eb, i);
3195 WARN_ON(!PageUptodate(page));
3196
3197 cur = min(len, PAGE_CACHE_SIZE - offset);
3198 kaddr = kmap_atomic(page, KM_USER0);
3199 memset(kaddr + offset, c, cur);
3200 kunmap_atomic(kaddr, KM_USER0);
3201
3202 len -= cur;
3203 offset = 0;
3204 i++;
3205 }
3206}
3207EXPORT_SYMBOL(memset_extent_buffer);
3208
3209void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
3210 unsigned long dst_offset, unsigned long src_offset,
3211 unsigned long len)
3212{
3213 u64 dst_len = dst->len;
3214 size_t cur;
3215 size_t offset;
3216 struct page *page;
3217 char *kaddr;
3218 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3219 unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3220
3221 WARN_ON(src->len != dst_len);
3222
3223 offset = (start_offset + dst_offset) &
3224 ((unsigned long)PAGE_CACHE_SIZE - 1);
3225
3226 while(len > 0) {
3227 page = extent_buffer_page(dst, i);
3228 WARN_ON(!PageUptodate(page));
3229
3230 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
3231
3232 kaddr = kmap_atomic(page, KM_USER0);
3233 read_extent_buffer(src, kaddr + offset, src_offset, cur);
3234 kunmap_atomic(kaddr, KM_USER0);
3235
3236 src_offset += cur;
3237 len -= cur;
3238 offset = 0;
3239 i++;
3240 }
3241}
3242EXPORT_SYMBOL(copy_extent_buffer);
3243
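/*
 * descriptive note on the two helpers below: move_pages() copies from the
 * end of the range toward the start (and uses memmove within a single
 * page), which lets memmove_extent_buffer() walk overlapping ranges from
 * the tail; copy_pages() does a plain forward memcpy and is used when
 * copying forward is safe.
 */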
3244static void move_pages(struct page *dst_page, struct page *src_page,
3245 unsigned long dst_off, unsigned long src_off,
3246 unsigned long len)
3247{
3248 char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3249 if (dst_page == src_page) {
3250 memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
3251 } else {
3252 char *src_kaddr = kmap_atomic(src_page, KM_USER1);
3253 char *p = dst_kaddr + dst_off + len;
3254 char *s = src_kaddr + src_off + len;
3255
3256 while (len--)
3257 *--p = *--s;
3258
3259 kunmap_atomic(src_kaddr, KM_USER1);
3260 }
3261 kunmap_atomic(dst_kaddr, KM_USER0);
3262}
3263
3264static void copy_pages(struct page *dst_page, struct page *src_page,
3265 unsigned long dst_off, unsigned long src_off,
3266 unsigned long len)
3267{
3268 char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3269 char *src_kaddr;
3270
3271 if (dst_page != src_page)
3272 src_kaddr = kmap_atomic(src_page, KM_USER1);
3273 else
3274 src_kaddr = dst_kaddr;
3275
3276 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
3277 kunmap_atomic(dst_kaddr, KM_USER0);
3278 if (dst_page != src_page)
3279 kunmap_atomic(src_kaddr, KM_USER1);
3280}
3281
3282void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3283 unsigned long src_offset, unsigned long len)
3284{
3285 size_t cur;
3286 size_t dst_off_in_page;
3287 size_t src_off_in_page;
3288 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3289 unsigned long dst_i;
3290 unsigned long src_i;
3291
3292 if (src_offset + len > dst->len) {
3293 printk("memmove bogus src_offset %lu move len %lu len %lu\n",
3294 src_offset, len, dst->len);
3295 BUG_ON(1);
3296 }
3297 if (dst_offset + len > dst->len) {
3298 printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
3299 dst_offset, len, dst->len);
3300 BUG_ON(1);
3301 }
3302
3303 while(len > 0) {
3304 dst_off_in_page = (start_offset + dst_offset) &
3305 ((unsigned long)PAGE_CACHE_SIZE - 1);
3306 src_off_in_page = (start_offset + src_offset) &
3307 ((unsigned long)PAGE_CACHE_SIZE - 1);
3308
3309 dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3310 src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
3311
3312 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
3313 src_off_in_page));
3314 cur = min_t(unsigned long, cur,
3315 (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
3316
3317 copy_pages(extent_buffer_page(dst, dst_i),
3318 extent_buffer_page(dst, src_i),
3319 dst_off_in_page, src_off_in_page, cur);
3320
3321 src_offset += cur;
3322 dst_offset += cur;
3323 len -= cur;
3324 }
3325}
3326EXPORT_SYMBOL(memcpy_extent_buffer);
3327
3328void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3329 unsigned long src_offset, unsigned long len)
3330{
3331 size_t cur;
3332 size_t dst_off_in_page;
3333 size_t src_off_in_page;
3334 unsigned long dst_end = dst_offset + len - 1;
3335 unsigned long src_end = src_offset + len - 1;
3336 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3337 unsigned long dst_i;
3338 unsigned long src_i;
3339
3340 if (src_offset + len > dst->len) {
3341 printk("memmove bogus src_offset %lu move len %lu len %lu\n",
3342 src_offset, len, dst->len);
3343 BUG_ON(1);
3344 }
3345 if (dst_offset + len > dst->len) {
3346 printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
3347 dst_offset, len, dst->len);
3348 BUG_ON(1);
3349 }
3350 if (dst_offset < src_offset) {
3351 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
3352 return;
3353 }
3354 while(len > 0) {
3355 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
3356 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
3357
3358 dst_off_in_page = (start_offset + dst_end) &
3359 ((unsigned long)PAGE_CACHE_SIZE - 1);
3360 src_off_in_page = (start_offset + src_end) &
3361 ((unsigned long)PAGE_CACHE_SIZE - 1);
3362
3363 cur = min_t(unsigned long, len, src_off_in_page + 1);
3364 cur = min(cur, dst_off_in_page + 1);
3365 move_pages(extent_buffer_page(dst, dst_i),
3366 extent_buffer_page(dst, src_i),
3367 dst_off_in_page - cur + 1,
3368 src_off_in_page - cur + 1, cur);
3369
3370 dst_end -= cur;
3371 src_end -= cur;
3372 len -= cur;
3373 }
3374}
3375EXPORT_SYMBOL(memmove_extent_buffer);
6af118ce
CM
3376
3377int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
3378{
3379 u64 start = page_offset(page);
3380 struct extent_buffer *eb;
3381 int ret = 1;
3382 unsigned long i;
3383 unsigned long num_pages;
3384
3385 spin_lock(&tree->buffer_lock);
3386 eb = buffer_search(tree, start);
3387 if (!eb)
3388 goto out;
3389
3390 if (atomic_read(&eb->refs) > 1) {
3391 ret = 0;
3392 goto out;
3393 }
3394 /* at this point we can safely release the extent buffer */
3395 num_pages = num_extent_pages(eb->start, eb->len);
3396 for (i = 0; i < num_pages; i++) {
3397 struct page *page = extent_buffer_page(eb, i);
3398 page_cache_release(page);
3399 }
3400 rb_erase(&eb->rb_node, &tree->buffer);
3401 __free_extent_buffer(eb);
3402out:
3403 spin_unlock(&tree->buffer_lock);
3404 return ret;
3405}
3406EXPORT_SYMBOL(try_release_extent_buffer);