#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/version.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "extent_map.h"

/* temporary define until extent_map moves out of btrfs */
struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
				       unsigned long extra_flags,
				       void (*ctor)(void *, struct kmem_cache *,
						    unsigned long));

static struct kmem_cache *extent_map_cache;
static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;

static LIST_HEAD(buffers);
static LIST_HEAD(states);

static DEFINE_SPINLOCK(state_lock);
#define BUFFER_LRU_MAX 64

struct tree_entry {
	u64 start;
	u64 end;
	int in_tree;
	struct rb_node rb_node;
};

struct extent_page_data {
	struct bio *bio;
	struct extent_map_tree *tree;
	get_extent_t *get_extent;
};

int __init extent_map_init(void)
{
	extent_map_cache = btrfs_cache_create("extent_map",
					      sizeof(struct extent_map), 0,
					      NULL);
	if (!extent_map_cache)
		return -ENOMEM;
	extent_state_cache = btrfs_cache_create("extent_state",
						sizeof(struct extent_state), 0,
						NULL);
	if (!extent_state_cache)
		goto free_map_cache;
	extent_buffer_cache = btrfs_cache_create("extent_buffers",
						 sizeof(struct extent_buffer), 0,
						 NULL);
	if (!extent_buffer_cache)
		goto free_state_cache;
	return 0;

free_state_cache:
	kmem_cache_destroy(extent_state_cache);
free_map_cache:
	kmem_cache_destroy(extent_map_cache);
	return -ENOMEM;
}

void __exit extent_map_exit(void)
{
	struct extent_state *state;

	while (!list_empty(&states)) {
		state = list_entry(states.next, struct extent_state, list);
		printk("state leak: start %Lu end %Lu state %lu in tree %d refs %d\n",
		       state->start, state->end, state->state, state->in_tree,
		       atomic_read(&state->refs));
		list_del(&state->list);
		kmem_cache_free(extent_state_cache, state);
	}

	if (extent_map_cache)
		kmem_cache_destroy(extent_map_cache);
	if (extent_state_cache)
		kmem_cache_destroy(extent_state_cache);
	if (extent_buffer_cache)
		kmem_cache_destroy(extent_buffer_cache);
}

void extent_map_tree_init(struct extent_map_tree *tree,
			  struct address_space *mapping, gfp_t mask)
{
	tree->map.rb_node = NULL;
	tree->state.rb_node = NULL;
	tree->ops = NULL;
	rwlock_init(&tree->lock);
	spin_lock_init(&tree->lru_lock);
	tree->mapping = mapping;
	INIT_LIST_HEAD(&tree->buffer_lru);
	tree->lru_size = 0;
}
EXPORT_SYMBOL(extent_map_tree_init);
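
/*
 * Usage sketch (an editorial illustration, not part of the original file):
 * a filesystem would typically embed an extent_map_tree in its per-inode
 * structure and initialize it once the VFS inode exists.  The container
 * struct and function names below are hypothetical.
 *
 *	struct example_inode {
 *		struct extent_map_tree extent_tree;
 *		struct inode vfs_inode;
 *	};
 *
 *	static void example_inode_init(struct example_inode *ei)
 *	{
 *		extent_map_tree_init(&ei->extent_tree,
 *				     ei->vfs_inode.i_mapping, GFP_NOFS);
 *	}
 */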

void extent_map_tree_empty_lru(struct extent_map_tree *tree)
{
	struct extent_buffer *eb;

	while (!list_empty(&tree->buffer_lru)) {
		eb = list_entry(tree->buffer_lru.next, struct extent_buffer,
				lru);
		list_del_init(&eb->lru);
		free_extent_buffer(eb);
	}
}
EXPORT_SYMBOL(extent_map_tree_empty_lru);

struct extent_map *alloc_extent_map(gfp_t mask)
{
	struct extent_map *em;
	em = kmem_cache_alloc(extent_map_cache, mask);
	if (!em || IS_ERR(em))
		return em;
	em->in_tree = 0;
	atomic_set(&em->refs, 1);
	return em;
}
EXPORT_SYMBOL(alloc_extent_map);

void free_extent_map(struct extent_map *em)
{
	if (!em)
		return;
	if (atomic_dec_and_test(&em->refs)) {
		WARN_ON(em->in_tree);
		kmem_cache_free(extent_map_cache, em);
	}
}
EXPORT_SYMBOL(free_extent_map);

struct extent_state *alloc_extent_state(gfp_t mask)
{
	struct extent_state *state;
	unsigned long flags;

	state = kmem_cache_alloc(extent_state_cache, mask);
	if (!state || IS_ERR(state))
		return state;
	state->state = 0;
	state->in_tree = 0;
	state->private = 0;

	spin_lock_irqsave(&state_lock, flags);
	list_add(&state->list, &states);
	spin_unlock_irqrestore(&state_lock, flags);

	atomic_set(&state->refs, 1);
	init_waitqueue_head(&state->wq);
	return state;
}
EXPORT_SYMBOL(alloc_extent_state);

void free_extent_state(struct extent_state *state)
{
	unsigned long flags;
	if (!state)
		return;
	if (atomic_dec_and_test(&state->refs)) {
		WARN_ON(state->in_tree);
		spin_lock_irqsave(&state_lock, flags);
		list_del(&state->list);
		spin_unlock_irqrestore(&state_lock, flags);
		kmem_cache_free(extent_state_cache, state);
	}
}
EXPORT_SYMBOL(free_extent_state);

static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset > entry->end)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	entry = rb_entry(node, struct tree_entry, rb_node);
	entry->in_tree = 1;
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct tree_entry *entry;
	struct tree_entry *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct tree_entry, rb_node);
		prev = n;
		prev_entry = entry;

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset > entry->end)
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;
	while (prev && offset > prev_entry->end) {
		prev = rb_next(prev);
		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
	}
	*prev_ret = prev;
	return NULL;
}

static inline struct rb_node *tree_search(struct rb_root *root, u64 offset)
{
	struct rb_node *prev;
	struct rb_node *ret;
	ret = __tree_search(root, offset, &prev);
	if (!ret)
		return prev;
	return ret;
}

static int tree_delete(struct rb_root *root, u64 offset)
{
	struct rb_node *node;
	struct tree_entry *entry;

	node = __tree_search(root, offset, NULL);
	if (!node)
		return -ENOENT;
	entry = rb_entry(node, struct tree_entry, rb_node);
	entry->in_tree = 0;
	rb_erase(node, root);
	return 0;
}

/*
 * add_extent_mapping tries a simple backward merge with existing
 * mappings.  The extent_map struct passed in will be inserted into
 * the tree directly (no copies made, just a reference taken).
 */
int add_extent_mapping(struct extent_map_tree *tree,
		       struct extent_map *em)
{
	int ret = 0;
	struct extent_map *prev = NULL;
	struct rb_node *rb;

	write_lock_irq(&tree->lock);
	rb = tree_insert(&tree->map, em->end, &em->rb_node);
	if (rb) {
		prev = rb_entry(rb, struct extent_map, rb_node);
		printk("found extent map %Lu %Lu on insert of %Lu %Lu\n",
		       prev->start, prev->end, em->start, em->end);
		ret = -EEXIST;
		goto out;
	}
	atomic_inc(&em->refs);
	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			prev = rb_entry(rb, struct extent_map, rb_node);
		if (prev && prev->end + 1 == em->start &&
		    ((em->block_start == EXTENT_MAP_HOLE &&
		      prev->block_start == EXTENT_MAP_HOLE) ||
		     (em->block_start == EXTENT_MAP_INLINE &&
		      prev->block_start == EXTENT_MAP_INLINE) ||
		     (em->block_start == EXTENT_MAP_DELALLOC &&
		      prev->block_start == EXTENT_MAP_DELALLOC) ||
		     (em->block_start < EXTENT_MAP_DELALLOC - 1 &&
		      em->block_start == prev->block_end + 1))) {
			em->start = prev->start;
			em->block_start = prev->block_start;
			rb_erase(&prev->rb_node, &tree->map);
			prev->in_tree = 0;
			free_extent_map(prev);
		}
	}
out:
	write_unlock_irq(&tree->lock);
	return ret;
}
EXPORT_SYMBOL(add_extent_mapping);
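
/*
 * Usage sketch (editorial, assuming the reference counting shown above):
 * add_extent_mapping takes its own reference on success, so the caller
 * still owns the reference returned by alloc_extent_map and must drop it
 * when done with the map.  The range below is an arbitrary example.
 *
 *	static int example_insert_hole(struct extent_map_tree *tree)
 *	{
 *		struct extent_map *em;
 *		int ret;
 *
 *		em = alloc_extent_map(GFP_NOFS);
 *		if (!em || IS_ERR(em))
 *			return -ENOMEM;
 *		em->start = 0;
 *		em->end = 4095;
 *		em->block_start = EXTENT_MAP_HOLE;
 *		em->block_end = EXTENT_MAP_HOLE;
 *		ret = add_extent_mapping(tree, em);
 *		free_extent_map(em);
 *		return ret;
 *	}
 */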

/*
 * lookup_extent_mapping returns the first extent_map struct in the
 * tree that intersects the [start, end] (inclusive) range.  There may
 * be additional objects in the tree that intersect, so check the object
 * returned carefully to make sure you don't need additional lookups.
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 end)
{
	struct extent_map *em;
	struct rb_node *rb_node;

	read_lock_irq(&tree->lock);
	rb_node = tree_search(&tree->map, start);
	if (!rb_node) {
		em = NULL;
		goto out;
	}
	if (IS_ERR(rb_node)) {
		em = ERR_PTR(PTR_ERR(rb_node));
		goto out;
	}
	em = rb_entry(rb_node, struct extent_map, rb_node);
	if (em->end < start || em->start > end) {
		em = NULL;
		goto out;
	}
	atomic_inc(&em->refs);
out:
	read_unlock_irq(&tree->lock);
	return em;
}
EXPORT_SYMBOL(lookup_extent_mapping);
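
/*
 * Usage sketch (editorial): since only the first intersecting mapping is
 * returned, walking everything that overlaps a range means repeating the
 * lookup past the end of each mapping found:
 *
 *	u64 cur = start;
 *	struct extent_map *em;
 *
 *	while (cur <= end) {
 *		em = lookup_extent_mapping(tree, cur, end);
 *		if (!em || IS_ERR(em))
 *			break;
 *		cur = em->end + 1;
 *		free_extent_map(em);
 *	}
 */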

/*
 * removes an extent_map struct from the tree.  No reference counts are
 * dropped, and no checks are done to see if the range is in use.
 */
int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
	int ret;

	write_lock_irq(&tree->lock);
	ret = tree_delete(&tree->map, em->end);
	write_unlock_irq(&tree->lock);
	return ret;
}
EXPORT_SYMBOL(remove_extent_mapping);

/*
 * utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree.  Extents with EXTENT_IO in their state field
 * are not merged because the end_io handlers need to be able to do
 * operations on them without sleeping (or doing allocations/splits).
 *
 * This should be called with the tree lock held.
 */
static int merge_state(struct extent_map_tree *tree,
		       struct extent_state *state)
{
	struct extent_state *other;
	struct rb_node *other_node;

	if (state->state & EXTENT_IOBITS)
		return 0;

	other_node = rb_prev(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->end == state->start - 1 &&
		    other->state == state->state) {
			state->start = other->start;
			other->in_tree = 0;
			rb_erase(&other->rb_node, &tree->state);
			free_extent_state(other);
		}
	}
	other_node = rb_next(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->start == state->end + 1 &&
		    other->state == state->state) {
			other->start = state->start;
			state->in_tree = 0;
			rb_erase(&state->rb_node, &tree->state);
			free_extent_state(state);
		}
	}
	return 0;
}

/*
 * insert an extent_state struct into the tree.  'bits' are set on the
 * struct before it is inserted.
 *
 * This may return -EEXIST if the extent is already there, in which case the
 * state struct is freed.
 *
 * The tree lock is not taken internally.  This is a utility function and
 * probably isn't what you want to call (see set/clear_extent_bit).
 */
static int insert_state(struct extent_map_tree *tree,
			struct extent_state *state, u64 start, u64 end,
			int bits)
{
	struct rb_node *node;

	if (end < start) {
		printk("end < start %Lu %Lu\n", end, start);
		WARN_ON(1);
	}
	state->state |= bits;
	state->start = start;
	state->end = end;
	node = tree_insert(&tree->state, end, &state->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk("found node %Lu %Lu on insert of %Lu %Lu\n",
		       found->start, found->end, start, end);
		free_extent_state(state);
		return -EEXIST;
	}
	merge_state(tree, state);
	return 0;
}

/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created second half.  'split' indicates an
 * offset inside 'orig' where it should be split.
 *
 * Before calling,
 * the tree has 'orig' at [orig->start, orig->end].  After calling, there
 * are two extent state structs in the tree:
 * prealloc: [orig->start, split - 1]
 * orig: [split, orig->end]
 *
 * The tree locks are not taken by this function.  They need to be held
 * by the caller.
 */
static int split_state(struct extent_map_tree *tree, struct extent_state *orig,
		       struct extent_state *prealloc, u64 split)
{
	struct rb_node *node;
	prealloc->start = orig->start;
	prealloc->end = split - 1;
	prealloc->state = orig->state;
	orig->start = split;

	node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk("found node %Lu %Lu on insert of %Lu %Lu\n",
		       found->start, found->end, prealloc->start,
		       prealloc->end);
		free_extent_state(prealloc);
		return -EEXIST;
	}
	return 0;
}

/*
 * utility function to clear some bits in an extent state struct.
 * it will optionally wake up anyone waiting on this state (wake == 1), or
 * forcibly remove the state from the tree (delete == 1).
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree
 */
static int clear_state_bit(struct extent_map_tree *tree,
			   struct extent_state *state, int bits, int wake,
			   int delete)
{
	int ret = state->state & bits;
	state->state &= ~bits;
	if (wake)
		wake_up(&state->wq);
	if (delete || state->state == 0) {
		if (state->in_tree) {
			rb_erase(&state->rb_node, &tree->state);
			state->in_tree = 0;
			free_extent_state(state);
		} else {
			WARN_ON(1);
		}
	} else {
		merge_state(tree, state);
	}
	return ret;
}

/*
 * clear some bits on a range in the tree.  This may require splitting
 * or inserting elements in the tree, so the gfp mask is used to
 * indicate which allocations or sleeping are allowed.
 *
 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 * the given range from the tree regardless of state (i.e., for truncate).
 *
 * the range [start, end] is inclusive.
 *
 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
 * bits were already set, or zero if none of the bits were already set.
 */
int clear_extent_bit(struct extent_map_tree *tree, u64 start, u64 end,
		     int bits, int wake, int delete, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	unsigned long flags;
	int err;
	int set = 0;

again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	write_lock_irqsave(&tree->lock, flags);
	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = tree_search(&tree->state, start);
	if (!node)
		goto out;
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start > end)
		goto out;
	WARN_ON(state->end < start);

	/*
	 *     | ---- desired range ---- |
	 *  | state | or
	 *  | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip
	 * bits on second half.
	 *
	 * If the extent we found extends past our range, we
	 * just split and search again.  It'll get split again
	 * the next time though.
	 *
	 * If the extent we found is inside our range, we clear
	 * the desired bit on it.
	 */
	if (state->start < start) {
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			start = state->end + 1;
			set |= clear_state_bit(tree, state, bits,
					       wake, delete);
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and clear the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		if (wake)
			wake_up(&state->wq);
		set |= clear_state_bit(tree, prealloc, bits,
				       wake, delete);
		prealloc = NULL;
		goto out;
	}

	start = state->end + 1;
	set |= clear_state_bit(tree, state, bits, wake, delete);
	goto search_again;

out:
	write_unlock_irqrestore(&tree->lock, flags);
	if (prealloc)
		free_extent_state(prealloc);

	return set;

search_again:
	if (start > end)
		goto out;
	write_unlock_irqrestore(&tree->lock, flags);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
EXPORT_SYMBOL(clear_extent_bit);
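
/*
 * Example (editorial sketch): a truncate-style teardown waking any waiters
 * and removing the state records no matter which bits they carry, the same
 * pattern extent_invalidatepage() uses further down:
 *
 *	clear_extent_bit(tree, start, end,
 *			 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
 *			 1, 1, GFP_NOFS);
 *
 * A positive return here means at least one of those bits was actually
 * set somewhere in [start, end].
 */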

static int wait_on_state(struct extent_map_tree *tree,
			 struct extent_state *state)
{
	DEFINE_WAIT(wait);
	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
	read_unlock_irq(&tree->lock);
	schedule();
	read_lock_irq(&tree->lock);
	finish_wait(&state->wq, &wait);
	return 0;
}

/*
 * waits for one or more bits to clear on a range in the state tree.
 * The range [start, end] is inclusive.
 * The tree lock is taken by this function
 */
int wait_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits)
{
	struct extent_state *state;
	struct rb_node *node;

	read_lock_irq(&tree->lock);
again:
	while (1) {
		/*
		 * this search will find all the extents that end after
		 * our range starts
		 */
		node = tree_search(&tree->state, start);
		if (!node)
			break;

		state = rb_entry(node, struct extent_state, rb_node);

		if (state->start > end)
			goto out;

		if (state->state & bits) {
			start = state->start;
			atomic_inc(&state->refs);
			wait_on_state(tree, state);
			free_extent_state(state);
			goto again;
		}
		start = state->end + 1;

		if (start > end)
			break;

		if (need_resched()) {
			read_unlock_irq(&tree->lock);
			cond_resched();
			read_lock_irq(&tree->lock);
		}
	}
out:
	read_unlock_irq(&tree->lock);
	return 0;
}
EXPORT_SYMBOL(wait_extent_bit);

/*
 * set some bits on a range in the tree.  This may require allocations
 * or sleeping, so the gfp mask is used to indicate what is allowed.
 *
 * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
 * range already has the desired bits set.  The start of the existing
 * range is returned in failed_start in this case.
 *
 * [start, end] is inclusive
 * This takes the tree lock.
 */
int set_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits,
		   int exclusive, u64 *failed_start, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	unsigned long flags;
	int err = 0;
	int set;
	u64 last_start;
	u64 last_end;
again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	write_lock_irqsave(&tree->lock, flags);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node) {
		err = insert_state(tree, prealloc, start, end, bits);
		prealloc = NULL;
		BUG_ON(err == -EEXIST);
		goto out;
	}

	state = rb_entry(node, struct extent_state, rb_node);
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		set = state->state & bits;
		if (set && exclusive) {
			*failed_start = state->start;
			err = -EEXIST;
			goto out;
		}
		state->state |= bits;
		start = state->end + 1;
		merge_state(tree, state);
		goto search_again;
	}

	/*
	 *     | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		set = state->state & bits;
		if (exclusive && set) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			state->state |= bits;
			start = state->end + 1;
			merge_state(tree, state);
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or               | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;
		err = insert_state(tree, prealloc, start, this_end,
				   bits);
		prealloc = NULL;
		BUG_ON(err == -EEXIST);
		if (err)
			goto out;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		set = state->state & bits;
		if (exclusive && set) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		prealloc->state |= bits;
		merge_state(tree, prealloc);
		prealloc = NULL;
		goto out;
	}

	goto search_again;

out:
	write_unlock_irqrestore(&tree->lock, flags);
	if (prealloc)
		free_extent_state(prealloc);

	return err;

search_again:
	if (start > end)
		goto out;
	write_unlock_irqrestore(&tree->lock, flags);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
EXPORT_SYMBOL(set_extent_bit);
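
/*
 * Example (editorial sketch): exclusive mode is how range locking is built.
 * On -EEXIST, failed_start reports where the already-set region begins, so
 * a caller can wait on that range and retry, which is exactly what
 * lock_extent() below does:
 *
 *	u64 failed_start;
 *	int err;
 *
 *	err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
 *			     &failed_start, GFP_NOFS);
 *	if (err == -EEXIST)
 *		wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
 */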

/* wrappers around set/clear extent bit */
int set_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_dirty);

int set_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
		    int bits, gfp_t mask)
{
	return set_extent_bit(tree, start, end, bits, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_bits);

int clear_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
		      int bits, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_bits);

int set_extent_delalloc(struct extent_map_tree *tree, u64 start, u64 end,
			gfp_t mask)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_DIRTY, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_delalloc);

int clear_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
		       gfp_t mask)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_dirty);

int set_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
		   gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_new);

int clear_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_new);

int set_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
			gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_uptodate);

int clear_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
			  gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_uptodate);

int set_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
			 gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
			      0, NULL, mask);
}
EXPORT_SYMBOL(set_extent_writeback);

int clear_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
			   gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
}
EXPORT_SYMBOL(clear_extent_writeback);

int wait_on_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end)
{
	return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
}
EXPORT_SYMBOL(wait_on_extent_writeback);

/*
 * locks a range in ascending order, waiting for any locked regions
 * it hits on the way.  [start,end] are inclusive, and this will sleep.
 */
int lock_extent(struct extent_map_tree *tree, u64 start, u64 end, gfp_t mask)
{
	int err;
	u64 failed_start;
	while (1) {
		err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
				     &failed_start, mask);
		if (err == -EEXIST && (mask & __GFP_WAIT)) {
			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
			start = failed_start;
		} else {
			break;
		}
		WARN_ON(start > end);
	}
	return err;
}
EXPORT_SYMBOL(lock_extent);

int unlock_extent(struct extent_map_tree *tree, u64 start, u64 end,
		  gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
}
EXPORT_SYMBOL(unlock_extent);
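
/*
 * Usage sketch (editorial): the lock/unlock pair brackets byte ranges, and
 * both ends are inclusive, so a single page covers exactly
 * [start, start + PAGE_CACHE_SIZE - 1]:
 *
 *	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
 *	u64 end = start + PAGE_CACHE_SIZE - 1;
 *
 *	lock_extent(tree, start, end, GFP_NOFS);
 *	... do the locked work ...
 *	unlock_extent(tree, start, end, GFP_NOFS);
 */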

/*
 * helper function to set pages and extents in the tree dirty
 */
int set_range_dirty(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		__set_page_dirty_nobuffers(page);
		page_cache_release(page);
		index++;
	}
	set_extent_dirty(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(set_range_dirty);

/*
 * helper function to set both pages and extents in the tree writeback
 */
int set_range_writeback(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		set_page_writeback(page);
		page_cache_release(page);
		index++;
	}
	set_extent_writeback(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(set_range_writeback);

int find_first_extent_bit(struct extent_map_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, int bits)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 1;

	read_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node || IS_ERR(node))
		goto out;

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->end >= start && (state->state & bits)) {
			*start_ret = state->start;
			*end_ret = state->end;
			ret = 0;
			break;
		}
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	read_unlock_irq(&tree->lock);
	return ret;
}
EXPORT_SYMBOL(find_first_extent_bit);
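
/*
 * Usage sketch (editorial, relying on the convention above that zero is
 * returned when a matching range is found): find_first_extent_bit is the
 * building block for "visit every range with these bits" loops:
 *
 *	u64 cur = 0;
 *	u64 found_start, found_end;
 *
 *	while (!find_first_extent_bit(tree, cur, &found_start,
 *				      &found_end, EXTENT_DIRTY)) {
 *		... process [found_start, found_end] ...
 *		cur = found_end + 1;
 *	}
 */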

u64 find_lock_delalloc_range(struct extent_map_tree *tree,
			     u64 start, u64 lock_start, u64 *end,
			     u64 max_bytes)
{
	struct rb_node *node;
	struct extent_state *state;
	u64 cur_start = start;
	u64 found = 0;
	u64 total_bytes = 0;

	write_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
search_again:
	node = tree_search(&tree->state, cur_start);
	if (!node || IS_ERR(node))
		goto out;

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->start != cur_start)
			goto out;
		if (!(state->state & EXTENT_DELALLOC))
			goto out;
		if (state->start >= lock_start) {
			if (state->state & EXTENT_LOCKED) {
				DEFINE_WAIT(wait);
				atomic_inc(&state->refs);
				prepare_to_wait(&state->wq, &wait,
						TASK_UNINTERRUPTIBLE);
				write_unlock_irq(&tree->lock);
				schedule();
				write_lock_irq(&tree->lock);
				finish_wait(&state->wq, &wait);
				free_extent_state(state);
				goto search_again;
			}
			state->state |= EXTENT_LOCKED;
		}
		found = 1;
		*end = state->end;
		cur_start = state->end + 1;
		node = rb_next(node);
		if (!node)
			break;
		total_bytes += state->end - state->start + 1;
		if (total_bytes >= max_bytes)
			break;
	}
out:
	write_unlock_irq(&tree->lock);
	return found;
}

/*
 * helper function to lock both pages and extents in the tree.
 * pages must be locked first.
 */
int lock_range(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;
	int err;

	while (index <= end_index) {
		page = grab_cache_page(tree->mapping, index);
		if (!page) {
			err = -ENOMEM;
			goto failed;
		}
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto failed;
		}
		index++;
	}
	lock_extent(tree, start, end, GFP_NOFS);
	return 0;

failed:
	/*
	 * we failed above in getting the page at 'index', so we undo here
	 * up to but not including the page at 'index'
	 */
	end_index = index;
	index = start >> PAGE_CACHE_SHIFT;
	while (index < end_index) {
		page = find_get_page(tree->mapping, index);
		unlock_page(page);
		page_cache_release(page);
		index++;
	}
	return err;
}
EXPORT_SYMBOL(lock_range);

/*
 * helper function to unlock both pages and extents in the tree.
 */
int unlock_range(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		unlock_page(page);
		page_cache_release(page);
		index++;
	}
	unlock_extent(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(unlock_range);

int set_state_private(struct extent_map_tree *tree, u64 start, u64 private)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	write_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node || IS_ERR(node)) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	state->private = private;
out:
	write_unlock_irq(&tree->lock);
	return ret;
}

int get_state_private(struct extent_map_tree *tree, u64 start, u64 *private)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	read_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node || IS_ERR(node)) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	*private = state->private;
out:
	read_unlock_irq(&tree->lock);
	return ret;
}

/*
 * searches a range in the state tree for a given mask.
 * If 'filled' == 1, this returns 1 only if every extent in the tree
 * has the bits set.  Otherwise, 1 is returned if any bit in the
 * range is found set.
 */
int test_range_bit(struct extent_map_tree *tree, u64 start, u64 end,
		   int bits, int filled)
{
	struct extent_state *state = NULL;
	struct rb_node *node;
	int bitset = 0;

	read_lock_irq(&tree->lock);
	node = tree_search(&tree->state, start);
	while (node && start <= end) {
		state = rb_entry(node, struct extent_state, rb_node);

		if (filled && state->start > start) {
			bitset = 0;
			break;
		}
		if (state->start > end)
			break;

		if (state->state & bits) {
			bitset = 1;
			if (!filled)
				break;
		} else if (filled) {
			bitset = 0;
			break;
		}
		start = state->end + 1;
		if (start > end)
			break;
		node = rb_next(node);
	}
	read_unlock_irq(&tree->lock);
	return bitset;
}
EXPORT_SYMBOL(test_range_bit);
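
/*
 * Examples (editorial sketch) of the two modes, matching the page helpers
 * that follow: with filled == 1 every byte of the range must be covered by
 * states carrying the bit; with filled == 0 one hit anywhere is enough.
 *
 *	whole page known uptodate:
 *		test_range_bit(tree, start, end, EXTENT_UPTODATE, 1)
 *	any byte of the page still locked:
 *		test_range_bit(tree, start, end, EXTENT_LOCKED, 0)
 */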

/*
 * helper function to set a given page up to date if all the
 * extents in the tree for that page are up to date
 */
static int check_page_uptodate(struct extent_map_tree *tree,
			       struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
		SetPageUptodate(page);
	return 0;
}

/*
 * helper function to unlock a page if all the extents in the tree
 * for that page are unlocked
 */
static int check_page_locked(struct extent_map_tree *tree,
			     struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
		unlock_page(page);
	return 0;
}

/*
 * helper function to end page writeback if all the extents
 * in the tree for that page are done with writeback
 */
static int check_page_writeback(struct extent_map_tree *tree,
				struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
		end_page_writeback(page);
	return 0;
}

/* lots and lots of room for performance fixes in the end_bio funcs */

/*
 * after a writepage IO is done, we need to:
 * clear the uptodate bits on error
 * clear the writeback bits in the extent tree for this IO
 * end_page_writeback if the page has no more pending IO
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_extent_writepage(struct bio *bio, int err)
#else
static int end_bio_extent_writepage(struct bio *bio,
				    unsigned int bytes_done, int err)
#endif
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_map_tree *tree = bio->bi_private;
	u64 start;
	u64 end;
	int whole_page;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	if (bio->bi_size)
		return 1;
#endif

	do {
		struct page *page = bvec->bv_page;
		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			 bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
			whole_page = 1;
		else
			whole_page = 0;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (!uptodate) {
			clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
			ClearPageUptodate(page);
			SetPageError(page);
		}
		clear_extent_writeback(tree, start, end, GFP_ATOMIC);

		if (whole_page)
			end_page_writeback(page);
		else
			check_page_writeback(tree, page);
		if (tree->ops && tree->ops->writepage_end_io_hook)
			tree->ops->writepage_end_io_hook(page, start, end);
	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	return 0;
#endif
}

/*
 * after a readpage IO is done, we need to:
 * clear the uptodate bits on error
 * set the uptodate bits if things worked
 * set the page up to date if all extents in the tree are uptodate
 * clear the lock bit in the extent tree
 * unlock the page if there are no other extents locked for it
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_extent_readpage(struct bio *bio, int err)
#else
static int end_bio_extent_readpage(struct bio *bio,
				   unsigned int bytes_done, int err)
#endif
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_map_tree *tree = bio->bi_private;
	u64 start;
	u64 end;
	int whole_page;
	int ret;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	if (bio->bi_size)
		return 1;
#endif

	do {
		struct page *page = bvec->bv_page;
		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
			whole_page = 1;
		else
			whole_page = 0;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
			ret = tree->ops->readpage_end_io_hook(page, start, end);
			if (ret)
				uptodate = 0;
		}
		if (uptodate) {
			set_extent_uptodate(tree, start, end, GFP_ATOMIC);
			if (whole_page)
				SetPageUptodate(page);
			else
				check_page_uptodate(tree, page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}

		unlock_extent(tree, start, end, GFP_ATOMIC);

		if (whole_page)
			unlock_page(page);
		else
			check_page_locked(tree, page);
	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	return 0;
#endif
}

/*
 * IO done from prepare_write is pretty simple, we just unlock
 * the structs in the extent tree when done, and set the uptodate bits
 * as appropriate.
 */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_extent_preparewrite(struct bio *bio, int err)
#else
static int end_bio_extent_preparewrite(struct bio *bio,
				       unsigned int bytes_done, int err)
#endif
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_map_tree *tree = bio->bi_private;
	u64 start;
	u64 end;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	if (bio->bi_size)
		return 1;
#endif

	do {
		struct page *page = bvec->bv_page;
		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate) {
			set_extent_uptodate(tree, start, end, GFP_ATOMIC);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}

		unlock_extent(tree, start, end, GFP_ATOMIC);

	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	return 0;
#endif
}

static struct bio *
extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
		 gfp_t gfp_flags)
{
	struct bio *bio;

	bio = bio_alloc(gfp_flags, nr_vecs);

	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
		while (!bio && (nr_vecs /= 2))
			bio = bio_alloc(gfp_flags, nr_vecs);
	}

	if (bio) {
		bio->bi_bdev = bdev;
		bio->bi_sector = first_sector;
	}
	return bio;
}

static int submit_one_bio(int rw, struct bio *bio)
{
	int ret = 0;
	bio_get(bio);
	submit_bio(rw, bio);
	if (bio_flagged(bio, BIO_EOPNOTSUPP))
		ret = -EOPNOTSUPP;
	bio_put(bio);
	return ret;
}

static int submit_extent_page(int rw, struct extent_map_tree *tree,
			      struct page *page, sector_t sector,
			      size_t size, unsigned long offset,
			      struct block_device *bdev,
			      struct bio **bio_ret,
			      unsigned long max_pages,
			      bio_end_io_t end_io_func)
{
	int ret = 0;
	struct bio *bio;
	int nr;

	if (bio_ret && *bio_ret) {
		bio = *bio_ret;
		if (bio->bi_sector + (bio->bi_size >> 9) != sector ||
		    bio_add_page(bio, page, size, offset) < size) {
			ret = submit_one_bio(rw, bio);
			bio = NULL;
		} else {
			return 0;
		}
	}
	nr = min_t(int, max_pages, bio_get_nr_vecs(bdev));
	bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
	if (!bio) {
		printk("failed to allocate bio nr %d\n", nr);
	}
	bio_add_page(bio, page, size, offset);
	bio->bi_end_io = end_io_func;
	bio->bi_private = tree;
	if (bio_ret) {
		*bio_ret = bio;
	} else {
		ret = submit_one_bio(rw, bio);
	}
	return ret;
}

void set_page_extent_mapped(struct page *page)
{
	if (!PagePrivate(page)) {
		SetPagePrivate(page);
		WARN_ON(!page->mapping->a_ops->invalidatepage);
		set_page_private(page, EXTENT_PAGE_PRIVATE);
		page_cache_get(page);
	}
}

/*
 * basic readpage implementation.  Locked extent state structs are inserted
 * into the tree that are removed when the IO is done (by the end_io
 * handlers)
 */
static int __extent_read_full_page(struct extent_map_tree *tree,
				   struct page *page,
				   get_extent_t *get_extent,
				   struct bio **bio)
{
	struct inode *inode = page->mapping->host;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 page_end = start + PAGE_CACHE_SIZE - 1;
	u64 end;
	u64 cur = start;
	u64 extent_offset;
	u64 last_byte = i_size_read(inode);
	u64 block_start;
	u64 cur_end;
	sector_t sector;
	struct extent_map *em;
	struct block_device *bdev;
	int ret;
	int nr = 0;
	size_t page_offset = 0;
	size_t iosize;
	size_t blocksize = inode->i_sb->s_blocksize;

	set_page_extent_mapped(page);

	end = page_end;
	lock_extent(tree, start, end, GFP_NOFS);

	while (cur <= end) {
		if (cur >= last_byte) {
			iosize = PAGE_CACHE_SIZE - page_offset;
			zero_user_page(page, page_offset, iosize, KM_USER0);
			set_extent_uptodate(tree, cur, cur + iosize - 1,
					    GFP_NOFS);
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			break;
		}
		em = get_extent(inode, page, page_offset, cur, end, 0);
		if (IS_ERR(em) || !em) {
			SetPageError(page);
			unlock_extent(tree, cur, end, GFP_NOFS);
			break;
		}

		extent_offset = cur - em->start;
		BUG_ON(em->end < cur);
		BUG_ON(end < cur);

		iosize = min(em->end - cur, end - cur) + 1;
		cur_end = min(em->end, end);
		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
		sector = (em->block_start + extent_offset) >> 9;
		bdev = em->bdev;
		block_start = em->block_start;
		free_extent_map(em);
		em = NULL;

		/* we've found a hole, just zero and go on */
		if (block_start == EXTENT_MAP_HOLE) {
			zero_user_page(page, page_offset, iosize, KM_USER0);
			set_extent_uptodate(tree, cur, cur + iosize - 1,
					    GFP_NOFS);
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}
		/* the get_extent function already copied into the page */
		if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}

		ret = 0;
		if (tree->ops && tree->ops->readpage_io_hook) {
			ret = tree->ops->readpage_io_hook(page, cur,
							  cur + iosize - 1);
		}
		if (!ret) {
			unsigned long nr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
			nr -= page->index;
			ret = submit_extent_page(READ, tree, page,
						 sector, iosize, page_offset,
						 bdev, bio, nr,
						 end_bio_extent_readpage);
		}
		if (ret)
			SetPageError(page);
		cur = cur + iosize;
		page_offset += iosize;
		nr++;
	}
	if (!nr) {
		if (!PageError(page))
			SetPageUptodate(page);
		unlock_page(page);
	}
	return 0;
}

int extent_read_full_page(struct extent_map_tree *tree, struct page *page,
			  get_extent_t *get_extent)
{
	struct bio *bio = NULL;
	int ret;

	ret = __extent_read_full_page(tree, page, get_extent, &bio);
	if (bio)
		submit_one_bio(READ, bio);
	return ret;
}
EXPORT_SYMBOL(extent_read_full_page);

/*
 * the writepage semantics are similar to regular writepage.  extent
 * records are inserted to lock ranges in the tree, and as dirty areas
 * are found, they are marked writeback.  Then the lock bits are removed
 * and the end_io handler clears the writeback ranges
 */
static int __extent_writepage(struct page *page, struct writeback_control *wbc,
			      void *data)
{
	struct inode *inode = page->mapping->host;
	struct extent_page_data *epd = data;
	struct extent_map_tree *tree = epd->tree;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 page_end = start + PAGE_CACHE_SIZE - 1;
	u64 end;
	u64 cur = start;
	u64 extent_offset;
	u64 last_byte = i_size_read(inode);
	u64 block_start;
	u64 iosize;
	sector_t sector;
	struct extent_map *em;
	struct block_device *bdev;
	int ret;
	int nr = 0;
	size_t page_offset = 0;
	size_t blocksize;
	loff_t i_size = i_size_read(inode);
	unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
	u64 nr_delalloc;
	u64 delalloc_end;

	WARN_ON(!PageLocked(page));
	if (page->index > end_index) {
		clear_extent_dirty(tree, start, page_end, GFP_NOFS);
		unlock_page(page);
		return 0;
	}

	if (page->index == end_index) {
		size_t offset = i_size & (PAGE_CACHE_SIZE - 1);
		zero_user_page(page, offset,
			       PAGE_CACHE_SIZE - offset, KM_USER0);
	}

	set_page_extent_mapped(page);

	lock_extent(tree, start, page_end, GFP_NOFS);
	nr_delalloc = find_lock_delalloc_range(tree, start, page_end + 1,
					       &delalloc_end,
					       PAGE_CACHE_SIZE);
	if (nr_delalloc) {
		tree->ops->fill_delalloc(inode, start, delalloc_end);
		if (delalloc_end >= page_end + 1) {
			clear_extent_bit(tree, page_end + 1, delalloc_end,
					 EXTENT_LOCKED | EXTENT_DELALLOC,
					 1, 0, GFP_NOFS);
		}
		clear_extent_bit(tree, start, page_end, EXTENT_DELALLOC,
				 0, 0, GFP_NOFS);
		if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
			printk("found delalloc bits after clear extent_bit\n");
		}
	} else if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
		printk("found delalloc bits after find_delalloc_range returns 0\n");
	}

	end = page_end;
	if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
		printk("found delalloc bits after lock_extent\n");
	}

	if (last_byte <= start) {
		clear_extent_dirty(tree, start, page_end, GFP_NOFS);
		goto done;
	}

	set_extent_uptodate(tree, start, page_end, GFP_NOFS);
	blocksize = inode->i_sb->s_blocksize;

	while (cur <= end) {
		if (cur >= last_byte) {
			clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
			break;
		}
		em = epd->get_extent(inode, page, page_offset, cur, end, 1);
		if (IS_ERR(em) || !em) {
			SetPageError(page);
			break;
		}

		extent_offset = cur - em->start;
		BUG_ON(em->end < cur);
		BUG_ON(end < cur);
		iosize = min(em->end - cur, end - cur) + 1;
		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
		sector = (em->block_start + extent_offset) >> 9;
		bdev = em->bdev;
		block_start = em->block_start;
		free_extent_map(em);
		em = NULL;

		if (block_start == EXTENT_MAP_HOLE ||
		    block_start == EXTENT_MAP_INLINE) {
			clear_extent_dirty(tree, cur,
					   cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}

		/* leave this out until we have a page_mkwrite call */
		if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
					 EXTENT_DIRTY, 0)) {
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}
		clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
		if (tree->ops && tree->ops->writepage_io_hook) {
			ret = tree->ops->writepage_io_hook(page, cur,
							   cur + iosize - 1);
		} else {
			ret = 0;
		}
		if (ret)
			SetPageError(page);
		else {
			unsigned long nr = end_index + 1;
			set_range_writeback(tree, cur, cur + iosize - 1);

			ret = submit_extent_page(WRITE, tree, page, sector,
						 iosize, page_offset, bdev,
						 &epd->bio, nr,
						 end_bio_extent_writepage);
			if (ret)
				SetPageError(page);
		}
		cur = cur + iosize;
		page_offset += iosize;
		nr++;
	}
done:
	unlock_extent(tree, start, page_end, GFP_NOFS);
	unlock_page(page);
	return 0;
}

int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
			   get_extent_t *get_extent,
			   struct writeback_control *wbc)
{
	int ret;
	struct extent_page_data epd = {
		.bio = NULL,
		.tree = tree,
		.get_extent = get_extent,
	};

	ret = __extent_writepage(page, wbc, &epd);
	if (epd.bio)
		submit_one_bio(WRITE, epd.bio);
	return ret;
}
EXPORT_SYMBOL(extent_write_full_page);

int extent_writepages(struct extent_map_tree *tree,
		      struct address_space *mapping,
		      get_extent_t *get_extent,
		      struct writeback_control *wbc)
{
	int ret;
	struct extent_page_data epd = {
		.bio = NULL,
		.tree = tree,
		.get_extent = get_extent,
	};

	ret = write_cache_pages(mapping, wbc, __extent_writepage, &epd);
	if (epd.bio)
		submit_one_bio(WRITE, epd.bio);
	return ret;
}
EXPORT_SYMBOL(extent_writepages);

int extent_readpages(struct extent_map_tree *tree,
		     struct address_space *mapping,
		     struct list_head *pages, unsigned nr_pages,
		     get_extent_t get_extent)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	struct pagevec pvec;

	pagevec_init(&pvec, 0);
	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = list_entry(pages->prev, struct page, lru);

		prefetchw(&page->flags);
		list_del(&page->lru);
		/*
		 * what we want to do here is call add_to_page_cache_lru,
		 * but that isn't exported, so we reproduce it here
		 */
		if (!add_to_page_cache(page, mapping,
				       page->index, GFP_KERNEL)) {

			/* open coding of lru_cache_add, also not exported */
			page_cache_get(page);
			if (!pagevec_add(&pvec, page))
				__pagevec_lru_add(&pvec);
			__extent_read_full_page(tree, page, get_extent, &bio);
		}
		page_cache_release(page);
	}
	if (pagevec_count(&pvec))
		__pagevec_lru_add(&pvec);
	BUG_ON(!list_empty(pages));
	if (bio)
		submit_one_bio(READ, bio);
	return 0;
}
EXPORT_SYMBOL(extent_readpages);

/*
 * basic invalidatepage code, this waits on any locked or writeback
 * ranges corresponding to the page, and then deletes any extent state
 * records from the tree
 */
int extent_invalidatepage(struct extent_map_tree *tree,
			  struct page *page, unsigned long offset)
{
	u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
	u64 end = start + PAGE_CACHE_SIZE - 1;
	size_t blocksize = page->mapping->host->i_sb->s_blocksize;

	start += (offset + blocksize - 1) & ~(blocksize - 1);
	if (start > end)
		return 0;

	lock_extent(tree, start, end, GFP_NOFS);
	wait_on_extent_writeback(tree, start, end);
	clear_extent_bit(tree, start, end,
			 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
			 1, 1, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(extent_invalidatepage);

/*
 * simple commit_write call, set_range_dirty is used to mark both
 * the pages and the extent records as dirty
 */
int extent_commit_write(struct extent_map_tree *tree,
			struct inode *inode, struct page *page,
			unsigned from, unsigned to)
{
	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;

	set_page_extent_mapped(page);
	set_page_dirty(page);

	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		mark_inode_dirty(inode);
	}
	return 0;
}
EXPORT_SYMBOL(extent_commit_write);

int extent_prepare_write(struct extent_map_tree *tree,
			 struct inode *inode, struct page *page,
			 unsigned from, unsigned to, get_extent_t *get_extent)
{
	u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
	u64 block_start;
	u64 orig_block_start;
	u64 block_end;
	u64 cur_end;
	struct extent_map *em;
	unsigned blocksize = 1 << inode->i_blkbits;
	size_t page_offset = 0;
	size_t block_off_start;
	size_t block_off_end;
	int err = 0;
	int iocount = 0;
	int ret = 0;
	int isnew;

	set_page_extent_mapped(page);

	block_start = (page_start + from) & ~((u64)blocksize - 1);
	block_end = (page_start + to - 1) | (blocksize - 1);
	orig_block_start = block_start;

	lock_extent(tree, page_start, page_end, GFP_NOFS);
	while (block_start <= block_end) {
		em = get_extent(inode, page, page_offset, block_start,
				block_end, 1);
		if (IS_ERR(em) || !em)
			goto err;
		cur_end = min(block_end, em->end);
		block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
		block_off_end = block_off_start + blocksize;
		isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);

		if (!PageUptodate(page) && isnew &&
		    (block_off_end > to || block_off_start < from)) {
			void *kaddr;

			kaddr = kmap_atomic(page, KM_USER0);
			if (block_off_end > to)
				memset(kaddr + to, 0, block_off_end - to);
			if (block_off_start < from)
				memset(kaddr + block_off_start, 0,
				       from - block_off_start);
			flush_dcache_page(page);
			kunmap_atomic(kaddr, KM_USER0);
		}
		if (!isnew && !PageUptodate(page) &&
		    (block_off_end > to || block_off_start < from) &&
		    !test_range_bit(tree, block_start, cur_end,
				    EXTENT_UPTODATE, 1)) {
			u64 sector;
			u64 extent_offset = block_start - em->start;
			u64 iosize;
			sector = (em->block_start + extent_offset) >> 9;
			iosize = (cur_end - block_start + blocksize - 1) &
				 ~((u64)blocksize - 1);
			/*
			 * we've already got the extent locked, but we
			 * need to split the state such that our end_bio
			 * handler can clear the lock.
			 */
			set_extent_bit(tree, block_start,
				       block_start + iosize - 1,
				       EXTENT_LOCKED, 0, NULL, GFP_NOFS);
			ret = submit_extent_page(READ, tree, page,
					sector, iosize, page_offset, em->bdev,
					NULL, 1,
					end_bio_extent_preparewrite);
			iocount++;
			block_start = block_start + iosize;
		} else {
			set_extent_uptodate(tree, block_start, cur_end,
					    GFP_NOFS);
			unlock_extent(tree, block_start, cur_end, GFP_NOFS);
			block_start = cur_end + 1;
		}
		page_offset = block_start & (PAGE_CACHE_SIZE - 1);
		free_extent_map(em);
	}
	if (iocount) {
		wait_extent_bit(tree, orig_block_start,
				block_end, EXTENT_LOCKED);
	}
	check_page_uptodate(tree, page);
err:
	/* FIXME, zero out newly allocated blocks on error */
	return err;
}
EXPORT_SYMBOL(extent_prepare_write);

/*
 * a helper for releasepage.  As long as there are no locked extents
 * in the range corresponding to the page, both state records and extent
 * map records are removed
 */
int try_release_extent_mapping(struct extent_map_tree *tree, struct page *page)
{
	struct extent_map *em;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	u64 orig_start = start;
	int ret = 1;

	while (start <= end) {
		em = lookup_extent_mapping(tree, start, end);
		if (!em || IS_ERR(em))
			break;
		if (!test_range_bit(tree, em->start, em->end,
				    EXTENT_LOCKED, 0)) {
			remove_extent_mapping(tree, em);
			/* once for the rb tree */
			free_extent_map(em);
		}
		start = em->end + 1;
		/* once for us */
		free_extent_map(em);
	}
	if (test_range_bit(tree, orig_start, end, EXTENT_LOCKED, 0))
		ret = 0;
	else
		clear_extent_bit(tree, orig_start, end, EXTENT_UPTODATE,
				 1, 1, GFP_NOFS);
	return ret;
}
EXPORT_SYMBOL(try_release_extent_mapping);

sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
		     get_extent_t *get_extent)
{
	struct inode *inode = mapping->host;
	u64 start = iblock << inode->i_blkbits;
	u64 end = start + (1 << inode->i_blkbits) - 1;
	sector_t sector = 0;
	struct extent_map *em;

	em = get_extent(inode, NULL, 0, start, end, 0);
	if (!em || IS_ERR(em))
		goto out;

	if (em->block_start == EXTENT_MAP_INLINE ||
	    em->block_start == EXTENT_MAP_HOLE)
		goto out;

	sector = (em->block_start + start - em->start) >> inode->i_blkbits;
out:
	free_extent_map(em);
	return sector;
}

static int add_lru(struct extent_map_tree *tree, struct extent_buffer *eb)
{
	if (list_empty(&eb->lru)) {
		extent_buffer_get(eb);
		list_add(&eb->lru, &tree->buffer_lru);
		tree->lru_size++;
		if (tree->lru_size >= BUFFER_LRU_MAX) {
			struct extent_buffer *rm;
			rm = list_entry(tree->buffer_lru.prev,
					struct extent_buffer, lru);
			tree->lru_size--;
			list_del_init(&rm->lru);
			free_extent_buffer(rm);
		}
	} else {
		list_move(&eb->lru, &tree->buffer_lru);
	}
	return 0;
}

static struct extent_buffer *find_lru(struct extent_map_tree *tree,
				      u64 start, unsigned long len)
{
	struct list_head *lru = &tree->buffer_lru;
	struct list_head *cur = lru->next;
	struct extent_buffer *eb;

	if (list_empty(lru))
		return NULL;

	do {
		eb = list_entry(cur, struct extent_buffer, lru);
		if (eb->start == start && eb->len == len) {
			extent_buffer_get(eb);
			return eb;
		}
		cur = cur->next;
	} while (cur != lru);
	return NULL;
}

static inline unsigned long num_extent_pages(u64 start, u64 len)
{
	return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
		(start >> PAGE_CACHE_SHIFT);
}
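
/*
 * Worked example (editorial): with 4K pages, a 16K buffer starting at
 * byte 6144 straddles five pages:
 *
 *	((6144 + 16384 + 4095) >> 12) - (6144 >> 12) = 6 - 1 = 5
 *
 * while the same 16K buffer starting on a page boundary needs only four,
 * which is why both start and len feed the calculation.
 */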

static inline struct page *extent_buffer_page(struct extent_buffer *eb,
					      unsigned long i)
{
	struct page *p;
	struct address_space *mapping;

	if (i == 0)
		return eb->first_page;
	i += eb->start >> PAGE_CACHE_SHIFT;
	mapping = eb->first_page->mapping;
	read_lock_irq(&mapping->tree_lock);
	p = radix_tree_lookup(&mapping->page_tree, i);
	read_unlock_irq(&mapping->tree_lock);
	return p;
}

static struct extent_buffer *__alloc_extent_buffer(struct extent_map_tree *tree,
						   u64 start,
						   unsigned long len,
						   gfp_t mask)
{
	struct extent_buffer *eb = NULL;

	spin_lock(&tree->lru_lock);
	eb = find_lru(tree, start, len);
	spin_unlock(&tree->lru_lock);
	if (eb)
		return eb;

	eb = kmem_cache_zalloc(extent_buffer_cache, mask);
	if (!eb)
		return NULL;
	INIT_LIST_HEAD(&eb->lru);
	eb->start = start;
	eb->len = len;
	atomic_set(&eb->refs, 1);

	return eb;
}

static void __free_extent_buffer(struct extent_buffer *eb)
{
	kmem_cache_free(extent_buffer_cache, eb);
}

struct extent_buffer *alloc_extent_buffer(struct extent_map_tree *tree,
					  u64 start, unsigned long len,
					  struct page *page0,
					  gfp_t mask)
{
	unsigned long num_pages = num_extent_pages(start, len);
	unsigned long i;
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	struct extent_buffer *eb;
	struct page *p;
	struct address_space *mapping = tree->mapping;
	int uptodate = 1;

	eb = __alloc_extent_buffer(tree, start, len, mask);
	if (!eb || IS_ERR(eb))
		return NULL;

	if (eb->flags & EXTENT_BUFFER_FILLED)
		goto lru_add;

	if (page0) {
		eb->first_page = page0;
		i = 1;
		index++;
		page_cache_get(page0);
		mark_page_accessed(page0);
		set_page_extent_mapped(page0);
		WARN_ON(!PageUptodate(page0));
		set_page_private(page0, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
				 len << 2);
	} else {
		i = 0;
	}
	for (; i < num_pages; i++, index++) {
		p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
		if (!p) {
			WARN_ON(1);
			goto fail;
		}
		set_page_extent_mapped(p);
		mark_page_accessed(p);

		if (i == 0) {
			eb->first_page = p;
			set_page_private(p, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
					 len << 2);
		} else {
			set_page_private(p, EXTENT_PAGE_PRIVATE);
		}
		if (!PageUptodate(p))
			uptodate = 0;
		unlock_page(p);
	}
	if (uptodate)
		eb->flags |= EXTENT_UPTODATE;
	eb->flags |= EXTENT_BUFFER_FILLED;

lru_add:
	spin_lock(&tree->lru_lock);
	add_lru(tree, eb);
	spin_unlock(&tree->lru_lock);
	return eb;

fail:
	spin_lock(&tree->lru_lock);
	list_del_init(&eb->lru);
	spin_unlock(&tree->lru_lock);
	if (!atomic_dec_and_test(&eb->refs))
		return NULL;
	for (index = 1; index < i; index++) {
		page_cache_release(extent_buffer_page(eb, index));
	}
	if (i > 0)
		page_cache_release(extent_buffer_page(eb, 0));
	__free_extent_buffer(eb);
	return NULL;
}
EXPORT_SYMBOL(alloc_extent_buffer);

struct extent_buffer *find_extent_buffer(struct extent_map_tree *tree,
					 u64 start, unsigned long len,
					 gfp_t mask)
{
	unsigned long num_pages = num_extent_pages(start, len);
	unsigned long i;
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	struct extent_buffer *eb;
	struct page *p;
	struct address_space *mapping = tree->mapping;
	int uptodate = 1;

	eb = __alloc_extent_buffer(tree, start, len, mask);
	if (!eb || IS_ERR(eb))
		return NULL;

	if (eb->flags & EXTENT_BUFFER_FILLED)
		goto lru_add;

	for (i = 0; i < num_pages; i++, index++) {
		p = find_lock_page(mapping, index);
		if (!p)
			goto fail;
		set_page_extent_mapped(p);
		mark_page_accessed(p);

		if (i == 0) {
			eb->first_page = p;
			set_page_private(p, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
					 len << 2);
		} else {
			set_page_private(p, EXTENT_PAGE_PRIVATE);
		}
		if (!PageUptodate(p))
			uptodate = 0;
		unlock_page(p);
	}
	if (uptodate)
		eb->flags |= EXTENT_UPTODATE;
	eb->flags |= EXTENT_BUFFER_FILLED;

lru_add:
	spin_lock(&tree->lru_lock);
	add_lru(tree, eb);
	spin_unlock(&tree->lru_lock);
	return eb;
fail:
	spin_lock(&tree->lru_lock);
	list_del_init(&eb->lru);
	spin_unlock(&tree->lru_lock);
	if (!atomic_dec_and_test(&eb->refs))
		return NULL;
	for (index = 1; index < i; index++) {
		page_cache_release(extent_buffer_page(eb, index));
	}
	if (i > 0)
		page_cache_release(extent_buffer_page(eb, 0));
	__free_extent_buffer(eb);
	return NULL;
}
EXPORT_SYMBOL(find_extent_buffer);

void free_extent_buffer(struct extent_buffer *eb)
{
	unsigned long i;
	unsigned long num_pages;

	if (!eb)
		return;

	if (!atomic_dec_and_test(&eb->refs))
		return;

	WARN_ON(!list_empty(&eb->lru));
	num_pages = num_extent_pages(eb->start, eb->len);

	for (i = 1; i < num_pages; i++) {
		page_cache_release(extent_buffer_page(eb, i));
	}
	page_cache_release(extent_buffer_page(eb, 0));
	__free_extent_buffer(eb);
}
EXPORT_SYMBOL(free_extent_buffer);
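
/*
 * Usage sketch (editorial; passing NULL for page0 is an assumption based
 * on the "if (page0)" handling above): both alloc_extent_buffer and
 * find_extent_buffer hand back a referenced buffer, possibly recycled from
 * the per-tree LRU, and every such reference must be balanced by a
 * free_extent_buffer call:
 *
 *	struct extent_buffer *eb;
 *
 *	eb = alloc_extent_buffer(tree, start, len, NULL, GFP_NOFS);
 *	if (eb) {
 *		... use eb ...
 *		free_extent_buffer(eb);
 *	}
 */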

int clear_extent_buffer_dirty(struct extent_map_tree *tree,
			      struct extent_buffer *eb)
{
	int set;
	unsigned long i;
	unsigned long num_pages;
	struct page *page;

	u64 start = eb->start;
	u64 end = start + eb->len - 1;

	set = clear_extent_dirty(tree, start, end, GFP_NOFS);
	num_pages = num_extent_pages(eb->start, eb->len);

	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		lock_page(page);
		/*
		 * if we're on the last page or the first page and the
		 * block isn't aligned on a page boundary, do extra checks
		 * to make sure we don't clean a page that is partially dirty
		 */
		if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
		    ((i == num_pages - 1) &&
		     ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
			start = (u64)page->index << PAGE_CACHE_SHIFT;
			end = start + PAGE_CACHE_SIZE - 1;
			if (test_range_bit(tree, start, end,
					   EXTENT_DIRTY, 0)) {
				unlock_page(page);
				continue;
			}
		}
		clear_page_dirty_for_io(page);
		unlock_page(page);
	}
	return 0;
}
EXPORT_SYMBOL(clear_extent_buffer_dirty);

int wait_on_extent_buffer_writeback(struct extent_map_tree *tree,
				    struct extent_buffer *eb)
{
	return wait_on_extent_writeback(tree, eb->start,
					eb->start + eb->len - 1);
}
EXPORT_SYMBOL(wait_on_extent_buffer_writeback);

int set_extent_buffer_dirty(struct extent_map_tree *tree,
			    struct extent_buffer *eb)
{
	unsigned long i;
	unsigned long num_pages;

	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = 0; i < num_pages; i++) {
		struct page *page = extent_buffer_page(eb, i);
		/* writepage may need to do something special for the
		 * first page, so we have to make sure page->private is
		 * properly set.  releasepage may drop page->private
		 * on us if the page isn't already dirty.
		 */
		if (i == 0) {
			lock_page(page);
			set_page_private(page,
					 EXTENT_PAGE_PRIVATE_FIRST_PAGE |
					 eb->len << 2);
		}
		__set_page_dirty_nobuffers(extent_buffer_page(eb, i));
		if (i == 0)
			unlock_page(page);
	}
	return set_extent_dirty(tree, eb->start,
				eb->start + eb->len - 1, GFP_NOFS);
}
EXPORT_SYMBOL(set_extent_buffer_dirty);

int set_extent_buffer_uptodate(struct extent_map_tree *tree,
			       struct extent_buffer *eb)
{
	unsigned long i;
	struct page *page;
	unsigned long num_pages;

	num_pages = num_extent_pages(eb->start, eb->len);

	set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
			    GFP_NOFS);
	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
		    ((i == num_pages - 1) &&
		     ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
			check_page_uptodate(tree, page);
			continue;
		}
		SetPageUptodate(page);
	}
	return 0;
}
EXPORT_SYMBOL(set_extent_buffer_uptodate);

int extent_buffer_uptodate(struct extent_map_tree *tree,
			   struct extent_buffer *eb)
{
	if (eb->flags & EXTENT_UPTODATE)
		return 1;
	return test_range_bit(tree, eb->start, eb->start + eb->len - 1,
			      EXTENT_UPTODATE, 1);
}
EXPORT_SYMBOL(extent_buffer_uptodate);

int read_extent_buffer_pages(struct extent_map_tree *tree,
			     struct extent_buffer *eb,
			     u64 start,
			     int wait)
{
	unsigned long i;
	unsigned long start_i;
	struct page *page;
	int err;
	int ret = 0;
	unsigned long num_pages;

	if (eb->flags & EXTENT_UPTODATE)
		return 0;

	if (0 && test_range_bit(tree, eb->start, eb->start + eb->len - 1,
				EXTENT_UPTODATE, 1)) {
		return 0;
	}
	if (start) {
		WARN_ON(start < eb->start);
		start_i = (start >> PAGE_CACHE_SHIFT) -
			(eb->start >> PAGE_CACHE_SHIFT);
	} else {
		start_i = 0;
	}

	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = start_i; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if (PageUptodate(page))
			continue;
		if (!wait) {
			if (TestSetPageLocked(page))
				continue;
		} else {
			lock_page(page);
		}
		if (!PageUptodate(page)) {
			err = page->mapping->a_ops->readpage(NULL, page);
			if (err)
				ret = err;
		} else {
			unlock_page(page);
		}
	}
	if (ret || !wait)
		return ret;

	for (i = start_i; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		wait_on_page_locked(page);
		if (!PageUptodate(page))
			ret = -EIO;
	}
	if (!ret)
		eb->flags |= EXTENT_UPTODATE;
	return ret;
}
EXPORT_SYMBOL(read_extent_buffer_pages);

void read_extent_buffer(struct extent_buffer *eb, void *dstv,
			unsigned long start,
			unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *dst = (char *)dstv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
	unsigned long num_pages = num_extent_pages(eb->start, eb->len);

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		if (!PageUptodate(page)) {
			printk("page %lu not up to date i %lu, total %lu, len %lu\n",
			       page->index, i, num_pages, eb->len);
			WARN_ON(1);
		}
		WARN_ON(!PageUptodate(page));

		cur = min(len, (PAGE_CACHE_SIZE - offset));
		kaddr = kmap_atomic(page, KM_USER1);
		memcpy(dst, kaddr + offset, cur);
		kunmap_atomic(kaddr, KM_USER1);

		dst += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(read_extent_buffer);
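
/*
 * Usage sketch (editorial; the on-disk struct name is hypothetical):
 * offsets are relative to the start of the extent buffer, not to any page,
 * so copying out a header at offset zero is simply:
 *
 *	struct example_header hdr;
 *
 *	read_extent_buffer(eb, &hdr, 0, sizeof(hdr));
 */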
int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
			      unsigned long min_len, char **token, char **map,
			      unsigned long *map_start,
			      unsigned long *map_len, int km)
{
	size_t offset = start & (PAGE_CACHE_SIZE - 1);
	char *kaddr;
	struct page *p;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
	unsigned long end_i = (start_offset + start + min_len - 1) >>
			      PAGE_CACHE_SHIFT;

	if (i != end_i)
		return -EINVAL;

	if (i == 0) {
		offset = start_offset;
		*map_start = 0;
	} else {
		offset = 0;
		*map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
	}
	if (start + min_len > eb->len) {
		printk("bad mapping eb start %Lu len %lu, wanted %lu %lu\n", eb->start, eb->len, start, min_len);
		WARN_ON(1);
	}

	p = extent_buffer_page(eb, i);
	WARN_ON(!PageUptodate(p));
	kaddr = kmap_atomic(p, km);
	*token = kaddr;
	*map = kaddr + offset;
	*map_len = PAGE_CACHE_SIZE - offset;
	return 0;
}
EXPORT_SYMBOL(map_private_extent_buffer);
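
/*
 * Map a chunk of the buffer, recycling the buffer's cached mapping.  If
 * a cached map_token exists it is dropped first and the replacement is
 * cached in its place; otherwise the caller owns the mapping and must
 * release it with unmap_extent_buffer().  A hypothetical call sequence
 * (off and the KM_USER0 slot are just examples):
 *
 *	char *token, *map;
 *	unsigned long map_start, map_len;
 *
 *	if (!map_extent_buffer(eb, off, sizeof(u64), &token, &map,
 *			       &map_start, &map_len, KM_USER0)) {
 *		... read or write through map ...
 *		unmap_extent_buffer(eb, token, KM_USER0);
 *	}
 */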
int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
		      unsigned long min_len,
		      char **token, char **map,
		      unsigned long *map_start,
		      unsigned long *map_len, int km)
{
	int err;
	int save = 0;

	if (eb->map_token) {
		unmap_extent_buffer(eb, eb->map_token, km);
		eb->map_token = NULL;
		save = 1;
	}
	err = map_private_extent_buffer(eb, start, min_len, token, map,
					map_start, map_len, km);
	if (!err && save) {
		eb->map_token = *token;
		eb->kaddr = *map;
		eb->map_start = *map_start;
		eb->map_len = *map_len;
	}
	return err;
}
EXPORT_SYMBOL(map_extent_buffer);
void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
{
	kunmap_atomic(token, km);
}
EXPORT_SYMBOL(unmap_extent_buffer);
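
/*
 * memcmp() a caller-supplied range against the buffer contents, up to a
 * page at a time, stopping early at the first difference.
 */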
int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
			 unsigned long start,
			 unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *ptr = (char *)ptrv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
	int ret = 0;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while(len > 0) {
		page = extent_buffer_page(eb, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, (PAGE_CACHE_SIZE - offset));

		kaddr = kmap_atomic(page, KM_USER0);
		ret = memcmp(ptr, kaddr + offset, cur);
		kunmap_atomic(kaddr, KM_USER0);
		if (ret)
			break;

		ptr += cur;
		len -= cur;
		offset = 0;
		i++;
	}
	return ret;
}
EXPORT_SYMBOL(memcmp_extent_buffer);
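
/*
 * Copy len bytes from srcv into the buffer at offset start.  The pages
 * must already be uptodate; like read_extent_buffer() this maps with
 * KM_USER1 so a KM_USER0 mapping can stay live in the caller.
 */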
void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
			 unsigned long start, unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *src = (char *)srcv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while(len > 0) {
		page = extent_buffer_page(eb, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, PAGE_CACHE_SIZE - offset);
		kaddr = kmap_atomic(page, KM_USER1);
		memcpy(kaddr + offset, src, cur);
		kunmap_atomic(kaddr, KM_USER1);

		src += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(write_extent_buffer);
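
/*
 * memset() a range of the buffer, one page at a time.
 */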
void memset_extent_buffer(struct extent_buffer *eb, char c,
			  unsigned long start, unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while(len > 0) {
		page = extent_buffer_page(eb, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, PAGE_CACHE_SIZE - offset);
		kaddr = kmap_atomic(page, KM_USER0);
		memset(kaddr + offset, c, cur);
		kunmap_atomic(kaddr, KM_USER0);

		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(memset_extent_buffer);
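
/*
 * Copy a range from one extent buffer into another: each destination
 * page is mapped with KM_USER0 and read_extent_buffer() gathers the
 * source bytes into it, which is why the read side maps with KM_USER1.
 */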
void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
			unsigned long dst_offset, unsigned long src_offset,
			unsigned long len)
{
	u64 dst_len = dst->len;
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;

	WARN_ON(src->len != dst_len);

	offset = (start_offset + dst_offset) &
		 ((unsigned long)PAGE_CACHE_SIZE - 1);

	while(len > 0) {
		page = extent_buffer_page(dst, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));

		kaddr = kmap_atomic(page, KM_USER0);
		read_extent_buffer(src, kaddr + offset, src_offset, cur);
		kunmap_atomic(kaddr, KM_USER0);

		src_offset += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(copy_extent_buffer);
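
/*
 * Page-level helpers for memcpy/memmove within a single extent buffer.
 * move_pages() has to cope with overlap: inside one page it defers to
 * memmove(), and across two pages it copies the chunk backwards to
 * match the tail-first walk in memmove_extent_buffer().
 */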
static void move_pages(struct page *dst_page, struct page *src_page,
		       unsigned long dst_off, unsigned long src_off,
		       unsigned long len)
{
	char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
	if (dst_page == src_page) {
		memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
	} else {
		char *src_kaddr = kmap_atomic(src_page, KM_USER1);
		char *p = dst_kaddr + dst_off + len;
		char *s = src_kaddr + src_off + len;

		while (len--)
			*--p = *--s;

		kunmap_atomic(src_kaddr, KM_USER1);
	}
	kunmap_atomic(dst_kaddr, KM_USER0);
}
static void copy_pages(struct page *dst_page, struct page *src_page,
		       unsigned long dst_off, unsigned long src_off,
		       unsigned long len)
{
	char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
	char *src_kaddr;

	if (dst_page != src_page)
		src_kaddr = kmap_atomic(src_page, KM_USER1);
	else
		src_kaddr = dst_kaddr;

	memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
	kunmap_atomic(dst_kaddr, KM_USER0);
	if (dst_page != src_page)
		kunmap_atomic(src_kaddr, KM_USER1);
}
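
/*
 * memcpy() between two ranges of the same extent buffer.  Each pass is
 * clamped so it stays within one source page and one destination page,
 * then handed to copy_pages().  The walk runs front to back, which is
 * why memmove_extent_buffer() can use it whenever dst_offset is below
 * src_offset.
 */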
void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			  unsigned long src_offset, unsigned long len)
{
	size_t cur;
	size_t dst_off_in_page;
	size_t src_off_in_page;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long dst_i;
	unsigned long src_i;

	if (src_offset + len > dst->len) {
		printk("memmove bogus src_offset %lu move len %lu len %lu\n",
		       src_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset + len > dst->len) {
		printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
		       dst_offset, len, dst->len);
		BUG_ON(1);
	}

	while(len > 0) {
		dst_off_in_page = (start_offset + dst_offset) &
				  ((unsigned long)PAGE_CACHE_SIZE - 1);
		src_off_in_page = (start_offset + src_offset) &
				  ((unsigned long)PAGE_CACHE_SIZE - 1);

		dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
		src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;

		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
					       src_off_in_page));
		cur = min_t(unsigned long, cur,
			    (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));

		copy_pages(extent_buffer_page(dst, dst_i),
			   extent_buffer_page(dst, src_i),
			   dst_off_in_page, src_off_in_page, cur);

		src_offset += cur;
		dst_offset += cur;
		len -= cur;
	}
}
EXPORT_SYMBOL(memcpy_extent_buffer);
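
/*
 * memmove() between two ranges of the same extent buffer.  When the
 * destination sits below the source a plain forward memcpy is safe;
 * otherwise the range is walked from the tail down through move_pages()
 * so overlapping bytes are not clobbered.
 */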
void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len)
{
	size_t cur;
	size_t dst_off_in_page;
	size_t src_off_in_page;
	unsigned long dst_end = dst_offset + len - 1;
	unsigned long src_end = src_offset + len - 1;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long dst_i;
	unsigned long src_i;

	if (src_offset + len > dst->len) {
		printk("memmove bogus src_offset %lu move len %lu len %lu\n",
		       src_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset + len > dst->len) {
		printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
		       dst_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset < src_offset) {
		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
		return;
	}
	while(len > 0) {
		dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
		src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;

		dst_off_in_page = (start_offset + dst_end) &
				  ((unsigned long)PAGE_CACHE_SIZE - 1);
		src_off_in_page = (start_offset + src_end) &
				  ((unsigned long)PAGE_CACHE_SIZE - 1);

		cur = min_t(unsigned long, len, src_off_in_page + 1);
		cur = min(cur, dst_off_in_page + 1);
		move_pages(extent_buffer_page(dst, dst_i),
			   extent_buffer_page(dst, src_i),
			   dst_off_in_page - cur + 1,
			   src_off_in_page - cur + 1, cur);

		dst_end -= cur;
		src_end -= cur;
		len -= cur;
	}
}
EXPORT_SYMBOL(memmove_extent_buffer);