Btrfs: fix enospc when there is plenty of space
[linux-2.6-block.git] / fs / btrfs / extent_io.c
1#include <linux/bitops.h>
2#include <linux/slab.h>
3#include <linux/bio.h>
4#include <linux/mm.h>
5#include <linux/gfp.h>
6#include <linux/pagemap.h>
7#include <linux/page-flags.h>
8#include <linux/module.h>
9#include <linux/spinlock.h>
10#include <linux/blkdev.h>
11#include <linux/swap.h>
12#include <linux/version.h>
13#include <linux/writeback.h>
14#include <linux/pagevec.h>
15#include "extent_io.h"
16#include "extent_map.h"
17#include "compat.h"
18#include "ctree.h"
19#include "btrfs_inode.h"
20
21/* temporary define until extent_map moves out of btrfs */
22struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
23 unsigned long extra_flags,
24 void (*ctor)(void *, struct kmem_cache *,
25 unsigned long));
26
27static struct kmem_cache *extent_state_cache;
28static struct kmem_cache *extent_buffer_cache;
29
30static LIST_HEAD(buffers);
31static LIST_HEAD(states);
32
33#define LEAK_DEBUG 1
34#ifdef LEAK_DEBUG
35static spinlock_t leak_lock = SPIN_LOCK_UNLOCKED;
36#endif
37
38#define BUFFER_LRU_MAX 64
39
40struct tree_entry {
41 u64 start;
42 u64 end;
43 struct rb_node rb_node;
44};
45
46struct extent_page_data {
47 struct bio *bio;
48 struct extent_io_tree *tree;
49 get_extent_t *get_extent;
50};
51
52int __init extent_io_init(void)
53{
54 extent_state_cache = btrfs_cache_create("extent_state",
55 sizeof(struct extent_state), 0,
56 NULL);
57 if (!extent_state_cache)
58 return -ENOMEM;
59
60 extent_buffer_cache = btrfs_cache_create("extent_buffers",
61 sizeof(struct extent_buffer), 0,
62 NULL);
63 if (!extent_buffer_cache)
64 goto free_state_cache;
65 return 0;
66
67free_state_cache:
68 kmem_cache_destroy(extent_state_cache);
69 return -ENOMEM;
70}
71
72void extent_io_exit(void)
73{
74 struct extent_state *state;
75 struct extent_buffer *eb;
76
77 while (!list_empty(&states)) {
78 state = list_entry(states.next, struct extent_state, leak_list);
79 printk("state leak: start %Lu end %Lu state %lu in tree %p refs %d\n", state->start, state->end, state->state, state->tree, atomic_read(&state->refs));
80 list_del(&state->leak_list);
81 kmem_cache_free(extent_state_cache, state);
82
83 }
84
85 while (!list_empty(&buffers)) {
86 eb = list_entry(buffers.next, struct extent_buffer, leak_list);
87 printk("buffer leak start %Lu len %lu refs %d\n", eb->start, eb->len, atomic_read(&eb->refs));
88 list_del(&eb->leak_list);
89 kmem_cache_free(extent_buffer_cache, eb);
90 }
91 if (extent_state_cache)
92 kmem_cache_destroy(extent_state_cache);
93 if (extent_buffer_cache)
94 kmem_cache_destroy(extent_buffer_cache);
95}
96
97void extent_io_tree_init(struct extent_io_tree *tree,
98 struct address_space *mapping, gfp_t mask)
99{
100 tree->state.rb_node = NULL;
101 tree->buffer.rb_node = NULL;
102 tree->ops = NULL;
103 tree->dirty_bytes = 0;
104 spin_lock_init(&tree->lock);
105 spin_lock_init(&tree->buffer_lock);
106 tree->mapping = mapping;
107}
108EXPORT_SYMBOL(extent_io_tree_init);
109
110struct extent_state *alloc_extent_state(gfp_t mask)
111{
112 struct extent_state *state;
113#ifdef LEAK_DEBUG
114 unsigned long flags;
115#endif
116
117 state = kmem_cache_alloc(extent_state_cache, mask);
118 if (!state)
119 return state;
120 state->state = 0;
121 state->private = 0;
122 state->tree = NULL;
123#ifdef LEAK_DEBUG
124 spin_lock_irqsave(&leak_lock, flags);
125 list_add(&state->leak_list, &states);
126 spin_unlock_irqrestore(&leak_lock, flags);
127#endif
128 atomic_set(&state->refs, 1);
129 init_waitqueue_head(&state->wq);
130 return state;
131}
132EXPORT_SYMBOL(alloc_extent_state);
133
134void free_extent_state(struct extent_state *state)
135{
136 if (!state)
137 return;
138 if (atomic_dec_and_test(&state->refs)) {
139#ifdef LEAK_DEBUG
140 unsigned long flags;
141#endif
142 WARN_ON(state->tree);
143#ifdef LEAK_DEBUG
144 spin_lock_irqsave(&leak_lock, flags);
145 list_del(&state->leak_list);
146 spin_unlock_irqrestore(&leak_lock, flags);
147#endif
148 kmem_cache_free(extent_state_cache, state);
149 }
150}
151EXPORT_SYMBOL(free_extent_state);
152
153static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
154 struct rb_node *node)
155{
156 struct rb_node ** p = &root->rb_node;
157 struct rb_node * parent = NULL;
158 struct tree_entry *entry;
159
160 while(*p) {
161 parent = *p;
162 entry = rb_entry(parent, struct tree_entry, rb_node);
163
164 if (offset < entry->start)
165 p = &(*p)->rb_left;
166 else if (offset > entry->end)
167 p = &(*p)->rb_right;
168 else
169 return parent;
170 }
171
172 entry = rb_entry(node, struct tree_entry, rb_node);
173 rb_link_node(node, parent, p);
174 rb_insert_color(node, root);
175 return NULL;
176}
177
178static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
179 struct rb_node **prev_ret,
180 struct rb_node **next_ret)
181{
182 struct rb_root *root = &tree->state;
183 struct rb_node * n = root->rb_node;
184 struct rb_node *prev = NULL;
185 struct rb_node *orig_prev = NULL;
186 struct tree_entry *entry;
187 struct tree_entry *prev_entry = NULL;
188
189 while(n) {
190 entry = rb_entry(n, struct tree_entry, rb_node);
191 prev = n;
192 prev_entry = entry;
193
194 if (offset < entry->start)
195 n = n->rb_left;
196 else if (offset > entry->end)
197 n = n->rb_right;
198 else {
199 return n;
200 }
201 }
202
203 if (prev_ret) {
204 orig_prev = prev;
205 while(prev && offset > prev_entry->end) {
206 prev = rb_next(prev);
207 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
208 }
209 *prev_ret = prev;
210 prev = orig_prev;
211 }
212
213 if (next_ret) {
214 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
215 while(prev && offset < prev_entry->start) {
216 prev = rb_prev(prev);
217 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
218 }
219 *next_ret = prev;
220 }
221 return NULL;
222}
223
224static inline struct rb_node *tree_search(struct extent_io_tree *tree,
225 u64 offset)
226{
227 struct rb_node *prev = NULL;
228 struct rb_node *ret;
229
230 ret = __etree_search(tree, offset, &prev, NULL);
231 if (!ret) {
232 return prev;
233 }
234 return ret;
235}
236
237static struct extent_buffer *buffer_tree_insert(struct extent_io_tree *tree,
238 u64 offset, struct rb_node *node)
239{
240 struct rb_root *root = &tree->buffer;
241 struct rb_node ** p = &root->rb_node;
242 struct rb_node * parent = NULL;
243 struct extent_buffer *eb;
244
245 while(*p) {
246 parent = *p;
247 eb = rb_entry(parent, struct extent_buffer, rb_node);
248
249 if (offset < eb->start)
250 p = &(*p)->rb_left;
251 else if (offset > eb->start)
252 p = &(*p)->rb_right;
253 else
254 return eb;
255 }
256
257 rb_link_node(node, parent, p);
258 rb_insert_color(node, root);
259 return NULL;
260}
261
262static struct extent_buffer *buffer_search(struct extent_io_tree *tree,
263 u64 offset)
264{
265 struct rb_root *root = &tree->buffer;
266 struct rb_node * n = root->rb_node;
267 struct extent_buffer *eb;
268
269 while(n) {
270 eb = rb_entry(n, struct extent_buffer, rb_node);
271 if (offset < eb->start)
272 n = n->rb_left;
273 else if (offset > eb->start)
274 n = n->rb_right;
275 else
276 return eb;
277 }
278 return NULL;
279}
280
281/*
282 * utility function to look for merge candidates inside a given range.
283 * Any extents with matching state are merged together into a single
284 * extent in the tree. Extents with EXTENT_IO in their state field
285 * are not merged because the end_io handlers need to be able to do
286 * operations on them without sleeping (or doing allocations/splits).
287 *
288 * This should be called with the tree lock held.
289 */
290static int merge_state(struct extent_io_tree *tree,
291 struct extent_state *state)
292{
293 struct extent_state *other;
294 struct rb_node *other_node;
295
296 if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
297 return 0;
298
299 other_node = rb_prev(&state->rb_node);
300 if (other_node) {
301 other = rb_entry(other_node, struct extent_state, rb_node);
302 if (other->end == state->start - 1 &&
303 other->state == state->state) {
304 state->start = other->start;
305 other->tree = NULL;
306 rb_erase(&other->rb_node, &tree->state);
307 free_extent_state(other);
308 }
309 }
310 other_node = rb_next(&state->rb_node);
311 if (other_node) {
312 other = rb_entry(other_node, struct extent_state, rb_node);
313 if (other->start == state->end + 1 &&
314 other->state == state->state) {
315 other->start = state->start;
316 state->tree = NULL;
317 rb_erase(&state->rb_node, &tree->state);
318 free_extent_state(state);
319 }
320 }
321 return 0;
322}
323
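/*
 * Worked example of the merge above (illustrative only, the ranges are
 * hypothetical): if the tree holds [0, 4095] and [4096, 8191] and both
 * carry exactly EXTENT_DIRTY, setting or clearing bits on either one
 * ends with a single [0, 8191] EXTENT_DIRTY state.  Ranges holding
 * EXTENT_IOBITS or EXTENT_BOUNDARY are skipped so the end_io handlers
 * keep a stable state struct to work against.
 */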
324static void set_state_cb(struct extent_io_tree *tree,
325 struct extent_state *state,
326 unsigned long bits)
327{
328 if (tree->ops && tree->ops->set_bit_hook) {
329 tree->ops->set_bit_hook(tree->mapping->host, state->start,
330 state->end, state->state, bits);
331 }
332}
333
334static void clear_state_cb(struct extent_io_tree *tree,
335 struct extent_state *state,
336 unsigned long bits)
337{
338 if (tree->ops && tree->ops->set_bit_hook) {
339 tree->ops->clear_bit_hook(tree->mapping->host, state->start,
340 state->end, state->state, bits);
341 }
342}
343
344/*
345 * insert an extent_state struct into the tree. 'bits' are set on the
346 * struct before it is inserted.
347 *
348 * This may return -EEXIST if the extent is already there, in which case the
349 * state struct is freed.
350 *
351 * The tree lock is not taken internally. This is a utility function and
352 * probably isn't what you want to call (see set/clear_extent_bit).
353 */
354static int insert_state(struct extent_io_tree *tree,
355 struct extent_state *state, u64 start, u64 end,
356 int bits)
357{
358 struct rb_node *node;
359
360 if (end < start) {
361 printk("end < start %Lu %Lu\n", end, start);
362 WARN_ON(1);
363 }
364 if (bits & EXTENT_DIRTY)
365 tree->dirty_bytes += end - start + 1;
366 set_state_cb(tree, state, bits);
367 state->state |= bits;
368 state->start = start;
369 state->end = end;
370 node = tree_insert(&tree->state, end, &state->rb_node);
371 if (node) {
372 struct extent_state *found;
373 found = rb_entry(node, struct extent_state, rb_node);
374 printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, start, end);
375 free_extent_state(state);
376 return -EEXIST;
377 }
378 state->tree = tree;
379 merge_state(tree, state);
380 return 0;
381}
382
383/*
384 * split a given extent state struct in two, inserting the preallocated
385 * struct 'prealloc' as the newly created second half. 'split' indicates an
386 * offset inside 'orig' where it should be split.
387 *
388 * Before calling,
389 * the tree has 'orig' at [orig->start, orig->end]. After calling, there
390 * are two extent state structs in the tree:
391 * prealloc: [orig->start, split - 1]
392 * orig: [ split, orig->end ]
393 *
394 * The tree locks are not taken by this function. They need to be held
395 * by the caller.
396 */
397static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
398 struct extent_state *prealloc, u64 split)
399{
400 struct rb_node *node;
401 prealloc->start = orig->start;
402 prealloc->end = split - 1;
403 prealloc->state = orig->state;
404 orig->start = split;
405
406 node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
407 if (node) {
408 struct extent_state *found;
409 found = rb_entry(node, struct extent_state, rb_node);
410 printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, prealloc->start, prealloc->end);
411 free_extent_state(prealloc);
412 return -EEXIST;
413 }
414 prealloc->tree = tree;
415 return 0;
416}
417
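/*
 * Worked example of the split contract above (illustrative numbers):
 * with orig covering [0, 8191] and split == 4096, the tree ends up with
 * prealloc [0, 4095] and orig [4096, 8191], both carrying orig's state
 * bits.  split_state() never allocates, which is why the callers below
 * hand it an extent_state they allocated before taking or while holding
 * the tree lock.
 */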
418/*
419 * utility function to clear some bits in an extent state struct.
420 * it will optionally wake up any one waiting on this state (wake == 1), or
421 * forcibly remove the state from the tree (delete == 1).
422 *
423 * If no bits are set on the state struct after clearing things, the
424 * struct is freed and removed from the tree
425 */
426static int clear_state_bit(struct extent_io_tree *tree,
427 struct extent_state *state, int bits, int wake,
428 int delete)
429{
430 int ret = state->state & bits;
431
432 if ((bits & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
433 u64 range = state->end - state->start + 1;
434 WARN_ON(range > tree->dirty_bytes);
435 tree->dirty_bytes -= range;
436 }
437 clear_state_cb(tree, state, bits);
438 state->state &= ~bits;
439 if (wake)
440 wake_up(&state->wq);
441 if (delete || state->state == 0) {
442 if (state->tree) {
443 clear_state_cb(tree, state, state->state);
444 rb_erase(&state->rb_node, &tree->state);
445 state->tree = NULL;
446 free_extent_state(state);
447 } else {
448 WARN_ON(1);
449 }
450 } else {
451 merge_state(tree, state);
452 }
453 return ret;
454}
455
456/*
457 * clear some bits on a range in the tree. This may require splitting
458 * or inserting elements in the tree, so the gfp mask is used to
459 * indicate which allocations or sleeping are allowed.
460 *
461 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
462 * the given range from the tree regardless of state (ie for truncate).
463 *
464 * the range [start, end] is inclusive.
465 *
466 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
467 * bits were already set, or zero if none of the bits were already set.
468 */
469int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
470 int bits, int wake, int delete, gfp_t mask)
471{
472 struct extent_state *state;
473 struct extent_state *prealloc = NULL;
474 struct rb_node *node;
475 unsigned long flags;
476 int err;
477 int set = 0;
478
479again:
480 if (!prealloc && (mask & __GFP_WAIT)) {
481 prealloc = alloc_extent_state(mask);
482 if (!prealloc)
483 return -ENOMEM;
484 }
485
486 spin_lock_irqsave(&tree->lock, flags);
487 /*
488 * this search will find the extents that end after
489 * our range starts
490 */
491 node = tree_search(tree, start);
492 if (!node)
493 goto out;
494 state = rb_entry(node, struct extent_state, rb_node);
495 if (state->start > end)
496 goto out;
497 WARN_ON(state->end < start);
498
499 /*
500 * | ---- desired range ---- |
501 * | state | or
502 * | ------------- state -------------- |
503 *
504 * We need to split the extent we found, and may flip
505 * bits on second half.
506 *
507 * If the extent we found extends past our range, we
508 * just split and search again. It'll get split again
509 * the next time though.
510 *
511 * If the extent we found is inside our range, we clear
512 * the desired bit on it.
513 */
514
515 if (state->start < start) {
516 if (!prealloc)
517 prealloc = alloc_extent_state(GFP_ATOMIC);
518 err = split_state(tree, state, prealloc, start);
519 BUG_ON(err == -EEXIST);
520 prealloc = NULL;
521 if (err)
522 goto out;
523 if (state->end <= end) {
524 start = state->end + 1;
525 set |= clear_state_bit(tree, state, bits,
526 wake, delete);
527 } else {
528 start = state->start;
529 }
530 goto search_again;
531 }
532 /*
533 * | ---- desired range ---- |
534 * | state |
535 * We need to split the extent, and clear the bit
536 * on the first half
537 */
538 if (state->start <= end && state->end > end) {
539 if (!prealloc)
540 prealloc = alloc_extent_state(GFP_ATOMIC);
541 err = split_state(tree, state, prealloc, end + 1);
542 BUG_ON(err == -EEXIST);
543
544 if (wake)
545 wake_up(&state->wq);
546 set |= clear_state_bit(tree, prealloc, bits,
547 wake, delete);
548 prealloc = NULL;
549 goto out;
550 }
551
552 start = state->end + 1;
553 set |= clear_state_bit(tree, state, bits, wake, delete);
554 goto search_again;
555
556out:
557 spin_unlock_irqrestore(&tree->lock, flags);
558 if (prealloc)
559 free_extent_state(prealloc);
560
561 return set;
562
563search_again:
564 if (start > end)
565 goto out;
566 spin_unlock_irqrestore(&tree->lock, flags);
567 if (mask & __GFP_WAIT)
568 cond_resched();
569 goto again;
570}
571EXPORT_SYMBOL(clear_extent_bit);
572
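/*
 * Usage sketch for clear_extent_bit() (illustrative; 'tree', 'start' and
 * 'end' are hypothetical):
 *
 *	int set;
 *
 *	set = clear_extent_bit(tree, start, end,
 *			       EXTENT_DIRTY | EXTENT_DELALLOC,
 *			       1, 0, GFP_NOFS);
 *
 * A return > 0 means at least one of those bits was set somewhere in the
 * range, 0 means none were, and < 0 is an error.  The GFP_NOFS mask lets
 * the preallocation and cond_resched() paths above sleep.
 */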
573static int wait_on_state(struct extent_io_tree *tree,
574 struct extent_state *state)
575{
576 DEFINE_WAIT(wait);
577 prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
578 spin_unlock_irq(&tree->lock);
579 schedule();
580 spin_lock_irq(&tree->lock);
581 finish_wait(&state->wq, &wait);
582 return 0;
583}
584
585/*
586 * waits for one or more bits to clear on a range in the state tree.
587 * The range [start, end] is inclusive.
588 * The tree lock is taken by this function
589 */
590int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
591{
592 struct extent_state *state;
593 struct rb_node *node;
594
595 spin_lock_irq(&tree->lock);
596again:
597 while (1) {
598 /*
599 * this search will find all the extents that end after
600 * our range starts
601 */
602 node = tree_search(tree, start);
603 if (!node)
604 break;
605
606 state = rb_entry(node, struct extent_state, rb_node);
607
608 if (state->start > end)
609 goto out;
610
611 if (state->state & bits) {
612 start = state->start;
613 atomic_inc(&state->refs);
614 wait_on_state(tree, state);
615 free_extent_state(state);
616 goto again;
617 }
618 start = state->end + 1;
619
620 if (start > end)
621 break;
622
623 if (need_resched()) {
624 spin_unlock_irq(&tree->lock);
625 cond_resched();
626 spin_lock_irq(&tree->lock);
627 }
628 }
629out:
630 spin_unlock_irq(&tree->lock);
631 return 0;
632}
633EXPORT_SYMBOL(wait_extent_bit);
634
635static void set_state_bits(struct extent_io_tree *tree,
636 struct extent_state *state,
637 int bits)
638{
639 if ((bits & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
640 u64 range = state->end - state->start + 1;
641 tree->dirty_bytes += range;
642 }
643 set_state_cb(tree, state, bits);
644 state->state |= bits;
645}
646
647/*
648 * set some bits on a range in the tree. This may require allocations
649 * or sleeping, so the gfp mask is used to indicate what is allowed.
650 *
651 * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
652 * range already has the desired bits set. The start of the existing
653 * range is returned in failed_start in this case.
654 *
655 * [start, end] is inclusive
656 * This takes the tree lock.
657 */
658int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits,
659 int exclusive, u64 *failed_start, gfp_t mask)
660{
661 struct extent_state *state;
662 struct extent_state *prealloc = NULL;
663 struct rb_node *node;
664 unsigned long flags;
665 int err = 0;
666 int set;
667 u64 last_start;
668 u64 last_end;
669again:
670 if (!prealloc && (mask & __GFP_WAIT)) {
671 prealloc = alloc_extent_state(mask);
672 if (!prealloc)
673 return -ENOMEM;
674 }
675
676 spin_lock_irqsave(&tree->lock, flags);
677 /*
678 * this search will find all the extents that end after
679 * our range starts.
680 */
681 node = tree_search(tree, start);
682 if (!node) {
683 err = insert_state(tree, prealloc, start, end, bits);
684 prealloc = NULL;
685 BUG_ON(err == -EEXIST);
686 goto out;
687 }
688
689 state = rb_entry(node, struct extent_state, rb_node);
690 last_start = state->start;
691 last_end = state->end;
692
693 /*
694 * | ---- desired range ---- |
695 * | state |
696 *
697 * Just lock what we found and keep going
698 */
699 if (state->start == start && state->end <= end) {
700 set = state->state & bits;
701 if (set && exclusive) {
702 *failed_start = state->start;
703 err = -EEXIST;
704 goto out;
705 }
706 set_state_bits(tree, state, bits);
707 start = state->end + 1;
708 merge_state(tree, state);
709 goto search_again;
710 }
711
712 /*
713 * | ---- desired range ---- |
714 * | state |
715 * or
716 * | ------------- state -------------- |
717 *
718 * We need to split the extent we found, and may flip bits on
719 * second half.
720 *
721 * If the extent we found extends past our
722 * range, we just split and search again. It'll get split
723 * again the next time though.
724 *
725 * If the extent we found is inside our range, we set the
726 * desired bit on it.
727 */
728 if (state->start < start) {
729 set = state->state & bits;
730 if (exclusive && set) {
731 *failed_start = start;
732 err = -EEXIST;
733 goto out;
734 }
735 err = split_state(tree, state, prealloc, start);
736 BUG_ON(err == -EEXIST);
737 prealloc = NULL;
738 if (err)
739 goto out;
740 if (state->end <= end) {
741 set_state_bits(tree, state, bits);
742 start = state->end + 1;
743 merge_state(tree, state);
744 } else {
745 start = state->start;
746 }
747 goto search_again;
748 }
749 /*
750 * | ---- desired range ---- |
751 * | state | or | state |
752 *
753 * There's a hole, we need to insert something in it and
754 * ignore the extent we found.
755 */
756 if (state->start > start) {
757 u64 this_end;
758 if (end < last_start)
759 this_end = end;
760 else
761 this_end = last_start -1;
762 err = insert_state(tree, prealloc, start, this_end,
763 bits);
764 prealloc = NULL;
765 BUG_ON(err == -EEXIST);
766 if (err)
767 goto out;
768 start = this_end + 1;
769 goto search_again;
770 }
771 /*
772 * | ---- desired range ---- |
773 * | state |
774 * We need to split the extent, and set the bit
775 * on the first half
776 */
777 if (state->start <= end && state->end > end) {
778 set = state->state & bits;
779 if (exclusive && set) {
780 *failed_start = start;
781 err = -EEXIST;
782 goto out;
783 }
784 err = split_state(tree, state, prealloc, end + 1);
785 BUG_ON(err == -EEXIST);
786
787 set_state_bits(tree, prealloc, bits);
788 merge_state(tree, prealloc);
789 prealloc = NULL;
790 goto out;
791 }
792
793 goto search_again;
794
795out:
796 spin_unlock_irqrestore(&tree->lock, flags);
797 if (prealloc)
798 free_extent_state(prealloc);
799
800 return err;
801
802search_again:
803 if (start > end)
804 goto out;
805 spin_unlock_irqrestore(&tree->lock, flags);
806 if (mask & __GFP_WAIT)
807 cond_resched();
808 goto again;
809}
810EXPORT_SYMBOL(set_extent_bit);
811
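/*
 * Usage sketch for the 'exclusive' mode (illustrative; the tree and the
 * byte range are hypothetical):
 *
 *	u64 failed_start;
 *	int err;
 *
 *	err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
 *			     &failed_start, GFP_NOFS);
 *	if (err == -EEXIST)
 *		wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
 *
 * lock_extent() below is built from exactly this retry-and-wait loop.
 */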
812/* wrappers around set/clear extent bit */
813int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
814 gfp_t mask)
815{
816 return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
817 mask);
818}
819EXPORT_SYMBOL(set_extent_dirty);
820
821int set_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
822 gfp_t mask)
823{
824 return set_extent_bit(tree, start, end, EXTENT_ORDERED, 0, NULL, mask);
825}
826EXPORT_SYMBOL(set_extent_ordered);
827
828int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
829 int bits, gfp_t mask)
830{
831 return set_extent_bit(tree, start, end, bits, 0, NULL,
832 mask);
833}
834EXPORT_SYMBOL(set_extent_bits);
835
836int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
837 int bits, gfp_t mask)
838{
839 return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
840}
841EXPORT_SYMBOL(clear_extent_bits);
842
843int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
844 gfp_t mask)
845{
846 return set_extent_bit(tree, start, end,
847 EXTENT_DELALLOC | EXTENT_DIRTY,
848 0, NULL, mask);
849}
850EXPORT_SYMBOL(set_extent_delalloc);
851
852int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
853 gfp_t mask)
854{
855 return clear_extent_bit(tree, start, end,
856 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
857}
858EXPORT_SYMBOL(clear_extent_dirty);
859
860int clear_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
861 gfp_t mask)
862{
863 return clear_extent_bit(tree, start, end, EXTENT_ORDERED, 1, 0, mask);
864}
865EXPORT_SYMBOL(clear_extent_ordered);
866
867int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
868 gfp_t mask)
869{
870 return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
871 mask);
872}
873EXPORT_SYMBOL(set_extent_new);
874
875int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
876 gfp_t mask)
877{
878 return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
879}
880EXPORT_SYMBOL(clear_extent_new);
881
882int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
883 gfp_t mask)
884{
885 return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
886 mask);
887}
888EXPORT_SYMBOL(set_extent_uptodate);
889
890int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
891 gfp_t mask)
892{
893 return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
894}
895EXPORT_SYMBOL(clear_extent_uptodate);
896
897int set_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
898 gfp_t mask)
899{
900 return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
901 0, NULL, mask);
902}
903EXPORT_SYMBOL(set_extent_writeback);
904
905int clear_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
906 gfp_t mask)
907{
908 return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
909}
910EXPORT_SYMBOL(clear_extent_writeback);
911
912int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end)
913{
914 return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
915}
916EXPORT_SYMBOL(wait_on_extent_writeback);
917
918/*
919 * either insert or lock state struct between start and end use mask to tell
920 * us if waiting is desired.
921 */
922int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
923{
924 int err;
925 u64 failed_start;
926 while (1) {
927 err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
928 &failed_start, mask);
929 if (err == -EEXIST && (mask & __GFP_WAIT)) {
930 wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
931 start = failed_start;
932 } else {
933 break;
934 }
935 WARN_ON(start > end);
936 }
937 return err;
938}
939EXPORT_SYMBOL(lock_extent);
940
941int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
942 gfp_t mask)
943{
944 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
945}
946EXPORT_SYMBOL(unlock_extent);
947
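/*
 * Usage sketch (illustrative; the inode and byte range are hypothetical):
 * the usual pattern in the rest of btrfs is lock, operate on the pages
 * covering the range, then unlock the same bytes:
 *
 *	lock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS);
 *	... read or dirty the pages for [start, end] ...
 *	unlock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS);
 *
 * With __GFP_WAIT in the mask lock_extent() sleeps until a conflicting
 * EXTENT_LOCKED range clears; without it the -EEXIST is passed back to
 * the caller.
 */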
948/*
949 * helper function to set pages and extents in the tree dirty
950 */
951int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end)
952{
953 unsigned long index = start >> PAGE_CACHE_SHIFT;
954 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
955 struct page *page;
956
957 while (index <= end_index) {
958 page = find_get_page(tree->mapping, index);
959 BUG_ON(!page);
960 __set_page_dirty_nobuffers(page);
961 page_cache_release(page);
962 index++;
963 }
964 set_extent_dirty(tree, start, end, GFP_NOFS);
965 return 0;
966}
967EXPORT_SYMBOL(set_range_dirty);
968
969/*
970 * helper function to set both pages and extents in the tree writeback
971 */
972int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
973{
974 unsigned long index = start >> PAGE_CACHE_SHIFT;
975 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
976 struct page *page;
977
978 while (index <= end_index) {
979 page = find_get_page(tree->mapping, index);
980 BUG_ON(!page);
981 set_page_writeback(page);
982 page_cache_release(page);
983 index++;
984 }
985 set_extent_writeback(tree, start, end, GFP_NOFS);
986 return 0;
987}
988EXPORT_SYMBOL(set_range_writeback);
989
990/*
991 * find the first offset in the io tree with 'bits' set. zero is
992 * returned if we find something, and *start_ret and *end_ret are
993 * set to reflect the state struct that was found.
994 *
995 * If nothing was found, 1 is returned, < 0 on error
996 */
997int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
998 u64 *start_ret, u64 *end_ret, int bits)
999{
1000 struct rb_node *node;
1001 struct extent_state *state;
1002 int ret = 1;
1003
1004 spin_lock_irq(&tree->lock);
1005 /*
1006 * this search will find all the extents that end after
1007 * our range starts.
1008 */
1009 node = tree_search(tree, start);
1010 if (!node) {
1011 goto out;
1012 }
1013
1014 while(1) {
1015 state = rb_entry(node, struct extent_state, rb_node);
1016 if (state->end >= start && (state->state & bits)) {
1017 *start_ret = state->start;
1018 *end_ret = state->end;
1019 ret = 0;
1020 break;
1021 }
1022 node = rb_next(node);
1023 if (!node)
1024 break;
1025 }
1026out:
1027 spin_unlock_irq(&tree->lock);
1028 return ret;
1029}
1030EXPORT_SYMBOL(find_first_extent_bit);
1031
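/*
 * Usage sketch (illustrative; 'tree' is hypothetical): walking every
 * range that has a bit set is a simple loop over this helper:
 *
 *	u64 found_start, found_end;
 *	u64 cur = 0;
 *
 *	while (find_first_extent_bit(tree, cur, &found_start,
 *				     &found_end, EXTENT_DIRTY) == 0) {
 *		... handle [found_start, found_end] ...
 *		cur = found_end + 1;
 *	}
 */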
1032/* find the first state struct with 'bits' set after 'start', and
1033 * return it. tree->lock must be held. NULL will returned if
1034 * nothing was found after 'start'
1035 */
1036struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
1037 u64 start, int bits)
1038{
1039 struct rb_node *node;
1040 struct extent_state *state;
1041
1042 /*
1043 * this search will find all the extents that end after
1044 * our range starts.
1045 */
1046 node = tree_search(tree, start);
1047 if (!node) {
1048 goto out;
1049 }
1050
1051 while(1) {
1052 state = rb_entry(node, struct extent_state, rb_node);
1053 if (state->end >= start && (state->state & bits)) {
1054 return state;
1055 }
1056 node = rb_next(node);
1057 if (!node)
1058 break;
1059 }
1060out:
1061 return NULL;
1062}
1063EXPORT_SYMBOL(find_first_extent_bit_state);
1064
1065/*
1066 * find a contiguous range of bytes in the file marked as delalloc, not
1067 * more than 'max_bytes'. start and end are used to return the range,
1068 *
1069 * 1 is returned if we find something, 0 if nothing was in the tree
1070 */
1071static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
1072 u64 *start, u64 *end, u64 max_bytes)
1073{
1074 struct rb_node *node;
1075 struct extent_state *state;
1076 u64 cur_start = *start;
1077 u64 found = 0;
1078 u64 total_bytes = 0;
1079
1080 spin_lock_irq(&tree->lock);
1081
1082 /*
1083 * this search will find all the extents that end after
1084 * our range starts.
1085 */
1086 node = tree_search(tree, cur_start);
1087 if (!node) {
1088 if (!found)
1089 *end = (u64)-1;
1090 goto out;
1091 }
1092
1093 while(1) {
1094 state = rb_entry(node, struct extent_state, rb_node);
1095 if (found && (state->start != cur_start ||
1096 (state->state & EXTENT_BOUNDARY))) {
1097 goto out;
1098 }
1099 if (!(state->state & EXTENT_DELALLOC)) {
1100 if (!found)
1101 *end = state->end;
1102 goto out;
1103 }
1104 if (!found)
1105 *start = state->start;
1106 found++;
1107 *end = state->end;
1108 cur_start = state->end + 1;
1109 node = rb_next(node);
1110 if (!node)
1111 break;
1112 total_bytes += state->end - state->start + 1;
1113 if (total_bytes >= max_bytes)
1114 break;
1115 }
1116out:
1117 spin_unlock_irq(&tree->lock);
1118 return found;
1119}
1120
1121static noinline int __unlock_for_delalloc(struct inode *inode,
1122 struct page *locked_page,
1123 u64 start, u64 end)
1124{
1125 int ret;
1126 struct page *pages[16];
1127 unsigned long index = start >> PAGE_CACHE_SHIFT;
1128 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1129 unsigned long nr_pages = end_index - index + 1;
1130 int i;
1131
1132 if (index == locked_page->index && end_index == index)
1133 return 0;
1134
1135 while(nr_pages > 0) {
1136 ret = find_get_pages_contig(inode->i_mapping, index,
1137 min(nr_pages, ARRAY_SIZE(pages)), pages);
1138 for (i = 0; i < ret; i++) {
1139 if (pages[i] != locked_page)
1140 unlock_page(pages[i]);
1141 page_cache_release(pages[i]);
1142 }
1143 nr_pages -= ret;
1144 index += ret;
1145 cond_resched();
1146 }
1147 return 0;
1148}
1149
1150static noinline int lock_delalloc_pages(struct inode *inode,
1151 struct page *locked_page,
1152 u64 delalloc_start,
1153 u64 delalloc_end)
1154{
1155 unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT;
1156 unsigned long start_index = index;
1157 unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT;
1158 unsigned long pages_locked = 0;
1159 struct page *pages[16];
1160 unsigned long nrpages;
1161 int ret;
1162 int i;
1163
1164 /* the caller is responsible for locking the start index */
1165 if (index == locked_page->index && index == end_index)
1166 return 0;
1167
1168 /* skip the page at the start index */
1169 nrpages = end_index - index + 1;
1170 while(nrpages > 0) {
1171 ret = find_get_pages_contig(inode->i_mapping, index,
1172 min(nrpages, ARRAY_SIZE(pages)), pages);
1173 if (ret == 0) {
1174 ret = -EAGAIN;
1175 goto done;
1176 }
1177 /* now we have an array of pages, lock them all */
1178 for (i = 0; i < ret; i++) {
1179 /*
1180 * the caller is taking responsibility for
1181 * locked_page
1182 */
1183 if (pages[i] != locked_page)
1184 lock_page(pages[i]);
1185 page_cache_release(pages[i]);
1186 }
1187 pages_locked += ret;
1188 nrpages -= ret;
1189 index += ret;
1190 cond_resched();
1191 }
1192 ret = 0;
1193done:
1194 if (ret && pages_locked) {
1195 __unlock_for_delalloc(inode, locked_page,
1196 delalloc_start,
1197 ((u64)(start_index + pages_locked - 1)) <<
1198 PAGE_CACHE_SHIFT);
1199 }
1200 return ret;
1201}
1202
1203/*
1204 * find a contiguous range of bytes in the file marked as delalloc, not
1205 * more than 'max_bytes'. start and end are used to return the range,
1206 *
1207 * 1 is returned if we find something, 0 if nothing was in the tree
1208 */
1209static noinline u64 find_lock_delalloc_range(struct inode *inode,
1210 struct extent_io_tree *tree,
1211 struct page *locked_page,
1212 u64 *start, u64 *end,
1213 u64 max_bytes)
1214{
1215 u64 delalloc_start;
1216 u64 delalloc_end;
1217 u64 found;
1218 int ret;
1219 int loops = 0;
1220
1221again:
1222 /* step one, find a bunch of delalloc bytes starting at start */
1223 delalloc_start = *start;
1224 delalloc_end = 0;
1225 found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
1226 max_bytes);
1227 if (!found) {
1228 *start = delalloc_start;
1229 *end = delalloc_end;
1230 return found;
1231 }
1232
1233 /*
1234 * make sure to limit the number of pages we try to lock down
1235 * if we're looping.
1236 */
1237 if (delalloc_end + 1 - delalloc_start > max_bytes && loops) {
1238 delalloc_end = (delalloc_start + PAGE_CACHE_SIZE - 1) &
1239 ~((u64)PAGE_CACHE_SIZE - 1);
1240 }
1241 /* step two, lock all the pages after the page that has start */
1242 ret = lock_delalloc_pages(inode, locked_page,
1243 delalloc_start, delalloc_end);
1244 if (ret == -EAGAIN) {
1245 /* some of the pages are gone, lets avoid looping by
1246 * shortening the size of the delalloc range we're searching
1247 */
1248 if (!loops) {
1249 unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1);
1250 max_bytes = PAGE_CACHE_SIZE - offset;
1251 loops = 1;
1252 goto again;
1253 } else {
1254 found = 0;
1255 goto out_failed;
1256 }
1257 }
1258 BUG_ON(ret);
1259
1260 /* step three, lock the state bits for the whole range */
1261 lock_extent(tree, delalloc_start, delalloc_end, GFP_NOFS);
1262
1263 /* then test to make sure it is all still delalloc */
1264 ret = test_range_bit(tree, delalloc_start, delalloc_end,
1265 EXTENT_DELALLOC, 1);
1266 if (!ret) {
1267 unlock_extent(tree, delalloc_start, delalloc_end, GFP_NOFS);
1268 __unlock_for_delalloc(inode, locked_page,
1269 delalloc_start, delalloc_end);
1270 cond_resched();
1271 goto again;
1272 }
1273 *start = delalloc_start;
1274 *end = delalloc_end;
1275out_failed:
1276 return found;
1277}
1278
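/*
 * Worked example of the three steps above (illustrative numbers): with
 * delalloc covering the first megabyte and writepage holding the page at
 * offset 0 locked, step one reports [0, 1M - 1], step two locks the
 * remaining pages of that range, and step three locks the extent state
 * bits.  If some of those pages have already gone away, the -EAGAIN path
 * shrinks max_bytes down to a single page and retries, so the caller
 * always ends up owning both the pages and the state bits of whatever
 * range is returned.
 */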
1279int extent_clear_unlock_delalloc(struct inode *inode,
1280 struct extent_io_tree *tree,
1281 u64 start, u64 end, struct page *locked_page,
1282 int clear_dirty, int set_writeback,
1283 int end_writeback)
1284{
1285 int ret;
1286 struct page *pages[16];
1287 unsigned long index = start >> PAGE_CACHE_SHIFT;
1288 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1289 unsigned long nr_pages = end_index - index + 1;
1290 int i;
1291 int clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC;
1292
1293 if (clear_dirty)
1294 clear_bits |= EXTENT_DIRTY;
1295
1296 clear_extent_bit(tree, start, end, clear_bits, 1, 0, GFP_NOFS);
1297
1298 while(nr_pages > 0) {
1299 ret = find_get_pages_contig(inode->i_mapping, index,
1300 min(nr_pages, ARRAY_SIZE(pages)), pages);
1301 for (i = 0; i < ret; i++) {
1302 if (pages[i] == locked_page) {
1303 page_cache_release(pages[i]);
1304 continue;
1305 }
1306 if (clear_dirty)
1307 clear_page_dirty_for_io(pages[i]);
1308 if (set_writeback)
1309 set_page_writeback(pages[i]);
1310 if (end_writeback)
1311 end_page_writeback(pages[i]);
1312 unlock_page(pages[i]);
1313 page_cache_release(pages[i]);
1314 }
1315 nr_pages -= ret;
1316 index += ret;
1317 cond_resched();
1318 }
1319 return 0;
1320}
1321EXPORT_SYMBOL(extent_clear_unlock_delalloc);
1322
1323/*
1324 * count the number of bytes in the tree that have a given bit(s)
1325 * set. This can be fairly slow, except for EXTENT_DIRTY which is
1326 * cached. The total number found is returned.
1327 */
1328u64 count_range_bits(struct extent_io_tree *tree,
1329 u64 *start, u64 search_end, u64 max_bytes,
1330 unsigned long bits)
1331{
1332 struct rb_node *node;
1333 struct extent_state *state;
1334 u64 cur_start = *start;
1335 u64 total_bytes = 0;
1336 int found = 0;
1337
1338 if (search_end <= cur_start) {
1339 printk("search_end %Lu start %Lu\n", search_end, cur_start);
1340 WARN_ON(1);
1341 return 0;
1342 }
1343
1344 spin_lock_irq(&tree->lock);
1345 if (cur_start == 0 && bits == EXTENT_DIRTY) {
1346 total_bytes = tree->dirty_bytes;
1347 goto out;
1348 }
1349 /*
1350 * this search will find all the extents that end after
1351 * our range starts.
1352 */
1353 node = tree_search(tree, cur_start);
1354 if (!node) {
1355 goto out;
1356 }
1357
1358 while(1) {
1359 state = rb_entry(node, struct extent_state, rb_node);
1360 if (state->start > search_end)
1361 break;
1362 if (state->end >= cur_start && (state->state & bits)) {
1363 total_bytes += min(search_end, state->end) + 1 -
1364 max(cur_start, state->start);
1365 if (total_bytes >= max_bytes)
1366 break;
1367 if (!found) {
1368 *start = state->start;
1369 found = 1;
1370 }
1371 }
1372 node = rb_next(node);
1373 if (!node)
1374 break;
1375 }
1376out:
1377 spin_unlock_irq(&tree->lock);
1378 return total_bytes;
1379}
1380/*
1381 * helper function to lock both pages and extents in the tree.
1382 * pages must be locked first.
1383 */
1384int lock_range(struct extent_io_tree *tree, u64 start, u64 end)
1385{
1386 unsigned long index = start >> PAGE_CACHE_SHIFT;
1387 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1388 struct page *page;
1389 int err;
1390
1391 while (index <= end_index) {
1392 page = grab_cache_page(tree->mapping, index);
1393 if (!page) {
1394 err = -ENOMEM;
1395 goto failed;
1396 }
1397 if (IS_ERR(page)) {
1398 err = PTR_ERR(page);
1399 goto failed;
1400 }
1401 index++;
1402 }
1403 lock_extent(tree, start, end, GFP_NOFS);
1404 return 0;
1405
1406failed:
1407 /*
1408 * we failed above in getting the page at 'index', so we undo here
1409 * up to but not including the page at 'index'
1410 */
1411 end_index = index;
1412 index = start >> PAGE_CACHE_SHIFT;
1413 while (index < end_index) {
1414 page = find_get_page(tree->mapping, index);
1415 unlock_page(page);
1416 page_cache_release(page);
1417 index++;
1418 }
1419 return err;
1420}
1421EXPORT_SYMBOL(lock_range);
1422
1423/*
1424 * helper function to unlock both pages and extents in the tree.
1425 */
1426int unlock_range(struct extent_io_tree *tree, u64 start, u64 end)
1427{
1428 unsigned long index = start >> PAGE_CACHE_SHIFT;
1429 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1430 struct page *page;
1431
1432 while (index <= end_index) {
1433 page = find_get_page(tree->mapping, index);
1434 unlock_page(page);
1435 page_cache_release(page);
1436 index++;
1437 }
1438 unlock_extent(tree, start, end, GFP_NOFS);
1439 return 0;
1440}
1441EXPORT_SYMBOL(unlock_range);
1442
1443/*
1444 * set the private field for a given byte offset in the tree. If there isn't
1445 * an extent_state there already, this does nothing.
1446 */
1447int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
1448{
1449 struct rb_node *node;
1450 struct extent_state *state;
1451 int ret = 0;
1452
1453 spin_lock_irq(&tree->lock);
1454 /*
1455 * this search will find all the extents that end after
1456 * our range starts.
1457 */
1458 node = tree_search(tree, start);
1459 if (!node) {
1460 ret = -ENOENT;
1461 goto out;
1462 }
1463 state = rb_entry(node, struct extent_state, rb_node);
1464 if (state->start != start) {
1465 ret = -ENOENT;
1466 goto out;
1467 }
1468 state->private = private;
1469out:
1470 spin_unlock_irq(&tree->lock);
1471 return ret;
1472}
1473
1474int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
1475{
1476 struct rb_node *node;
1477 struct extent_state *state;
1478 int ret = 0;
1479
1480 spin_lock_irq(&tree->lock);
1481 /*
1482 * this search will find all the extents that end after
1483 * our range starts.
1484 */
1485 node = tree_search(tree, start);
1486 if (!node) {
1487 ret = -ENOENT;
1488 goto out;
1489 }
1490 state = rb_entry(node, struct extent_state, rb_node);
1491 if (state->start != start) {
1492 ret = -ENOENT;
1493 goto out;
1494 }
1495 *private = state->private;
1496out:
1497 spin_unlock_irq(&tree->lock);
1498 return ret;
1499}
1500
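/*
 * Usage sketch (illustrative; 'tree', 'offset' and 'csum' are
 * hypothetical): the private field is how btrfs remembers a per-block
 * value, such as a checksum, against the extent_state that starts at a
 * given byte offset:
 *
 *	u64 private;
 *
 *	set_state_private(tree, offset, csum);
 *	...
 *	if (get_state_private(tree, offset, &private) == 0)
 *		... private now holds csum ...
 *
 * Both helpers return -ENOENT if no state begins exactly at 'offset'.
 */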
1501/*
1502 * searches a range in the state tree for a given mask.
1503 * If 'filled' == 1, this returns 1 only if every extent in the tree
1504 * has the bits set. Otherwise, 1 is returned if any bit in the
1505 * range is found set.
1506 */
1507int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
1508 int bits, int filled)
1509{
1510 struct extent_state *state = NULL;
1511 struct rb_node *node;
1512 int bitset = 0;
1513 unsigned long flags;
1514
1515 spin_lock_irqsave(&tree->lock, flags);
1516 node = tree_search(tree, start);
1517 while (node && start <= end) {
1518 state = rb_entry(node, struct extent_state, rb_node);
1519
1520 if (filled && state->start > start) {
1521 bitset = 0;
1522 break;
1523 }
1524
1525 if (state->start > end)
1526 break;
1527
1528 if (state->state & bits) {
1529 bitset = 1;
1530 if (!filled)
1531 break;
1532 } else if (filled) {
1533 bitset = 0;
1534 break;
1535 }
1536 start = state->end + 1;
1537 if (start > end)
1538 break;
1539 node = rb_next(node);
1540 if (!node) {
1541 if (filled)
1542 bitset = 0;
1543 break;
1544 }
1545 }
1546 spin_unlock_irqrestore(&tree->lock, flags);
1547 return bitset;
1548}
1549EXPORT_SYMBOL(test_range_bit);
1550
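/*
 * Usage sketch (illustrative range): 'filled' switches the question from
 * "does any byte in the range have the bit" to "does every byte":
 *
 *	any_locked   = test_range_bit(tree, start, end, EXTENT_LOCKED, 0);
 *	all_uptodate = test_range_bit(tree, start, end, EXTENT_UPTODATE, 1);
 *
 * check_page_uptodate() below depends on the filled form, while the page
 * lock and writeback helpers only need the "any" form.
 */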
1551/*
1552 * helper function to set a given page up to date if all the
1553 * extents in the tree for that page are up to date
1554 */
1555static int check_page_uptodate(struct extent_io_tree *tree,
1556 struct page *page)
1557{
1558 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1559 u64 end = start + PAGE_CACHE_SIZE - 1;
1560 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
1561 SetPageUptodate(page);
1562 return 0;
1563}
1564
1565/*
1566 * helper function to unlock a page if all the extents in the tree
1567 * for that page are unlocked
1568 */
1569static int check_page_locked(struct extent_io_tree *tree,
1570 struct page *page)
1571{
1572 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1573 u64 end = start + PAGE_CACHE_SIZE - 1;
1574 if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
1575 unlock_page(page);
1576 return 0;
1577}
1578
1579/*
1580 * helper function to end page writeback if all the extents
1581 * in the tree for that page are done with writeback
1582 */
1583static int check_page_writeback(struct extent_io_tree *tree,
1584 struct page *page)
1585{
1586 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1587 u64 end = start + PAGE_CACHE_SIZE - 1;
1588 if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
1589 end_page_writeback(page);
1590 return 0;
1591}
1592
1593/* lots and lots of room for performance fixes in the end_bio funcs */
1594
1595/*
1596 * after a writepage IO is done, we need to:
1597 * clear the uptodate bits on error
1598 * clear the writeback bits in the extent tree for this IO
1599 * end_page_writeback if the page has no more pending IO
1600 *
1601 * Scheduling is not allowed, so the extent state tree is expected
1602 * to have one and only one object corresponding to this IO.
1603 */
1604static void end_bio_extent_writepage(struct bio *bio, int err)
1605{
1606 int uptodate = err == 0;
1607 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1608 struct extent_io_tree *tree;
1609 u64 start;
1610 u64 end;
1611 int whole_page;
1612 int ret;
1613
1614 do {
1615 struct page *page = bvec->bv_page;
1616 tree = &BTRFS_I(page->mapping->host)->io_tree;
1617
1618 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1619 bvec->bv_offset;
1620 end = start + bvec->bv_len - 1;
1621
1622 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1623 whole_page = 1;
1624 else
1625 whole_page = 0;
1626
1627 if (--bvec >= bio->bi_io_vec)
1628 prefetchw(&bvec->bv_page->flags);
1629 if (tree->ops && tree->ops->writepage_end_io_hook) {
1630 ret = tree->ops->writepage_end_io_hook(page, start,
1631 end, NULL, uptodate);
1632 if (ret)
1633 uptodate = 0;
1634 }
1635
1636 if (!uptodate && tree->ops &&
1637 tree->ops->writepage_io_failed_hook) {
1638 ret = tree->ops->writepage_io_failed_hook(bio, page,
1639 start, end, NULL);
1640 if (ret == 0) {
1641 uptodate = (err == 0);
1642 continue;
1643 }
1644 }
1645
1646 if (!uptodate) {
1647 clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
1648 ClearPageUptodate(page);
1649 SetPageError(page);
1650 }
1651
1652 clear_extent_writeback(tree, start, end, GFP_ATOMIC);
1653
1654 if (whole_page)
1655 end_page_writeback(page);
1656 else
1657 check_page_writeback(tree, page);
1658 } while (bvec >= bio->bi_io_vec);
1659
1660 bio_put(bio);
1661}
1662
1663/*
1664 * after a readpage IO is done, we need to:
1665 * clear the uptodate bits on error
1666 * set the uptodate bits if things worked
1667 * set the page up to date if all extents in the tree are uptodate
1668 * clear the lock bit in the extent tree
1669 * unlock the page if there are no other extents locked for it
1670 *
1671 * Scheduling is not allowed, so the extent state tree is expected
1672 * to have one and only one object corresponding to this IO.
1673 */
1674static void end_bio_extent_readpage(struct bio *bio, int err)
1675{
1676 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1677 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1678 struct extent_io_tree *tree;
1679 u64 start;
1680 u64 end;
1681 int whole_page;
1682 int ret;
1683
1684 do {
1685 struct page *page = bvec->bv_page;
1686 tree = &BTRFS_I(page->mapping->host)->io_tree;
1687
1688 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1689 bvec->bv_offset;
1690 end = start + bvec->bv_len - 1;
1691
1692 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1693 whole_page = 1;
1694 else
1695 whole_page = 0;
1696
1697 if (--bvec >= bio->bi_io_vec)
1698 prefetchw(&bvec->bv_page->flags);
1699
1700 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
1701 ret = tree->ops->readpage_end_io_hook(page, start, end,
1702 NULL);
1703 if (ret)
1704 uptodate = 0;
1705 }
1706 if (!uptodate && tree->ops &&
1707 tree->ops->readpage_io_failed_hook) {
1708 ret = tree->ops->readpage_io_failed_hook(bio, page,
1709 start, end, NULL);
1710 if (ret == 0) {
1711 uptodate =
1712 test_bit(BIO_UPTODATE, &bio->bi_flags);
1713 continue;
1714 }
1715 }
1716
1717 if (uptodate)
1718 set_extent_uptodate(tree, start, end,
1719 GFP_ATOMIC);
1720 unlock_extent(tree, start, end, GFP_ATOMIC);
1721
1722 if (whole_page) {
1723 if (uptodate) {
1724 SetPageUptodate(page);
1725 } else {
1726 ClearPageUptodate(page);
1727 SetPageError(page);
1728 }
1729 unlock_page(page);
1730 } else {
1731 if (uptodate) {
1732 check_page_uptodate(tree, page);
1733 } else {
1734 ClearPageUptodate(page);
1735 SetPageError(page);
1736 }
1737 check_page_locked(tree, page);
1738 }
1739 } while (bvec >= bio->bi_io_vec);
1740
1741 bio_put(bio);
1742}
1743
1744/*
1745 * IO done from prepare_write is pretty simple, we just unlock
1746 * the structs in the extent tree when done, and set the uptodate bits
1747 * as appropriate.
1748 */
1749static void end_bio_extent_preparewrite(struct bio *bio, int err)
1750{
1751 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1752 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1753 struct extent_io_tree *tree;
1754 u64 start;
1755 u64 end;
1756
1757 do {
1758 struct page *page = bvec->bv_page;
1759 tree = &BTRFS_I(page->mapping->host)->io_tree;
1760
1761 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1762 bvec->bv_offset;
1763 end = start + bvec->bv_len - 1;
1764
1765 if (--bvec >= bio->bi_io_vec)
1766 prefetchw(&bvec->bv_page->flags);
1767
1768 if (uptodate) {
1769 set_extent_uptodate(tree, start, end, GFP_ATOMIC);
1770 } else {
1771 ClearPageUptodate(page);
1772 SetPageError(page);
1773 }
1774
1775 unlock_extent(tree, start, end, GFP_ATOMIC);
1776
1777 } while (bvec >= bio->bi_io_vec);
1778
1779 bio_put(bio);
1780}
1781
1782static struct bio *
1783extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
1784 gfp_t gfp_flags)
1785{
1786 struct bio *bio;
1787
1788 bio = bio_alloc(gfp_flags, nr_vecs);
1789
1790 if (bio == NULL && (current->flags & PF_MEMALLOC)) {
1791 while (!bio && (nr_vecs /= 2))
1792 bio = bio_alloc(gfp_flags, nr_vecs);
1793 }
1794
1795 if (bio) {
1796 bio->bi_size = 0;
1797 bio->bi_bdev = bdev;
1798 bio->bi_sector = first_sector;
1799 }
1800 return bio;
1801}
1802
1803static int submit_one_bio(int rw, struct bio *bio, int mirror_num,
1804 unsigned long bio_flags)
1805{
1806 int ret = 0;
1807 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1808 struct page *page = bvec->bv_page;
1809 struct extent_io_tree *tree = bio->bi_private;
1810 u64 start;
1811 u64 end;
1812
1813 start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
1814 end = start + bvec->bv_len - 1;
1815
1816 bio->bi_private = NULL;
1817
1818 bio_get(bio);
1819
1820 if (tree->ops && tree->ops->submit_bio_hook)
1821 tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
1822 mirror_num, bio_flags);
1823 else
1824 submit_bio(rw, bio);
1825 if (bio_flagged(bio, BIO_EOPNOTSUPP))
1826 ret = -EOPNOTSUPP;
1827 bio_put(bio);
1828 return ret;
1829}
1830
1831static int submit_extent_page(int rw, struct extent_io_tree *tree,
1832 struct page *page, sector_t sector,
1833 size_t size, unsigned long offset,
1834 struct block_device *bdev,
1835 struct bio **bio_ret,
1836 unsigned long max_pages,
1837 bio_end_io_t end_io_func,
1838 int mirror_num,
1839 unsigned long prev_bio_flags,
1840 unsigned long bio_flags)
1841{
1842 int ret = 0;
1843 struct bio *bio;
1844 int nr;
1845 int contig = 0;
1846 int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED;
1847 int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
1848 size_t page_size = min(size, PAGE_CACHE_SIZE);
d1310b2e
CM
1849
1850 if (bio_ret && *bio_ret) {
1851 bio = *bio_ret;
1852 if (old_compressed)
1853 contig = bio->bi_sector == sector;
1854 else
1855 contig = bio->bi_sector + (bio->bi_size >> 9) ==
1856 sector;
1857
1858 if (prev_bio_flags != bio_flags || !contig ||
1859 (tree->ops && tree->ops->merge_bio_hook &&
1860 tree->ops->merge_bio_hook(page, offset, page_size, bio,
1861 bio_flags)) ||
1862 bio_add_page(bio, page, page_size, offset) < page_size) {
1863 ret = submit_one_bio(rw, bio, mirror_num,
1864 prev_bio_flags);
1865 bio = NULL;
1866 } else {
1867 return 0;
1868 }
1869 }
1870 if (this_compressed)
1871 nr = BIO_MAX_PAGES;
1872 else
1873 nr = bio_get_nr_vecs(bdev);
1874
1875 bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
1876 if (!bio) {
1877 printk("failed to allocate bio nr %d\n", nr);
1878 }
1879
1880 bio_add_page(bio, page, page_size, offset);
1881 bio->bi_end_io = end_io_func;
1882 bio->bi_private = tree;
1883
1884 if (bio_ret) {
1885 *bio_ret = bio;
1886 } else {
1887 ret = submit_one_bio(rw, bio, mirror_num, bio_flags);
1888 }
1889
1890 return ret;
1891}
1892
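/*
 * Usage sketch (illustrative; every name here is hypothetical): callers
 * reading or writing several contiguous blocks keep one bio alive across
 * calls by passing the same pointer through 'bio_ret', then flush what
 * is left over at the end:
 *
 *	struct bio *bio = NULL;
 *	unsigned long bio_flags = 0;
 *
 *	for each block:
 *		submit_extent_page(READ, tree, page, sector, len, offset,
 *				   bdev, &bio, nr_pages,
 *				   end_bio_extent_readpage, 0,
 *				   bio_flags, bio_flags);
 *	if (bio)
 *		submit_one_bio(READ, bio, 0, bio_flags);
 *
 * __extent_read_full_page() below follows this pattern.
 */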
1893void set_page_extent_mapped(struct page *page)
1894{
1895 if (!PagePrivate(page)) {
1896 SetPagePrivate(page);
1897 page_cache_get(page);
1898 set_page_private(page, EXTENT_PAGE_PRIVATE);
1899 }
1900}
1901
1902void set_page_extent_head(struct page *page, unsigned long len)
1903{
1904 set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
1905}
1906
1907/*
1908 * basic readpage implementation. Locked extent state structs are inserted
1909 * into the tree that are removed when the IO is done (by the end_io
1910 * handlers)
1911 */
1912static int __extent_read_full_page(struct extent_io_tree *tree,
1913 struct page *page,
1914 get_extent_t *get_extent,
1915 struct bio **bio, int mirror_num,
1916 unsigned long *bio_flags)
1917{
1918 struct inode *inode = page->mapping->host;
1919 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1920 u64 page_end = start + PAGE_CACHE_SIZE - 1;
1921 u64 end;
1922 u64 cur = start;
1923 u64 extent_offset;
1924 u64 last_byte = i_size_read(inode);
1925 u64 block_start;
1926 u64 cur_end;
1927 sector_t sector;
1928 struct extent_map *em;
1929 struct block_device *bdev;
1930 int ret;
1931 int nr = 0;
1932 size_t page_offset = 0;
1933 size_t iosize;
1934 size_t disk_io_size;
1935 size_t blocksize = inode->i_sb->s_blocksize;
1936 unsigned long this_bio_flag = 0;
1937
1938 set_page_extent_mapped(page);
1939
1940 end = page_end;
1941 lock_extent(tree, start, end, GFP_NOFS);
1942
1943 if (page->index == last_byte >> PAGE_CACHE_SHIFT) {
1944 char *userpage;
1945 size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1);
1946
1947 if (zero_offset) {
1948 iosize = PAGE_CACHE_SIZE - zero_offset;
1949 userpage = kmap_atomic(page, KM_USER0);
1950 memset(userpage + zero_offset, 0, iosize);
1951 flush_dcache_page(page);
1952 kunmap_atomic(userpage, KM_USER0);
1953 }
1954 }
1955 while (cur <= end) {
1956 if (cur >= last_byte) {
1957 char *userpage;
1958 iosize = PAGE_CACHE_SIZE - page_offset;
1959 userpage = kmap_atomic(page, KM_USER0);
1960 memset(userpage + page_offset, 0, iosize);
1961 flush_dcache_page(page);
1962 kunmap_atomic(userpage, KM_USER0);
1963 set_extent_uptodate(tree, cur, cur + iosize - 1,
1964 GFP_NOFS);
1965 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1966 break;
1967 }
1968 em = get_extent(inode, page, page_offset, cur,
1969 end - cur + 1, 0);
1970 if (IS_ERR(em) || !em) {
1971 SetPageError(page);
1972 unlock_extent(tree, cur, end, GFP_NOFS);
1973 break;
1974 }
1975		extent_offset = cur - em->start;
1976		if (extent_map_end(em) <= cur) {
1977			printk("bad mapping em [%Lu %Lu] cur %Lu\n", em->start, extent_map_end(em), cur);
1978		}
1979		BUG_ON(extent_map_end(em) <= cur);
1980		if (end < cur) {
1981			printk("2bad mapping end %Lu cur %Lu\n", end, cur);
1982		}
1983 BUG_ON(end < cur);
1984
1985 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
1986 this_bio_flag = EXTENT_BIO_COMPRESSED;
1987
1988 iosize = min(extent_map_end(em) - cur, end - cur + 1);
1989 cur_end = min(extent_map_end(em) - 1, end);
1990 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
1991 if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
1992 disk_io_size = em->block_len;
1993 sector = em->block_start >> 9;
1994 } else {
1995 sector = (em->block_start + extent_offset) >> 9;
1996 disk_io_size = iosize;
1997 }
1998 bdev = em->bdev;
1999 block_start = em->block_start;
2000 free_extent_map(em);
2001 em = NULL;
2002
2003 /* we've found a hole, just zero and go on */
2004 if (block_start == EXTENT_MAP_HOLE) {
2005 char *userpage;
2006 userpage = kmap_atomic(page, KM_USER0);
2007 memset(userpage + page_offset, 0, iosize);
2008 flush_dcache_page(page);
2009 kunmap_atomic(userpage, KM_USER0);
2010
2011 set_extent_uptodate(tree, cur, cur + iosize - 1,
2012 GFP_NOFS);
2013 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2014 cur = cur + iosize;
2015 page_offset += iosize;
2016 continue;
2017 }
2018		/* the get_extent function already copied the data into the page */
2019 if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
2020			check_page_uptodate(tree, page);
2021 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2022 cur = cur + iosize;
2023 page_offset += iosize;
2024 continue;
2025 }
2026 /* we have an inline extent but it didn't get marked up
2027 * to date. Error out
2028 */
2029 if (block_start == EXTENT_MAP_INLINE) {
2030 SetPageError(page);
2031 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2032 cur = cur + iosize;
2033 page_offset += iosize;
2034 continue;
2035 }
2036
2037 ret = 0;
2038 if (tree->ops && tree->ops->readpage_io_hook) {
2039 ret = tree->ops->readpage_io_hook(page, cur,
2040 cur + iosize - 1);
2041 }
2042 if (!ret) {
2043 unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
2044 pnr -= page->index;
2045			ret = submit_extent_page(READ, tree, page,
2046					 sector, disk_io_size, page_offset,
2047					 bdev, bio, pnr,
2048 end_bio_extent_readpage, mirror_num,
2049 *bio_flags,
2050 this_bio_flag);
2051			nr++;
2052			*bio_flags = this_bio_flag;
2053 }
2054 if (ret)
2055 SetPageError(page);
2056 cur = cur + iosize;
2057 page_offset += iosize;
2058 }
2059 if (!nr) {
2060 if (!PageError(page))
2061 SetPageUptodate(page);
2062 unlock_page(page);
2063 }
2064 return 0;
2065}
2066
2067int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
2068 get_extent_t *get_extent)
2069{
2070 struct bio *bio = NULL;
2071	unsigned long bio_flags = 0;
2072 int ret;
2073
2074 ret = __extent_read_full_page(tree, page, get_extent, &bio, 0,
2075 &bio_flags);
2076	if (bio)
2077		submit_one_bio(READ, bio, 0, bio_flags);
2078 return ret;
2079}
2080EXPORT_SYMBOL(extent_read_full_page);
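/*
 * The bio built by __extent_read_full_page() may be carried across calls so
 * that adjacent pages are merged into one submission (see extent_readpages()
 * further down); a single-page read such as the function above therefore has
 * to flush the leftover bio itself with submit_one_bio().
 */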
2081
2082/*
2083 * the writepage semantics are similar to regular writepage. extent
2084 * records are inserted to lock ranges in the tree, and as dirty areas
2085 * are found, they are marked writeback. Then the lock bits are removed
2086 * and the end_io handler clears the writeback ranges
2087 */
2088static int __extent_writepage(struct page *page, struct writeback_control *wbc,
2089 void *data)
2090{
2091 struct inode *inode = page->mapping->host;
2092 struct extent_page_data *epd = data;
2093 struct extent_io_tree *tree = epd->tree;
2094 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2095 u64 delalloc_start;
2096 u64 page_end = start + PAGE_CACHE_SIZE - 1;
2097 u64 end;
2098 u64 cur = start;
2099 u64 extent_offset;
2100 u64 last_byte = i_size_read(inode);
2101 u64 block_start;
2102 u64 iosize;
2103	u64 unlock_start;
2104 sector_t sector;
2105 struct extent_map *em;
2106 struct block_device *bdev;
2107 int ret;
2108 int nr = 0;
2109	size_t pg_offset = 0;
2110 size_t blocksize;
2111 loff_t i_size = i_size_read(inode);
2112 unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
2113 u64 nr_delalloc;
2114 u64 delalloc_end;
2115 int page_started;
2116 int compressed;
2117
2118 WARN_ON(!PageLocked(page));
2119	pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
2120	if (page->index > end_index ||
2121	   (page->index == end_index && !pg_offset)) {
2122		page->mapping->a_ops->invalidatepage(page, 0);
2123 unlock_page(page);
2124 return 0;
2125 }
2126
2127 if (page->index == end_index) {
2128 char *userpage;
2129
2130		userpage = kmap_atomic(page, KM_USER0);
2131		memset(userpage + pg_offset, 0,
2132		       PAGE_CACHE_SIZE - pg_offset);
2133		kunmap_atomic(userpage, KM_USER0);
2134		flush_dcache_page(page);
2135	}
2136	pg_offset = 0;
2137
2138 set_page_extent_mapped(page);
2139
2140 delalloc_start = start;
2141 delalloc_end = 0;
2142	page_started = 0;
2143	while (delalloc_end < page_end) {
2144 nr_delalloc = find_lock_delalloc_range(inode, tree,
2145 page,
2146 &delalloc_start,
2147 &delalloc_end,
2148 128 * 1024 * 1024);
2149 if (nr_delalloc == 0) {
2150 delalloc_start = delalloc_end + 1;
2151 continue;
2152 }
2153 tree->ops->fill_delalloc(inode, page, delalloc_start,
2154 delalloc_end, &page_started);
2155 delalloc_start = delalloc_end + 1;
2156 }
2157
2158 /* did the fill delalloc function already unlock and start the IO? */
2159 if (page_started) {
2160 return 0;
2161 }
2162
2163	lock_extent(tree, start, page_end, GFP_NOFS);
2164	unlock_start = start;
2165
2166	if (tree->ops && tree->ops->writepage_start_hook) {
2167 ret = tree->ops->writepage_start_hook(page, start,
2168 page_end);
2169 if (ret == -EAGAIN) {
2170 unlock_extent(tree, start, page_end, GFP_NOFS);
2171 redirty_page_for_writepage(wbc, page);
2172 unlock_page(page);
2173 return 0;
2174 }
2175 }
2176
2177 end = page_end;
2178 if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
2179 printk("found delalloc bits after lock_extent\n");
2180 }
2181
2182 if (last_byte <= start) {
2183 clear_extent_dirty(tree, start, page_end, GFP_NOFS);
2184 unlock_extent(tree, start, page_end, GFP_NOFS);
2185 if (tree->ops && tree->ops->writepage_end_io_hook)
2186 tree->ops->writepage_end_io_hook(page, start,
2187 page_end, NULL, 1);
2188 unlock_start = page_end + 1;
2189 goto done;
2190 }
2191
2192 set_extent_uptodate(tree, start, page_end, GFP_NOFS);
2193 blocksize = inode->i_sb->s_blocksize;
2194
2195 while (cur <= end) {
2196 if (cur >= last_byte) {
2197 clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
2198 unlock_extent(tree, unlock_start, page_end, GFP_NOFS);
2199 if (tree->ops && tree->ops->writepage_end_io_hook)
2200 tree->ops->writepage_end_io_hook(page, cur,
2201 page_end, NULL, 1);
2202 unlock_start = page_end + 1;
2203 break;
2204 }
2205		em = epd->get_extent(inode, page, pg_offset, cur,
2206 end - cur + 1, 1);
2207 if (IS_ERR(em) || !em) {
2208 SetPageError(page);
2209 break;
2210 }
2211
2212 extent_offset = cur - em->start;
2213 BUG_ON(extent_map_end(em) <= cur);
2214 BUG_ON(end < cur);
2215 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2216 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
2217 sector = (em->block_start + extent_offset) >> 9;
2218 bdev = em->bdev;
2219 block_start = em->block_start;
2220		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
2221 free_extent_map(em);
2222 em = NULL;
2223
2224 /*
2225 * compressed and inline extents are written through other
2226 * paths in the FS
2227 */
2228 if (compressed || block_start == EXTENT_MAP_HOLE ||
2229 block_start == EXTENT_MAP_INLINE) {
2230 clear_extent_dirty(tree, cur,
2231 cur + iosize - 1, GFP_NOFS);
2232
2233 unlock_extent(tree, unlock_start, cur + iosize -1,
2234 GFP_NOFS);
2235
2236 /*
2237 * end_io notification does not happen here for
2238 * compressed extents
2239 */
2240 if (!compressed && tree->ops &&
2241 tree->ops->writepage_end_io_hook)
2242 tree->ops->writepage_end_io_hook(page, cur,
2243 cur + iosize - 1,
2244 NULL, 1);
2245 else if (compressed) {
2246 /* we don't want to end_page_writeback on
2247 * a compressed extent. this happens
2248 * elsewhere
2249 */
2250 nr++;
2251 }
2252
2253 cur += iosize;
2254			pg_offset += iosize;
2255			unlock_start = cur;
2256 continue;
2257 }
2258 /* leave this out until we have a page_mkwrite call */
2259 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
2260 EXTENT_DIRTY, 0)) {
2261 cur = cur + iosize;
2262			pg_offset += iosize;
2263 continue;
2264 }
2265
2266 clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
2267 if (tree->ops && tree->ops->writepage_io_hook) {
2268 ret = tree->ops->writepage_io_hook(page, cur,
2269 cur + iosize - 1);
2270 } else {
2271 ret = 0;
2272 }
2273		if (ret) {
2274			SetPageError(page);
2275		} else {
2276			unsigned long max_nr = end_index + 1;
2277
2278 set_range_writeback(tree, cur, cur + iosize - 1);
2279 if (!PageWriteback(page)) {
2280 printk("warning page %lu not writeback, "
2281 "cur %llu end %llu\n", page->index,
2282 (unsigned long long)cur,
2283 (unsigned long long)end);
2284 }
2285
2286 ret = submit_extent_page(WRITE, tree, page, sector,
2287						 iosize, pg_offset, bdev,
2288						 &epd->bio, max_nr,
2289 end_bio_extent_writepage,
2290 0, 0, 0);
2291 if (ret)
2292 SetPageError(page);
2293 }
2294 cur = cur + iosize;
2295		pg_offset += iosize;
2296 nr++;
2297 }
2298done:
2299 if (nr == 0) {
2300 /* make sure the mapping tag for page dirty gets cleared */
2301 set_page_writeback(page);
2302 end_page_writeback(page);
2303 }
2304 if (unlock_start <= page_end)
2305 unlock_extent(tree, unlock_start, page_end, GFP_NOFS);
2306 unlock_page(page);
2307 return 0;
2308}
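/*
 * Rough flow of __extent_writepage() above (a summary of the existing code,
 * not new behaviour):
 * 1) any delalloc range covering the page is found, locked and handed to
 *    fill_delalloc() so space gets allocated;
 * 2) the page range is locked in the io tree and walked extent by extent;
 * 3) ordinary extents are marked writeback and sent through
 *    submit_extent_page(); holes, inline and compressed extents are skipped
 *    here because they are written through other paths in the FS.
 */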
2309
2310/**
2311 * extent_write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
2312 * @mapping: address space structure to write
2313 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2314 * @writepage: function called for each page
2315 * @data: data passed to writepage function
2316 *
2317 * If a page is already under I/O, write_cache_pages() skips it, even
2318 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
2319 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
2320 * and msync() need to guarantee that all the data which was dirty at the time
2321 * the call was made get new I/O started against them. If wbc->sync_mode is
2322 * WB_SYNC_ALL then we were called for data integrity and we must wait for
2323 * existing IO to complete.
2324 */
2325int extent_write_cache_pages(struct extent_io_tree *tree,
2326 struct address_space *mapping,
2327 struct writeback_control *wbc,
2328 writepage_t writepage, void *data)
2329{
2330 struct backing_dev_info *bdi = mapping->backing_dev_info;
2331 int ret = 0;
2332 int done = 0;
2333 struct pagevec pvec;
2334 int nr_pages;
2335 pgoff_t index;
2336 pgoff_t end; /* Inclusive */
2337 int scanned = 0;
2338 int range_whole = 0;
2339
2340 if (wbc->nonblocking && bdi_write_congested(bdi)) {
2341 wbc->encountered_congestion = 1;
2342 return 0;
2343 }
2344
2345 pagevec_init(&pvec, 0);
2346 if (wbc->range_cyclic) {
2347 index = mapping->writeback_index; /* Start from prev offset */
2348 end = -1;
2349 } else {
2350 index = wbc->range_start >> PAGE_CACHE_SHIFT;
2351 end = wbc->range_end >> PAGE_CACHE_SHIFT;
2352 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2353 range_whole = 1;
2354 scanned = 1;
2355 }
2356retry:
2357 while (!done && (index <= end) &&
2358 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
2359 PAGECACHE_TAG_DIRTY,
2360 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
2361 unsigned i;
2362
2363 scanned = 1;
2364 for (i = 0; i < nr_pages; i++) {
2365 struct page *page = pvec.pages[i];
2366
2367 /*
2368 * At this point we hold neither mapping->tree_lock nor
2369 * lock on the page itself: the page may be truncated or
2370 * invalidated (changing page->mapping to NULL), or even
2371 * swizzled back from swapper_space to tmpfs file
2372 * mapping
2373 */
2374 if (tree->ops && tree->ops->write_cache_pages_lock_hook)
2375 tree->ops->write_cache_pages_lock_hook(page);
2376 else
2377 lock_page(page);
2378
2379 if (unlikely(page->mapping != mapping)) {
2380 unlock_page(page);
2381 continue;
2382 }
2383
2384 if (!wbc->range_cyclic && page->index > end) {
2385 done = 1;
2386 unlock_page(page);
2387 continue;
2388 }
2389
2390 if (wbc->sync_mode != WB_SYNC_NONE)
2391 wait_on_page_writeback(page);
2392
2393 if (PageWriteback(page) ||
2394 !clear_page_dirty_for_io(page)) {
2395 unlock_page(page);
2396 continue;
2397 }
2398
2399 ret = (*writepage)(page, wbc, data);
2400
2401 if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
2402 unlock_page(page);
2403 ret = 0;
2404 }
2405 if (ret || (--(wbc->nr_to_write) <= 0))
2406 done = 1;
2407 if (wbc->nonblocking && bdi_write_congested(bdi)) {
2408 wbc->encountered_congestion = 1;
2409 done = 1;
2410 }
2411 }
2412 pagevec_release(&pvec);
2413 cond_resched();
2414 }
2415 if (!scanned && !done) {
2416 /*
2417 * We hit the last page and there is more work to be done: wrap
2418 * back to the start of the file
2419 */
2420 scanned = 1;
2421 index = 0;
2422 goto retry;
2423 }
2424 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2425 mapping->writeback_index = index;
2426
2427 if (wbc->range_cont)
2428 wbc->range_start = index << PAGE_CACHE_SHIFT;
2429 return ret;
2430}
2431EXPORT_SYMBOL(extent_write_cache_pages);
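/*
 * A minimal sketch of driving the walker above (illustrative only; these
 * writeback_control values are assumptions, not taken from a real caller):
 *
 *	struct writeback_control wbc = {
 *		.sync_mode = WB_SYNC_NONE,
 *		.nr_to_write = 64,
 *		.range_start = 0,
 *		.range_end = (loff_t)-1,
 *	};
 *
 *	extent_write_cache_pages(tree, mapping, &wbc, __extent_writepage, &epd);
 *
 * With WB_SYNC_ALL the loop also waits on pages already under writeback, as
 * required for fsync()-style data integrity writeback.
 */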
2432
2433int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
2434 get_extent_t *get_extent,
2435 struct writeback_control *wbc)
2436{
2437 int ret;
2438 struct address_space *mapping = page->mapping;
2439 struct extent_page_data epd = {
2440 .bio = NULL,
2441 .tree = tree,
2442 .get_extent = get_extent,
2443 };
2444 struct writeback_control wbc_writepages = {
2445 .bdi = wbc->bdi,
2446 .sync_mode = WB_SYNC_NONE,
2447 .older_than_this = NULL,
2448 .nr_to_write = 64,
2449 .range_start = page_offset(page) + PAGE_CACHE_SIZE,
2450 .range_end = (loff_t)-1,
2451 };
2452
2453
2454 ret = __extent_writepage(page, wbc, &epd);
2455
2456 extent_write_cache_pages(tree, mapping, &wbc_writepages,
2457 __extent_writepage, &epd);
2458	if (epd.bio) {
2459		submit_one_bio(WRITE, epd.bio, 0, 0);
2460 }
2461 return ret;
2462}
2463EXPORT_SYMBOL(extent_write_full_page);
2464
2465
2466int extent_writepages(struct extent_io_tree *tree,
2467 struct address_space *mapping,
2468 get_extent_t *get_extent,
2469 struct writeback_control *wbc)
2470{
2471 int ret = 0;
2472 struct extent_page_data epd = {
2473 .bio = NULL,
2474 .tree = tree,
2475 .get_extent = get_extent,
2476 };
2477
2478 ret = extent_write_cache_pages(tree, mapping, wbc,
2479 __extent_writepage, &epd);
2480	if (epd.bio) {
2481		submit_one_bio(WRITE, epd.bio, 0, 0);
2482 }
2483 return ret;
2484}
2485EXPORT_SYMBOL(extent_writepages);
2486
2487int extent_readpages(struct extent_io_tree *tree,
2488 struct address_space *mapping,
2489 struct list_head *pages, unsigned nr_pages,
2490 get_extent_t get_extent)
2491{
2492 struct bio *bio = NULL;
2493 unsigned page_idx;
2494 struct pagevec pvec;
2495	unsigned long bio_flags = 0;
2496
2497 pagevec_init(&pvec, 0);
2498 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
2499 struct page *page = list_entry(pages->prev, struct page, lru);
2500
2501 prefetchw(&page->flags);
2502 list_del(&page->lru);
2503 /*
2504 * what we want to do here is call add_to_page_cache_lru,
2505 * but that isn't exported, so we reproduce it here
2506 */
2507 if (!add_to_page_cache(page, mapping,
2508 page->index, GFP_KERNEL)) {
2509
2510 /* open coding of lru_cache_add, also not exported */
2511 page_cache_get(page);
2512 if (!pagevec_add(&pvec, page))
2513 __pagevec_lru_add(&pvec);
2514			__extent_read_full_page(tree, page, get_extent,
2515						&bio, 0, &bio_flags);
2516 }
2517 page_cache_release(page);
2518 }
2519 if (pagevec_count(&pvec))
2520 __pagevec_lru_add(&pvec);
2521 BUG_ON(!list_empty(pages));
2522 if (bio)
2523		submit_one_bio(READ, bio, 0, bio_flags);
2524 return 0;
2525}
2526EXPORT_SYMBOL(extent_readpages);
2527
2528/*
2529 * basic invalidatepage code, this waits on any locked or writeback
2530 * ranges corresponding to the page, and then deletes any extent state
2531 * records from the tree
2532 */
2533int extent_invalidatepage(struct extent_io_tree *tree,
2534 struct page *page, unsigned long offset)
2535{
2536 u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
2537 u64 end = start + PAGE_CACHE_SIZE - 1;
2538 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
2539
2540 start += (offset + blocksize -1) & ~(blocksize - 1);
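	/*
	 * The rounding above moves start to the first full block at or after
	 * offset inside the page. With a 4K blocksize (an assumed example),
	 * offset = 1 gives start += 4096, so a partially invalidated block at
	 * the front of the page is left alone, while offset = 0 keeps start
	 * at the page start and the whole range is cleared.
	 */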
2541 if (start > end)
2542 return 0;
2543
2544 lock_extent(tree, start, end, GFP_NOFS);
2545 wait_on_extent_writeback(tree, start, end);
2546 clear_extent_bit(tree, start, end,
2547 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
2548 1, 1, GFP_NOFS);
2549 return 0;
2550}
2551EXPORT_SYMBOL(extent_invalidatepage);
2552
2553/*
2554 * simple commit_write call, set_range_dirty is used to mark both
2555 * the pages and the extent records as dirty
2556 */
2557int extent_commit_write(struct extent_io_tree *tree,
2558 struct inode *inode, struct page *page,
2559 unsigned from, unsigned to)
2560{
2561 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2562
2563 set_page_extent_mapped(page);
2564 set_page_dirty(page);
2565
2566 if (pos > inode->i_size) {
2567 i_size_write(inode, pos);
2568 mark_inode_dirty(inode);
2569 }
2570 return 0;
2571}
2572EXPORT_SYMBOL(extent_commit_write);
2573
2574int extent_prepare_write(struct extent_io_tree *tree,
2575 struct inode *inode, struct page *page,
2576 unsigned from, unsigned to, get_extent_t *get_extent)
2577{
2578 u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
2579 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
2580 u64 block_start;
2581 u64 orig_block_start;
2582 u64 block_end;
2583 u64 cur_end;
2584 struct extent_map *em;
2585 unsigned blocksize = 1 << inode->i_blkbits;
2586 size_t page_offset = 0;
2587 size_t block_off_start;
2588 size_t block_off_end;
2589 int err = 0;
2590 int iocount = 0;
2591 int ret = 0;
2592 int isnew;
2593
2594 set_page_extent_mapped(page);
2595
2596 block_start = (page_start + from) & ~((u64)blocksize - 1);
2597 block_end = (page_start + to - 1) | (blocksize - 1);
2598 orig_block_start = block_start;
2599
2600 lock_extent(tree, page_start, page_end, GFP_NOFS);
2601 while(block_start <= block_end) {
2602 em = get_extent(inode, page, page_offset, block_start,
2603 block_end - block_start + 1, 1);
2604 if (IS_ERR(em) || !em) {
2605 goto err;
2606 }
2607 cur_end = min(block_end, extent_map_end(em) - 1);
2608 block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
2609 block_off_end = block_off_start + blocksize;
2610 isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
2611
2612 if (!PageUptodate(page) && isnew &&
2613 (block_off_end > to || block_off_start < from)) {
2614 void *kaddr;
2615
2616 kaddr = kmap_atomic(page, KM_USER0);
2617 if (block_off_end > to)
2618 memset(kaddr + to, 0, block_off_end - to);
2619 if (block_off_start < from)
2620 memset(kaddr + block_off_start, 0,
2621 from - block_off_start);
2622 flush_dcache_page(page);
2623 kunmap_atomic(kaddr, KM_USER0);
2624 }
2625 if ((em->block_start != EXTENT_MAP_HOLE &&
2626 em->block_start != EXTENT_MAP_INLINE) &&
2627 !isnew && !PageUptodate(page) &&
2628 (block_off_end > to || block_off_start < from) &&
2629 !test_range_bit(tree, block_start, cur_end,
2630 EXTENT_UPTODATE, 1)) {
2631 u64 sector;
2632 u64 extent_offset = block_start - em->start;
2633 size_t iosize;
2634 sector = (em->block_start + extent_offset) >> 9;
2635 iosize = (cur_end - block_start + blocksize) &
2636 ~((u64)blocksize - 1);
2637 /*
2638 * we've already got the extent locked, but we
2639 * need to split the state such that our end_bio
2640 * handler can clear the lock.
2641 */
2642 set_extent_bit(tree, block_start,
2643 block_start + iosize - 1,
2644 EXTENT_LOCKED, 0, NULL, GFP_NOFS);
2645 ret = submit_extent_page(READ, tree, page,
2646 sector, iosize, page_offset, em->bdev,
2647 NULL, 1,
2648 end_bio_extent_preparewrite, 0,
2649 0, 0);
2650 iocount++;
2651 block_start = block_start + iosize;
2652 } else {
2653 set_extent_uptodate(tree, block_start, cur_end,
2654 GFP_NOFS);
2655 unlock_extent(tree, block_start, cur_end, GFP_NOFS);
2656 block_start = cur_end + 1;
2657 }
2658 page_offset = block_start & (PAGE_CACHE_SIZE - 1);
2659 free_extent_map(em);
2660 }
2661 if (iocount) {
2662 wait_extent_bit(tree, orig_block_start,
2663 block_end, EXTENT_LOCKED);
2664 }
2665 check_page_uptodate(tree, page);
2666err:
2667 /* FIXME, zero out newly allocated blocks on error */
2668 return err;
2669}
2670EXPORT_SYMBOL(extent_prepare_write);
2671
2672/*
2673 * a helper for releasepage, this tests for areas of the page that
2674 * are locked or under IO and drops the related state bits if it is safe
2675 * to drop the page.
2676 */
2677int try_release_extent_state(struct extent_map_tree *map,
2678 struct extent_io_tree *tree, struct page *page,
2679 gfp_t mask)
2680{
2681 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2682 u64 end = start + PAGE_CACHE_SIZE - 1;
2683 int ret = 1;
2684
2685 if (test_range_bit(tree, start, end,
2686 EXTENT_IOBITS | EXTENT_ORDERED, 0))
2687 ret = 0;
2688 else {
2689 if ((mask & GFP_NOFS) == GFP_NOFS)
2690 mask = GFP_NOFS;
2691 clear_extent_bit(tree, start, end, EXTENT_UPTODATE,
2692 1, 1, mask);
2693 }
2694 return ret;
2695}
2696EXPORT_SYMBOL(try_release_extent_state);
2697
2698/*
2699 * a helper for releasepage. As long as there are no locked extents
2700 * in the range corresponding to the page, both state records and extent
2701 * map records are removed
2702 */
2703int try_release_extent_mapping(struct extent_map_tree *map,
2704 struct extent_io_tree *tree, struct page *page,
2705 gfp_t mask)
2706{
2707 struct extent_map *em;
2708 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2709 u64 end = start + PAGE_CACHE_SIZE - 1;
2710
2711 if ((mask & __GFP_WAIT) &&
2712 page->mapping->host->i_size > 16 * 1024 * 1024) {
2713		u64 len;
2714		while (start <= end) {
2715			len = end - start + 1;
2716			spin_lock(&map->lock);
2717			em = lookup_extent_mapping(map, start, len);
2718 if (!em || IS_ERR(em)) {
2719 spin_unlock(&map->lock);
2720 break;
2721 }
2722 if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
2723 em->start != start) {
2724 spin_unlock(&map->lock);
2725 free_extent_map(em);
2726 break;
2727 }
2728 if (!test_range_bit(tree, em->start,
2729 extent_map_end(em) - 1,
2730 EXTENT_LOCKED | EXTENT_WRITEBACK |
2731 EXTENT_ORDERED,
2732 0)) {
2733 remove_extent_mapping(map, em);
2734 /* once for the rb tree */
2735 free_extent_map(em);
2736 }
2737 start = extent_map_end(em);
2738			spin_unlock(&map->lock);
2739
2740 /* once for us */
2741 free_extent_map(em);
2742 }
2743	}
2744	return try_release_extent_state(map, tree, page, mask);
2745}
2746EXPORT_SYMBOL(try_release_extent_mapping);
2747
2748sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
2749 get_extent_t *get_extent)
2750{
2751 struct inode *inode = mapping->host;
2752 u64 start = iblock << inode->i_blkbits;
2753 sector_t sector = 0;
2754 struct extent_map *em;
2755
2756 em = get_extent(inode, NULL, 0, start, (1 << inode->i_blkbits), 0);
2757 if (!em || IS_ERR(em))
2758 return 0;
2759
2760 if (em->block_start == EXTENT_MAP_INLINE ||
2761 em->block_start == EXTENT_MAP_HOLE)
2762 goto out;
2763
2764 sector = (em->block_start + start - em->start) >> inode->i_blkbits;
2765out:
2766 free_extent_map(em);
2767 return sector;
2768}
2769
2770static inline struct page *extent_buffer_page(struct extent_buffer *eb,
2771 unsigned long i)
2772{
2773 struct page *p;
2774 struct address_space *mapping;
2775
2776 if (i == 0)
2777 return eb->first_page;
2778 i += eb->start >> PAGE_CACHE_SHIFT;
2779 mapping = eb->first_page->mapping;
2780 if (!mapping)
2781 return NULL;
2782
2783 /*
2784 * extent_buffer_page is only called after pinning the page
2785 * by increasing the reference count. So we know the page must
2786 * be in the radix tree.
2787 */
2788	rcu_read_lock();
2789	p = radix_tree_lookup(&mapping->page_tree, i);
2790	rcu_read_unlock();
2791
2792 return p;
2793}
2794
2795 static inline unsigned long num_extent_pages(u64 start, u64 len)
2796 {
2797 return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
2798 (start >> PAGE_CACHE_SHIFT);
2799}
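/*
 * Worked example (assuming 4K pages): start = 12288 and len = 8192 span file
 * pages 3 and 4, so ((12288 + 8192 + 4095) >> 12) - (12288 >> 12) = 5 - 3 = 2.
 */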
2800
2801static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
2802 u64 start,
2803 unsigned long len,
2804 gfp_t mask)
2805{
2806 struct extent_buffer *eb = NULL;
2807#ifdef LEAK_DEBUG
2808	unsigned long flags;
2809#endif
2810
2811	eb = kmem_cache_zalloc(extent_buffer_cache, mask);
2812 eb->start = start;
2813 eb->len = len;
2814	mutex_init(&eb->mutex);
2815#ifdef LEAK_DEBUG
2816 spin_lock_irqsave(&leak_lock, flags);
2817 list_add(&eb->leak_list, &buffers);
2818 spin_unlock_irqrestore(&leak_lock, flags);
2819#endif
2820 atomic_set(&eb->refs, 1);
2821
2822 return eb;
2823}
2824
2825static void __free_extent_buffer(struct extent_buffer *eb)
2826{
2827#ifdef LEAK_DEBUG
2828 unsigned long flags;
2829 spin_lock_irqsave(&leak_lock, flags);
2830 list_del(&eb->leak_list);
2831 spin_unlock_irqrestore(&leak_lock, flags);
2832#endif
2833 kmem_cache_free(extent_buffer_cache, eb);
2834}
2835
2836struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
2837 u64 start, unsigned long len,
2838 struct page *page0,
2839 gfp_t mask)
2840{
2841 unsigned long num_pages = num_extent_pages(start, len);
2842 unsigned long i;
2843 unsigned long index = start >> PAGE_CACHE_SHIFT;
2844 struct extent_buffer *eb;
2845	struct extent_buffer *exists = NULL;
2846 struct page *p;
2847 struct address_space *mapping = tree->mapping;
2848 int uptodate = 1;
2849
2850 spin_lock(&tree->buffer_lock);
2851 eb = buffer_search(tree, start);
2852 if (eb) {
2853 atomic_inc(&eb->refs);
2854 spin_unlock(&tree->buffer_lock);
2855		mark_page_accessed(eb->first_page);
2856 return eb;
2857 }
2858 spin_unlock(&tree->buffer_lock);
2859
2860	eb = __alloc_extent_buffer(tree, start, len, mask);
2861	if (!eb)
2862 return NULL;
2863
2864 if (page0) {
2865 eb->first_page = page0;
2866 i = 1;
2867 index++;
2868 page_cache_get(page0);
2869 mark_page_accessed(page0);
2870 set_page_extent_mapped(page0);
2871		set_page_extent_head(page0, len);
2872		uptodate = PageUptodate(page0);
2873 } else {
2874 i = 0;
2875 }
2876 for (; i < num_pages; i++, index++) {
2877 p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
2878 if (!p) {
2879 WARN_ON(1);
2880			goto free_eb;
2881 }
2882 set_page_extent_mapped(p);
2883 mark_page_accessed(p);
2884 if (i == 0) {
2885 eb->first_page = p;
2886 set_page_extent_head(p, len);
2887 } else {
2888 set_page_private(p, EXTENT_PAGE_PRIVATE);
2889 }
2890 if (!PageUptodate(p))
2891 uptodate = 0;
2892 unlock_page(p);
2893 }
2894 if (uptodate)
2895 eb->flags |= EXTENT_UPTODATE;
2896 eb->flags |= EXTENT_BUFFER_FILLED;
2897
2898 spin_lock(&tree->buffer_lock);
2899 exists = buffer_tree_insert(tree, start, &eb->rb_node);
2900 if (exists) {
2901 /* add one reference for the caller */
2902 atomic_inc(&exists->refs);
2903 spin_unlock(&tree->buffer_lock);
2904 goto free_eb;
2905 }
2906 spin_unlock(&tree->buffer_lock);
2907
2908 /* add one reference for the tree */
2909 atomic_inc(&eb->refs);
2910 return eb;
2911
2912free_eb:
2913	if (!atomic_dec_and_test(&eb->refs))
2914 return exists;
2915 for (index = 1; index < i; index++)
2916		page_cache_release(extent_buffer_page(eb, index));
2917	page_cache_release(extent_buffer_page(eb, 0));
2918	__free_extent_buffer(eb);
2919	return exists;
2920}
2921EXPORT_SYMBOL(alloc_extent_buffer);
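/*
 * Reference counting note on the allocator above: the returned buffer holds
 * one reference for the caller and one for the tree. When two tasks race to
 * create the same buffer, the loser sees it in buffer_tree_insert(), takes a
 * reference on the existing buffer and frees its own copy via free_eb.
 */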
2922
2923struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
2924 u64 start, unsigned long len,
2925 gfp_t mask)
2926{
2927	struct extent_buffer *eb;
2928
2929 spin_lock(&tree->buffer_lock);
2930 eb = buffer_search(tree, start);
2931 if (eb)
2932 atomic_inc(&eb->refs);
2933 spin_unlock(&tree->buffer_lock);
2934
2935 if (eb)
2936 mark_page_accessed(eb->first_page);
2937
2938	return eb;
2939}
2940EXPORT_SYMBOL(find_extent_buffer);
2941
2942void free_extent_buffer(struct extent_buffer *eb)
2943{
2944 if (!eb)
2945 return;
2946
2947 if (!atomic_dec_and_test(&eb->refs))
2948 return;
2949
2950	WARN_ON(1);
2951}
2952EXPORT_SYMBOL(free_extent_buffer);
2953
2954int clear_extent_buffer_dirty(struct extent_io_tree *tree,
2955 struct extent_buffer *eb)
2956{
2957 int set;
2958 unsigned long i;
2959 unsigned long num_pages;
2960 struct page *page;
2961
2962 u64 start = eb->start;
2963 u64 end = start + eb->len - 1;
2964
2965 set = clear_extent_dirty(tree, start, end, GFP_NOFS);
2966 num_pages = num_extent_pages(eb->start, eb->len);
2967
2968 for (i = 0; i < num_pages; i++) {
2969 page = extent_buffer_page(eb, i);
2970		lock_page(page);
2971 if (i == 0)
2972 set_page_extent_head(page, eb->len);
2973 else
2974 set_page_private(page, EXTENT_PAGE_PRIVATE);
2975
2976 /*
2977 * if we're on the last page or the first page and the
2978 * block isn't aligned on a page boundary, do extra checks
2979 * to make sure we don't clean page that is partially dirty
2980 */
2981 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2982 ((i == num_pages - 1) &&
2983 ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
2984 start = (u64)page->index << PAGE_CACHE_SHIFT;
2985 end = start + PAGE_CACHE_SIZE - 1;
2986 if (test_range_bit(tree, start, end,
2987 EXTENT_DIRTY, 0)) {
2988				unlock_page(page);
2989 continue;
2990 }
2991 }
2992 clear_page_dirty_for_io(page);
2993		spin_lock_irq(&page->mapping->tree_lock);
2994 if (!PageDirty(page)) {
2995 radix_tree_tag_clear(&page->mapping->page_tree,
2996 page_index(page),
2997 PAGECACHE_TAG_DIRTY);
2998 }
2999		spin_unlock_irq(&page->mapping->tree_lock);
3000		unlock_page(page);
3001 }
3002 return 0;
3003}
3004EXPORT_SYMBOL(clear_extent_buffer_dirty);
3005
3006int wait_on_extent_buffer_writeback(struct extent_io_tree *tree,
3007 struct extent_buffer *eb)
3008{
3009 return wait_on_extent_writeback(tree, eb->start,
3010 eb->start + eb->len - 1);
3011}
3012EXPORT_SYMBOL(wait_on_extent_buffer_writeback);
3013
3014int set_extent_buffer_dirty(struct extent_io_tree *tree,
3015 struct extent_buffer *eb)
3016{
3017 unsigned long i;
3018 unsigned long num_pages;
3019
3020 num_pages = num_extent_pages(eb->start, eb->len);
3021 for (i = 0; i < num_pages; i++) {
3022 struct page *page = extent_buffer_page(eb, i);
3023 /* writepage may need to do something special for the
3024 * first page, we have to make sure page->private is
3025 * properly set. releasepage may drop page->private
3026 * on us if the page isn't already dirty.
3027 */
3028		lock_page(page);
3029		if (i == 0) {
3030 set_page_extent_head(page, eb->len);
3031 } else if (PagePrivate(page) &&
3032 page->private != EXTENT_PAGE_PRIVATE) {
3033			set_page_extent_mapped(page);
3034 }
3035 __set_page_dirty_nobuffers(extent_buffer_page(eb, i));
3036 set_extent_dirty(tree, page_offset(page),
3037 page_offset(page) + PAGE_CACHE_SIZE -1,
3038 GFP_NOFS);
3039 unlock_page(page);
3040	}
3041	return 0;
3042}
3043EXPORT_SYMBOL(set_extent_buffer_dirty);
3044
3045int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
3046 struct extent_buffer *eb)
3047{
3048 unsigned long i;
3049 struct page *page;
3050 unsigned long num_pages;
3051
3052 num_pages = num_extent_pages(eb->start, eb->len);
3053 eb->flags &= ~EXTENT_UPTODATE;
3054
3055 clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
3056 GFP_NOFS);
3057 for (i = 0; i < num_pages; i++) {
3058 page = extent_buffer_page(eb, i);
3059 if (page)
3060 ClearPageUptodate(page);
3061 }
3062 return 0;
3063}
3064
3065int set_extent_buffer_uptodate(struct extent_io_tree *tree,
3066 struct extent_buffer *eb)
3067{
3068 unsigned long i;
3069 struct page *page;
3070 unsigned long num_pages;
3071
3072 num_pages = num_extent_pages(eb->start, eb->len);
3073
3074 set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
3075 GFP_NOFS);
3076 for (i = 0; i < num_pages; i++) {
3077 page = extent_buffer_page(eb, i);
3078 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
3079 ((i == num_pages - 1) &&
3080 ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
3081 check_page_uptodate(tree, page);
3082 continue;
3083 }
3084 SetPageUptodate(page);
3085 }
3086 return 0;
3087}
3088EXPORT_SYMBOL(set_extent_buffer_uptodate);
3089
3090int extent_range_uptodate(struct extent_io_tree *tree,
3091 u64 start, u64 end)
3092{
3093 struct page *page;
3094 int ret;
3095 int pg_uptodate = 1;
3096 int uptodate;
3097 unsigned long index;
3098
3099 ret = test_range_bit(tree, start, end, EXTENT_UPTODATE, 1);
3100 if (ret)
3101 return 1;
3102 while(start <= end) {
3103 index = start >> PAGE_CACHE_SHIFT;
3104 page = find_get_page(tree->mapping, index);
3105 uptodate = PageUptodate(page);
3106 page_cache_release(page);
3107 if (!uptodate) {
3108 pg_uptodate = 0;
3109 break;
3110 }
3111 start += PAGE_CACHE_SIZE;
3112 }
3113 return pg_uptodate;
3114}
3115
3116 int extent_buffer_uptodate(struct extent_io_tree *tree,
3117			   struct extent_buffer *eb)
3118{
3119	int ret = 0;
3120 unsigned long num_pages;
3121 unsigned long i;
3122 struct page *page;
3123 int pg_uptodate = 1;
3124
3125	if (eb->flags & EXTENT_UPTODATE)
3126		return 1;
3127
3128	ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
3129			   EXTENT_UPTODATE, 1);
3130 if (ret)
3131 return ret;
3132
3133 num_pages = num_extent_pages(eb->start, eb->len);
3134 for (i = 0; i < num_pages; i++) {
3135 page = extent_buffer_page(eb, i);
3136 if (!PageUptodate(page)) {
3137 pg_uptodate = 0;
3138 break;
3139 }
3140 }
3141	return pg_uptodate;
3142}
3143EXPORT_SYMBOL(extent_buffer_uptodate);
3144
3145int read_extent_buffer_pages(struct extent_io_tree *tree,
3146 struct extent_buffer *eb,
3147			     u64 start, int wait,
3148			     get_extent_t *get_extent, int mirror_num)
3149{
3150 unsigned long i;
3151 unsigned long start_i;
3152 struct page *page;
3153 int err;
3154 int ret = 0;
3155 int locked_pages = 0;
3156 int all_uptodate = 1;
3157 int inc_all_pages = 0;
3158	unsigned long num_pages;
3159	struct bio *bio = NULL;
3160	unsigned long bio_flags = 0;
3161
3162 if (eb->flags & EXTENT_UPTODATE)
3163 return 0;
3164
3165	if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
3166 EXTENT_UPTODATE, 1)) {
3167 return 0;
3168 }
3169
3170 if (start) {
3171 WARN_ON(start < eb->start);
3172 start_i = (start >> PAGE_CACHE_SHIFT) -
3173 (eb->start >> PAGE_CACHE_SHIFT);
3174 } else {
3175 start_i = 0;
3176 }
3177
3178 num_pages = num_extent_pages(eb->start, eb->len);
3179 for (i = start_i; i < num_pages; i++) {
3180 page = extent_buffer_page(eb, i);
3181		if (!wait) {
3182			if (!trylock_page(page))
3183				goto unlock_exit;
3184 } else {
3185 lock_page(page);
3186 }
3187		locked_pages++;
3188		if (!PageUptodate(page)) {
3189 all_uptodate = 0;
3190 }
3191 }
3192 if (all_uptodate) {
3193 if (start_i == 0)
3194 eb->flags |= EXTENT_UPTODATE;
3195 if (ret) {
3196 printk("all up to date but ret is %d\n", ret);
3197 }
3198 goto unlock_exit;
3199 }
3200
3201 for (i = start_i; i < num_pages; i++) {
3202 page = extent_buffer_page(eb, i);
3203 if (inc_all_pages)
3204 page_cache_get(page);
3205 if (!PageUptodate(page)) {
3206 if (start_i == 0)
3207 inc_all_pages = 1;
3208			ClearPageError(page);
3209			err = __extent_read_full_page(tree, page,
3210						      get_extent, &bio,
3211						      mirror_num, &bio_flags);
3212 if (err) {
3213 ret = err;
3214				printk("err %d from __extent_read_full_page\n", ret);
3215 }
3216 } else {
3217 unlock_page(page);
3218 }
3219 }
3220
3221	if (bio)
3222		submit_one_bio(READ, bio, mirror_num, bio_flags);
3223
3224	if (ret || !wait) {
3225 if (ret)
3226 printk("ret %d wait %d returning\n", ret, wait);
3227 return ret;
3228 }
3229 for (i = start_i; i < num_pages; i++) {
3230 page = extent_buffer_page(eb, i);
3231 wait_on_page_locked(page);
3232 if (!PageUptodate(page)) {
3233			printk("page not uptodate after wait_on_page_locked\n");
3234 ret = -EIO;
3235 }
3236 }
3237 if (!ret)
3238 eb->flags |= EXTENT_UPTODATE;
3239 return ret;
3240
3241unlock_exit:
3242 i = start_i;
3243 while(locked_pages > 0) {
3244 page = extent_buffer_page(eb, i);
3245 i++;
3246 unlock_page(page);
3247 locked_pages--;
3248 }
3249 return ret;
3250}
3251EXPORT_SYMBOL(read_extent_buffer_pages);
3252
3253void read_extent_buffer(struct extent_buffer *eb, void *dstv,
3254 unsigned long start,
3255 unsigned long len)
3256{
3257 size_t cur;
3258 size_t offset;
3259 struct page *page;
3260 char *kaddr;
3261 char *dst = (char *)dstv;
3262 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3263 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3264
3265 WARN_ON(start > eb->len);
3266 WARN_ON(start + len > eb->start + eb->len);
3267
3268 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3269
3270 while(len > 0) {
3271 page = extent_buffer_page(eb, i);
3272
3273 cur = min(len, (PAGE_CACHE_SIZE - offset));
3274 kaddr = kmap_atomic(page, KM_USER1);
3275 memcpy(dst, kaddr + offset, cur);
3276 kunmap_atomic(kaddr, KM_USER1);
3277
3278 dst += cur;
3279 len -= cur;
3280 offset = 0;
3281 i++;
3282 }
3283}
3284EXPORT_SYMBOL(read_extent_buffer);
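/*
 * Small usage sketch (illustrative; eb and offset are assumed to come from
 * the caller): copying a little-endian u64 out of a metadata buffer
 * regardless of how the range is split across pages:
 *
 *	__le64 raw;
 *	u64 val;
 *
 *	read_extent_buffer(eb, &raw, offset, sizeof(raw));
 *	val = le64_to_cpu(raw);
 */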
3285
3286int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
3287 unsigned long min_len, char **token, char **map,
3288 unsigned long *map_start,
3289 unsigned long *map_len, int km)
3290{
3291 size_t offset = start & (PAGE_CACHE_SIZE - 1);
3292 char *kaddr;
3293 struct page *p;
3294 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3295 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3296 unsigned long end_i = (start_offset + start + min_len - 1) >>
3297 PAGE_CACHE_SHIFT;
3298
3299 if (i != end_i)
3300 return -EINVAL;
3301
3302 if (i == 0) {
3303 offset = start_offset;
3304 *map_start = 0;
3305 } else {
3306 offset = 0;
3307 *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
3308 }
3309 if (start + min_len > eb->len) {
3310		printk("bad mapping eb start %Lu len %lu, wanted %lu %lu\n", eb->start, eb->len, start, min_len);
3311 WARN_ON(1);
3312 }
3313
3314 p = extent_buffer_page(eb, i);
3315 kaddr = kmap_atomic(p, km);
3316 *token = kaddr;
3317 *map = kaddr + offset;
3318 *map_len = PAGE_CACHE_SIZE - offset;
3319 return 0;
3320}
3321EXPORT_SYMBOL(map_private_extent_buffer);
3322
3323int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
3324 unsigned long min_len,
3325 char **token, char **map,
3326 unsigned long *map_start,
3327 unsigned long *map_len, int km)
3328{
3329 int err;
3330 int save = 0;
3331 if (eb->map_token) {
3332 unmap_extent_buffer(eb, eb->map_token, km);
3333 eb->map_token = NULL;
3334 save = 1;
3335 }
3336 err = map_private_extent_buffer(eb, start, min_len, token, map,
3337 map_start, map_len, km);
3338 if (!err && save) {
3339 eb->map_token = *token;
3340 eb->kaddr = *map;
3341 eb->map_start = *map_start;
3342 eb->map_len = *map_len;
3343 }
3344 return err;
3345}
3346EXPORT_SYMBOL(map_extent_buffer);
3347
3348void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
3349{
3350 kunmap_atomic(token, km);
3351}
3352EXPORT_SYMBOL(unmap_extent_buffer);
3353
3354int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
3355 unsigned long start,
3356 unsigned long len)
3357{
3358 size_t cur;
3359 size_t offset;
3360 struct page *page;
3361 char *kaddr;
3362 char *ptr = (char *)ptrv;
3363 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3364 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3365 int ret = 0;
3366
3367 WARN_ON(start > eb->len);
3368 WARN_ON(start + len > eb->start + eb->len);
3369
3370 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3371
3372 while(len > 0) {
3373 page = extent_buffer_page(eb, i);
3374
3375 cur = min(len, (PAGE_CACHE_SIZE - offset));
3376
3377 kaddr = kmap_atomic(page, KM_USER0);
3378 ret = memcmp(ptr, kaddr + offset, cur);
3379 kunmap_atomic(kaddr, KM_USER0);
3380 if (ret)
3381 break;
3382
3383 ptr += cur;
3384 len -= cur;
3385 offset = 0;
3386 i++;
3387 }
3388 return ret;
3389}
3390EXPORT_SYMBOL(memcmp_extent_buffer);
3391
3392void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
3393 unsigned long start, unsigned long len)
3394{
3395 size_t cur;
3396 size_t offset;
3397 struct page *page;
3398 char *kaddr;
3399 char *src = (char *)srcv;
3400 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3401 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3402
3403 WARN_ON(start > eb->len);
3404 WARN_ON(start + len > eb->start + eb->len);
3405
3406 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3407
3408 while(len > 0) {
3409 page = extent_buffer_page(eb, i);
3410 WARN_ON(!PageUptodate(page));
3411
3412 cur = min(len, PAGE_CACHE_SIZE - offset);
3413 kaddr = kmap_atomic(page, KM_USER1);
3414 memcpy(kaddr + offset, src, cur);
3415 kunmap_atomic(kaddr, KM_USER1);
3416
3417 src += cur;
3418 len -= cur;
3419 offset = 0;
3420 i++;
3421 }
3422}
3423EXPORT_SYMBOL(write_extent_buffer);
3424
3425void memset_extent_buffer(struct extent_buffer *eb, char c,
3426 unsigned long start, unsigned long len)
3427{
3428 size_t cur;
3429 size_t offset;
3430 struct page *page;
3431 char *kaddr;
3432 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3433 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3434
3435 WARN_ON(start > eb->len);
3436 WARN_ON(start + len > eb->start + eb->len);
3437
3438 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3439
3440 while(len > 0) {
3441 page = extent_buffer_page(eb, i);
3442 WARN_ON(!PageUptodate(page));
3443
3444 cur = min(len, PAGE_CACHE_SIZE - offset);
3445 kaddr = kmap_atomic(page, KM_USER0);
3446 memset(kaddr + offset, c, cur);
3447 kunmap_atomic(kaddr, KM_USER0);
3448
3449 len -= cur;
3450 offset = 0;
3451 i++;
3452 }
3453}
3454EXPORT_SYMBOL(memset_extent_buffer);
3455
3456void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
3457 unsigned long dst_offset, unsigned long src_offset,
3458 unsigned long len)
3459{
3460 u64 dst_len = dst->len;
3461 size_t cur;
3462 size_t offset;
3463 struct page *page;
3464 char *kaddr;
3465 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3466 unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3467
3468 WARN_ON(src->len != dst_len);
3469
3470 offset = (start_offset + dst_offset) &
3471 ((unsigned long)PAGE_CACHE_SIZE - 1);
3472
3473 while(len > 0) {
3474 page = extent_buffer_page(dst, i);
3475 WARN_ON(!PageUptodate(page));
3476
3477 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
3478
3479 kaddr = kmap_atomic(page, KM_USER0);
3480 read_extent_buffer(src, kaddr + offset, src_offset, cur);
3481 kunmap_atomic(kaddr, KM_USER0);
3482
3483 src_offset += cur;
3484 len -= cur;
3485 offset = 0;
3486 i++;
3487 }
3488}
3489EXPORT_SYMBOL(copy_extent_buffer);
3490
3491static void move_pages(struct page *dst_page, struct page *src_page,
3492 unsigned long dst_off, unsigned long src_off,
3493 unsigned long len)
3494{
3495 char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3496 if (dst_page == src_page) {
3497 memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
3498 } else {
3499 char *src_kaddr = kmap_atomic(src_page, KM_USER1);
3500 char *p = dst_kaddr + dst_off + len;
3501 char *s = src_kaddr + src_off + len;
3502
3503 while (len--)
3504 *--p = *--s;
3505
3506 kunmap_atomic(src_kaddr, KM_USER1);
3507 }
3508 kunmap_atomic(dst_kaddr, KM_USER0);
3509}
3510
3511static void copy_pages(struct page *dst_page, struct page *src_page,
3512 unsigned long dst_off, unsigned long src_off,
3513 unsigned long len)
3514{
3515 char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3516 char *src_kaddr;
3517
3518 if (dst_page != src_page)
3519 src_kaddr = kmap_atomic(src_page, KM_USER1);
3520 else
3521 src_kaddr = dst_kaddr;
3522
3523 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
3524 kunmap_atomic(dst_kaddr, KM_USER0);
3525 if (dst_page != src_page)
3526 kunmap_atomic(src_kaddr, KM_USER1);
3527}
3528
3529void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3530 unsigned long src_offset, unsigned long len)
3531{
3532 size_t cur;
3533 size_t dst_off_in_page;
3534 size_t src_off_in_page;
3535 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3536 unsigned long dst_i;
3537 unsigned long src_i;
3538
3539 if (src_offset + len > dst->len) {
3540		printk("memcpy bogus src_offset %lu move len %lu len %lu\n",
3541 src_offset, len, dst->len);
3542 BUG_ON(1);
3543 }
3544 if (dst_offset + len > dst->len) {
3545		printk("memcpy bogus dst_offset %lu move len %lu len %lu\n",
3546 dst_offset, len, dst->len);
3547 BUG_ON(1);
3548 }
3549
3550 while(len > 0) {
3551 dst_off_in_page = (start_offset + dst_offset) &
3552 ((unsigned long)PAGE_CACHE_SIZE - 1);
3553 src_off_in_page = (start_offset + src_offset) &
3554 ((unsigned long)PAGE_CACHE_SIZE - 1);
3555
3556 dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3557 src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
3558
3559 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
3560 src_off_in_page));
3561 cur = min_t(unsigned long, cur,
3562 (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
3563
3564 copy_pages(extent_buffer_page(dst, dst_i),
3565 extent_buffer_page(dst, src_i),
3566 dst_off_in_page, src_off_in_page, cur);
3567
3568 src_offset += cur;
3569 dst_offset += cur;
3570 len -= cur;
3571 }
3572}
3573EXPORT_SYMBOL(memcpy_extent_buffer);
3574
3575void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3576 unsigned long src_offset, unsigned long len)
3577{
3578 size_t cur;
3579 size_t dst_off_in_page;
3580 size_t src_off_in_page;
3581 unsigned long dst_end = dst_offset + len - 1;
3582 unsigned long src_end = src_offset + len - 1;
3583 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3584 unsigned long dst_i;
3585 unsigned long src_i;
3586
3587 if (src_offset + len > dst->len) {
3588 printk("memmove bogus src_offset %lu move len %lu len %lu\n",
3589 src_offset, len, dst->len);
3590 BUG_ON(1);
3591 }
3592 if (dst_offset + len > dst->len) {
3593 printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
3594 dst_offset, len, dst->len);
3595 BUG_ON(1);
3596 }
3597 if (dst_offset < src_offset) {
3598 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
3599 return;
3600 }
3601 while(len > 0) {
3602 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
3603 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
3604
3605 dst_off_in_page = (start_offset + dst_end) &
3606 ((unsigned long)PAGE_CACHE_SIZE - 1);
3607 src_off_in_page = (start_offset + src_end) &
3608 ((unsigned long)PAGE_CACHE_SIZE - 1);
3609
3610 cur = min_t(unsigned long, len, src_off_in_page + 1);
3611 cur = min(cur, dst_off_in_page + 1);
3612 move_pages(extent_buffer_page(dst, dst_i),
3613 extent_buffer_page(dst, src_i),
3614 dst_off_in_page - cur + 1,
3615 src_off_in_page - cur + 1, cur);
3616
3617 dst_end -= cur;
3618 src_end -= cur;
3619 len -= cur;
3620 }
3621}
3622EXPORT_SYMBOL(memmove_extent_buffer);
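/*
 * Overlap handling in the helper above: when the destination starts below
 * the source the regions can be copied front to back, so it falls through to
 * memcpy_extent_buffer(); otherwise the copy walks backwards from the end
 * (via move_pages()) so overlapping ranges are not corrupted. For example,
 * moving 8 bytes from offset 0 to offset 4 must write source byte 7 into its
 * new slot before source byte 4 is overwritten.
 */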
3623
3624int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
3625{
3626 u64 start = page_offset(page);
3627 struct extent_buffer *eb;
3628 int ret = 1;
3629 unsigned long i;
3630 unsigned long num_pages;
3631
3632 spin_lock(&tree->buffer_lock);
3633 eb = buffer_search(tree, start);
3634 if (!eb)
3635 goto out;
3636
3637 if (atomic_read(&eb->refs) > 1) {
3638 ret = 0;
3639 goto out;
3640 }
3641 /* at this point we can safely release the extent buffer */
3642 num_pages = num_extent_pages(eb->start, eb->len);
3643 for (i = 0; i < num_pages; i++)
3644 page_cache_release(extent_buffer_page(eb, i));
3645 rb_erase(&eb->rb_node, &tree->buffer);
3646 __free_extent_buffer(eb);
3647out:
3648 spin_unlock(&tree->buffer_lock);
3649 return ret;
3650}
3651EXPORT_SYMBOL(try_release_extent_buffer);