// SPDX-License-Identifier: GPL-2.0

#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/prefetch.h>
#include <linux/fsverity.h>
#include "misc.h"
#include "extent_io.h"
#include "extent-io-tree.h"
#include "extent_map.h"
#include "ctree.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "check-integrity.h"
#include "locking.h"
#include "rcu-string.h"
#include "backref.h"
#include "disk-io.h"
#include "subpage.h"
#include "zoned.h"
#include "block-group.h"

static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;
static struct bio_set btrfs_bioset;

static inline bool extent_state_in_tree(const struct extent_state *state)
{
	return !RB_EMPTY_NODE(&state->rb_node);
}

#ifdef CONFIG_BTRFS_DEBUG
static LIST_HEAD(states);
static DEFINE_SPINLOCK(leak_lock);

static inline void btrfs_leak_debug_add(spinlock_t *lock,
					struct list_head *new,
					struct list_head *head)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_add(new, head);
	spin_unlock_irqrestore(lock, flags);
}

static inline void btrfs_leak_debug_del(spinlock_t *lock,
					struct list_head *entry)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_del(entry);
	spin_unlock_irqrestore(lock, flags);
}

void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info)
{
	struct extent_buffer *eb;
	unsigned long flags;

	/*
	 * If we didn't get into open_ctree our allocated_ebs will not be
	 * initialized, so just skip this.
	 */
	if (!fs_info->allocated_ebs.next)
		return;

	spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
	while (!list_empty(&fs_info->allocated_ebs)) {
		eb = list_first_entry(&fs_info->allocated_ebs,
				      struct extent_buffer, leak_list);
		pr_err(
	"BTRFS: buffer leak start %llu len %lu refs %d bflags %lu owner %llu\n",
		       eb->start, eb->len, atomic_read(&eb->refs), eb->bflags,
		       btrfs_header_owner(eb));
		list_del(&eb->leak_list);
		kmem_cache_free(extent_buffer_cache, eb);
	}
	spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
}

static inline void btrfs_extent_state_leak_debug_check(void)
{
	struct extent_state *state;

	while (!list_empty(&states)) {
		state = list_entry(states.next, struct extent_state, leak_list);
		pr_err("BTRFS: state leak: start %llu end %llu state %u in tree %d refs %d\n",
		       state->start, state->end, state->state,
		       extent_state_in_tree(state),
		       refcount_read(&state->refs));
		list_del(&state->leak_list);
		kmem_cache_free(extent_state_cache, state);
	}
}

#define btrfs_debug_check_extent_io_range(tree, start, end)		\
	__btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end))
static inline void __btrfs_debug_check_extent_io_range(const char *caller,
		struct extent_io_tree *tree, u64 start, u64 end)
{
	struct inode *inode = tree->private_data;
	u64 isize;

	if (!inode || !is_data_inode(inode))
		return;

	isize = i_size_read(inode);
	if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) {
		btrfs_debug_rl(BTRFS_I(inode)->root->fs_info,
		    "%s: ino %llu isize %llu odd range [%llu,%llu]",
			caller, btrfs_ino(BTRFS_I(inode)), isize, start, end);
	}
}
#else
#define btrfs_leak_debug_add(lock, new, head)	do {} while (0)
#define btrfs_leak_debug_del(lock, entry)	do {} while (0)
#define btrfs_extent_state_leak_debug_check()	do {} while (0)
#define btrfs_debug_check_extent_io_range(c, s, e)	do {} while (0)
#endif

struct tree_entry {
	u64 start;
	u64 end;
	struct rb_node rb_node;
};

struct extent_page_data {
	struct btrfs_bio_ctrl bio_ctrl;
	/* tells writepage not to lock the state bits for this range
	 * it still does the unlocking
	 */
	unsigned int extent_locked:1;

	/* tells the submit_bio code to use REQ_SYNC */
	unsigned int sync_io:1;
};

static int add_extent_changeset(struct extent_state *state, u32 bits,
				struct extent_changeset *changeset,
				int set)
{
	int ret;

	if (!changeset)
		return 0;
	if (set && (state->state & bits) == bits)
		return 0;
	if (!set && (state->state & bits) == 0)
		return 0;
	changeset->bytes_changed += state->end - state->start + 1;
	ret = ulist_add(&changeset->range_changed, state->start, state->end,
			GFP_ATOMIC);
	return ret;
}

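/*
 * Hand a fully built bio to the data or metadata submission hook, depending
 * on whether the io tree belongs to a data inode, and map the returned
 * blk_status_t to a regular errno.
 */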
int __must_check submit_one_bio(struct bio *bio, int mirror_num,
				unsigned long bio_flags)
{
	blk_status_t ret = 0;
	struct extent_io_tree *tree = bio->bi_private;

	bio->bi_private = NULL;

	/* Caller should ensure the bio has at least some range added */
	ASSERT(bio->bi_iter.bi_size);
	if (is_data_inode(tree->private_data))
		ret = btrfs_submit_data_bio(tree->private_data, bio, mirror_num,
					    bio_flags);
	else
		ret = btrfs_submit_metadata_bio(tree->private_data, bio,
						mirror_num, bio_flags);

	return blk_status_to_errno(ret);
}

/* Cleanup unsubmitted bios */
static void end_write_bio(struct extent_page_data *epd, int ret)
{
	struct bio *bio = epd->bio_ctrl.bio;

	if (bio) {
		bio->bi_status = errno_to_blk_status(ret);
		bio_endio(bio);
		epd->bio_ctrl.bio = NULL;
	}
}

/*
 * Submit bio from extent page data via submit_one_bio
 *
 * Return 0 if everything is OK.
 * Return <0 for error.
 */
static int __must_check flush_write_bio(struct extent_page_data *epd)
{
	int ret = 0;
	struct bio *bio = epd->bio_ctrl.bio;

	if (bio) {
		ret = submit_one_bio(bio, 0, 0);
		/*
		 * Clean up of epd->bio is handled by its endio function.
		 * And endio is either triggered by successful bio execution
		 * or the error handler of submit bio hook.
		 * So at this point, no matter what happened, we don't need
		 * to clean up epd->bio.
		 */
		epd->bio_ctrl.bio = NULL;
	}
	return ret;
}

int __init extent_state_cache_init(void)
{
	extent_state_cache = kmem_cache_create("btrfs_extent_state",
			sizeof(struct extent_state), 0,
			SLAB_MEM_SPREAD, NULL);
	if (!extent_state_cache)
		return -ENOMEM;
	return 0;
}

int __init extent_io_init(void)
{
	extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
			sizeof(struct extent_buffer), 0,
			SLAB_MEM_SPREAD, NULL);
	if (!extent_buffer_cache)
		return -ENOMEM;

	if (bioset_init(&btrfs_bioset, BIO_POOL_SIZE,
			offsetof(struct btrfs_bio, bio),
			BIOSET_NEED_BVECS))
		goto free_buffer_cache;

	if (bioset_integrity_create(&btrfs_bioset, BIO_POOL_SIZE))
		goto free_bioset;

	return 0;

free_bioset:
	bioset_exit(&btrfs_bioset);

free_buffer_cache:
	kmem_cache_destroy(extent_buffer_cache);
	extent_buffer_cache = NULL;
	return -ENOMEM;
}

void __cold extent_state_cache_exit(void)
{
	btrfs_extent_state_leak_debug_check();
	kmem_cache_destroy(extent_state_cache);
}

void __cold extent_io_exit(void)
{
	/*
	 * Make sure all delayed rcu free are flushed before we
	 * destroy caches.
	 */
	rcu_barrier();
	kmem_cache_destroy(extent_buffer_cache);
	bioset_exit(&btrfs_bioset);
}

/*
 * For the file_extent_tree, we want to hold the inode lock when we lookup and
 * update the disk_i_size, but lockdep will complain because our io_tree we hold
 * the tree lock and get the inode lock when setting delalloc. These two things
 * are unrelated, so make a class for the file_extent_tree so we don't get the
 * two locking patterns mixed up.
 */
static struct lock_class_key file_extent_tree_class;

void extent_io_tree_init(struct btrfs_fs_info *fs_info,
			 struct extent_io_tree *tree, unsigned int owner,
			 void *private_data)
{
	tree->fs_info = fs_info;
	tree->state = RB_ROOT;
	tree->dirty_bytes = 0;
	spin_lock_init(&tree->lock);
	tree->private_data = private_data;
	tree->owner = owner;
	if (owner == IO_TREE_INODE_FILE_EXTENT)
		lockdep_set_class(&tree->lock, &file_extent_tree_class);
}

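/*
 * Drop and free every extent_state still attached to the tree. Nothing is
 * expected to be waiting on any of the states at this point (see the
 * assertion below).
 */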
void extent_io_tree_release(struct extent_io_tree *tree)
{
	spin_lock(&tree->lock);
	/*
	 * Do a single barrier for the waitqueue_active check here, the state
	 * of the waitqueue should not change once extent_io_tree_release is
	 * called.
	 */
	smp_mb();
	while (!RB_EMPTY_ROOT(&tree->state)) {
		struct rb_node *node;
		struct extent_state *state;

		node = rb_first(&tree->state);
		state = rb_entry(node, struct extent_state, rb_node);
		rb_erase(&state->rb_node, &tree->state);
		RB_CLEAR_NODE(&state->rb_node);
		/*
		 * btree io trees aren't supposed to have tasks waiting for
		 * changes in the flags of extent states ever.
		 */
		ASSERT(!waitqueue_active(&state->wq));
		free_extent_state(state);

		cond_resched_lock(&tree->lock);
	}
	spin_unlock(&tree->lock);
}

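/*
 * Allocate a new extent_state with zeroed state bits and a single reference,
 * or return NULL if the slab allocation fails.
 */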
static struct extent_state *alloc_extent_state(gfp_t mask)
{
	struct extent_state *state;

	/*
	 * The given mask might be not appropriate for the slab allocator,
	 * drop the unsupported bits
	 */
	mask &= ~(__GFP_DMA32|__GFP_HIGHMEM);
	state = kmem_cache_alloc(extent_state_cache, mask);
	if (!state)
		return state;
	state->state = 0;
	state->failrec = NULL;
	RB_CLEAR_NODE(&state->rb_node);
	btrfs_leak_debug_add(&leak_lock, &state->leak_list, &states);
	refcount_set(&state->refs, 1);
	init_waitqueue_head(&state->wq);
	trace_alloc_extent_state(state, mask, _RET_IP_);
	return state;
}

void free_extent_state(struct extent_state *state)
{
	if (!state)
		return;
	if (refcount_dec_and_test(&state->refs)) {
		WARN_ON(extent_state_in_tree(state));
		btrfs_leak_debug_del(&leak_lock, &state->leak_list);
		trace_free_extent_state(state, _RET_IP_);
		kmem_cache_free(extent_state_cache, state);
	}
}

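/*
 * Link @node into the rb-tree, starting the descent either at @search_start
 * (or the root when it is NULL) or from the precomputed @p_in/@parent_in
 * pair. Returns the existing node if one already covers @offset, NULL on
 * successful insertion.
 */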
static struct rb_node *tree_insert(struct rb_root *root,
				   struct rb_node *search_start,
				   u64 offset,
				   struct rb_node *node,
				   struct rb_node ***p_in,
				   struct rb_node **parent_in)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	if (p_in && parent_in) {
		p = *p_in;
		parent = *parent_in;
		goto do_insert;
	}

	p = search_start ? &search_start : &root->rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset > entry->end)
			p = &(*p)->rb_right;
		else
			return parent;
	}

do_insert:
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

/**
 * Search @tree for an entry that contains @offset. Such entry would have
 * entry->start <= offset && entry->end >= offset.
 *
 * @tree:       the tree to search
 * @offset:     offset that should fall within an entry in @tree
 * @next_ret:   pointer to the first entry whose range ends after @offset
 * @prev_ret:   pointer to the first entry whose range begins before @offset
 * @p_ret:      pointer where new node should be anchored (used when inserting an
 *              entry in the tree)
 * @parent_ret: points to entry which would have been the parent of the entry,
 *              containing @offset
 *
 * This function returns a pointer to the entry that contains @offset byte
 * address. If no such entry exists, then NULL is returned and the other
 * pointer arguments to the function are filled, otherwise the found entry is
 * returned and other pointers are left untouched.
 */
static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
				      struct rb_node **next_ret,
				      struct rb_node **prev_ret,
				      struct rb_node ***p_ret,
				      struct rb_node **parent_ret)
{
	struct rb_root *root = &tree->state;
	struct rb_node **n = &root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct tree_entry *entry;
	struct tree_entry *prev_entry = NULL;

	while (*n) {
		prev = *n;
		entry = rb_entry(prev, struct tree_entry, rb_node);
		prev_entry = entry;

		if (offset < entry->start)
			n = &(*n)->rb_left;
		else if (offset > entry->end)
			n = &(*n)->rb_right;
		else
			return *n;
	}

	if (p_ret)
		*p_ret = n;
	if (parent_ret)
		*parent_ret = prev;

	if (next_ret) {
		orig_prev = prev;
		while (prev && offset > prev_entry->end) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*next_ret = prev;
		prev = orig_prev;
	}

	if (prev_ret) {
		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*prev_ret = prev;
	}
	return NULL;
}

static inline struct rb_node *
tree_search_for_insert(struct extent_io_tree *tree,
		       u64 offset,
		       struct rb_node ***p_ret,
		       struct rb_node **parent_ret)
{
	struct rb_node *next = NULL;
	struct rb_node *ret;

	ret = __etree_search(tree, offset, &next, NULL, p_ret, parent_ret);
	if (!ret)
		return next;
	return ret;
}

static inline struct rb_node *tree_search(struct extent_io_tree *tree,
					  u64 offset)
{
	return tree_search_for_insert(tree, offset, NULL, NULL);
}

/*
 * utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree. Extents with EXTENT_IO in their state field
 * are not merged because the end_io handlers need to be able to do
 * operations on them without sleeping (or doing allocations/splits).
 *
 * This should be called with the tree lock held.
 */
static void merge_state(struct extent_io_tree *tree,
			struct extent_state *state)
{
	struct extent_state *other;
	struct rb_node *other_node;

	if (state->state & (EXTENT_LOCKED | EXTENT_BOUNDARY))
		return;

	other_node = rb_prev(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->end == state->start - 1 &&
		    other->state == state->state) {
			if (tree->private_data &&
			    is_data_inode(tree->private_data))
				btrfs_merge_delalloc_extent(tree->private_data,
							    state, other);
			state->start = other->start;
			rb_erase(&other->rb_node, &tree->state);
			RB_CLEAR_NODE(&other->rb_node);
			free_extent_state(other);
		}
	}
	other_node = rb_next(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->start == state->end + 1 &&
		    other->state == state->state) {
			if (tree->private_data &&
			    is_data_inode(tree->private_data))
				btrfs_merge_delalloc_extent(tree->private_data,
							    state, other);
			state->end = other->end;
			rb_erase(&other->rb_node, &tree->state);
			RB_CLEAR_NODE(&other->rb_node);
			free_extent_state(other);
		}
	}
}

static void set_state_bits(struct extent_io_tree *tree,
			   struct extent_state *state, u32 *bits,
			   struct extent_changeset *changeset);

/*
 * insert an extent_state struct into the tree. 'bits' are set on the
 * struct before it is inserted.
 *
 * This may return -EEXIST if the extent is already there, in which case the
 * state struct is freed.
 *
 * The tree lock is not taken internally. This is a utility function and
 * probably isn't what you want to call (see set/clear_extent_bit).
 */
static int insert_state(struct extent_io_tree *tree,
			struct extent_state *state, u64 start, u64 end,
			struct rb_node ***p,
			struct rb_node **parent,
			u32 *bits, struct extent_changeset *changeset)
{
	struct rb_node *node;

	if (end < start) {
		btrfs_err(tree->fs_info,
			"insert state: end < start %llu %llu", end, start);
		WARN_ON(1);
	}
	state->start = start;
	state->end = end;

	set_state_bits(tree, state, bits, changeset);

	node = tree_insert(&tree->state, NULL, end, &state->rb_node, p, parent);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		btrfs_err(tree->fs_info,
		       "found node %llu %llu on insert of %llu %llu",
		       found->start, found->end, start, end);
		return -EEXIST;
	}
	merge_state(tree, state);
	return 0;
}

/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created second half. 'split' indicates an
 * offset inside 'orig' where it should be split.
 *
 * Before calling,
 * the tree has 'orig' at [orig->start, orig->end]. After calling, there
 * are two extent state structs in the tree:
 * prealloc: [orig->start, split - 1]
 * orig: [ split, orig->end ]
 *
 * The tree locks are not taken by this function. They need to be held
 * by the caller.
 */
static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
		       struct extent_state *prealloc, u64 split)
{
	struct rb_node *node;

	if (tree->private_data && is_data_inode(tree->private_data))
		btrfs_split_delalloc_extent(tree->private_data, orig, split);

	prealloc->start = orig->start;
	prealloc->end = split - 1;
	prealloc->state = orig->state;
	orig->start = split;

	node = tree_insert(&tree->state, &orig->rb_node, prealloc->end,
			   &prealloc->rb_node, NULL, NULL);
	if (node) {
		free_extent_state(prealloc);
		return -EEXIST;
	}
	return 0;
}

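/* Return the extent_state following @state in the tree, or NULL if none. */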
static struct extent_state *next_state(struct extent_state *state)
{
	struct rb_node *next = rb_next(&state->rb_node);
	if (next)
		return rb_entry(next, struct extent_state, rb_node);
	else
		return NULL;
}

/*
 * utility function to clear some bits in an extent state struct.
 * it will optionally wake up anyone waiting on this state (wake == 1).
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree
 */
static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
					    struct extent_state *state,
					    u32 *bits, int wake,
					    struct extent_changeset *changeset)
{
	struct extent_state *next;
	u32 bits_to_clear = *bits & ~EXTENT_CTLBITS;
	int ret;

	if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		WARN_ON(range > tree->dirty_bytes);
		tree->dirty_bytes -= range;
	}

	if (tree->private_data && is_data_inode(tree->private_data))
		btrfs_clear_delalloc_extent(tree->private_data, state, bits);

	ret = add_extent_changeset(state, bits_to_clear, changeset, 0);
	BUG_ON(ret < 0);
	state->state &= ~bits_to_clear;
	if (wake)
		wake_up(&state->wq);
	if (state->state == 0) {
		next = next_state(state);
		if (extent_state_in_tree(state)) {
			rb_erase(&state->rb_node, &tree->state);
			RB_CLEAR_NODE(&state->rb_node);
			free_extent_state(state);
		} else {
			WARN_ON(1);
		}
	} else {
		merge_state(tree, state);
		next = next_state(state);
	}
	return next;
}

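/*
 * Ensure a preallocated extent_state is available: reuse @prealloc if the
 * caller already has one, otherwise fall back to a GFP_ATOMIC allocation.
 */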
static struct extent_state *
alloc_extent_state_atomic(struct extent_state *prealloc)
{
	if (!prealloc)
		prealloc = alloc_extent_state(GFP_ATOMIC);

	return prealloc;
}

static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
{
	btrfs_panic(tree->fs_info, err,
	"locking error: extent tree was modified by another thread while locked");
}

/*
 * clear some bits on a range in the tree. This may require splitting
 * or inserting elements in the tree, so the gfp mask is used to
 * indicate which allocations or sleeping are allowed.
 *
 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 * the given range from the tree regardless of state (ie for truncate).
 *
 * the range [start, end] is inclusive.
 *
 * This takes the tree lock, and returns 0 on success and < 0 on error.
 */
int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       u32 bits, int wake, int delete,
		       struct extent_state **cached_state,
		       gfp_t mask, struct extent_changeset *changeset)
{
	struct extent_state *state;
	struct extent_state *cached;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	u64 last_end;
	int err;
	int clear = 0;

	btrfs_debug_check_extent_io_range(tree, start, end);
	trace_btrfs_clear_extent_bit(tree, start, end - start + 1, bits);

	if (bits & EXTENT_DELALLOC)
		bits |= EXTENT_NORESERVE;

	if (delete)
		bits |= ~EXTENT_CTLBITS;

	if (bits & (EXTENT_LOCKED | EXTENT_BOUNDARY))
		clear = 1;
again:
	if (!prealloc && gfpflags_allow_blocking(mask)) {
		/*
		 * Don't care for allocation failure here because we might end
		 * up not needing the pre-allocated extent state at all, which
		 * is the case if we only have in the tree extent states that
		 * cover our input range and don't cover any other range.
		 * If we end up needing a new extent state we allocate it later.
		 */
		prealloc = alloc_extent_state(mask);
	}

	spin_lock(&tree->lock);
	if (cached_state) {
		cached = *cached_state;

		if (clear) {
			*cached_state = NULL;
			cached_state = NULL;
		}

		if (cached && extent_state_in_tree(cached) &&
		    cached->start <= start && cached->end > start) {
			if (clear)
				refcount_dec(&cached->refs);
			state = cached;
			goto hit_next;
		}
		if (clear)
			free_extent_state(cached);
	}
	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	if (state->start > end)
		goto out;
	WARN_ON(state->end < start);
	last_end = state->end;

	/* the state doesn't have the wanted bits, go ahead */
	if (!(state->state & bits)) {
		state = next_state(state);
		goto next;
	}

	/*
	 *     | ---- desired range ---- |
	 *  | state | or
	 *  | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip
	 * bits on second half.
	 *
	 * If the extent we found extends past our range, we
	 * just split and search again. It'll get split again
	 * the next time though.
	 *
	 * If the extent we found is inside our range, we clear
	 * the desired bit on it.
	 */

	if (state->start < start) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, start);
		if (err)
			extent_io_tree_panic(tree, err);

		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			state = clear_state_bit(tree, state, &bits, wake,
						changeset);
			goto next;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and clear the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, end + 1);
		if (err)
			extent_io_tree_panic(tree, err);

		if (wake)
			wake_up(&state->wq);

		clear_state_bit(tree, prealloc, &bits, wake, changeset);

		prealloc = NULL;
		goto out;
	}

	state = clear_state_bit(tree, state, &bits, wake, changeset);
next:
	if (last_end == (u64)-1)
		goto out;
	start = last_end + 1;
	if (start <= end && state && !need_resched())
		goto hit_next;

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (gfpflags_allow_blocking(mask))
		cond_resched();
	goto again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return 0;

}

static void wait_on_state(struct extent_io_tree *tree,
			  struct extent_state *state)
		__releases(tree->lock)
		__acquires(tree->lock)
{
	DEFINE_WAIT(wait);
	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
	spin_unlock(&tree->lock);
	schedule();
	spin_lock(&tree->lock);
	finish_wait(&state->wq, &wait);
}

/*
 * waits for one or more bits to clear on a range in the state tree.
 * The range [start, end] is inclusive.
 * The tree lock is taken by this function
 */
static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
			    u32 bits)
{
	struct extent_state *state;
	struct rb_node *node;

	btrfs_debug_check_extent_io_range(tree, start, end);

	spin_lock(&tree->lock);
again:
	while (1) {
		/*
		 * this search will find all the extents that end after
		 * our range starts
		 */
		node = tree_search(tree, start);
process_node:
		if (!node)
			break;

		state = rb_entry(node, struct extent_state, rb_node);

		if (state->start > end)
			goto out;

		if (state->state & bits) {
			start = state->start;
			refcount_inc(&state->refs);
			wait_on_state(tree, state);
			free_extent_state(state);
			goto again;
		}
		start = state->end + 1;

		if (start > end)
			break;

		if (!cond_resched_lock(&tree->lock)) {
			node = rb_next(node);
			goto process_node;
		}
	}
out:
	spin_unlock(&tree->lock);
}

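/*
 * Set bits on a single extent_state: notify the delalloc hook for data
 * inodes, update the tree's dirty byte accounting and record the change in
 * @changeset. Bits from EXTENT_CTLBITS are never stored in the state.
 */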
static void set_state_bits(struct extent_io_tree *tree,
			   struct extent_state *state,
			   u32 *bits, struct extent_changeset *changeset)
{
	u32 bits_to_set = *bits & ~EXTENT_CTLBITS;
	int ret;

	if (tree->private_data && is_data_inode(tree->private_data))
		btrfs_set_delalloc_extent(tree->private_data, state, bits);

	if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		tree->dirty_bytes += range;
	}
	ret = add_extent_changeset(state, bits_to_set, changeset, 1);
	BUG_ON(ret < 0);
	state->state |= bits_to_set;
}

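/*
 * Cache @state in *@cached_ptr (taking an extra reference) if nothing is
 * cached yet and the state carries one of @flags; a zero @flags caches
 * unconditionally.
 */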
static void cache_state_if_flags(struct extent_state *state,
				 struct extent_state **cached_ptr,
				 unsigned flags)
{
	if (cached_ptr && !(*cached_ptr)) {
		if (!flags || (state->state & flags)) {
			*cached_ptr = state;
			refcount_inc(&state->refs);
		}
	}
}

static void cache_state(struct extent_state *state,
			struct extent_state **cached_ptr)
{
	return cache_state_if_flags(state, cached_ptr,
				    EXTENT_LOCKED | EXTENT_BOUNDARY);
}

/*
 * set some bits on a range in the tree. This may require allocations or
 * sleeping, so the gfp mask is used to indicate what is allowed.
 *
 * If any of the exclusive bits are set, this will fail with -EEXIST if some
 * part of the range already has the desired bits set. The start of the
 * existing range is returned in failed_start in this case.
 *
 * [start, end] is inclusive. This takes the tree lock.
 */
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bits,
		   u32 exclusive_bits, u64 *failed_start,
		   struct extent_state **cached_state, gfp_t mask,
		   struct extent_changeset *changeset)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	struct rb_node **p;
	struct rb_node *parent;
	int err = 0;
	u64 last_start;
	u64 last_end;

	btrfs_debug_check_extent_io_range(tree, start, end);
	trace_btrfs_set_extent_bit(tree, start, end - start + 1, bits);

	if (exclusive_bits)
		ASSERT(failed_start);
	else
		ASSERT(failed_start == NULL);
again:
	if (!prealloc && gfpflags_allow_blocking(mask)) {
		/*
		 * Don't care for allocation failure here because we might end
		 * up not needing the pre-allocated extent state at all, which
		 * is the case if we only have in the tree extent states that
		 * cover our input range and don't cover any other range.
		 * If we end up needing a new extent state we allocate it later.
		 */
		prealloc = alloc_extent_state(mask);
	}

	spin_lock(&tree->lock);
	if (cached_state && *cached_state) {
		state = *cached_state;
		if (state->start <= start && state->end > start &&
		    extent_state_in_tree(state)) {
			node = &state->rb_node;
			goto hit_next;
		}
	}
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search_for_insert(tree, start, &p, &parent);
	if (!node) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = insert_state(tree, prealloc, start, end,
				   &p, &parent, &bits, changeset);
		if (err)
			extent_io_tree_panic(tree, err);

		cache_state(prealloc, cached_state);
		prealloc = NULL;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		if (state->state & exclusive_bits) {
			*failed_start = state->start;
			err = -EEXIST;
			goto out;
		}

		set_state_bits(tree, state, &bits, changeset);
		cache_state(state, cached_state);
		merge_state(tree, state);
		if (last_end == (u64)-1)
			goto out;
		start = last_end + 1;
		state = next_state(state);
		if (start < end && state && state->start == start &&
		    !need_resched())
			goto hit_next;
		goto search_again;
	}

	/*
	 *     | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again. It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		if (state->state & exclusive_bits) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}

		/*
		 * If this extent already has all the bits we want set, then
		 * skip it, not necessary to split it or do anything with it.
		 */
		if ((state->state & bits) == bits) {
			start = state->end + 1;
			cache_state(state, cached_state);
			goto search_again;
		}

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, start);
		if (err)
			extent_io_tree_panic(tree, err);

		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set_state_bits(tree, state, &bits, changeset);
			cache_state(state, cached_state);
			merge_state(tree, state);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
			state = next_state(state);
			if (start < end && state && state->start == start &&
			    !need_resched())
				goto hit_next;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or               | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);

		/*
		 * Avoid to free 'prealloc' if it can be merged with
		 * the later extent.
		 */
		err = insert_state(tree, prealloc, start, this_end,
				   NULL, NULL, &bits, changeset);
		if (err)
			extent_io_tree_panic(tree, err);

		cache_state(prealloc, cached_state);
		prealloc = NULL;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		if (state->state & exclusive_bits) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, end + 1);
		if (err)
			extent_io_tree_panic(tree, err);

		set_state_bits(tree, prealloc, &bits, changeset);
		cache_state(prealloc, cached_state);
		merge_state(tree, prealloc);
		prealloc = NULL;
		goto out;
	}

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (gfpflags_allow_blocking(mask))
		cond_resched();
	goto again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return err;

}

/**
 * convert_extent_bit - convert all bits in a given range from one bit to
 * another
 * @tree:	the io tree to search
 * @start:	the start offset in bytes
 * @end:	the end offset in bytes (inclusive)
 * @bits:	the bits to set in this range
 * @clear_bits:	the bits to clear in this range
 * @cached_state:	state that we're going to cache
 *
 * This will go through and set bits for the given range. If any states exist
 * already in this range they are set with the given bit and cleared of the
 * clear_bits. This is only meant to be used by things that are mergeable, ie
 * converting from say DELALLOC to DIRTY. This is not meant to be used with
 * boundary bits like LOCK.
 *
 * All allocations are done with GFP_NOFS.
 */
int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       u32 bits, u32 clear_bits,
		       struct extent_state **cached_state)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	struct rb_node **p;
	struct rb_node *parent;
	int err = 0;
	u64 last_start;
	u64 last_end;
	bool first_iteration = true;

	btrfs_debug_check_extent_io_range(tree, start, end);
	trace_btrfs_convert_extent_bit(tree, start, end - start + 1, bits,
				       clear_bits);

again:
	if (!prealloc) {
		/*
		 * Best effort, don't worry if extent state allocation fails
		 * here for the first iteration. We might have a cached state
		 * that matches exactly the target range, in which case no
		 * extent state allocations are needed. We'll only know this
		 * after locking the tree.
		 */
		prealloc = alloc_extent_state(GFP_NOFS);
		if (!prealloc && !first_iteration)
			return -ENOMEM;
	}

	spin_lock(&tree->lock);
	if (cached_state && *cached_state) {
		state = *cached_state;
		if (state->start <= start && state->end > start &&
		    extent_state_in_tree(state)) {
			node = &state->rb_node;
			goto hit_next;
		}
	}

	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search_for_insert(tree, start, &p, &parent);
	if (!node) {
		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}
		err = insert_state(tree, prealloc, start, end,
				   &p, &parent, &bits, NULL);
		if (err)
			extent_io_tree_panic(tree, err);
		cache_state(prealloc, cached_state);
		prealloc = NULL;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		set_state_bits(tree, state, &bits, NULL);
		cache_state(state, cached_state);
		state = clear_state_bit(tree, state, &clear_bits, 0, NULL);
		if (last_end == (u64)-1)
			goto out;
		start = last_end + 1;
		if (start < end && state && state->start == start &&
		    !need_resched())
			goto hit_next;
		goto search_again;
	}

	/*
	 *     | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again. It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}
		err = split_state(tree, state, prealloc, start);
		if (err)
			extent_io_tree_panic(tree, err);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set_state_bits(tree, state, &bits, NULL);
			cache_state(state, cached_state);
			state = clear_state_bit(tree, state, &clear_bits, 0,
						NULL);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
			if (start < end && state && state->start == start &&
			    !need_resched())
				goto hit_next;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or               | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;

		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}

		/*
		 * Avoid to free 'prealloc' if it can be merged with
		 * the later extent.
		 */
		err = insert_state(tree, prealloc, start, this_end,
				   NULL, NULL, &bits, NULL);
		if (err)
			extent_io_tree_panic(tree, err);
		cache_state(prealloc, cached_state);
		prealloc = NULL;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}

		err = split_state(tree, state, prealloc, end + 1);
		if (err)
			extent_io_tree_panic(tree, err);

		set_state_bits(tree, prealloc, &bits, NULL);
		cache_state(prealloc, cached_state);
		clear_state_bit(tree, prealloc, &clear_bits, 0, NULL);
		prealloc = NULL;
		goto out;
	}

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	cond_resched();
	first_iteration = false;
	goto again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return err;
}

/* wrappers around set/clear extent bit */
int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			   u32 bits, struct extent_changeset *changeset)
{
	/*
	 * We don't support EXTENT_LOCKED yet, as current changeset will
	 * record any bits changed, so for EXTENT_LOCKED case, it will
	 * either fail with -EEXIST or changeset will record the whole
	 * range.
	 */
	BUG_ON(bits & EXTENT_LOCKED);

	return set_extent_bit(tree, start, end, bits, 0, NULL, NULL, GFP_NOFS,
			      changeset);
}

int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start, u64 end,
			   u32 bits)
{
	return set_extent_bit(tree, start, end, bits, 0, NULL, NULL,
			      GFP_NOWAIT, NULL);
}

int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		     u32 bits, int wake, int delete,
		     struct extent_state **cached)
{
	return __clear_extent_bit(tree, start, end, bits, wake, delete,
				  cached, GFP_NOFS, NULL);
}

int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			     u32 bits, struct extent_changeset *changeset)
{
	/*
	 * Don't support EXTENT_LOCKED case, same reason as
	 * set_record_extent_bits().
	 */
	BUG_ON(bits & EXTENT_LOCKED);

	return __clear_extent_bit(tree, start, end, bits, 0, 0, NULL, GFP_NOFS,
				  changeset);
}

/*
 * either insert or lock state struct between start and end use mask to tell
 * us if waiting is desired.
 */
int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		     struct extent_state **cached_state)
{
	int err;
	u64 failed_start;

	while (1) {
		err = set_extent_bit(tree, start, end, EXTENT_LOCKED,
				     EXTENT_LOCKED, &failed_start,
				     cached_state, GFP_NOFS, NULL);
		if (err == -EEXIST) {
			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
			start = failed_start;
		} else
			break;
		WARN_ON(start > end);
	}
	return err;
}

int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	int err;
	u64 failed_start;

	err = set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
			     &failed_start, NULL, GFP_NOFS, NULL);
	if (err == -EEXIST) {
		if (failed_start > start)
			clear_extent_bit(tree, start, failed_start - 1,
					 EXTENT_LOCKED, 1, 0, NULL);
		return 0;
	}
	return 1;
}

4adaa611 1494{
09cbfeaf
KS
1495 unsigned long index = start >> PAGE_SHIFT;
1496 unsigned long end_index = end >> PAGE_SHIFT;
4adaa611
CM
1497 struct page *page;
1498
1499 while (index <= end_index) {
1500 page = find_get_page(inode->i_mapping, index);
1501 BUG_ON(!page); /* Pages should be in the extent_io_tree */
1502 clear_page_dirty_for_io(page);
09cbfeaf 1503 put_page(page);
4adaa611
CM
1504 index++;
1505 }
4adaa611
CM
1506}
1507
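/*
 * Mark every folio in the byte range [start, end] dirty again and account
 * the redirtying against the writeback statistics.
 */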
void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
{
	struct address_space *mapping = inode->i_mapping;
	unsigned long index = start >> PAGE_SHIFT;
	unsigned long end_index = end >> PAGE_SHIFT;
	struct folio *folio;

	while (index <= end_index) {
		folio = filemap_get_folio(mapping, index);
		filemap_dirty_folio(mapping, folio);
		folio_account_redirty(folio);
		index += folio_nr_pages(folio);
		folio_put(folio);
	}
}

/* find the first state struct with 'bits' set after 'start', and
 * return it. tree->lock must be held. NULL will be returned if
 * nothing was found after 'start'
 */
static struct extent_state *
find_first_extent_bit_state(struct extent_io_tree *tree, u64 start, u32 bits)
{
	struct rb_node *node;
	struct extent_state *state;

	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->end >= start && (state->state & bits))
			return state;

		node = rb_next(node);
		if (!node)
			break;
	}
out:
	return NULL;
}

/*
 * Find the first offset in the io tree with one or more @bits set.
 *
 * Note: If there are multiple bits set in @bits, any of them will match.
 *
 * Return 0 if we find something, and update @start_ret and @end_ret.
 * Return 1 if we found nothing.
 */
int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, u32 bits,
			  struct extent_state **cached_state)
{
	struct extent_state *state;
	int ret = 1;

	spin_lock(&tree->lock);
	if (cached_state && *cached_state) {
		state = *cached_state;
		if (state->end == start - 1 && extent_state_in_tree(state)) {
			while ((state = next_state(state)) != NULL) {
				if (state->state & bits)
					goto got_it;
			}
			free_extent_state(*cached_state);
			*cached_state = NULL;
			goto out;
		}
		free_extent_state(*cached_state);
		*cached_state = NULL;
	}

	state = find_first_extent_bit_state(tree, start, bits);
got_it:
	if (state) {
		cache_state_if_flags(state, cached_state, 0);
		*start_ret = state->start;
		*end_ret = state->end;
		ret = 0;
	}
out:
	spin_unlock(&tree->lock);
	return ret;
}

/**
 * Find a contiguous area of bits
 *
 * @tree:      io tree to check
 * @start:     offset to start the search from
 * @start_ret: the first offset we found with the bits set
 * @end_ret:   the final contiguous range of the bits that were set
 * @bits:      bits to look for
 *
 * set_extent_bit and clear_extent_bit can temporarily split contiguous ranges
 * to set bits appropriately, and then merge them again. During this time it
 * will drop the tree->lock, so use this helper if you want to find the actual
 * contiguous area for given bits. We will search to the first bit we find, and
 * then walk down the tree until we find a non-contiguous area. The area
 * returned will be the full contiguous area with the bits set.
 */
int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start,
			       u64 *start_ret, u64 *end_ret, u32 bits)
{
	struct extent_state *state;
	int ret = 1;

	spin_lock(&tree->lock);
	state = find_first_extent_bit_state(tree, start, bits);
	if (state) {
		*start_ret = state->start;
		*end_ret = state->end;
		while ((state = next_state(state)) != NULL) {
			if (state->start > (*end_ret + 1))
				break;
			*end_ret = state->end;
		}
		ret = 0;
	}
	spin_unlock(&tree->lock);
	return ret;
}

45bfcfc1 1637/**
3bed2da1
NB
1638 * Find the first range that has @bits not set. This range could start before
1639 * @start.
45bfcfc1 1640 *
3bed2da1
NB
1641 * @tree: the tree to search
1642 * @start: offset at/after which the found extent should start
1643 * @start_ret: records the beginning of the range
1644 * @end_ret: records the end of the range (inclusive)
1645 * @bits: the set of bits which must be unset
45bfcfc1
NB
1646 *
1647 * Since unallocated range is also considered one which doesn't have the bits
1648 * set it's possible that @end_ret contains -1, this happens in case the range
1649 * spans (last_range_end, end of device]. In this case it's up to the caller to
1650 * trim @end_ret to the appropriate size.
1651 */
1652void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
f97e27e9 1653 u64 *start_ret, u64 *end_ret, u32 bits)
45bfcfc1
NB
1654{
1655 struct extent_state *state;
1656 struct rb_node *node, *prev = NULL, *next;
1657
1658 spin_lock(&tree->lock);
1659
1660 /* Find first extent with bits cleared */
1661 while (1) {
1662 node = __etree_search(tree, start, &next, &prev, NULL, NULL);
5750c375
NB
1663 if (!node && !next && !prev) {
1664 /*
1665 * Tree is completely empty, send full range and let
1666 * caller deal with it
1667 */
1668 *start_ret = 0;
1669 *end_ret = -1;
1670 goto out;
1671 } else if (!node && !next) {
1672 /*
1673 * We are past the last allocated chunk, set start at
1674 * the end of the last extent.
1675 */
1676 state = rb_entry(prev, struct extent_state, rb_node);
1677 *start_ret = state->end + 1;
1678 *end_ret = -1;
1679 goto out;
1680 } else if (!node) {
45bfcfc1 1681 node = next;
45bfcfc1 1682 }
1eaebb34
NB
1683 /*
1684 * At this point 'node' either contains 'start' or start is
1685 * before 'node'
1686 */
45bfcfc1 1687 state = rb_entry(node, struct extent_state, rb_node);
1eaebb34
NB
1688
1689 if (in_range(start, state->start, state->end - state->start + 1)) {
1690 if (state->state & bits) {
1691 /*
1692 * |--range with bits sets--|
1693 * |
1694 * start
1695 */
1696 start = state->end + 1;
1697 } else {
1698 /*
1699 * 'start' falls within a range that doesn't
1700 * have the bits set, so take its start as
1701 * the beginning of the desired range
1702 *
1703 * |--range with bits cleared----|
1704 * |
1705 * start
1706 */
1707 *start_ret = state->start;
1708 break;
1709 }
45bfcfc1 1710 } else {
1eaebb34
NB
1711 /*
1712 * |---prev range---|---hole/unset---|---node range---|
1713 * |
1714 * start
1715 *
1716 * or
1717 *
1718 * |---hole/unset--||--first node--|
1719 * 0 |
1720 * start
1721 */
1722 if (prev) {
1723 state = rb_entry(prev, struct extent_state,
1724 rb_node);
1725 *start_ret = state->end + 1;
1726 } else {
1727 *start_ret = 0;
1728 }
45bfcfc1
NB
1729 break;
1730 }
1731 }
1732
1733 /*
1734 * Find the longest stretch from start until an entry which has the
1735 * bits set
1736 */
1737 while (1) {
1738 state = rb_entry(node, struct extent_state, rb_node);
1739 if (state->end >= start && !(state->state & bits)) {
1740 *end_ret = state->end;
1741 } else {
1742 *end_ret = state->start - 1;
1743 break;
1744 }
1745
1746 node = rb_next(node);
1747 if (!node)
1748 break;
1749 }
1750out:
1751 spin_unlock(&tree->lock);
1752}
1753
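As the comment above notes, find_first_clear_extent_bit() can hand back *end_ret == -1 when the found hole runs past the last allocated range, and it is up to the caller to trim it. A hedged caller sketch (kernel context assumed; 'tree', 'search_start', 'device_size' and the bit mask are placeholders, not code from this file):

/* Hedged sketch: find a hole without the given bits set and clamp the
 * possibly open-ended result to the device ourselves. */
static u64 first_hole_len(struct extent_io_tree *tree, u64 search_start,
			  u64 device_size, u32 bits)
{
	u64 hole_start;
	u64 hole_end;

	find_first_clear_extent_bit(tree, search_start, &hole_start, &hole_end, bits);

	/* As documented above, hole_end can be (u64)-1; trim to the device. */
	if (hole_end == (u64)-1 || hole_end >= device_size)
		hole_end = device_size - 1;

	if (hole_end < hole_start)
		return 0;
	return hole_end - hole_start + 1;
}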
d352ac68
CM
1754/*
1755 * find a contiguous range of bytes in the file marked as delalloc, not
1756 * more than 'max_bytes'. start and end are used to return the range,
1757 *
3522e903 1758 * true is returned if we find something, false if nothing was in the tree
d352ac68 1759 */
083e75e7
JB
1760bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start,
1761 u64 *end, u64 max_bytes,
1762 struct extent_state **cached_state)
d1310b2e
CM
1763{
1764 struct rb_node *node;
1765 struct extent_state *state;
1766 u64 cur_start = *start;
3522e903 1767 bool found = false;
d1310b2e
CM
1768 u64 total_bytes = 0;
1769
cad321ad 1770 spin_lock(&tree->lock);
c8b97818 1771
d1310b2e
CM
1772 /*
1773 * this search will find all the extents that end after
1774 * our range starts.
1775 */
80ea96b1 1776 node = tree_search(tree, cur_start);
2b114d1d 1777 if (!node) {
3522e903 1778 *end = (u64)-1;
d1310b2e
CM
1779 goto out;
1780 }
1781
d397712b 1782 while (1) {
d1310b2e 1783 state = rb_entry(node, struct extent_state, rb_node);
5b21f2ed
ZY
1784 if (found && (state->start != cur_start ||
1785 (state->state & EXTENT_BOUNDARY))) {
d1310b2e
CM
1786 goto out;
1787 }
1788 if (!(state->state & EXTENT_DELALLOC)) {
1789 if (!found)
1790 *end = state->end;
1791 goto out;
1792 }
c2a128d2 1793 if (!found) {
d1310b2e 1794 *start = state->start;
c2a128d2 1795 *cached_state = state;
b7ac31b7 1796 refcount_inc(&state->refs);
c2a128d2 1797 }
3522e903 1798 found = true;
d1310b2e
CM
1799 *end = state->end;
1800 cur_start = state->end + 1;
1801 node = rb_next(node);
d1310b2e 1802 total_bytes += state->end - state->start + 1;
7bf811a5 1803 if (total_bytes >= max_bytes)
573aecaf 1804 break;
573aecaf 1805 if (!node)
d1310b2e
CM
1806 break;
1807 }
1808out:
cad321ad 1809 spin_unlock(&tree->lock);
d1310b2e
CM
1810 return found;
1811}
1812
ed8f13bf
QW
1813/*
1814 * Process one page for __process_pages_contig().
1815 *
1816 * Return >0 if we hit @page == @locked_page.
1817 * Return 0 if we updated the page status.
 1818 * Return -EAGAIN if we need to try again.
 1819 * (For the PAGE_LOCK case, when we got a dirty page or a page that no longer belongs to the mapping)
1820 */
e38992be
QW
1821static int process_one_page(struct btrfs_fs_info *fs_info,
1822 struct address_space *mapping,
ed8f13bf 1823 struct page *page, struct page *locked_page,
e38992be 1824 unsigned long page_ops, u64 start, u64 end)
ed8f13bf 1825{
e38992be
QW
1826 u32 len;
1827
1828 ASSERT(end + 1 - start != 0 && end + 1 - start < U32_MAX);
1829 len = end + 1 - start;
1830
ed8f13bf 1831 if (page_ops & PAGE_SET_ORDERED)
b945a463 1832 btrfs_page_clamp_set_ordered(fs_info, page, start, len);
ed8f13bf 1833 if (page_ops & PAGE_SET_ERROR)
e38992be 1834 btrfs_page_clamp_set_error(fs_info, page, start, len);
ed8f13bf 1835 if (page_ops & PAGE_START_WRITEBACK) {
e38992be
QW
1836 btrfs_page_clamp_clear_dirty(fs_info, page, start, len);
1837 btrfs_page_clamp_set_writeback(fs_info, page, start, len);
ed8f13bf
QW
1838 }
1839 if (page_ops & PAGE_END_WRITEBACK)
e38992be 1840 btrfs_page_clamp_clear_writeback(fs_info, page, start, len);
a33a8e9a
QW
1841
1842 if (page == locked_page)
1843 return 1;
1844
ed8f13bf 1845 if (page_ops & PAGE_LOCK) {
1e1de387
QW
1846 int ret;
1847
1848 ret = btrfs_page_start_writer_lock(fs_info, page, start, len);
1849 if (ret)
1850 return ret;
ed8f13bf 1851 if (!PageDirty(page) || page->mapping != mapping) {
1e1de387 1852 btrfs_page_end_writer_lock(fs_info, page, start, len);
ed8f13bf
QW
1853 return -EAGAIN;
1854 }
1855 }
1856 if (page_ops & PAGE_UNLOCK)
1e1de387 1857 btrfs_page_end_writer_lock(fs_info, page, start, len);
ed8f13bf
QW
1858 return 0;
1859}
1860
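A caller iterating over pages dispatches on the three outcomes documented above; this is a hedged fragment (kernel context; 'fs_info', 'mapping', 'page', 'locked_page', 'page_ops', 'start' and 'end' are assumed to exist at the call site), mirroring how __process_pages_contig() below consumes the result:

	int ret = process_one_page(fs_info, mapping, page, locked_page,
				   page_ops, start, end);
	if (ret > 0) {
		/* This was @locked_page itself: the caller handles its status. */
	} else if (ret == -EAGAIN) {
		/* PAGE_LOCK raced with a dirty/truncated page: back off and retry. */
	} else {
		/* 0: the page status (ordered/writeback/unlock...) was updated. */
	}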
da2c7009
LB
1861static int __process_pages_contig(struct address_space *mapping,
1862 struct page *locked_page,
98af9ab1 1863 u64 start, u64 end, unsigned long page_ops,
ed8f13bf
QW
1864 u64 *processed_end)
1865{
e38992be 1866 struct btrfs_fs_info *fs_info = btrfs_sb(mapping->host->i_sb);
ed8f13bf
QW
1867 pgoff_t start_index = start >> PAGE_SHIFT;
1868 pgoff_t end_index = end >> PAGE_SHIFT;
1869 pgoff_t index = start_index;
1870 unsigned long nr_pages = end_index - start_index + 1;
1871 unsigned long pages_processed = 0;
1872 struct page *pages[16];
1873 int err = 0;
1874 int i;
1875
1876 if (page_ops & PAGE_LOCK) {
1877 ASSERT(page_ops == PAGE_LOCK);
1878 ASSERT(processed_end && *processed_end == start);
1879 }
1880
1881 if ((page_ops & PAGE_SET_ERROR) && nr_pages > 0)
1882 mapping_set_error(mapping, -EIO);
1883
1884 while (nr_pages > 0) {
1885 int found_pages;
1886
1887 found_pages = find_get_pages_contig(mapping, index,
1888 min_t(unsigned long,
1889 nr_pages, ARRAY_SIZE(pages)), pages);
1890 if (found_pages == 0) {
1891 /*
 1892			 * We can only find nothing at @index if we're going
 1893			 * to lock these pages.
1894 */
1895 ASSERT(page_ops & PAGE_LOCK);
1896 err = -EAGAIN;
1897 goto out;
1898 }
1899
1900 for (i = 0; i < found_pages; i++) {
1901 int process_ret;
1902
e38992be
QW
1903 process_ret = process_one_page(fs_info, mapping,
1904 pages[i], locked_page, page_ops,
1905 start, end);
ed8f13bf
QW
1906 if (process_ret < 0) {
1907 for (; i < found_pages; i++)
1908 put_page(pages[i]);
1909 err = -EAGAIN;
1910 goto out;
1911 }
1912 put_page(pages[i]);
1913 pages_processed++;
1914 }
1915 nr_pages -= found_pages;
1916 index += found_pages;
1917 cond_resched();
1918 }
1919out:
1920 if (err && processed_end) {
1921 /*
1922 * Update @processed_end. I know this is awful since it has
1923 * two different return value patterns (inclusive vs exclusive).
1924 *
 1925		 * But the exclusive pattern is necessary if @start is 0, otherwise we
 1926		 * would underflow and the check against processed_end wouldn't work as
 1927		 * expected.
1928 */
1929 if (pages_processed)
1930 *processed_end = min(end,
1931 ((u64)(start_index + pages_processed) << PAGE_SHIFT) - 1);
1932 else
1933 *processed_end = start;
1934 }
1935 return err;
1936}
da2c7009 1937
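The @processed_end bookkeeping above converts a count of fully processed pages back into an inclusive byte offset, clamped to @end. A standalone worked example of that arithmetic (illustration only, assuming PAGE_SHIFT == 12, i.e. 4K pages):

/* Standalone check of the inclusive processed_end math used above. */
#include <stdio.h>

int main(void)
{
	const unsigned int PAGE_SHIFT = 12;
	unsigned long long start = 0x3000;	/* byte 12288, page index 3 */
	unsigned long long end = 0xAFFF;	/* inclusive end of the range */
	unsigned long long start_index = start >> PAGE_SHIFT;	/* 3 */
	unsigned long pages_processed = 2;	/* pages 3 and 4 were handled */
	unsigned long long processed_end;

	/* same formula as __process_pages_contig() */
	processed_end = ((unsigned long long)(start_index + pages_processed)
			 << PAGE_SHIFT) - 1;
	if (processed_end > end)
		processed_end = end;
	/* prints 0x4fff: last byte of page index 4, inclusive */
	printf("0x%llx\n", processed_end);
	return 0;
}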
143bede5
JM
1938static noinline void __unlock_for_delalloc(struct inode *inode,
1939 struct page *locked_page,
1940 u64 start, u64 end)
c8b97818 1941{
09cbfeaf
KS
1942 unsigned long index = start >> PAGE_SHIFT;
1943 unsigned long end_index = end >> PAGE_SHIFT;
c8b97818 1944
76c0021d 1945 ASSERT(locked_page);
c8b97818 1946 if (index == locked_page->index && end_index == index)
143bede5 1947 return;
c8b97818 1948
98af9ab1 1949 __process_pages_contig(inode->i_mapping, locked_page, start, end,
76c0021d 1950 PAGE_UNLOCK, NULL);
c8b97818
CM
1951}
1952
1953static noinline int lock_delalloc_pages(struct inode *inode,
1954 struct page *locked_page,
1955 u64 delalloc_start,
1956 u64 delalloc_end)
1957{
09cbfeaf 1958 unsigned long index = delalloc_start >> PAGE_SHIFT;
09cbfeaf 1959 unsigned long end_index = delalloc_end >> PAGE_SHIFT;
98af9ab1 1960 u64 processed_end = delalloc_start;
c8b97818 1961 int ret;
c8b97818 1962
76c0021d 1963 ASSERT(locked_page);
c8b97818
CM
1964 if (index == locked_page->index && index == end_index)
1965 return 0;
1966
98af9ab1
QW
1967 ret = __process_pages_contig(inode->i_mapping, locked_page, delalloc_start,
1968 delalloc_end, PAGE_LOCK, &processed_end);
1969 if (ret == -EAGAIN && processed_end > delalloc_start)
76c0021d 1970 __unlock_for_delalloc(inode, locked_page, delalloc_start,
98af9ab1 1971 processed_end);
c8b97818
CM
1972 return ret;
1973}
1974
1975/*
3522e903 1976 * Find and lock a contiguous range of bytes in the file marked as delalloc, no
2749f7ef 1977 * more than @max_bytes.
c8b97818 1978 *
2749f7ef
QW
1979 * @start: The original start bytenr to search.
1980 * Will store the extent range start bytenr.
1981 * @end: The original end bytenr of the search range
1982 * Will store the extent range end bytenr.
1983 *
1984 * Return true if we find a delalloc range which starts inside the original
1985 * range, and @start/@end will store the delalloc range start/end.
1986 *
1987 * Return false if we can't find any delalloc range which starts inside the
1988 * original range, and @start/@end will be the non-delalloc range start/end.
c8b97818 1989 */
ce9f967f 1990EXPORT_FOR_TESTS
3522e903 1991noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
294e30fe 1992 struct page *locked_page, u64 *start,
917aacec 1993 u64 *end)
c8b97818 1994{
9978059b 1995 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2749f7ef
QW
1996 const u64 orig_start = *start;
1997 const u64 orig_end = *end;
917aacec 1998 u64 max_bytes = BTRFS_MAX_EXTENT_SIZE;
c8b97818
CM
1999 u64 delalloc_start;
2000 u64 delalloc_end;
3522e903 2001 bool found;
9655d298 2002 struct extent_state *cached_state = NULL;
c8b97818
CM
2003 int ret;
2004 int loops = 0;
2005
2749f7ef
QW
2006 /* Caller should pass a valid @end to indicate the search range end */
2007 ASSERT(orig_end > orig_start);
2008
2009 /* The range should at least cover part of the page */
2010 ASSERT(!(orig_start >= page_offset(locked_page) + PAGE_SIZE ||
2011 orig_end <= page_offset(locked_page)));
c8b97818
CM
2012again:
2013 /* step one, find a bunch of delalloc bytes starting at start */
2014 delalloc_start = *start;
2015 delalloc_end = 0;
083e75e7
JB
2016 found = btrfs_find_delalloc_range(tree, &delalloc_start, &delalloc_end,
2017 max_bytes, &cached_state);
2749f7ef 2018 if (!found || delalloc_end <= *start || delalloc_start > orig_end) {
c8b97818 2019 *start = delalloc_start;
2749f7ef
QW
2020
2021 /* @delalloc_end can be -1, never go beyond @orig_end */
2022 *end = min(delalloc_end, orig_end);
c2a128d2 2023 free_extent_state(cached_state);
3522e903 2024 return false;
c8b97818
CM
2025 }
2026
70b99e69
CM
2027 /*
2028 * start comes from the offset of locked_page. We have to lock
2029 * pages in order, so we can't process delalloc bytes before
2030 * locked_page
2031 */
d397712b 2032 if (delalloc_start < *start)
70b99e69 2033 delalloc_start = *start;
70b99e69 2034
c8b97818
CM
2035 /*
2036 * make sure to limit the number of pages we try to lock down
c8b97818 2037 */
7bf811a5
JB
2038 if (delalloc_end + 1 - delalloc_start > max_bytes)
2039 delalloc_end = delalloc_start + max_bytes - 1;
d397712b 2040
c8b97818
CM
2041 /* step two, lock all the pages after the page that has start */
2042 ret = lock_delalloc_pages(inode, locked_page,
2043 delalloc_start, delalloc_end);
9bfd61d9 2044 ASSERT(!ret || ret == -EAGAIN);
c8b97818
CM
2045 if (ret == -EAGAIN) {
 2046		/* some of the pages are gone, let's avoid looping by
2047 * shortening the size of the delalloc range we're searching
2048 */
9655d298 2049 free_extent_state(cached_state);
7d788742 2050 cached_state = NULL;
c8b97818 2051 if (!loops) {
09cbfeaf 2052 max_bytes = PAGE_SIZE;
c8b97818
CM
2053 loops = 1;
2054 goto again;
2055 } else {
3522e903 2056 found = false;
c8b97818
CM
2057 goto out_failed;
2058 }
2059 }
c8b97818
CM
2060
2061 /* step three, lock the state bits for the whole range */
ff13db41 2062 lock_extent_bits(tree, delalloc_start, delalloc_end, &cached_state);
c8b97818
CM
2063
2064 /* then test to make sure it is all still delalloc */
2065 ret = test_range_bit(tree, delalloc_start, delalloc_end,
9655d298 2066 EXTENT_DELALLOC, 1, cached_state);
c8b97818 2067 if (!ret) {
9655d298 2068 unlock_extent_cached(tree, delalloc_start, delalloc_end,
e43bbe5e 2069 &cached_state);
c8b97818
CM
2070 __unlock_for_delalloc(inode, locked_page,
2071 delalloc_start, delalloc_end);
2072 cond_resched();
2073 goto again;
2074 }
9655d298 2075 free_extent_state(cached_state);
c8b97818
CM
2076 *start = delalloc_start;
2077 *end = delalloc_end;
2078out_failed:
2079 return found;
2080}
2081
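A hedged sketch of how a writepage-style caller would consume find_lock_delalloc_range() and its in/out @start/@end contract described above (kernel context assumed; 'inode', 'page' and 'page_end' are placeholders for the caller's state, not code from this file):

static void run_one_delalloc(struct inode *inode, struct page *page, u64 page_end)
{
	u64 delalloc_start = page_offset(page);
	u64 delalloc_end = page_end;	/* must be > delalloc_start */

	if (!find_lock_delalloc_range(inode, page, &delalloc_start, &delalloc_end))
		return;	/* no delalloc range starts inside the original range */

	/*
	 * Here [delalloc_start, delalloc_end] is delalloc, its pages are
	 * locked and the range is locked in the io_tree; the caller would
	 * now run the actual delalloc processing and unlock afterwards.
	 */
}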
ad7ff17b 2082void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
74e9194a 2083 struct page *locked_page,
f97e27e9 2084 u32 clear_bits, unsigned long page_ops)
873695b3 2085{
ad7ff17b 2086 clear_extent_bit(&inode->io_tree, start, end, clear_bits, 1, 0, NULL);
873695b3 2087
ad7ff17b 2088 __process_pages_contig(inode->vfs_inode.i_mapping, locked_page,
98af9ab1 2089 start, end, page_ops, NULL);
873695b3
LB
2090}
2091
d352ac68
CM
2092/*
2093 * count the number of bytes in the tree that have a given bit(s)
2094 * set. This can be fairly slow, except for EXTENT_DIRTY which is
2095 * cached. The total number found is returned.
2096 */
d1310b2e
CM
2097u64 count_range_bits(struct extent_io_tree *tree,
2098 u64 *start, u64 search_end, u64 max_bytes,
f97e27e9 2099 u32 bits, int contig)
d1310b2e
CM
2100{
2101 struct rb_node *node;
2102 struct extent_state *state;
2103 u64 cur_start = *start;
2104 u64 total_bytes = 0;
ec29ed5b 2105 u64 last = 0;
d1310b2e
CM
2106 int found = 0;
2107
fae7f21c 2108 if (WARN_ON(search_end <= cur_start))
d1310b2e 2109 return 0;
d1310b2e 2110
cad321ad 2111 spin_lock(&tree->lock);
d1310b2e
CM
2112 if (cur_start == 0 && bits == EXTENT_DIRTY) {
2113 total_bytes = tree->dirty_bytes;
2114 goto out;
2115 }
2116 /*
2117 * this search will find all the extents that end after
2118 * our range starts.
2119 */
80ea96b1 2120 node = tree_search(tree, cur_start);
d397712b 2121 if (!node)
d1310b2e 2122 goto out;
d1310b2e 2123
d397712b 2124 while (1) {
d1310b2e
CM
2125 state = rb_entry(node, struct extent_state, rb_node);
2126 if (state->start > search_end)
2127 break;
ec29ed5b
CM
2128 if (contig && found && state->start > last + 1)
2129 break;
2130 if (state->end >= cur_start && (state->state & bits) == bits) {
d1310b2e
CM
2131 total_bytes += min(search_end, state->end) + 1 -
2132 max(cur_start, state->start);
2133 if (total_bytes >= max_bytes)
2134 break;
2135 if (!found) {
af60bed2 2136 *start = max(cur_start, state->start);
d1310b2e
CM
2137 found = 1;
2138 }
ec29ed5b
CM
2139 last = state->end;
2140 } else if (contig && found) {
2141 break;
d1310b2e
CM
2142 }
2143 node = rb_next(node);
2144 if (!node)
2145 break;
2146 }
2147out:
cad321ad 2148 spin_unlock(&tree->lock);
d1310b2e
CM
2149 return total_bytes;
2150}
b2950863 2151
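A hedged usage sketch for count_range_bits() (kernel context; 'tree' and 'offset' are placeholders): with contig=1 the count stops at the first gap after the first matching state, and *start is moved forward to where that range actually begins.

static u64 contig_delalloc_bytes(struct extent_io_tree *tree, u64 offset)
{
	u64 start = offset;
	u64 bytes;

	/* contig=1: stop at the first gap after the first matching state */
	bytes = count_range_bits(tree, &start, (u64)-1, (u64)-1,
				 EXTENT_DELALLOC, 1);
	/* on success 'start' now points at the beginning of that range */
	return bytes;
}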
d352ac68
CM
2152/*
2153 * set the private field for a given byte offset in the tree. If there isn't
2154 * an extent_state there already, this does nothing.
2155 */
b3f167aa
JB
2156int set_state_failrec(struct extent_io_tree *tree, u64 start,
2157 struct io_failure_record *failrec)
d1310b2e
CM
2158{
2159 struct rb_node *node;
2160 struct extent_state *state;
2161 int ret = 0;
2162
cad321ad 2163 spin_lock(&tree->lock);
d1310b2e
CM
2164 /*
2165 * this search will find all the extents that end after
2166 * our range starts.
2167 */
80ea96b1 2168 node = tree_search(tree, start);
2b114d1d 2169 if (!node) {
d1310b2e
CM
2170 ret = -ENOENT;
2171 goto out;
2172 }
2173 state = rb_entry(node, struct extent_state, rb_node);
2174 if (state->start != start) {
2175 ret = -ENOENT;
2176 goto out;
2177 }
47dc196a 2178 state->failrec = failrec;
d1310b2e 2179out:
cad321ad 2180 spin_unlock(&tree->lock);
d1310b2e
CM
2181 return ret;
2182}
2183
2279a270 2184struct io_failure_record *get_state_failrec(struct extent_io_tree *tree, u64 start)
d1310b2e
CM
2185{
2186 struct rb_node *node;
2187 struct extent_state *state;
2279a270 2188 struct io_failure_record *failrec;
d1310b2e 2189
cad321ad 2190 spin_lock(&tree->lock);
d1310b2e
CM
2191 /*
2192 * this search will find all the extents that end after
2193 * our range starts.
2194 */
80ea96b1 2195 node = tree_search(tree, start);
2b114d1d 2196 if (!node) {
2279a270 2197 failrec = ERR_PTR(-ENOENT);
d1310b2e
CM
2198 goto out;
2199 }
2200 state = rb_entry(node, struct extent_state, rb_node);
2201 if (state->start != start) {
2279a270 2202 failrec = ERR_PTR(-ENOENT);
d1310b2e
CM
2203 goto out;
2204 }
2279a270
NB
2205
2206 failrec = state->failrec;
d1310b2e 2207out:
cad321ad 2208 spin_unlock(&tree->lock);
2279a270 2209 return failrec;
d1310b2e
CM
2210}
2211
2212/*
2213 * searches a range in the state tree for a given mask.
70dec807 2214 * If 'filled' == 1, this returns 1 only if every extent in the tree
d1310b2e
CM
2215 * has the bits set. Otherwise, 1 is returned if any bit in the
2216 * range is found set.
2217 */
2218int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
f97e27e9 2219 u32 bits, int filled, struct extent_state *cached)
d1310b2e
CM
2220{
2221 struct extent_state *state = NULL;
2222 struct rb_node *node;
2223 int bitset = 0;
d1310b2e 2224
cad321ad 2225 spin_lock(&tree->lock);
27a3507d 2226 if (cached && extent_state_in_tree(cached) && cached->start <= start &&
df98b6e2 2227 cached->end > start)
9655d298
CM
2228 node = &cached->rb_node;
2229 else
2230 node = tree_search(tree, start);
d1310b2e
CM
2231 while (node && start <= end) {
2232 state = rb_entry(node, struct extent_state, rb_node);
2233
2234 if (filled && state->start > start) {
2235 bitset = 0;
2236 break;
2237 }
2238
2239 if (state->start > end)
2240 break;
2241
2242 if (state->state & bits) {
2243 bitset = 1;
2244 if (!filled)
2245 break;
2246 } else if (filled) {
2247 bitset = 0;
2248 break;
2249 }
46562cec
CM
2250
2251 if (state->end == (u64)-1)
2252 break;
2253
d1310b2e
CM
2254 start = state->end + 1;
2255 if (start > end)
2256 break;
2257 node = rb_next(node);
2258 if (!node) {
2259 if (filled)
2260 bitset = 0;
2261 break;
2262 }
2263 }
cad321ad 2264 spin_unlock(&tree->lock);
d1310b2e
CM
2265 return bitset;
2266}
d1310b2e 2267
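The 'filled' flag of test_range_bit() picks between "every byte in the range has the bits" and "any byte has the bits". A hedged sketch of both modes (kernel context; 'tree', 'start' and 'end' are placeholders, no cached state is passed):

static void probe_range(struct extent_io_tree *tree, u64 start, u64 end)
{
	int all_delalloc = test_range_bit(tree, start, end,
					  EXTENT_DELALLOC, 1, NULL);
	int any_damaged = test_range_bit(tree, start, end,
					 EXTENT_DAMAGED, 0, NULL);

	if (all_delalloc) {
		/* the whole [start, end] range is marked delalloc */
	}
	if (any_damaged) {
		/* at least one byte in [start, end] is marked damaged */
	}
}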
7870d082
JB
2268int free_io_failure(struct extent_io_tree *failure_tree,
2269 struct extent_io_tree *io_tree,
2270 struct io_failure_record *rec)
4a54c8c1
JS
2271{
2272 int ret;
2273 int err = 0;
4a54c8c1 2274
47dc196a 2275 set_state_failrec(failure_tree, rec->start, NULL);
4a54c8c1
JS
2276 ret = clear_extent_bits(failure_tree, rec->start,
2277 rec->start + rec->len - 1,
91166212 2278 EXTENT_LOCKED | EXTENT_DIRTY);
4a54c8c1
JS
2279 if (ret)
2280 err = ret;
2281
7870d082 2282 ret = clear_extent_bits(io_tree, rec->start,
53b381b3 2283 rec->start + rec->len - 1,
91166212 2284 EXTENT_DAMAGED);
53b381b3
DW
2285 if (ret && !err)
2286 err = ret;
4a54c8c1
JS
2287
2288 kfree(rec);
2289 return err;
2290}
2291
4a54c8c1
JS
2292/*
2293 * this bypasses the standard btrfs submit functions deliberately, as
2294 * the standard behavior is to write all copies in a raid setup. here we only
2295 * want to write the one bad copy. so we do the mapping for ourselves and issue
2296 * submit_bio directly.
3ec706c8 2297 * to avoid any synchronization issues, wait for the data after writing, which
4a54c8c1
JS
2298 * actually prevents the read that triggered the error from finishing.
2299 * currently, there can be no more than two copies of every data bit. thus,
2300 * exactly one rewrite is required.
2301 */
38d5e541
QW
2302static int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
2303 u64 length, u64 logical, struct page *page,
2304 unsigned int pg_offset, int mirror_num)
4a54c8c1
JS
2305{
2306 struct bio *bio;
2307 struct btrfs_device *dev;
4a54c8c1
JS
2308 u64 map_length = 0;
2309 u64 sector;
4c664611 2310 struct btrfs_io_context *bioc = NULL;
4a54c8c1
JS
2311 int ret;
2312
1751e8a6 2313 ASSERT(!(fs_info->sb->s_flags & SB_RDONLY));
4a54c8c1
JS
2314 BUG_ON(!mirror_num);
2315
554aed7d
JT
2316 if (btrfs_repair_one_zone(fs_info, logical))
2317 return 0;
f7ef5287 2318
c3a3b19b 2319 bio = btrfs_bio_alloc(1);
4f024f37 2320 bio->bi_iter.bi_size = 0;
4a54c8c1
JS
2321 map_length = length;
2322
b5de8d0d 2323 /*
4c664611 2324 * Avoid races with device replace and make sure our bioc has devices
b5de8d0d
FM
2325 * associated to its stripes that don't go away while we are doing the
2326 * read repair operation.
2327 */
2328 btrfs_bio_counter_inc_blocked(fs_info);
e4ff5fb5 2329 if (btrfs_is_parity_mirror(fs_info, logical, length)) {
c725328c
LB
2330 /*
2331 * Note that we don't use BTRFS_MAP_WRITE because it's supposed
2332 * to update all raid stripes, but here we just want to correct
2333 * bad stripe, thus BTRFS_MAP_READ is abused to only get the bad
2334 * stripe's dev and sector.
2335 */
2336 ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical,
4c664611 2337 &map_length, &bioc, 0);
c725328c
LB
2338 if (ret) {
2339 btrfs_bio_counter_dec(fs_info);
2340 bio_put(bio);
2341 return -EIO;
2342 }
4c664611 2343 ASSERT(bioc->mirror_num == 1);
c725328c
LB
2344 } else {
2345 ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, logical,
4c664611 2346 &map_length, &bioc, mirror_num);
c725328c
LB
2347 if (ret) {
2348 btrfs_bio_counter_dec(fs_info);
2349 bio_put(bio);
2350 return -EIO;
2351 }
4c664611 2352 BUG_ON(mirror_num != bioc->mirror_num);
4a54c8c1 2353 }
c725328c 2354
4c664611 2355 sector = bioc->stripes[bioc->mirror_num - 1].physical >> 9;
4f024f37 2356 bio->bi_iter.bi_sector = sector;
4c664611
QW
2357 dev = bioc->stripes[bioc->mirror_num - 1].dev;
2358 btrfs_put_bioc(bioc);
ebbede42
AJ
2359 if (!dev || !dev->bdev ||
2360 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
b5de8d0d 2361 btrfs_bio_counter_dec(fs_info);
4a54c8c1
JS
2362 bio_put(bio);
2363 return -EIO;
2364 }
74d46992 2365 bio_set_dev(bio, dev->bdev);
70fd7614 2366 bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
ffdd2018 2367 bio_add_page(bio, page, length, pg_offset);
4a54c8c1 2368
4e49ea4a 2369 if (btrfsic_submit_bio_wait(bio)) {
4a54c8c1 2370 /* try to remap that extent elsewhere? */
b5de8d0d 2371 btrfs_bio_counter_dec(fs_info);
4a54c8c1 2372 bio_put(bio);
442a4f63 2373 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
4a54c8c1
JS
2374 return -EIO;
2375 }
2376
b14af3b4
DS
2377 btrfs_info_rl_in_rcu(fs_info,
2378 "read error corrected: ino %llu off %llu (dev %s sector %llu)",
6ec656bc 2379 ino, start,
1203b681 2380 rcu_str_deref(dev->name), sector);
b5de8d0d 2381 btrfs_bio_counter_dec(fs_info);
4a54c8c1
JS
2382 bio_put(bio);
2383 return 0;
2384}
2385
2b48966a 2386int btrfs_repair_eb_io_failure(const struct extent_buffer *eb, int mirror_num)
ea466794 2387{
20a1fbf9 2388 struct btrfs_fs_info *fs_info = eb->fs_info;
ea466794 2389 u64 start = eb->start;
cc5e31a4 2390 int i, num_pages = num_extent_pages(eb);
d95603b2 2391 int ret = 0;
ea466794 2392
bc98a42c 2393 if (sb_rdonly(fs_info->sb))
908960c6
ID
2394 return -EROFS;
2395
ea466794 2396 for (i = 0; i < num_pages; i++) {
fb85fc9a 2397 struct page *p = eb->pages[i];
1203b681 2398
6ec656bc 2399 ret = repair_io_failure(fs_info, 0, start, PAGE_SIZE, start, p,
1203b681 2400 start - page_offset(p), mirror_num);
ea466794
JB
2401 if (ret)
2402 break;
09cbfeaf 2403 start += PAGE_SIZE;
ea466794
JB
2404 }
2405
2406 return ret;
2407}
2408
4a54c8c1
JS
2409/*
2410 * each time an IO finishes, we do a fast check in the IO failure tree
2411 * to see if we need to process or clean up an io_failure_record
2412 */
7870d082
JB
2413int clean_io_failure(struct btrfs_fs_info *fs_info,
2414 struct extent_io_tree *failure_tree,
2415 struct extent_io_tree *io_tree, u64 start,
2416 struct page *page, u64 ino, unsigned int pg_offset)
4a54c8c1
JS
2417{
2418 u64 private;
4a54c8c1 2419 struct io_failure_record *failrec;
4a54c8c1
JS
2420 struct extent_state *state;
2421 int num_copies;
4a54c8c1 2422 int ret;
4a54c8c1
JS
2423
2424 private = 0;
7870d082
JB
2425 ret = count_range_bits(failure_tree, &private, (u64)-1, 1,
2426 EXTENT_DIRTY, 0);
4a54c8c1
JS
2427 if (!ret)
2428 return 0;
2429
2279a270
NB
2430 failrec = get_state_failrec(failure_tree, start);
2431 if (IS_ERR(failrec))
4a54c8c1
JS
2432 return 0;
2433
4a54c8c1
JS
2434 BUG_ON(!failrec->this_mirror);
2435
bc98a42c 2436 if (sb_rdonly(fs_info->sb))
908960c6 2437 goto out;
4a54c8c1 2438
7870d082
JB
2439 spin_lock(&io_tree->lock);
2440 state = find_first_extent_bit_state(io_tree,
4a54c8c1
JS
2441 failrec->start,
2442 EXTENT_LOCKED);
7870d082 2443 spin_unlock(&io_tree->lock);
4a54c8c1 2444
883d0de4
MX
2445 if (state && state->start <= failrec->start &&
2446 state->end >= failrec->start + failrec->len - 1) {
3ec706c8
SB
2447 num_copies = btrfs_num_copies(fs_info, failrec->logical,
2448 failrec->len);
4a54c8c1 2449 if (num_copies > 1) {
7870d082
JB
2450 repair_io_failure(fs_info, ino, start, failrec->len,
2451 failrec->logical, page, pg_offset,
2452 failrec->failed_mirror);
4a54c8c1
JS
2453 }
2454 }
2455
2456out:
7870d082 2457 free_io_failure(failure_tree, io_tree, failrec);
4a54c8c1 2458
454ff3de 2459 return 0;
4a54c8c1
JS
2460}
2461
f612496b
MX
2462/*
2463 * Can be called when
 2464 * - holding the extent lock
 2465 * - under an ordered extent
 2466 * - the inode is being freed
2467 */
7ab7956e 2468void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start, u64 end)
f612496b 2469{
7ab7956e 2470 struct extent_io_tree *failure_tree = &inode->io_failure_tree;
f612496b
MX
2471 struct io_failure_record *failrec;
2472 struct extent_state *state, *next;
2473
2474 if (RB_EMPTY_ROOT(&failure_tree->state))
2475 return;
2476
2477 spin_lock(&failure_tree->lock);
2478 state = find_first_extent_bit_state(failure_tree, start, EXTENT_DIRTY);
2479 while (state) {
2480 if (state->start > end)
2481 break;
2482
2483 ASSERT(state->end <= end);
2484
2485 next = next_state(state);
2486
47dc196a 2487 failrec = state->failrec;
f612496b
MX
2488 free_extent_state(state);
2489 kfree(failrec);
2490
2491 state = next;
2492 }
2493 spin_unlock(&failure_tree->lock);
2494}
2495
3526302f 2496static struct io_failure_record *btrfs_get_io_failure_record(struct inode *inode,
150e4b05 2497 u64 start)
4a54c8c1 2498{
ab8d0fc4 2499 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2fe6303e 2500 struct io_failure_record *failrec;
4a54c8c1 2501 struct extent_map *em;
4a54c8c1
JS
2502 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2503 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2504 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
150e4b05 2505 const u32 sectorsize = fs_info->sectorsize;
4a54c8c1 2506 int ret;
4a54c8c1
JS
2507 u64 logical;
2508
2279a270 2509 failrec = get_state_failrec(failure_tree, start);
3526302f 2510 if (!IS_ERR(failrec)) {
ab8d0fc4 2511 btrfs_debug(fs_info,
1245835d
QW
2512 "Get IO Failure Record: (found) logical=%llu, start=%llu, len=%llu",
2513 failrec->logical, failrec->start, failrec->len);
4a54c8c1
JS
2514 /*
2515 * when data can be on disk more than twice, add to failrec here
2516 * (e.g. with a list for failed_mirror) to make
2517 * clean_io_failure() clean all those errors at once.
2518 */
3526302f
NB
2519
2520 return failrec;
4a54c8c1 2521 }
2fe6303e 2522
3526302f
NB
2523 failrec = kzalloc(sizeof(*failrec), GFP_NOFS);
2524 if (!failrec)
2525 return ERR_PTR(-ENOMEM);
2fe6303e 2526
3526302f 2527 failrec->start = start;
150e4b05 2528 failrec->len = sectorsize;
3526302f
NB
2529 failrec->this_mirror = 0;
2530 failrec->bio_flags = 0;
3526302f
NB
2531
2532 read_lock(&em_tree->lock);
2533 em = lookup_extent_mapping(em_tree, start, failrec->len);
2534 if (!em) {
2535 read_unlock(&em_tree->lock);
2536 kfree(failrec);
2537 return ERR_PTR(-EIO);
2538 }
2539
2540 if (em->start > start || em->start + em->len <= start) {
2541 free_extent_map(em);
2542 em = NULL;
2543 }
2544 read_unlock(&em_tree->lock);
2545 if (!em) {
2546 kfree(failrec);
2547 return ERR_PTR(-EIO);
2548 }
2549
2550 logical = start - em->start;
2551 logical = em->block_start + logical;
2552 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2553 logical = em->block_start;
2554 failrec->bio_flags = EXTENT_BIO_COMPRESSED;
2555 extent_set_compress_type(&failrec->bio_flags, em->compress_type);
2556 }
2557
2558 btrfs_debug(fs_info,
2559 "Get IO Failure Record: (new) logical=%llu, start=%llu, len=%llu",
2560 logical, start, failrec->len);
2561
2562 failrec->logical = logical;
2563 free_extent_map(em);
2564
2565 /* Set the bits in the private failure tree */
150e4b05 2566 ret = set_extent_bits(failure_tree, start, start + sectorsize - 1,
3526302f
NB
2567 EXTENT_LOCKED | EXTENT_DIRTY);
2568 if (ret >= 0) {
2569 ret = set_state_failrec(failure_tree, start, failrec);
2570 /* Set the bits in the inode's tree */
150e4b05
QW
2571 ret = set_extent_bits(tree, start, start + sectorsize - 1,
2572 EXTENT_DAMAGED);
3526302f
NB
2573 } else if (ret < 0) {
2574 kfree(failrec);
2575 return ERR_PTR(ret);
2576 }
2577
2578 return failrec;
2fe6303e
MX
2579}
2580
1245835d 2581static bool btrfs_check_repairable(struct inode *inode,
ce06d3ec
OS
2582 struct io_failure_record *failrec,
2583 int failed_mirror)
2fe6303e 2584{
ab8d0fc4 2585 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2fe6303e
MX
2586 int num_copies;
2587
ab8d0fc4 2588 num_copies = btrfs_num_copies(fs_info, failrec->logical, failrec->len);
4a54c8c1
JS
2589 if (num_copies == 1) {
2590 /*
2591 * we only have a single copy of the data, so don't bother with
2592 * all the retry and error correction code that follows. no
2593 * matter what the error is, it is very likely to persist.
2594 */
ab8d0fc4
JM
2595 btrfs_debug(fs_info,
2596 "Check Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d",
2597 num_copies, failrec->this_mirror, failed_mirror);
c3cfb656 2598 return false;
4a54c8c1
JS
2599 }
2600
1245835d
QW
2601 /* The failure record should only contain one sector */
2602 ASSERT(failrec->len == fs_info->sectorsize);
2603
4a54c8c1 2604 /*
1245835d
QW
2605 * There are two premises:
2606 * a) deliver good data to the caller
2607 * b) correct the bad sectors on disk
2608 *
2609 * Since we're only doing repair for one sector, we only need to get
2610 * a good copy of the failed sector and if we succeed, we have setup
2611 * everything for repair_io_failure to do the rest for us.
4a54c8c1 2612 */
510671d2 2613 ASSERT(failed_mirror);
1245835d
QW
2614 failrec->failed_mirror = failed_mirror;
2615 failrec->this_mirror++;
2616 if (failrec->this_mirror == failed_mirror)
4a54c8c1 2617 failrec->this_mirror++;
4a54c8c1 2618
facc8a22 2619 if (failrec->this_mirror > num_copies) {
ab8d0fc4
JM
2620 btrfs_debug(fs_info,
2621 "Check Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d",
2622 num_copies, failrec->this_mirror, failed_mirror);
c3cfb656 2623 return false;
4a54c8c1
JS
2624 }
2625
c3cfb656 2626 return true;
2fe6303e
MX
2627}
2628
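The this_mirror advance in btrfs_check_repairable() above skips the mirror that already failed and gives up once the candidate exceeds num_copies. A standalone model of that rotation (illustration only, not the kernel code):

#include <stdio.h>

int main(void)
{
	int num_copies = 2;	/* e.g. two copies of the data */
	int failed_mirror = 1;	/* the copy that produced the bad read */
	int this_mirror = 0;	/* failrec starts at 0 */

	this_mirror++;				/* -> 1 */
	if (this_mirror == failed_mirror)
		this_mirror++;			/* skip the bad copy -> 2 */

	if (this_mirror > num_copies)
		printf("no repairable copy left\n");
	else
		printf("retry using mirror %d\n", this_mirror);	/* mirror 2 */
	return 0;
}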
150e4b05
QW
2629int btrfs_repair_one_sector(struct inode *inode,
2630 struct bio *failed_bio, u32 bio_offset,
2631 struct page *page, unsigned int pgoff,
2632 u64 start, int failed_mirror,
2633 submit_bio_hook_t *submit_bio_hook)
2fe6303e
MX
2634{
2635 struct io_failure_record *failrec;
77d5d689 2636 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2fe6303e 2637 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
7870d082 2638 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
c3a3b19b 2639 struct btrfs_bio *failed_bbio = btrfs_bio(failed_bio);
7ffd27e3 2640 const int icsum = bio_offset >> fs_info->sectorsize_bits;
77d5d689 2641 struct bio *repair_bio;
c3a3b19b 2642 struct btrfs_bio *repair_bbio;
2fe6303e 2643
77d5d689
OS
2644 btrfs_debug(fs_info,
2645 "repair read error: read error at %llu", start);
2fe6303e 2646
1f7ad75b 2647 BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
2fe6303e 2648
150e4b05 2649 failrec = btrfs_get_io_failure_record(inode, start);
3526302f 2650 if (IS_ERR(failrec))
150e4b05 2651 return PTR_ERR(failrec);
2fe6303e 2652
1245835d
QW
2653
2654 if (!btrfs_check_repairable(inode, failrec, failed_mirror)) {
7870d082 2655 free_io_failure(failure_tree, tree, failrec);
150e4b05 2656 return -EIO;
2fe6303e
MX
2657 }
2658
c3a3b19b
QW
2659 repair_bio = btrfs_bio_alloc(1);
2660 repair_bbio = btrfs_bio(repair_bio);
00d82525 2661 repair_bbio->file_offset = start;
77d5d689 2662 repair_bio->bi_opf = REQ_OP_READ;
77d5d689
OS
2663 repair_bio->bi_end_io = failed_bio->bi_end_io;
2664 repair_bio->bi_iter.bi_sector = failrec->logical >> 9;
2665 repair_bio->bi_private = failed_bio->bi_private;
2fe6303e 2666
c3a3b19b 2667 if (failed_bbio->csum) {
223486c2 2668 const u32 csum_size = fs_info->csum_size;
77d5d689 2669
c3a3b19b
QW
2670 repair_bbio->csum = repair_bbio->csum_inline;
2671 memcpy(repair_bbio->csum,
2672 failed_bbio->csum + csum_size * icsum, csum_size);
77d5d689 2673 }
2fe6303e 2674
77d5d689 2675 bio_add_page(repair_bio, page, failrec->len, pgoff);
c3a3b19b 2676 repair_bbio->iter = repair_bio->bi_iter;
4a54c8c1 2677
ab8d0fc4 2678 btrfs_debug(btrfs_sb(inode->i_sb),
1245835d
QW
2679 "repair read error: submitting new read to mirror %d",
2680 failrec->this_mirror);
4a54c8c1 2681
8cbc3001
JB
2682 /*
2683 * At this point we have a bio, so any errors from submit_bio_hook()
2684 * will be handled by the endio on the repair_bio, so we can't return an
2685 * error here.
2686 */
2687 submit_bio_hook(inode, repair_bio, failrec->this_mirror, failrec->bio_flags);
2688 return BLK_STS_OK;
150e4b05
QW
2689}
2690
2691static void end_page_read(struct page *page, bool uptodate, u64 start, u32 len)
2692{
2693 struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
2694
2695 ASSERT(page_offset(page) <= start &&
2696 start + len <= page_offset(page) + PAGE_SIZE);
2697
150e4b05 2698 if (uptodate) {
14605409
BB
2699 if (fsverity_active(page->mapping->host) &&
2700 !PageError(page) &&
2701 !PageUptodate(page) &&
2702 start < i_size_read(page->mapping->host) &&
2703 !fsverity_verify_page(page)) {
2704 btrfs_page_set_error(fs_info, page, start, len);
2705 } else {
2706 btrfs_page_set_uptodate(fs_info, page, start, len);
2707 }
150e4b05
QW
2708 } else {
2709 btrfs_page_clear_uptodate(fs_info, page, start, len);
2710 btrfs_page_set_error(fs_info, page, start, len);
2711 }
2712
2713 if (fs_info->sectorsize == PAGE_SIZE)
2714 unlock_page(page);
3d078efa 2715 else
150e4b05
QW
2716 btrfs_subpage_end_reader(fs_info, page, start, len);
2717}
2718
2719static blk_status_t submit_read_repair(struct inode *inode,
2720 struct bio *failed_bio, u32 bio_offset,
2721 struct page *page, unsigned int pgoff,
2722 u64 start, u64 end, int failed_mirror,
2723 unsigned int error_bitmap,
2724 submit_bio_hook_t *submit_bio_hook)
2725{
2726 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2727 const u32 sectorsize = fs_info->sectorsize;
2728 const int nr_bits = (end + 1 - start) >> fs_info->sectorsize_bits;
2729 int error = 0;
2730 int i;
2731
2732 BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
2733
2734 /* We're here because we had some read errors or csum mismatch */
2735 ASSERT(error_bitmap);
2736
2737 /*
2738 * We only get called on buffered IO, thus page must be mapped and bio
2739 * must not be cloned.
2740 */
2741 ASSERT(page->mapping && !bio_flagged(failed_bio, BIO_CLONED));
2742
2743 /* Iterate through all the sectors in the range */
2744 for (i = 0; i < nr_bits; i++) {
2745 const unsigned int offset = i * sectorsize;
2746 struct extent_state *cached = NULL;
2747 bool uptodate = false;
2748 int ret;
2749
2750 if (!(error_bitmap & (1U << i))) {
2751 /*
2752 * This sector has no error, just end the page read
2753 * and unlock the range.
2754 */
2755 uptodate = true;
2756 goto next;
2757 }
2758
2759 ret = btrfs_repair_one_sector(inode, failed_bio,
2760 bio_offset + offset,
2761 page, pgoff + offset, start + offset,
2762 failed_mirror, submit_bio_hook);
2763 if (!ret) {
2764 /*
2765 * We have submitted the read repair, the page release
2766 * will be handled by the endio function of the
2767 * submitted repair bio.
2768 * Thus we don't need to do any thing here.
2769 */
2770 continue;
2771 }
2772 /*
2773 * Repair failed, just record the error but still continue.
2774 * Or the remaining sectors will not be properly unlocked.
2775 */
2776 if (!error)
2777 error = ret;
2778next:
2779 end_page_read(page, uptodate, start + offset, sectorsize);
2780 if (uptodate)
2781 set_extent_uptodate(&BTRFS_I(inode)->io_tree,
2782 start + offset,
2783 start + offset + sectorsize - 1,
2784 &cached, GFP_ATOMIC);
2785 unlock_extent_cached_atomic(&BTRFS_I(inode)->io_tree,
2786 start + offset,
2787 start + offset + sectorsize - 1,
2788 &cached);
2789 }
2790 return errno_to_blk_status(error);
4a54c8c1
JS
2791}
2792
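In submit_read_repair() above, each set bit i of @error_bitmap selects one sectorsize chunk of the bvec for repair, while clear bits just end the page read for that sector. A standalone model of the per-sector walk (illustration only, 4K sectors assumed):

#include <stdio.h>

int main(void)
{
	const unsigned int sectorsize = 4096;
	unsigned long long start = 0x10000;	/* file offset of the bvec */
	unsigned int nr_bits = 4;		/* a 16K bvec -> 4 sectors */
	unsigned int error_bitmap = 0x5;	/* sectors 0 and 2 failed */

	for (unsigned int i = 0; i < nr_bits; i++) {
		unsigned long long off = start + (unsigned long long)i * sectorsize;

		if (error_bitmap & (1U << i))
			printf("repair sector at 0x%llx\n", off);
		else
			printf("sector at 0x%llx is good, just end the read\n", off);
	}
	return 0;
}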
d1310b2e
CM
2793/* lots and lots of room for performance fixes in the end_bio funcs */
2794
b5227c07 2795void end_extent_writepage(struct page *page, int err, u64 start, u64 end)
87826df0 2796{
38a39ac7 2797 struct btrfs_inode *inode;
25c1252a 2798 const bool uptodate = (err == 0);
3e2426bd 2799 int ret = 0;
87826df0 2800
38a39ac7
QW
2801 ASSERT(page && page->mapping);
2802 inode = BTRFS_I(page->mapping->host);
2803 btrfs_writepage_endio_finish_ordered(inode, page, start, end, uptodate);
87826df0 2804
87826df0 2805 if (!uptodate) {
963e4db8
QW
2806 const struct btrfs_fs_info *fs_info = inode->root->fs_info;
2807 u32 len;
2808
2809 ASSERT(end + 1 - start <= U32_MAX);
2810 len = end + 1 - start;
2811
2812 btrfs_page_clear_uptodate(fs_info, page, start, len);
2813 btrfs_page_set_error(fs_info, page, start, len);
bff5baf8 2814 ret = err < 0 ? err : -EIO;
5dca6eea 2815 mapping_set_error(page->mapping, ret);
87826df0 2816 }
87826df0
JM
2817}
2818
d1310b2e
CM
2819/*
2820 * after a writepage IO is done, we need to:
2821 * clear the uptodate bits on error
2822 * clear the writeback bits in the extent tree for this IO
2823 * end_page_writeback if the page has no more pending IO
2824 *
2825 * Scheduling is not allowed, so the extent state tree is expected
2826 * to have one and only one object corresponding to this IO.
2827 */
4246a0b6 2828static void end_bio_extent_writepage(struct bio *bio)
d1310b2e 2829{
4e4cbee9 2830 int error = blk_status_to_errno(bio->bi_status);
2c30c71b 2831 struct bio_vec *bvec;
d1310b2e
CM
2832 u64 start;
2833 u64 end;
6dc4f100 2834 struct bvec_iter_all iter_all;
d8e3fb10 2835 bool first_bvec = true;
d1310b2e 2836
c09abff8 2837 ASSERT(!bio_flagged(bio, BIO_CLONED));
2b070cfe 2838 bio_for_each_segment_all(bvec, bio, iter_all) {
d1310b2e 2839 struct page *page = bvec->bv_page;
0b246afa
JM
2840 struct inode *inode = page->mapping->host;
2841 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
321a02db
QW
2842 const u32 sectorsize = fs_info->sectorsize;
2843
2844 /* Our read/write should always be sector aligned. */
2845 if (!IS_ALIGNED(bvec->bv_offset, sectorsize))
2846 btrfs_err(fs_info,
2847 "partial page write in btrfs with offset %u and length %u",
2848 bvec->bv_offset, bvec->bv_len);
2849 else if (!IS_ALIGNED(bvec->bv_len, sectorsize))
2850 btrfs_info(fs_info,
2851 "incomplete page write with offset %u and length %u",
2852 bvec->bv_offset, bvec->bv_len);
2853
2854 start = page_offset(page) + bvec->bv_offset;
2855 end = start + bvec->bv_len - 1;
d1310b2e 2856
d8e3fb10
NA
2857 if (first_bvec) {
2858 btrfs_record_physical_zoned(inode, start, bio);
2859 first_bvec = false;
2860 }
2861
4e4cbee9 2862 end_extent_writepage(page, error, start, end);
9047e317
QW
2863
2864 btrfs_page_clear_writeback(fs_info, page, start, bvec->bv_len);
2c30c71b 2865 }
2b1f55b0 2866
d1310b2e 2867 bio_put(bio);
d1310b2e
CM
2868}
2869
94e8c95c
QW
2870/*
2871 * Record previously processed extent range
2872 *
2873 * For endio_readpage_release_extent() to handle a full extent range, reducing
2874 * the extent io operations.
2875 */
2876struct processed_extent {
2877 struct btrfs_inode *inode;
2878 /* Start of the range in @inode */
2879 u64 start;
2e626e56 2880 /* End of the range in @inode */
94e8c95c
QW
2881 u64 end;
2882 bool uptodate;
2883};
2884
2885/*
2886 * Try to release processed extent range
2887 *
2888 * May not release the extent range right now if the current range is
2889 * contiguous to processed extent.
2890 *
 2891 * Will release the processed extent when @inode or @uptodate changes, or when
 2892 * the new range is no longer contiguous to the processed range.
2893 *
2894 * Passing @inode == NULL will force processed extent to be released.
2895 */
2896static void endio_readpage_release_extent(struct processed_extent *processed,
2897 struct btrfs_inode *inode, u64 start, u64 end,
2898 bool uptodate)
883d0de4
MX
2899{
2900 struct extent_state *cached = NULL;
94e8c95c
QW
2901 struct extent_io_tree *tree;
2902
2903 /* The first extent, initialize @processed */
2904 if (!processed->inode)
2905 goto update;
883d0de4 2906
94e8c95c
QW
2907 /*
2908 * Contiguous to processed extent, just uptodate the end.
2909 *
2910 * Several things to notice:
2911 *
2912 * - bio can be merged as long as on-disk bytenr is contiguous
2913 * This means we can have page belonging to other inodes, thus need to
2914 * check if the inode still matches.
2915 * - bvec can contain range beyond current page for multi-page bvec
2916 * Thus we need to do processed->end + 1 >= start check
2917 */
2918 if (processed->inode == inode && processed->uptodate == uptodate &&
2919 processed->end + 1 >= start && end >= processed->end) {
2920 processed->end = end;
2921 return;
2922 }
2923
2924 tree = &processed->inode->io_tree;
2925 /*
2926 * Now we don't have range contiguous to the processed range, release
2927 * the processed range now.
2928 */
2929 if (processed->uptodate && tree->track_uptodate)
2930 set_extent_uptodate(tree, processed->start, processed->end,
2931 &cached, GFP_ATOMIC);
2932 unlock_extent_cached_atomic(tree, processed->start, processed->end,
2933 &cached);
2934
2935update:
2936 /* Update processed to current range */
2937 processed->inode = inode;
2938 processed->start = start;
2939 processed->end = end;
2940 processed->uptodate = uptodate;
883d0de4
MX
2941}
2942
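The helper above batches contiguous, same-inode, same-uptodate ranges and only unlocks when a non-mergeable range (or the final NULL flush) arrives. A hedged sketch of the call pattern the read endio below relies on (kernel context; 'inode_a' and the offsets are placeholders):

	struct processed_extent processed = { 0 };

	endio_readpage_release_extent(&processed, inode_a, 0, 4095, true);	/* starts the batch */
	endio_readpage_release_extent(&processed, inode_a, 4096, 8191, true);	/* merged: now 0..8191 */
	endio_readpage_release_extent(&processed, inode_a, 16384, 20479, true);	/* gap: releases 0..8191 */
	endio_readpage_release_extent(&processed, NULL, 0, 0, false);		/* final flush */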
92082d40
QW
2943static void begin_page_read(struct btrfs_fs_info *fs_info, struct page *page)
2944{
2945 ASSERT(PageLocked(page));
2946 if (fs_info->sectorsize == PAGE_SIZE)
2947 return;
2948
2949 ASSERT(PagePrivate(page));
2950 btrfs_subpage_start_reader(fs_info, page, page_offset(page), PAGE_SIZE);
2951}
2952
d9bb77d5
QW
2953/*
 2954 * Find the extent buffer for a given bytenr.
2955 *
2956 * This is for end_bio_extent_readpage(), thus we can't do any unsafe locking
2957 * in endio context.
2958 */
2959static struct extent_buffer *find_extent_buffer_readpage(
2960 struct btrfs_fs_info *fs_info, struct page *page, u64 bytenr)
2961{
2962 struct extent_buffer *eb;
2963
2964 /*
2965 * For regular sectorsize, we can use page->private to grab extent
2966 * buffer
2967 */
2968 if (fs_info->sectorsize == PAGE_SIZE) {
2969 ASSERT(PagePrivate(page) && page->private);
2970 return (struct extent_buffer *)page->private;
2971 }
2972
2973 /* For subpage case, we need to lookup buffer radix tree */
2974 rcu_read_lock();
2975 eb = radix_tree_lookup(&fs_info->buffer_radix,
2976 bytenr >> fs_info->sectorsize_bits);
2977 rcu_read_unlock();
2978 ASSERT(eb);
2979 return eb;
2980}
2981
d1310b2e
CM
2982/*
2983 * after a readpage IO is done, we need to:
2984 * clear the uptodate bits on error
2985 * set the uptodate bits if things worked
2986 * set the page up to date if all extents in the tree are uptodate
2987 * clear the lock bit in the extent tree
2988 * unlock the page if there are no other extents locked for it
2989 *
2990 * Scheduling is not allowed, so the extent state tree is expected
2991 * to have one and only one object corresponding to this IO.
2992 */
4246a0b6 2993static void end_bio_extent_readpage(struct bio *bio)
d1310b2e 2994{
2c30c71b 2995 struct bio_vec *bvec;
c3a3b19b 2996 struct btrfs_bio *bbio = btrfs_bio(bio);
7870d082 2997 struct extent_io_tree *tree, *failure_tree;
94e8c95c 2998 struct processed_extent processed = { 0 };
7ffd27e3
QW
2999 /*
3000 * The offset to the beginning of a bio, since one bio can never be
3001 * larger than UINT_MAX, u32 here is enough.
3002 */
3003 u32 bio_offset = 0;
5cf1ab56 3004 int mirror;
d1310b2e 3005 int ret;
6dc4f100 3006 struct bvec_iter_all iter_all;
d1310b2e 3007
c09abff8 3008 ASSERT(!bio_flagged(bio, BIO_CLONED));
2b070cfe 3009 bio_for_each_segment_all(bvec, bio, iter_all) {
150e4b05 3010 bool uptodate = !bio->bi_status;
d1310b2e 3011 struct page *page = bvec->bv_page;
a71754fc 3012 struct inode *inode = page->mapping->host;
ab8d0fc4 3013 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7ffd27e3 3014 const u32 sectorsize = fs_info->sectorsize;
150e4b05 3015 unsigned int error_bitmap = (unsigned int)-1;
7ffd27e3
QW
3016 u64 start;
3017 u64 end;
3018 u32 len;
507903b8 3019
ab8d0fc4
JM
3020 btrfs_debug(fs_info,
3021 "end_bio_extent_readpage: bi_sector=%llu, err=%d, mirror=%u",
1201b58b 3022 bio->bi_iter.bi_sector, bio->bi_status,
c3a3b19b 3023 bbio->mirror_num);
a71754fc 3024 tree = &BTRFS_I(inode)->io_tree;
7870d082 3025 failure_tree = &BTRFS_I(inode)->io_failure_tree;
902b22f3 3026
8b8bbd46
QW
3027 /*
3028 * We always issue full-sector reads, but if some block in a
3029 * page fails to read, blk_update_request() will advance
3030 * bv_offset and adjust bv_len to compensate. Print a warning
3031 * for unaligned offsets, and an error if they don't add up to
3032 * a full sector.
3033 */
3034 if (!IS_ALIGNED(bvec->bv_offset, sectorsize))
3035 btrfs_err(fs_info,
3036 "partial page read in btrfs with offset %u and length %u",
3037 bvec->bv_offset, bvec->bv_len);
3038 else if (!IS_ALIGNED(bvec->bv_offset + bvec->bv_len,
3039 sectorsize))
3040 btrfs_info(fs_info,
3041 "incomplete page read with offset %u and length %u",
3042 bvec->bv_offset, bvec->bv_len);
3043
3044 start = page_offset(page) + bvec->bv_offset;
3045 end = start + bvec->bv_len - 1;
facc8a22 3046 len = bvec->bv_len;
d1310b2e 3047
c3a3b19b 3048 mirror = bbio->mirror_num;
78e62c02 3049 if (likely(uptodate)) {
150e4b05 3050 if (is_data_inode(inode)) {
c3a3b19b 3051 error_bitmap = btrfs_verify_data_csum(bbio,
5e295768 3052 bio_offset, page, start, end);
150e4b05
QW
3053 ret = error_bitmap;
3054 } else {
c3a3b19b 3055 ret = btrfs_validate_metadata_buffer(bbio,
8e1dc982 3056 page, start, end, mirror);
150e4b05 3057 }
5ee0844d 3058 if (ret)
150e4b05 3059 uptodate = false;
5ee0844d 3060 else
7870d082
JB
3061 clean_io_failure(BTRFS_I(inode)->root->fs_info,
3062 failure_tree, tree, start,
3063 page,
3064 btrfs_ino(BTRFS_I(inode)), 0);
d1310b2e 3065 }
ea466794 3066
f2a09da9
MX
3067 if (likely(uptodate))
3068 goto readpage_ok;
3069
be17b3af 3070 if (is_data_inode(inode)) {
510671d2
JB
3071 /*
3072 * If we failed to submit the IO at all we'll have a
3073 * mirror_num == 0, in which case we need to just mark
3074 * the page with an error and unlock it and carry on.
3075 */
3076 if (mirror == 0)
3077 goto readpage_ok;
3078
f4a8e656 3079 /*
150e4b05
QW
3080 * btrfs_submit_read_repair() will handle all the good
3081 * and bad sectors, we just continue to the next bvec.
f4a8e656 3082 */
150e4b05
QW
3083 submit_read_repair(inode, bio, bio_offset, page,
3084 start - page_offset(page), start,
3085 end, mirror, error_bitmap,
3086 btrfs_submit_data_bio);
3087
3088 ASSERT(bio_offset + len > bio_offset);
3089 bio_offset += len;
3090 continue;
78e62c02
NB
3091 } else {
3092 struct extent_buffer *eb;
3093
d9bb77d5 3094 eb = find_extent_buffer_readpage(fs_info, page, start);
78e62c02
NB
3095 set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
3096 eb->read_mirror = mirror;
3097 atomic_dec(&eb->io_pages);
7e38326f 3098 }
f2a09da9 3099readpage_ok:
883d0de4 3100 if (likely(uptodate)) {
a71754fc 3101 loff_t i_size = i_size_read(inode);
09cbfeaf 3102 pgoff_t end_index = i_size >> PAGE_SHIFT;
a71754fc 3103
c28ea613
QW
3104 /*
3105 * Zero out the remaining part if this range straddles
3106 * i_size.
3107 *
3108 * Here we should only zero the range inside the bvec,
3109 * not touch anything else.
3110 *
3111 * NOTE: i_size is exclusive while end is inclusive.
3112 */
3113 if (page->index == end_index && i_size <= end) {
3114 u32 zero_start = max(offset_in_page(i_size),
d2dcc8ed 3115 offset_in_page(start));
c28ea613
QW
3116
3117 zero_user_segment(page, zero_start,
3118 offset_in_page(end) + 1);
3119 }
70dec807 3120 }
7ffd27e3
QW
3121 ASSERT(bio_offset + len > bio_offset);
3122 bio_offset += len;
883d0de4 3123
e09caaf9 3124 /* Update page status and unlock */
92082d40 3125 end_page_read(page, uptodate, start, len);
94e8c95c 3126 endio_readpage_release_extent(&processed, BTRFS_I(inode),
14605409 3127 start, end, PageUptodate(page));
2c30c71b 3128 }
94e8c95c
QW
3129 /* Release the last extent */
3130 endio_readpage_release_extent(&processed, NULL, 0, 0, false);
c3a3b19b 3131 btrfs_bio_free_csum(bbio);
d1310b2e 3132 bio_put(bio);
d1310b2e
CM
3133}
3134
9be3395b 3135/*
184f999e
DS
3136 * Initialize the members up to but not including 'bio'. Use after allocating a
3137 * new bio by bio_alloc_bioset as it does not initialize the bytes outside of
3138 * 'bio' because use of __GFP_ZERO is not supported.
9be3395b 3139 */
c3a3b19b 3140static inline void btrfs_bio_init(struct btrfs_bio *bbio)
d1310b2e 3141{
c3a3b19b 3142 memset(bbio, 0, offsetof(struct btrfs_bio, bio));
184f999e 3143}
d1310b2e 3144
9be3395b 3145/*
cd8e0cca
QW
3146 * Allocate a btrfs_io_bio, with @nr_iovecs as maximum number of iovecs.
3147 *
3148 * The bio allocation is backed by bioset and does not fail.
9be3395b 3149 */
c3a3b19b 3150struct bio *btrfs_bio_alloc(unsigned int nr_iovecs)
d1310b2e
CM
3151{
3152 struct bio *bio;
d1310b2e 3153
cd8e0cca 3154 ASSERT(0 < nr_iovecs && nr_iovecs <= BIO_MAX_VECS);
609be106 3155 bio = bio_alloc_bioset(NULL, nr_iovecs, 0, GFP_NOFS, &btrfs_bioset);
c3a3b19b 3156 btrfs_bio_init(btrfs_bio(bio));
d1310b2e
CM
3157 return bio;
3158}
3159
8b6c1d56 3160struct bio *btrfs_bio_clone(struct bio *bio)
9be3395b 3161{
c3a3b19b 3162 struct btrfs_bio *bbio;
23ea8e5a 3163 struct bio *new;
9be3395b 3164
6e707bcd 3165 /* Bio allocation backed by a bioset does not fail */
abfc426d 3166 new = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOFS, &btrfs_bioset);
c3a3b19b
QW
3167 bbio = btrfs_bio(new);
3168 btrfs_bio_init(bbio);
3169 bbio->iter = bio->bi_iter;
23ea8e5a
MX
3170 return new;
3171}
9be3395b 3172
21dda654 3173struct bio *btrfs_bio_clone_partial(struct bio *orig, u64 offset, u64 size)
2f8e9140
LB
3174{
3175 struct bio *bio;
c3a3b19b 3176 struct btrfs_bio *bbio;
2f8e9140 3177
21dda654
CK
3178 ASSERT(offset <= UINT_MAX && size <= UINT_MAX);
3179
2f8e9140 3180 /* this will never fail when it's backed by a bioset */
abfc426d 3181 bio = bio_alloc_clone(orig->bi_bdev, orig, GFP_NOFS, &btrfs_bioset);
2f8e9140
LB
3182 ASSERT(bio);
3183
c3a3b19b
QW
3184 bbio = btrfs_bio(bio);
3185 btrfs_bio_init(bbio);
2f8e9140
LB
3186
3187 bio_trim(bio, offset >> 9, size >> 9);
c3a3b19b 3188 bbio->iter = bio->bi_iter;
2f8e9140
LB
3189 return bio;
3190}
9be3395b 3191
953651eb
NA
3192/**
3193 * Attempt to add a page to bio
3194 *
be8d1a2a 3195 * @bio_ctrl: record both the bio, and its bio_flags
953651eb
NA
3196 * @page: page to add to the bio
3197 * @disk_bytenr: offset of the new bio or to check whether we are adding
3198 * a contiguous page to the previous one
953651eb 3199 * @size: portion of page that we want to write
be8d1a2a 3200 * @pg_offset: starting offset in the page
953651eb 3201 * @bio_flags: flags of the current bio to see if we can merge them
953651eb
NA
3202 *
3203 * Attempt to add a page to bio considering stripe alignment etc.
3204 *
e0eefe07
QW
3205 * Return >= 0 for the number of bytes added to the bio.
3206 * Can return 0 if the current bio is already at stripe/zone boundary.
3207 * Return <0 for error.
953651eb 3208 */
e0eefe07
QW
3209static int btrfs_bio_add_page(struct btrfs_bio_ctrl *bio_ctrl,
3210 struct page *page,
3211 u64 disk_bytenr, unsigned int size,
3212 unsigned int pg_offset,
3213 unsigned long bio_flags)
953651eb 3214{
390ed29b
QW
3215 struct bio *bio = bio_ctrl->bio;
3216 u32 bio_size = bio->bi_iter.bi_size;
e0eefe07 3217 u32 real_size;
953651eb
NA
3218 const sector_t sector = disk_bytenr >> SECTOR_SHIFT;
3219 bool contig;
e1326f03 3220 int ret;
953651eb 3221
390ed29b
QW
3222 ASSERT(bio);
3223 /* The limit should be calculated when bio_ctrl->bio is allocated */
3224 ASSERT(bio_ctrl->len_to_oe_boundary && bio_ctrl->len_to_stripe_boundary);
3225 if (bio_ctrl->bio_flags != bio_flags)
e0eefe07 3226 return 0;
953651eb 3227
390ed29b 3228 if (bio_ctrl->bio_flags & EXTENT_BIO_COMPRESSED)
953651eb
NA
3229 contig = bio->bi_iter.bi_sector == sector;
3230 else
3231 contig = bio_end_sector(bio) == sector;
3232 if (!contig)
e0eefe07 3233 return 0;
953651eb 3234
e0eefe07
QW
3235 real_size = min(bio_ctrl->len_to_oe_boundary,
3236 bio_ctrl->len_to_stripe_boundary) - bio_size;
3237 real_size = min(real_size, size);
3238
3239 /*
3240 * If real_size is 0, never call bio_add_*_page(), as even size is 0,
3241 * bio will still execute its endio function on the page!
3242 */
3243 if (real_size == 0)
3244 return 0;
953651eb 3245
390ed29b 3246 if (bio_op(bio) == REQ_OP_ZONE_APPEND)
e0eefe07 3247 ret = bio_add_zone_append_page(bio, page, real_size, pg_offset);
390ed29b 3248 else
e0eefe07 3249 ret = bio_add_page(bio, page, real_size, pg_offset);
e1326f03 3250
e0eefe07 3251 return ret;
953651eb
NA
3252}
3253
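In btrfs_bio_add_page() above, the size actually added is clamped to whatever room is left before the stripe or ordered-extent boundary. A standalone worked example of that clamp (illustration only, values are made up):

#include <stdio.h>

static unsigned int min_u32(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int bio_size = 192 * 1024;		/* bytes already in the bio */
	unsigned int len_to_stripe_boundary = 256 * 1024;
	unsigned int len_to_oe_boundary = 208 * 1024;	/* ordered extent ends sooner */
	unsigned int size = 64 * 1024;			/* what the caller wants to add */
	unsigned int real_size;

	real_size = min_u32(len_to_oe_boundary, len_to_stripe_boundary) - bio_size;
	real_size = min_u32(real_size, size);

	/* prints 16384: only 16K fit before the ordered extent boundary */
	printf("%u\n", real_size);
	return 0;
}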
390ed29b 3254static int calc_bio_boundaries(struct btrfs_bio_ctrl *bio_ctrl,
939c7feb 3255 struct btrfs_inode *inode, u64 file_offset)
390ed29b
QW
3256{
3257 struct btrfs_fs_info *fs_info = inode->root->fs_info;
3258 struct btrfs_io_geometry geom;
3259 struct btrfs_ordered_extent *ordered;
3260 struct extent_map *em;
3261 u64 logical = (bio_ctrl->bio->bi_iter.bi_sector << SECTOR_SHIFT);
3262 int ret;
3263
3264 /*
3265 * Pages for compressed extent are never submitted to disk directly,
3266 * thus it has no real boundary, just set them to U32_MAX.
3267 *
3268 * The split happens for real compressed bio, which happens in
3269 * btrfs_submit_compressed_read/write().
3270 */
3271 if (bio_ctrl->bio_flags & EXTENT_BIO_COMPRESSED) {
3272 bio_ctrl->len_to_oe_boundary = U32_MAX;
3273 bio_ctrl->len_to_stripe_boundary = U32_MAX;
3274 return 0;
3275 }
3276 em = btrfs_get_chunk_map(fs_info, logical, fs_info->sectorsize);
3277 if (IS_ERR(em))
3278 return PTR_ERR(em);
3279 ret = btrfs_get_io_geometry(fs_info, em, btrfs_op(bio_ctrl->bio),
3280 logical, &geom);
3281 free_extent_map(em);
3282 if (ret < 0) {
3283 return ret;
3284 }
3285 if (geom.len > U32_MAX)
3286 bio_ctrl->len_to_stripe_boundary = U32_MAX;
3287 else
3288 bio_ctrl->len_to_stripe_boundary = (u32)geom.len;
3289
73672710 3290 if (bio_op(bio_ctrl->bio) != REQ_OP_ZONE_APPEND) {
390ed29b
QW
3291 bio_ctrl->len_to_oe_boundary = U32_MAX;
3292 return 0;
3293 }
3294
390ed29b 3295 /* Ordered extent not yet created, so we're good */
939c7feb 3296 ordered = btrfs_lookup_ordered_extent(inode, file_offset);
390ed29b
QW
3297 if (!ordered) {
3298 bio_ctrl->len_to_oe_boundary = U32_MAX;
3299 return 0;
3300 }
3301
3302 bio_ctrl->len_to_oe_boundary = min_t(u32, U32_MAX,
3303 ordered->disk_bytenr + ordered->disk_num_bytes - logical);
3304 btrfs_put_ordered_extent(ordered);
3305 return 0;
3306}
3307
e0eefe07
QW
3308static int alloc_new_bio(struct btrfs_inode *inode,
3309 struct btrfs_bio_ctrl *bio_ctrl,
3310 struct writeback_control *wbc,
3311 unsigned int opf,
3312 bio_end_io_t end_io_func,
939c7feb 3313 u64 disk_bytenr, u32 offset, u64 file_offset,
e0eefe07
QW
3314 unsigned long bio_flags)
3315{
3316 struct btrfs_fs_info *fs_info = inode->root->fs_info;
3317 struct bio *bio;
3318 int ret;
3319
c3a3b19b 3320 bio = btrfs_bio_alloc(BIO_MAX_VECS);
e0eefe07
QW
3321 /*
3322 * For compressed page range, its disk_bytenr is always @disk_bytenr
3323 * passed in, no matter if we have added any range into previous bio.
3324 */
3325 if (bio_flags & EXTENT_BIO_COMPRESSED)
cd8e0cca 3326 bio->bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
e0eefe07 3327 else
cd8e0cca 3328 bio->bi_iter.bi_sector = (disk_bytenr + offset) >> SECTOR_SHIFT;
e0eefe07
QW
3329 bio_ctrl->bio = bio;
3330 bio_ctrl->bio_flags = bio_flags;
e0eefe07
QW
3331 bio->bi_end_io = end_io_func;
3332 bio->bi_private = &inode->io_tree;
e0eefe07 3333 bio->bi_opf = opf;
939c7feb
NA
3334 ret = calc_bio_boundaries(bio_ctrl, inode, file_offset);
3335 if (ret < 0)
3336 goto error;
e0eefe07 3337
50f1cff3
CH
3338 if (wbc) {
3339 /*
3340 * For Zone append we need the correct block_device that we are
3341 * going to write to set in the bio to be able to respect the
3342 * hardware limitation. Look it up here:
3343 */
3344 if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
3345 struct btrfs_device *dev;
3346
3347 dev = btrfs_zoned_get_device(fs_info, disk_bytenr,
3348 fs_info->sectorsize);
3349 if (IS_ERR(dev)) {
3350 ret = PTR_ERR(dev);
3351 goto error;
3352 }
e0eefe07 3353
50f1cff3
CH
3354 bio_set_dev(bio, dev->bdev);
3355 } else {
3356 /*
3357 * Otherwise pick the last added device to support
3358 * cgroup writeback. For multi-device file systems this
3359 * means blk-cgroup policies have to always be set on the
3360 * last added/replaced device. This is a bit odd but has
3361 * been like that for a long time.
3362 */
3363 bio_set_dev(bio, fs_info->fs_devices->latest_dev->bdev);
e0eefe07 3364 }
50f1cff3
CH
3365 wbc_init_bio(wbc, bio);
3366 } else {
3367 ASSERT(bio_op(bio) != REQ_OP_ZONE_APPEND);
e0eefe07
QW
3368 }
3369 return 0;
3370error:
3371 bio_ctrl->bio = NULL;
3372 bio->bi_status = errno_to_blk_status(ret);
3373 bio_endio(bio);
3374 return ret;
3375}
3376
4b81ba48
DS
3377/*
3378 * @opf: bio REQ_OP_* and REQ_* flags as one value
b8b3d625
DS
3379 * @wbc: optional writeback control for io accounting
3380 * @page: page to add to the bio
0c64c33c
QW
3381 * @disk_bytenr: logical bytenr where the write will be
3382 * @size: portion of page that we want to write to
b8b3d625
DS
3383 * @pg_offset: offset of the new bio or to check whether we are adding
3384 * a contiguous page to the previous one
5c2b1fd7 3385 * @bio_ctrl: must be a valid pointer, the newly allocated bio will be stored there
b8b3d625
DS
3386 * @end_io_func: end_io callback for new bio
3387 * @mirror_num: desired mirror to read/write
3388 * @prev_bio_flags: flags of previous bio to see if we can merge the current one
3389 * @bio_flags: flags of the current bio to see if we can merge them
4b81ba48 3390 */
0ceb34bf 3391static int submit_extent_page(unsigned int opf,
da2f0f74 3392 struct writeback_control *wbc,
390ed29b 3393 struct btrfs_bio_ctrl *bio_ctrl,
0c64c33c 3394 struct page *page, u64 disk_bytenr,
6c5a4e2c 3395 size_t size, unsigned long pg_offset,
f188591e 3396 bio_end_io_t end_io_func,
c8b97818 3397 int mirror_num,
005efedf
FM
3398 unsigned long bio_flags,
3399 bool force_bio_submit)
d1310b2e
CM
3400{
3401 int ret = 0;
e1326f03 3402 struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
e0eefe07 3403 unsigned int cur = pg_offset;
d1310b2e 3404
390ed29b 3405 ASSERT(bio_ctrl);
5c2b1fd7 3406
390ed29b
QW
3407 ASSERT(pg_offset < PAGE_SIZE && size <= PAGE_SIZE &&
3408 pg_offset + size <= PAGE_SIZE);
e0eefe07
QW
3409 if (force_bio_submit && bio_ctrl->bio) {
3410 ret = submit_one_bio(bio_ctrl->bio, mirror_num, bio_ctrl->bio_flags);
3411 bio_ctrl->bio = NULL;
3412 if (ret < 0)
3413 return ret;
3414 }
3415
3416 while (cur < pg_offset + size) {
3417 u32 offset = cur - pg_offset;
3418 int added;
3419
3420 /* Allocate new bio if needed */
3421 if (!bio_ctrl->bio) {
3422 ret = alloc_new_bio(inode, bio_ctrl, wbc, opf,
3423 end_io_func, disk_bytenr, offset,
939c7feb 3424 page_offset(page) + cur,
e0eefe07
QW
3425 bio_flags);
3426 if (ret < 0)
3427 return ret;
3428 }
3429 /*
3430 * We must go through btrfs_bio_add_page() to ensure each
3431 * page range won't cross various boundaries.
3432 */
3433 if (bio_flags & EXTENT_BIO_COMPRESSED)
3434 added = btrfs_bio_add_page(bio_ctrl, page, disk_bytenr,
3435 size - offset, pg_offset + offset,
3436 bio_flags);
3437 else
3438 added = btrfs_bio_add_page(bio_ctrl, page,
3439 disk_bytenr + offset, size - offset,
3440 pg_offset + offset, bio_flags);
3441
3442 /* Metadata page range should never be split */
3443 if (!is_data_inode(&inode->vfs_inode))
3444 ASSERT(added == 0 || added == size - offset);
3445
 3446 /* At least we added some page, update the accounting */
3447 if (wbc && added)
3448 wbc_account_cgroup_owner(wbc, page, added);
3449
3450 /* We have reached boundary, submit right now */
3451 if (added < size - offset) {
3452 /* The bio should contain some page(s) */
3453 ASSERT(bio_ctrl->bio->bi_iter.bi_size);
3454 ret = submit_one_bio(bio_ctrl->bio, mirror_num,
3455 bio_ctrl->bio_flags);
390ed29b
QW
3456 bio_ctrl->bio = NULL;
3457 if (ret < 0)
79787eaa 3458 return ret;
d1310b2e 3459 }
e0eefe07 3460 cur += added;
d1310b2e 3461 }
e0eefe07 3462 return 0;
d1310b2e
CM
3463}
3464
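/*
 * Illustrative note, not part of the original file: the loop above lets
 * btrfs_bio_add_page() decide how much of the requested range actually
 * fits before a stripe/ordered-extent boundary.  Assumed example: for a
 * 16K range where only 4K is left before len_to_stripe_boundary, the
 * first iteration adds 4K (so added < size - offset), the partially
 * filled bio is submitted, a fresh bio is allocated on the next
 * iteration and the remaining 12K goes into it.
 */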
760f991f
QW
3465static int attach_extent_buffer_page(struct extent_buffer *eb,
3466 struct page *page,
3467 struct btrfs_subpage *prealloc)
d1310b2e 3468{
760f991f
QW
3469 struct btrfs_fs_info *fs_info = eb->fs_info;
3470 int ret = 0;
3471
0d01e247
QW
3472 /*
3473 * If the page is mapped to btree inode, we should hold the private
3474 * lock to prevent race.
3475 * For cloned or dummy extent buffers, their pages are not mapped and
3476 * will not race with any other ebs.
3477 */
3478 if (page->mapping)
3479 lockdep_assert_held(&page->mapping->private_lock);
3480
760f991f
QW
3481 if (fs_info->sectorsize == PAGE_SIZE) {
3482 if (!PagePrivate(page))
3483 attach_page_private(page, eb);
3484 else
3485 WARN_ON(page->private != (unsigned long)eb);
3486 return 0;
3487 }
3488
3489 /* Already mapped, just free prealloc */
3490 if (PagePrivate(page)) {
3491 btrfs_free_subpage(prealloc);
3492 return 0;
3493 }
3494
3495 if (prealloc)
3496 /* Has preallocated memory for subpage */
3497 attach_page_private(page, prealloc);
d1b89bc0 3498 else
760f991f
QW
3499 /* Do new allocation to attach subpage */
3500 ret = btrfs_attach_subpage(fs_info, page,
3501 BTRFS_SUBPAGE_METADATA);
3502 return ret;
d1310b2e
CM
3503}
3504
32443de3 3505int set_page_extent_mapped(struct page *page)
d1310b2e 3506{
32443de3
QW
3507 struct btrfs_fs_info *fs_info;
3508
3509 ASSERT(page->mapping);
3510
3511 if (PagePrivate(page))
3512 return 0;
3513
3514 fs_info = btrfs_sb(page->mapping->host->i_sb);
3515
3516 if (fs_info->sectorsize < PAGE_SIZE)
3517 return btrfs_attach_subpage(fs_info, page, BTRFS_SUBPAGE_DATA);
3518
3519 attach_page_private(page, (void *)EXTENT_PAGE_PRIVATE);
3520 return 0;
3521}
3522
3523void clear_page_extent_mapped(struct page *page)
3524{
3525 struct btrfs_fs_info *fs_info;
3526
3527 ASSERT(page->mapping);
3528
d1b89bc0 3529 if (!PagePrivate(page))
32443de3
QW
3530 return;
3531
3532 fs_info = btrfs_sb(page->mapping->host->i_sb);
3533 if (fs_info->sectorsize < PAGE_SIZE)
3534 return btrfs_detach_subpage(fs_info, page);
3535
3536 detach_page_private(page);
d1310b2e
CM
3537}
3538
125bac01
MX
3539static struct extent_map *
3540__get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
1a5ee1e6 3541 u64 start, u64 len, struct extent_map **em_cached)
125bac01
MX
3542{
3543 struct extent_map *em;
3544
3545 if (em_cached && *em_cached) {
3546 em = *em_cached;
cbc0e928 3547 if (extent_map_in_tree(em) && start >= em->start &&
125bac01 3548 start < extent_map_end(em)) {
490b54d6 3549 refcount_inc(&em->refs);
125bac01
MX
3550 return em;
3551 }
3552
3553 free_extent_map(em);
3554 *em_cached = NULL;
3555 }
3556
1a5ee1e6 3557 em = btrfs_get_extent(BTRFS_I(inode), page, pg_offset, start, len);
c0347550 3558 if (em_cached && !IS_ERR(em)) {
125bac01 3559 BUG_ON(*em_cached);
490b54d6 3560 refcount_inc(&em->refs);
125bac01
MX
3561 *em_cached = em;
3562 }
3563 return em;
3564}
d1310b2e
CM
3565/*
3566 * basic readpage implementation. Locked extent state structs are inserted
3567 * into the tree that are removed when the IO is done (by the end_io
3568 * handlers)
79787eaa 3569 * XXX JDM: This needs looking at to ensure proper page locking
baf863b9 3570 * return 0 on success, otherwise return error
d1310b2e 3571 */
0f208812 3572int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
390ed29b 3573 struct btrfs_bio_ctrl *bio_ctrl,
0f208812 3574 unsigned int read_flags, u64 *prev_em_start)
d1310b2e
CM
3575{
3576 struct inode *inode = page->mapping->host;
92082d40 3577 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4eee4fa4 3578 u64 start = page_offset(page);
8eec8296 3579 const u64 end = start + PAGE_SIZE - 1;
d1310b2e
CM
3580 u64 cur = start;
3581 u64 extent_offset;
3582 u64 last_byte = i_size_read(inode);
3583 u64 block_start;
3584 u64 cur_end;
d1310b2e 3585 struct extent_map *em;
baf863b9 3586 int ret = 0;
306e16ce 3587 size_t pg_offset = 0;
d1310b2e
CM
3588 size_t iosize;
3589 size_t blocksize = inode->i_sb->s_blocksize;
f657a31c 3590 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
ae6957eb 3591
32443de3
QW
3592 ret = set_page_extent_mapped(page);
3593 if (ret < 0) {
3594 unlock_extent(tree, start, end);
92082d40
QW
3595 btrfs_page_set_error(fs_info, page, start, PAGE_SIZE);
3596 unlock_page(page);
32443de3
QW
3597 goto out;
3598 }
d1310b2e 3599
09cbfeaf 3600 if (page->index == last_byte >> PAGE_SHIFT) {
7073017a 3601 size_t zero_offset = offset_in_page(last_byte);
c8b97818
CM
3602
3603 if (zero_offset) {
09cbfeaf 3604 iosize = PAGE_SIZE - zero_offset;
d048b9c2 3605 memzero_page(page, zero_offset, iosize);
c8b97818 3606 flush_dcache_page(page);
c8b97818
CM
3607 }
3608 }
92082d40 3609 begin_page_read(fs_info, page);
d1310b2e 3610 while (cur <= end) {
4c37a793 3611 unsigned long this_bio_flag = 0;
005efedf 3612 bool force_bio_submit = false;
0c64c33c 3613 u64 disk_bytenr;
c8f2f24b 3614
6a404910 3615 ASSERT(IS_ALIGNED(cur, fs_info->sectorsize));
d1310b2e 3616 if (cur >= last_byte) {
507903b8
AJ
3617 struct extent_state *cached = NULL;
3618
09cbfeaf 3619 iosize = PAGE_SIZE - pg_offset;
d048b9c2 3620 memzero_page(page, pg_offset, iosize);
d1310b2e 3621 flush_dcache_page(page);
d1310b2e 3622 set_extent_uptodate(tree, cur, cur + iosize - 1,
507903b8 3623 &cached, GFP_NOFS);
7f042a83 3624 unlock_extent_cached(tree, cur,
e43bbe5e 3625 cur + iosize - 1, &cached);
92082d40 3626 end_page_read(page, true, cur, iosize);
d1310b2e
CM
3627 break;
3628 }
125bac01 3629 em = __get_extent_map(inode, page, pg_offset, cur,
1a5ee1e6 3630 end - cur + 1, em_cached);
c0347550 3631 if (IS_ERR(em)) {
7f042a83 3632 unlock_extent(tree, cur, end);
92082d40 3633 end_page_read(page, false, cur, end + 1 - cur);
bbf0ea7e 3634 ret = PTR_ERR(em);
d1310b2e
CM
3635 break;
3636 }
d1310b2e
CM
3637 extent_offset = cur - em->start;
3638 BUG_ON(extent_map_end(em) <= cur);
3639 BUG_ON(end < cur);
3640
261507a0 3641 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
4b384318 3642 this_bio_flag |= EXTENT_BIO_COMPRESSED;
261507a0
LZ
3643 extent_set_compress_type(&this_bio_flag,
3644 em->compress_type);
3645 }
c8b97818 3646
d1310b2e
CM
3647 iosize = min(extent_map_end(em) - cur, end - cur + 1);
3648 cur_end = min(extent_map_end(em) - 1, end);
fda2832f 3649 iosize = ALIGN(iosize, blocksize);
949b3273 3650 if (this_bio_flag & EXTENT_BIO_COMPRESSED)
0c64c33c 3651 disk_bytenr = em->block_start;
949b3273 3652 else
0c64c33c 3653 disk_bytenr = em->block_start + extent_offset;
d1310b2e 3654 block_start = em->block_start;
d899e052
YZ
3655 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
3656 block_start = EXTENT_MAP_HOLE;
005efedf
FM
3657
3658 /*
3659 * If we have a file range that points to a compressed extent
260db43c 3660 * and it's followed by a consecutive file range that points
005efedf
FM
3661 * to the same compressed extent (possibly with a different
3662 * offset and/or length, so it either points to the whole extent
3663 * or only part of it), we must make sure we do not submit a
3664 * single bio to populate the pages for the 2 ranges because
3665 * this makes the compressed extent read zero out the pages
3666 * belonging to the 2nd range. Imagine the following scenario:
3667 *
3668 * File layout
3669 * [0 - 8K] [8K - 24K]
3670 * | |
3671 * | |
3672 * points to extent X, points to extent X,
3673 * offset 4K, length of 8K offset 0, length 16K
3674 *
3675 * [extent X, compressed length = 4K uncompressed length = 16K]
3676 *
3677 * If the bio to read the compressed extent covers both ranges,
3678 * it will decompress extent X into the pages belonging to the
3679 * first range and then it will stop, zeroing out the remaining
3680 * pages that belong to the other range that points to extent X.
3681 * So here we make sure we submit 2 bios, one for the first
 3682 * range and another one for the second range. Both will target
3683 * the same physical extent from disk, but we can't currently
3684 * make the compressed bio endio callback populate the pages
3685 * for both ranges because each compressed bio is tightly
3686 * coupled with a single extent map, and each range can have
3687 * an extent map with a different offset value relative to the
3688 * uncompressed data of our extent and different lengths. This
3689 * is a corner case so we prioritize correctness over
3690 * non-optimal behavior (submitting 2 bios for the same extent).
3691 */
3692 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) &&
3693 prev_em_start && *prev_em_start != (u64)-1 &&
8e928218 3694 *prev_em_start != em->start)
005efedf
FM
3695 force_bio_submit = true;
3696
3697 if (prev_em_start)
8e928218 3698 *prev_em_start = em->start;
005efedf 3699
d1310b2e
CM
3700 free_extent_map(em);
3701 em = NULL;
3702
3703 /* we've found a hole, just zero and go on */
3704 if (block_start == EXTENT_MAP_HOLE) {
507903b8
AJ
3705 struct extent_state *cached = NULL;
3706
d048b9c2 3707 memzero_page(page, pg_offset, iosize);
d1310b2e 3708 flush_dcache_page(page);
d1310b2e
CM
3709
3710 set_extent_uptodate(tree, cur, cur + iosize - 1,
507903b8 3711 &cached, GFP_NOFS);
7f042a83 3712 unlock_extent_cached(tree, cur,
e43bbe5e 3713 cur + iosize - 1, &cached);
92082d40 3714 end_page_read(page, true, cur, iosize);
d1310b2e 3715 cur = cur + iosize;
306e16ce 3716 pg_offset += iosize;
d1310b2e
CM
3717 continue;
3718 }
3719 /* the get_extent function already copied into the page */
9655d298
CM
3720 if (test_range_bit(tree, cur, cur_end,
3721 EXTENT_UPTODATE, 1, NULL)) {
7f042a83 3722 unlock_extent(tree, cur, cur + iosize - 1);
92082d40 3723 end_page_read(page, true, cur, iosize);
d1310b2e 3724 cur = cur + iosize;
306e16ce 3725 pg_offset += iosize;
d1310b2e
CM
3726 continue;
3727 }
70dec807
CM
3728 /* we have an inline extent but it didn't get marked up
3729 * to date. Error out
3730 */
3731 if (block_start == EXTENT_MAP_INLINE) {
7f042a83 3732 unlock_extent(tree, cur, cur + iosize - 1);
92082d40 3733 end_page_read(page, false, cur, iosize);
70dec807 3734 cur = cur + iosize;
306e16ce 3735 pg_offset += iosize;
70dec807
CM
3736 continue;
3737 }
d1310b2e 3738
0ceb34bf 3739 ret = submit_extent_page(REQ_OP_READ | read_flags, NULL,
390ed29b
QW
3740 bio_ctrl, page, disk_bytenr, iosize,
3741 pg_offset,
fd513000 3742 end_bio_extent_readpage, 0,
005efedf
FM
3743 this_bio_flag,
3744 force_bio_submit);
ad3fc794 3745 if (ret) {
7f042a83 3746 unlock_extent(tree, cur, cur + iosize - 1);
92082d40 3747 end_page_read(page, false, cur, iosize);
baf863b9 3748 goto out;
edd33c99 3749 }
d1310b2e 3750 cur = cur + iosize;
306e16ce 3751 pg_offset += iosize;
d1310b2e 3752 }
90a887c9 3753out:
baf863b9 3754 return ret;
d1310b2e
CM
3755}
3756
b6660e80 3757static inline void contiguous_readpages(struct page *pages[], int nr_pages,
390ed29b
QW
3758 u64 start, u64 end,
3759 struct extent_map **em_cached,
3760 struct btrfs_bio_ctrl *bio_ctrl,
3761 u64 *prev_em_start)
9974090b 3762{
23d31bd4 3763 struct btrfs_inode *inode = BTRFS_I(pages[0]->mapping->host);
9974090b
MX
3764 int index;
3765
b272ae22 3766 btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
9974090b
MX
3767
3768 for (index = 0; index < nr_pages; index++) {
390ed29b 3769 btrfs_do_readpage(pages[index], em_cached, bio_ctrl,
0f208812 3770 REQ_RAHEAD, prev_em_start);
09cbfeaf 3771 put_page(pages[index]);
9974090b
MX
3772 }
3773}
3774
3d4b9496 3775static void update_nr_written(struct writeback_control *wbc,
a9132667 3776 unsigned long nr_written)
11c8349b
CM
3777{
3778 wbc->nr_to_write -= nr_written;
11c8349b
CM
3779}
3780
d1310b2e 3781/*
40f76580
CM
3782 * helper for __extent_writepage, doing all of the delayed allocation setup.
3783 *
5eaad97a 3784 * This returns 1 if btrfs_run_delalloc_range() did all the work required
40f76580
CM
3785 * to write the page (copy into inline extent). In this case the IO has
3786 * been started and the page is already unlocked.
3787 *
3788 * This returns 0 if all went well (page still locked)
3789 * This returns < 0 if there were errors (page still locked)
d1310b2e 3790 */
cd4c0bf9 3791static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
83f1b680 3792 struct page *page, struct writeback_control *wbc)
40f76580 3793{
2749f7ef 3794 const u64 page_end = page_offset(page) + PAGE_SIZE - 1;
cf3075fb 3795 u64 delalloc_start = page_offset(page);
40f76580 3796 u64 delalloc_to_write = 0;
83f1b680
QW
3797 /* How many pages are started by btrfs_run_delalloc_range() */
3798 unsigned long nr_written = 0;
40f76580
CM
3799 int ret;
3800 int page_started = 0;
3801
2749f7ef
QW
3802 while (delalloc_start < page_end) {
3803 u64 delalloc_end = page_end;
3804 bool found;
40f76580 3805
cd4c0bf9 3806 found = find_lock_delalloc_range(&inode->vfs_inode, page,
40f76580 3807 &delalloc_start,
917aacec 3808 &delalloc_end);
3522e903 3809 if (!found) {
40f76580
CM
3810 delalloc_start = delalloc_end + 1;
3811 continue;
3812 }
cd4c0bf9 3813 ret = btrfs_run_delalloc_range(inode, page, delalloc_start,
83f1b680 3814 delalloc_end, &page_started, &nr_written, wbc);
40f76580 3815 if (ret) {
963e4db8
QW
3816 btrfs_page_set_error(inode->root->fs_info, page,
3817 page_offset(page), PAGE_SIZE);
7361b4ae 3818 return ret;
40f76580
CM
3819 }
3820 /*
ea1754a0
KS
3821 * delalloc_end is already one less than the total length, so
3822 * we don't subtract one from PAGE_SIZE
40f76580
CM
3823 */
3824 delalloc_to_write += (delalloc_end - delalloc_start +
ea1754a0 3825 PAGE_SIZE) >> PAGE_SHIFT;
40f76580
CM
3826 delalloc_start = delalloc_end + 1;
3827 }
3828 if (wbc->nr_to_write < delalloc_to_write) {
3829 int thresh = 8192;
3830
3831 if (delalloc_to_write < thresh * 2)
3832 thresh = delalloc_to_write;
3833 wbc->nr_to_write = min_t(u64, delalloc_to_write,
3834 thresh);
3835 }
3836
83f1b680 3837 /* Did btrfs_run_delalloc_range() already unlock and start the IO? */
40f76580
CM
3838 if (page_started) {
3839 /*
83f1b680
QW
3840 * We've unlocked the page, so we can't update the mapping's
3841 * writeback index, just update nr_to_write.
40f76580 3842 */
83f1b680 3843 wbc->nr_to_write -= nr_written;
40f76580
CM
3844 return 1;
3845 }
3846
b69d1ee9 3847 return 0;
40f76580
CM
3848}
3849
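/*
 * Worked example (illustrative only, not part of the original file) for
 * the delalloc_to_write accounting above: delalloc_end is inclusive, so
 * for a 128 KiB delalloc range starting at offset 0 with 4 KiB pages,
 * (delalloc_end - delalloc_start + PAGE_SIZE) >> PAGE_SHIFT is
 * (131071 + 4096) >> 12 = 32, i.e. exactly the 32 pages covered by the
 * range, without adding or subtracting 1 separately.
 */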
c5ef5c6c
QW
3850/*
3851 * Find the first byte we need to write.
3852 *
3853 * For subpage, one page can contain several sectors, and
3854 * __extent_writepage_io() will just grab all extent maps in the page
3855 * range and try to submit all non-inline/non-compressed extents.
3856 *
3857 * This is a big problem for subpage, we shouldn't re-submit already written
3858 * data at all.
3859 * This function will lookup subpage dirty bit to find which range we really
3860 * need to submit.
3861 *
3862 * Return the next dirty range in [@start, @end).
3863 * If no dirty range is found, @start will be page_offset(page) + PAGE_SIZE.
3864 */
3865static void find_next_dirty_byte(struct btrfs_fs_info *fs_info,
3866 struct page *page, u64 *start, u64 *end)
3867{
3868 struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
72a69cd0 3869 struct btrfs_subpage_info *spi = fs_info->subpage_info;
c5ef5c6c
QW
3870 u64 orig_start = *start;
3871 /* Declare as unsigned long so we can use bitmap ops */
c5ef5c6c 3872 unsigned long flags;
72a69cd0 3873 int range_start_bit;
c5ef5c6c
QW
3874 int range_end_bit;
3875
3876 /*
3877 * For regular sector size == page size case, since one page only
3878 * contains one sector, we return the page offset directly.
3879 */
3880 if (fs_info->sectorsize == PAGE_SIZE) {
3881 *start = page_offset(page);
3882 *end = page_offset(page) + PAGE_SIZE;
3883 return;
3884 }
3885
72a69cd0
QW
3886 range_start_bit = spi->dirty_offset +
3887 (offset_in_page(orig_start) >> fs_info->sectorsize_bits);
3888
c5ef5c6c
QW
3889 /* We should have the page locked, but just in case */
3890 spin_lock_irqsave(&subpage->lock, flags);
72a69cd0
QW
3891 bitmap_next_set_region(subpage->bitmaps, &range_start_bit, &range_end_bit,
3892 spi->dirty_offset + spi->bitmap_nr_bits);
c5ef5c6c
QW
3893 spin_unlock_irqrestore(&subpage->lock, flags);
3894
72a69cd0
QW
3895 range_start_bit -= spi->dirty_offset;
3896 range_end_bit -= spi->dirty_offset;
3897
c5ef5c6c
QW
3898 *start = page_offset(page) + range_start_bit * fs_info->sectorsize;
3899 *end = page_offset(page) + range_end_bit * fs_info->sectorsize;
3900}
3901
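/*
 * Worked example (illustrative only, not part of the original file) for
 * the subpage bit math above, assuming a 64 KiB page and 4 KiB sectors
 * (sectorsize_bits = 12, 16 dirty bits per page): a search starting at
 * page offset 24 KiB begins at bit 24K >> 12 = 6; if the first set
 * region in the dirty bitmap spans bits 8..11, the function returns
 * *start = page_offset(page) + 32 KiB and *end = page_offset(page) + 48 KiB.
 */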
40f76580
CM
3902/*
3903 * helper for __extent_writepage. This calls the writepage start hooks,
3904 * and does the loop to map the page into extents and bios.
3905 *
3906 * We return 1 if the IO is started and the page is unlocked,
3907 * 0 if all went well (page still locked)
3908 * < 0 if there were errors (page still locked)
3909 */
d4580fe2 3910static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
40f76580
CM
3911 struct page *page,
3912 struct writeback_control *wbc,
3913 struct extent_page_data *epd,
3914 loff_t i_size,
57e5ffeb 3915 int *nr_ret)
d1310b2e 3916{
6bc5636a 3917 struct btrfs_fs_info *fs_info = inode->root->fs_info;
a129ffb8
QW
3918 u64 cur = page_offset(page);
3919 u64 end = cur + PAGE_SIZE - 1;
d1310b2e 3920 u64 extent_offset;
d1310b2e 3921 u64 block_start;
d1310b2e 3922 struct extent_map *em;
40f76580
CM
3923 int ret = 0;
3924 int nr = 0;
d8e3fb10 3925 u32 opf = REQ_OP_WRITE;
57e5ffeb 3926 const unsigned int write_flags = wbc_to_write_flags(wbc);
40f76580 3927 bool compressed;
c8b97818 3928
a129ffb8 3929 ret = btrfs_writepage_cow_fixup(page);
d75855b4
NB
3930 if (ret) {
3931 /* Fixup worker will requeue */
5ab58055 3932 redirty_page_for_writepage(wbc, page);
d75855b4
NB
3933 unlock_page(page);
3934 return 1;
247e743c
CM
3935 }
3936
11c8349b
CM
3937 /*
3938 * we don't want to touch the inode after unlocking the page,
3939 * so we update the mapping writeback index now
3940 */
83f1b680 3941 update_nr_written(wbc, 1);
771ed689 3942
d1310b2e 3943 while (cur <= end) {
0c64c33c 3944 u64 disk_bytenr;
40f76580 3945 u64 em_end;
c5ef5c6c
QW
3946 u64 dirty_range_start = cur;
3947 u64 dirty_range_end;
6bc5636a 3948 u32 iosize;
58409edd 3949
40f76580 3950 if (cur >= i_size) {
38a39ac7 3951 btrfs_writepage_endio_finish_ordered(inode, page, cur,
25c1252a 3952 end, true);
cc1d0d93
QW
3953 /*
3954 * This range is beyond i_size, thus we don't need to
3955 * bother writing back.
3956 * But we still need to clear the dirty subpage bit, or
3957 * the next time the page gets dirtied, we will try to
3958 * writeback the sectors with subpage dirty bits,
3959 * causing writeback without ordered extent.
3960 */
3961 btrfs_page_clear_dirty(fs_info, page, cur, end + 1 - cur);
d1310b2e
CM
3962 break;
3963 }
c5ef5c6c
QW
3964
3965 find_next_dirty_byte(fs_info, page, &dirty_range_start,
3966 &dirty_range_end);
3967 if (cur < dirty_range_start) {
3968 cur = dirty_range_start;
3969 continue;
3970 }
3971
d4580fe2 3972 em = btrfs_get_extent(inode, NULL, 0, cur, end - cur + 1);
c0347550 3973 if (IS_ERR(em)) {
c5ef5c6c 3974 btrfs_page_set_error(fs_info, page, cur, end - cur + 1);
61391d56 3975 ret = PTR_ERR_OR_ZERO(em);
d1310b2e
CM
3976 break;
3977 }
3978
3979 extent_offset = cur - em->start;
40f76580 3980 em_end = extent_map_end(em);
6bc5636a
QW
3981 ASSERT(cur <= em_end);
3982 ASSERT(cur < end);
3983 ASSERT(IS_ALIGNED(em->start, fs_info->sectorsize));
3984 ASSERT(IS_ALIGNED(em->len, fs_info->sectorsize));
d1310b2e 3985 block_start = em->block_start;
c8b97818 3986 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
6bc5636a
QW
3987 disk_bytenr = em->block_start + extent_offset;
3988
c5ef5c6c
QW
3989 /*
3990 * Note that em_end from extent_map_end() and dirty_range_end from
3991 * find_next_dirty_byte() are all exclusive
3992 */
3993 iosize = min(min(em_end, end + 1), dirty_range_end) - cur;
d8e3fb10 3994
e380adfc 3995 if (btrfs_use_zone_append(inode, em->block_start))
d8e3fb10
NA
3996 opf = REQ_OP_ZONE_APPEND;
3997
d1310b2e
CM
3998 free_extent_map(em);
3999 em = NULL;
4000
c8b97818
CM
4001 /*
4002 * compressed and inline extents are written through other
4003 * paths in the FS
4004 */
4005 if (compressed || block_start == EXTENT_MAP_HOLE ||
d1310b2e 4006 block_start == EXTENT_MAP_INLINE) {
c8b04030 4007 if (compressed)
c8b97818 4008 nr++;
c8b04030 4009 else
38a39ac7 4010 btrfs_writepage_endio_finish_ordered(inode,
25c1252a 4011 page, cur, cur + iosize - 1, true);
cc1d0d93 4012 btrfs_page_clear_dirty(fs_info, page, cur, iosize);
c8b97818 4013 cur += iosize;
d1310b2e
CM
4014 continue;
4015 }
c8b97818 4016
d2a91064 4017 btrfs_set_range_writeback(inode, cur, cur + iosize - 1);
58409edd 4018 if (!PageWriteback(page)) {
d4580fe2 4019 btrfs_err(inode->root->fs_info,
58409edd
DS
4020 "page %lu not writeback, cur %llu end %llu",
4021 page->index, cur, end);
d1310b2e 4022 }
7f3c74fb 4023
c5ef5c6c
QW
4024 /*
4025 * Although the PageDirty bit is cleared before entering this
4026 * function, subpage dirty bit is not cleared.
4027 * So clear subpage dirty bit here so next time we won't submit
4028 * page for range already written to disk.
4029 */
4030 btrfs_page_clear_dirty(fs_info, page, cur, iosize);
4031
390ed29b
QW
4032 ret = submit_extent_page(opf | write_flags, wbc,
4033 &epd->bio_ctrl, page,
d8e3fb10 4034 disk_bytenr, iosize,
390ed29b 4035 cur - page_offset(page),
58409edd 4036 end_bio_extent_writepage,
390ed29b 4037 0, 0, false);
fe01aa65 4038 if (ret) {
c5ef5c6c 4039 btrfs_page_set_error(fs_info, page, cur, iosize);
fe01aa65 4040 if (PageWriteback(page))
c5ef5c6c
QW
4041 btrfs_page_clear_writeback(fs_info, page, cur,
4042 iosize);
fe01aa65 4043 }
d1310b2e 4044
6bc5636a 4045 cur += iosize;
d1310b2e
CM
4046 nr++;
4047 }
cc1d0d93
QW
4048 /*
4049 * If we finish without problem, we should not only clear page dirty,
4050 * but also empty subpage dirty bits
4051 */
4052 if (!ret)
4053 btrfs_page_assert_not_dirty(fs_info, page);
40f76580 4054 *nr_ret = nr;
40f76580
CM
4055 return ret;
4056}
4057
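/*
 * Illustrative example (assumed numbers, not part of the original file)
 * for the iosize computation in __extent_writepage_io(): with
 * cur = page_offset(page) + 16K, an extent map ending at +40K, the page
 * ending at +64K (so end + 1 = +64K) and the current subpage dirty range
 * ending at +32K, iosize = min(min(40K, 64K), 32K) - 16K = 16K, i.e. the
 * submission never crosses the extent map, the page or the dirty range.
 */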
4058/*
4059 * the writepage semantics are similar to regular writepage. extent
4060 * records are inserted to lock ranges in the tree, and as dirty areas
4061 * are found, they are marked writeback. Then the lock bits are removed
4062 * and the end_io handler clears the writeback ranges
3065976b
QW
4063 *
4064 * Return 0 if everything goes well.
4065 * Return <0 for error.
40f76580
CM
4066 */
4067static int __extent_writepage(struct page *page, struct writeback_control *wbc,
aab6e9ed 4068 struct extent_page_data *epd)
40f76580 4069{
8e1dec8e 4070 struct folio *folio = page_folio(page);
40f76580 4071 struct inode *inode = page->mapping->host;
e55a0de1 4072 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
cf3075fb
QW
4073 const u64 page_start = page_offset(page);
4074 const u64 page_end = page_start + PAGE_SIZE - 1;
40f76580
CM
4075 int ret;
4076 int nr = 0;
eb70d222 4077 size_t pg_offset;
40f76580 4078 loff_t i_size = i_size_read(inode);
09cbfeaf 4079 unsigned long end_index = i_size >> PAGE_SHIFT;
40f76580 4080
40f76580
CM
4081 trace___extent_writepage(page, inode, wbc);
4082
4083 WARN_ON(!PageLocked(page));
4084
963e4db8
QW
4085 btrfs_page_clear_error(btrfs_sb(inode->i_sb), page,
4086 page_offset(page), PAGE_SIZE);
40f76580 4087
7073017a 4088 pg_offset = offset_in_page(i_size);
40f76580
CM
4089 if (page->index > end_index ||
4090 (page->index == end_index && !pg_offset)) {
8e1dec8e
MWO
4091 folio_invalidate(folio, 0, folio_size(folio));
4092 folio_unlock(folio);
40f76580
CM
4093 return 0;
4094 }
4095
4096 if (page->index == end_index) {
d048b9c2 4097 memzero_page(page, pg_offset, PAGE_SIZE - pg_offset);
40f76580
CM
4098 flush_dcache_page(page);
4099 }
4100
32443de3
QW
4101 ret = set_page_extent_mapped(page);
4102 if (ret < 0) {
4103 SetPageError(page);
4104 goto done;
4105 }
40f76580 4106
7789a55a 4107 if (!epd->extent_locked) {
83f1b680 4108 ret = writepage_delalloc(BTRFS_I(inode), page, wbc);
7789a55a 4109 if (ret == 1)
169d2c87 4110 return 0;
7789a55a
NB
4111 if (ret)
4112 goto done;
4113 }
40f76580 4114
d4580fe2 4115 ret = __extent_writepage_io(BTRFS_I(inode), page, wbc, epd, i_size,
83f1b680 4116 &nr);
40f76580 4117 if (ret == 1)
169d2c87 4118 return 0;
40f76580 4119
d1310b2e
CM
4120done:
4121 if (nr == 0) {
4122 /* make sure the mapping tag for page dirty gets cleared */
4123 set_page_writeback(page);
4124 end_page_writeback(page);
4125 }
963e4db8
QW
4126 /*
4127 * Here we used to have a check for PageError() and then set @ret and
4128 * call end_extent_writepage().
4129 *
4130 * But in fact setting @ret here will cause different error paths
4131 * between subpage and regular sectorsize.
4132 *
4133 * For regular page size, we never submit current page, but only add
4134 * current page to current bio.
4135 * The bio submission can only happen in next page.
4136 * Thus if we hit the PageError() branch, @ret is already set to
4137 * non-zero value and will not get updated for regular sectorsize.
4138 *
4139 * But for subpage case, it's possible we submit part of current page,
4140 * thus can get PageError() set by submitted bio of the same page,
4141 * while our @ret is still 0.
4142 *
4143 * So here we unify the behavior and don't set @ret.
4144 * Error can still be properly passed to higher layer as page will
4145 * be set error, here we just don't handle the IO failure.
4146 *
4147 * NOTE: This is just a hotfix for subpage.
4148 * The root fix will be properly ending ordered extent when we hit
4149 * an error during writeback.
4150 *
4151 * But that needs a bigger refactoring, as we not only need to grab the
4152 * submitted OE, but also need to know exactly at which bytenr we hit
4153 * the error.
4154 * Currently the full page based __extent_writepage_io() is not
4155 * capable of that.
4156 */
4157 if (PageError(page))
cf3075fb 4158 end_extent_writepage(page, ret, page_start, page_end);
e55a0de1
QW
4159 if (epd->extent_locked) {
4160 /*
4161 * If epd->extent_locked, it's from extent_write_locked_range(),
4162 * the page can either be locked by lock_page() or
4163 * process_one_page().
4164 * Let btrfs_page_unlock_writer() handle both cases.
4165 */
4166 ASSERT(wbc);
4167 btrfs_page_unlock_writer(fs_info, page, wbc->range_start,
4168 wbc->range_end + 1 - wbc->range_start);
4169 } else {
4170 unlock_page(page);
4171 }
3065976b 4172 ASSERT(ret <= 0);
40f76580 4173 return ret;
d1310b2e
CM
4174}
4175
fd8b2b61 4176void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
0b32f4bb 4177{
74316201
N
4178 wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK,
4179 TASK_UNINTERRUPTIBLE);
0b32f4bb
JB
4180}
4181
18dfa711
FM
4182static void end_extent_buffer_writeback(struct extent_buffer *eb)
4183{
be1a1d7a
NA
4184 if (test_bit(EXTENT_BUFFER_ZONE_FINISH, &eb->bflags))
4185 btrfs_zone_finish_endio(eb->fs_info, eb->start, eb->len);
4186
18dfa711
FM
4187 clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
4188 smp_mb__after_atomic();
4189 wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
4190}
4191
2e3c2513 4192/*
a3efb2f0 4193 * Lock extent buffer status and pages for writeback.
2e3c2513 4194 *
a3efb2f0
QW
4195 * May try to flush write bio if we can't get the lock.
4196 *
4197 * Return 0 if the extent buffer doesn't need to be submitted.
4198 * (E.g. the extent buffer is not dirty)
4199 * Return >0 is the extent buffer is submitted to bio.
4200 * Return <0 if something went wrong, no page is locked.
2e3c2513 4201 */
9df76fb5 4202static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb,
0e378df1 4203 struct extent_page_data *epd)
0b32f4bb 4204{
9df76fb5 4205 struct btrfs_fs_info *fs_info = eb->fs_info;
2e3c2513 4206 int i, num_pages, failed_page_nr;
0b32f4bb
JB
4207 int flush = 0;
4208 int ret = 0;
4209
4210 if (!btrfs_try_tree_write_lock(eb)) {
f4340622 4211 ret = flush_write_bio(epd);
2e3c2513
QW
4212 if (ret < 0)
4213 return ret;
4214 flush = 1;
0b32f4bb
JB
4215 btrfs_tree_lock(eb);
4216 }
4217
4218 if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
4219 btrfs_tree_unlock(eb);
4220 if (!epd->sync_io)
4221 return 0;
4222 if (!flush) {
f4340622 4223 ret = flush_write_bio(epd);
2e3c2513
QW
4224 if (ret < 0)
4225 return ret;
0b32f4bb
JB
4226 flush = 1;
4227 }
a098d8e8
CM
4228 while (1) {
4229 wait_on_extent_buffer_writeback(eb);
4230 btrfs_tree_lock(eb);
4231 if (!test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags))
4232 break;
0b32f4bb 4233 btrfs_tree_unlock(eb);
0b32f4bb
JB
4234 }
4235 }
4236
51561ffe
JB
4237 /*
4238 * We need to do this to prevent races in people who check if the eb is
4239 * under IO since we can end up having no IO bits set for a short period
4240 * of time.
4241 */
4242 spin_lock(&eb->refs_lock);
0b32f4bb
JB
4243 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
4244 set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
51561ffe 4245 spin_unlock(&eb->refs_lock);
0b32f4bb 4246 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
104b4e51
NB
4247 percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
4248 -eb->len,
4249 fs_info->dirty_metadata_batch);
0b32f4bb 4250 ret = 1;
51561ffe
JB
4251 } else {
4252 spin_unlock(&eb->refs_lock);
0b32f4bb
JB
4253 }
4254
4255 btrfs_tree_unlock(eb);
4256
f3156df9
QW
4257 /*
4258 * Either we don't need to submit any tree block, or we're submitting
4259 * subpage eb.
4260 * Subpage metadata doesn't use page locking at all, so we can skip
4261 * the page locking.
4262 */
4263 if (!ret || fs_info->sectorsize < PAGE_SIZE)
0b32f4bb
JB
4264 return ret;
4265
65ad0104 4266 num_pages = num_extent_pages(eb);
0b32f4bb 4267 for (i = 0; i < num_pages; i++) {
fb85fc9a 4268 struct page *p = eb->pages[i];
0b32f4bb
JB
4269
4270 if (!trylock_page(p)) {
4271 if (!flush) {
18dfa711
FM
4272 int err;
4273
4274 err = flush_write_bio(epd);
4275 if (err < 0) {
4276 ret = err;
2e3c2513
QW
4277 failed_page_nr = i;
4278 goto err_unlock;
4279 }
0b32f4bb
JB
4280 flush = 1;
4281 }
4282 lock_page(p);
4283 }
4284 }
4285
4286 return ret;
2e3c2513
QW
4287err_unlock:
4288 /* Unlock already locked pages */
4289 for (i = 0; i < failed_page_nr; i++)
4290 unlock_page(eb->pages[i]);
18dfa711
FM
4291 /*
4292 * Clear EXTENT_BUFFER_WRITEBACK and wake up anyone waiting on it.
4293 * Also set back EXTENT_BUFFER_DIRTY so future attempts to this eb can
4294 * be made and undo everything done before.
4295 */
4296 btrfs_tree_lock(eb);
4297 spin_lock(&eb->refs_lock);
4298 set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
4299 end_extent_buffer_writeback(eb);
4300 spin_unlock(&eb->refs_lock);
4301 percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, eb->len,
4302 fs_info->dirty_metadata_batch);
4303 btrfs_clear_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
4304 btrfs_tree_unlock(eb);
2e3c2513 4305 return ret;
0b32f4bb
JB
4306}
4307
5a2c6075 4308static void set_btree_ioerr(struct page *page, struct extent_buffer *eb)
656f30db 4309{
5a2c6075 4310 struct btrfs_fs_info *fs_info = eb->fs_info;
656f30db 4311
5a2c6075 4312 btrfs_page_set_error(fs_info, page, eb->start, eb->len);
656f30db
FM
4313 if (test_and_set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
4314 return;
4315
c2e39305
JB
4316 /*
4317 * A read may stumble upon this buffer later, make sure that it gets an
4318 * error and knows there was an error.
4319 */
4320 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4321
68b85589
JB
4322 /*
4323 * We need to set the mapping with the io error as well because a write
4324 * error will flip the file system readonly, and then syncfs() will
4325 * return a 0 because we are readonly if we don't modify the err seq for
4326 * the superblock.
4327 */
4328 mapping_set_error(page->mapping, -EIO);
4329
eb5b64f1
DZ
4330 /*
4331 * If we error out, we should add back the dirty_metadata_bytes
4332 * to make it consistent.
4333 */
eb5b64f1
DZ
4334 percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
4335 eb->len, fs_info->dirty_metadata_batch);
4336
656f30db
FM
4337 /*
4338 * If writeback for a btree extent that doesn't belong to a log tree
4339 * failed, increment the counter transaction->eb_write_errors.
4340 * We do this because while the transaction is running and before it's
4341 * committing (when we call filemap_fdata[write|wait]_range against
4342 * the btree inode), we might have
4343 * btree_inode->i_mapping->a_ops->writepages() called by the VM - if it
4344 * returns an error or an error happens during writeback, when we're
4345 * committing the transaction we wouldn't know about it, since the pages
4346 * can be no longer dirty nor marked anymore for writeback (if a
4347 * subsequent modification to the extent buffer didn't happen before the
4348 * transaction commit), which makes filemap_fdata[write|wait]_range not
4349 * able to find the pages tagged with SetPageError at transaction
4350 * commit time. So if this happens we must abort the transaction,
4351 * otherwise we commit a super block with btree roots that point to
4352 * btree nodes/leafs whose content on disk is invalid - either garbage
4353 * or the content of some node/leaf from a past generation that got
4354 * cowed or deleted and is no longer valid.
4355 *
4356 * Note: setting AS_EIO/AS_ENOSPC in the btree inode's i_mapping would
4357 * not be enough - we need to distinguish between log tree extents vs
4358 * non-log tree extents, and the next filemap_fdatawait_range() call
4359 * will catch and clear such errors in the mapping - and that call might
4360 * be from a log sync and not from a transaction commit. Also, checking
4361 * for the eb flag EXTENT_BUFFER_WRITE_ERR at transaction commit time is
4362 * not done and would not be reliable - the eb might have been released
4363 * from memory and reading it back again means that flag would not be
4364 * set (since it's a runtime flag, not persisted on disk).
4365 *
4366 * Using the flags below in the btree inode also makes us achieve the
4367 * goal of AS_EIO/AS_ENOSPC when writepages() returns success, started
4368 * writeback for all dirty pages and before filemap_fdatawait_range()
4369 * is called, the writeback for all dirty pages had already finished
4370 * with errors - because we were not using AS_EIO/AS_ENOSPC,
4371 * filemap_fdatawait_range() would return success, as it could not know
4372 * that writeback errors happened (the pages were no longer tagged for
4373 * writeback).
4374 */
4375 switch (eb->log_index) {
4376 case -1:
5a2c6075 4377 set_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags);
656f30db
FM
4378 break;
4379 case 0:
5a2c6075 4380 set_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags);
656f30db
FM
4381 break;
4382 case 1:
5a2c6075 4383 set_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags);
656f30db
FM
4384 break;
4385 default:
4386 BUG(); /* unexpected, logic error */
4387 }
4388}
4389
2f3186d8
QW
4390/*
4391 * The endio specific version which won't touch any unsafe spinlock in endio
4392 * context.
4393 */
4394static struct extent_buffer *find_extent_buffer_nolock(
4395 struct btrfs_fs_info *fs_info, u64 start)
4396{
4397 struct extent_buffer *eb;
4398
4399 rcu_read_lock();
4400 eb = radix_tree_lookup(&fs_info->buffer_radix,
4401 start >> fs_info->sectorsize_bits);
4402 if (eb && atomic_inc_not_zero(&eb->refs)) {
4403 rcu_read_unlock();
4404 return eb;
4405 }
4406 rcu_read_unlock();
4407 return NULL;
4408}
4409
4410/*
4411 * The endio function for subpage extent buffer write.
4412 *
4413 * Unlike end_bio_extent_buffer_writepage(), we only call end_page_writeback()
4414 * after all extent buffers in the page has finished their writeback.
4415 */
fa04c165 4416static void end_bio_subpage_eb_writepage(struct bio *bio)
2f3186d8 4417{
fa04c165 4418 struct btrfs_fs_info *fs_info;
2f3186d8
QW
4419 struct bio_vec *bvec;
4420 struct bvec_iter_all iter_all;
4421
fa04c165
QW
4422 fs_info = btrfs_sb(bio_first_page_all(bio)->mapping->host->i_sb);
4423 ASSERT(fs_info->sectorsize < PAGE_SIZE);
4424
2f3186d8
QW
4425 ASSERT(!bio_flagged(bio, BIO_CLONED));
4426 bio_for_each_segment_all(bvec, bio, iter_all) {
4427 struct page *page = bvec->bv_page;
4428 u64 bvec_start = page_offset(page) + bvec->bv_offset;
4429 u64 bvec_end = bvec_start + bvec->bv_len - 1;
4430 u64 cur_bytenr = bvec_start;
4431
4432 ASSERT(IS_ALIGNED(bvec->bv_len, fs_info->nodesize));
4433
4434 /* Iterate through all extent buffers in the range */
4435 while (cur_bytenr <= bvec_end) {
4436 struct extent_buffer *eb;
4437 int done;
4438
4439 /*
4440 * Here we can't use find_extent_buffer(), as it may
4441 * try to lock eb->refs_lock, which is not safe in endio
4442 * context.
4443 */
4444 eb = find_extent_buffer_nolock(fs_info, cur_bytenr);
4445 ASSERT(eb);
4446
4447 cur_bytenr = eb->start + eb->len;
4448
4449 ASSERT(test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags));
4450 done = atomic_dec_and_test(&eb->io_pages);
4451 ASSERT(done);
4452
4453 if (bio->bi_status ||
4454 test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) {
4455 ClearPageUptodate(page);
4456 set_btree_ioerr(page, eb);
4457 }
4458
4459 btrfs_subpage_clear_writeback(fs_info, page, eb->start,
4460 eb->len);
4461 end_extent_buffer_writeback(eb);
4462 /*
4463 * free_extent_buffer() will grab spinlock which is not
4464 * safe in endio context. Thus here we manually dec
4465 * the ref.
4466 */
4467 atomic_dec(&eb->refs);
4468 }
4469 }
4470 bio_put(bio);
4471}
4472
4246a0b6 4473static void end_bio_extent_buffer_writepage(struct bio *bio)
0b32f4bb 4474{
2c30c71b 4475 struct bio_vec *bvec;
0b32f4bb 4476 struct extent_buffer *eb;
2b070cfe 4477 int done;
6dc4f100 4478 struct bvec_iter_all iter_all;
0b32f4bb 4479
c09abff8 4480 ASSERT(!bio_flagged(bio, BIO_CLONED));
2b070cfe 4481 bio_for_each_segment_all(bvec, bio, iter_all) {
0b32f4bb
JB
4482 struct page *page = bvec->bv_page;
4483
0b32f4bb
JB
4484 eb = (struct extent_buffer *)page->private;
4485 BUG_ON(!eb);
4486 done = atomic_dec_and_test(&eb->io_pages);
4487
4e4cbee9 4488 if (bio->bi_status ||
4246a0b6 4489 test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) {
0b32f4bb 4490 ClearPageUptodate(page);
5a2c6075 4491 set_btree_ioerr(page, eb);
0b32f4bb
JB
4492 }
4493
4494 end_page_writeback(page);
4495
4496 if (!done)
4497 continue;
4498
4499 end_extent_buffer_writeback(eb);
2c30c71b 4500 }
0b32f4bb
JB
4501
4502 bio_put(bio);
0b32f4bb
JB
4503}
4504
fa04c165
QW
4505static void prepare_eb_write(struct extent_buffer *eb)
4506{
4507 u32 nritems;
4508 unsigned long start;
4509 unsigned long end;
4510
4511 clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
4512 atomic_set(&eb->io_pages, num_extent_pages(eb));
4513
4514 /* Set btree blocks beyond nritems with 0 to avoid stale content */
4515 nritems = btrfs_header_nritems(eb);
4516 if (btrfs_header_level(eb) > 0) {
4517 end = btrfs_node_key_ptr_offset(nritems);
4518 memzero_extent_buffer(eb, end, eb->len - end);
4519 } else {
4520 /*
4521 * Leaf:
4522 * header 0 1 2 .. N ... data_N .. data_2 data_1 data_0
4523 */
4524 start = btrfs_item_nr_offset(nritems);
4525 end = BTRFS_LEAF_DATA_OFFSET + leaf_data_end(eb);
4526 memzero_extent_buffer(eb, start, end - start);
4527 }
4528}
4529
35b6ddfa
QW
4530/*
4531 * Unlike the work in write_one_eb(), we rely completely on extent locking.
 4532 * Page locking is only used at a minimum to keep the VMM code happy.
35b6ddfa
QW
4533 */
4534static int write_one_subpage_eb(struct extent_buffer *eb,
4535 struct writeback_control *wbc,
4536 struct extent_page_data *epd)
4537{
4538 struct btrfs_fs_info *fs_info = eb->fs_info;
4539 struct page *page = eb->pages[0];
4540 unsigned int write_flags = wbc_to_write_flags(wbc) | REQ_META;
4541 bool no_dirty_ebs = false;
4542 int ret;
4543
fa04c165
QW
4544 prepare_eb_write(eb);
4545
35b6ddfa
QW
4546 /* clear_page_dirty_for_io() in subpage helper needs page locked */
4547 lock_page(page);
4548 btrfs_subpage_set_writeback(fs_info, page, eb->start, eb->len);
4549
4550 /* Check if this is the last dirty bit to update nr_written */
4551 no_dirty_ebs = btrfs_subpage_clear_and_test_dirty(fs_info, page,
4552 eb->start, eb->len);
4553 if (no_dirty_ebs)
4554 clear_page_dirty_for_io(page);
4555
390ed29b
QW
4556 ret = submit_extent_page(REQ_OP_WRITE | write_flags, wbc,
4557 &epd->bio_ctrl, page, eb->start, eb->len,
4558 eb->start - page_offset(page),
fa04c165 4559 end_bio_subpage_eb_writepage, 0, 0, false);
35b6ddfa
QW
4560 if (ret) {
4561 btrfs_subpage_clear_writeback(fs_info, page, eb->start, eb->len);
4562 set_btree_ioerr(page, eb);
4563 unlock_page(page);
4564
4565 if (atomic_dec_and_test(&eb->io_pages))
4566 end_extent_buffer_writeback(eb);
4567 return -EIO;
4568 }
4569 unlock_page(page);
4570 /*
4571 * Submission finished without problem, if no range of the page is
4572 * dirty anymore, we have submitted a page. Update nr_written in wbc.
4573 */
4574 if (no_dirty_ebs)
4575 update_nr_written(wbc, 1);
4576 return ret;
4577}
4578
0e378df1 4579static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
0b32f4bb
JB
4580 struct writeback_control *wbc,
4581 struct extent_page_data *epd)
4582{
0c64c33c 4583 u64 disk_bytenr = eb->start;
cc5e31a4 4584 int i, num_pages;
ff40adf7 4585 unsigned int write_flags = wbc_to_write_flags(wbc) | REQ_META;
d7dbe9e7 4586 int ret = 0;
0b32f4bb 4587
fa04c165 4588 prepare_eb_write(eb);
35b6ddfa 4589
fa04c165 4590 num_pages = num_extent_pages(eb);
0b32f4bb 4591 for (i = 0; i < num_pages; i++) {
fb85fc9a 4592 struct page *p = eb->pages[i];
0b32f4bb
JB
4593
4594 clear_page_dirty_for_io(p);
4595 set_page_writeback(p);
0ceb34bf 4596 ret = submit_extent_page(REQ_OP_WRITE | write_flags, wbc,
390ed29b
QW
4597 &epd->bio_ctrl, p, disk_bytenr,
4598 PAGE_SIZE, 0,
1f7ad75b 4599 end_bio_extent_buffer_writepage,
390ed29b 4600 0, 0, false);
0b32f4bb 4601 if (ret) {
5a2c6075 4602 set_btree_ioerr(p, eb);
fe01aa65
TK
4603 if (PageWriteback(p))
4604 end_page_writeback(p);
0b32f4bb
JB
4605 if (atomic_sub_and_test(num_pages - i, &eb->io_pages))
4606 end_extent_buffer_writeback(eb);
4607 ret = -EIO;
4608 break;
4609 }
0c64c33c 4610 disk_bytenr += PAGE_SIZE;
3d4b9496 4611 update_nr_written(wbc, 1);
0b32f4bb
JB
4612 unlock_page(p);
4613 }
4614
4615 if (unlikely(ret)) {
4616 for (; i < num_pages; i++) {
bbf65cf0 4617 struct page *p = eb->pages[i];
81465028 4618 clear_page_dirty_for_io(p);
0b32f4bb
JB
4619 unlock_page(p);
4620 }
4621 }
4622
4623 return ret;
4624}
4625
c4aec299
QW
4626/*
4627 * Submit one subpage btree page.
4628 *
4629 * The main difference to submit_eb_page() is:
4630 * - Page locking
4631 * For subpage, we don't rely on page locking at all.
4632 *
4633 * - Flush write bio
4634 * We only flush bio if we may be unable to fit current extent buffers into
4635 * current bio.
4636 *
4637 * Return >=0 for the number of submitted extent buffers.
4638 * Return <0 for fatal error.
4639 */
4640static int submit_eb_subpage(struct page *page,
4641 struct writeback_control *wbc,
4642 struct extent_page_data *epd)
4643{
4644 struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
4645 int submitted = 0;
4646 u64 page_start = page_offset(page);
4647 int bit_start = 0;
c4aec299
QW
4648 int sectors_per_node = fs_info->nodesize >> fs_info->sectorsize_bits;
4649 int ret;
4650
4651 /* Lock and write each dirty extent buffers in the range */
72a69cd0 4652 while (bit_start < fs_info->subpage_info->bitmap_nr_bits) {
c4aec299
QW
4653 struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
4654 struct extent_buffer *eb;
4655 unsigned long flags;
4656 u64 start;
4657
4658 /*
4659 * Take private lock to ensure the subpage won't be detached
4660 * in the meantime.
4661 */
4662 spin_lock(&page->mapping->private_lock);
4663 if (!PagePrivate(page)) {
4664 spin_unlock(&page->mapping->private_lock);
4665 break;
4666 }
4667 spin_lock_irqsave(&subpage->lock, flags);
72a69cd0
QW
4668 if (!test_bit(bit_start + fs_info->subpage_info->dirty_offset,
4669 subpage->bitmaps)) {
c4aec299
QW
4670 spin_unlock_irqrestore(&subpage->lock, flags);
4671 spin_unlock(&page->mapping->private_lock);
4672 bit_start++;
4673 continue;
4674 }
4675
4676 start = page_start + bit_start * fs_info->sectorsize;
4677 bit_start += sectors_per_node;
4678
4679 /*
4680 * Here we just want to grab the eb without touching extra
4681 * spin locks, so call find_extent_buffer_nolock().
4682 */
4683 eb = find_extent_buffer_nolock(fs_info, start);
4684 spin_unlock_irqrestore(&subpage->lock, flags);
4685 spin_unlock(&page->mapping->private_lock);
4686
4687 /*
4688 * The eb has already reached 0 refs thus find_extent_buffer()
4689 * doesn't return it. We don't need to write back such eb
4690 * anyway.
4691 */
4692 if (!eb)
4693 continue;
4694
4695 ret = lock_extent_buffer_for_io(eb, epd);
4696 if (ret == 0) {
4697 free_extent_buffer(eb);
4698 continue;
4699 }
4700 if (ret < 0) {
4701 free_extent_buffer(eb);
4702 goto cleanup;
4703 }
fa04c165 4704 ret = write_one_subpage_eb(eb, wbc, epd);
c4aec299
QW
4705 free_extent_buffer(eb);
4706 if (ret < 0)
4707 goto cleanup;
4708 submitted++;
4709 }
4710 return submitted;
4711
4712cleanup:
4713 /* We hit error, end bio for the submitted extent buffers */
4714 end_write_bio(epd, ret);
4715 return ret;
4716}
4717
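/*
 * Illustrative example (assumed geometry, not part of the original file)
 * for the bitmap walk in submit_eb_subpage(): with a 64 KiB page,
 * 4 KiB sectors and a 16 KiB nodesize, sectors_per_node is 4.  If the
 * first dirty bit found is bit_start = 4, the extent buffer written out
 * starts at page_start + 4 * 4 KiB = page_start + 16 KiB, and bit_start
 * then advances by 4 so the next iteration looks at the following
 * nodesize-aligned slot.
 */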
f91e0d0c
QW
4718/*
4719 * Submit all page(s) of one extent buffer.
4720 *
4721 * @page: the page of one extent buffer
4722 * @eb_context: to determine if we need to submit this page, if current page
4723 * belongs to this eb, we don't need to submit
4724 *
4725 * The caller should pass each page in their bytenr order, and here we use
4726 * @eb_context to determine if we have submitted pages of one extent buffer.
4727 *
4728 * If we have, we just skip until we hit a new page that doesn't belong to
4729 * current @eb_context.
4730 *
4731 * If not, we submit all the page(s) of the extent buffer.
4732 *
4733 * Return >0 if we have submitted the extent buffer successfully.
4734 * Return 0 if we don't need to submit the page, as it's already submitted by
4735 * previous call.
4736 * Return <0 for fatal error.
4737 */
4738static int submit_eb_page(struct page *page, struct writeback_control *wbc,
4739 struct extent_page_data *epd,
4740 struct extent_buffer **eb_context)
4741{
4742 struct address_space *mapping = page->mapping;
0bc09ca1 4743 struct btrfs_block_group *cache = NULL;
f91e0d0c
QW
4744 struct extent_buffer *eb;
4745 int ret;
4746
4747 if (!PagePrivate(page))
4748 return 0;
4749
c4aec299
QW
4750 if (btrfs_sb(page->mapping->host->i_sb)->sectorsize < PAGE_SIZE)
4751 return submit_eb_subpage(page, wbc, epd);
4752
f91e0d0c
QW
4753 spin_lock(&mapping->private_lock);
4754 if (!PagePrivate(page)) {
4755 spin_unlock(&mapping->private_lock);
4756 return 0;
4757 }
4758
4759 eb = (struct extent_buffer *)page->private;
4760
4761 /*
4762 * Shouldn't happen and normally this would be a BUG_ON but no point
4763 * crashing the machine for something we can survive anyway.
4764 */
4765 if (WARN_ON(!eb)) {
4766 spin_unlock(&mapping->private_lock);
4767 return 0;
4768 }
4769
4770 if (eb == *eb_context) {
4771 spin_unlock(&mapping->private_lock);
4772 return 0;
4773 }
4774 ret = atomic_inc_not_zero(&eb->refs);
4775 spin_unlock(&mapping->private_lock);
4776 if (!ret)
4777 return 0;
4778
0bc09ca1
NA
4779 if (!btrfs_check_meta_write_pointer(eb->fs_info, eb, &cache)) {
4780 /*
 4781 * If for_sync, this hole will be filled by a
 4782 * transaction commit.
4783 */
4784 if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
4785 ret = -EAGAIN;
4786 else
4787 ret = 0;
4788 free_extent_buffer(eb);
4789 return ret;
4790 }
4791
f91e0d0c
QW
4792 *eb_context = eb;
4793
4794 ret = lock_extent_buffer_for_io(eb, epd);
4795 if (ret <= 0) {
0bc09ca1
NA
4796 btrfs_revert_meta_write_pointer(cache, eb);
4797 if (cache)
4798 btrfs_put_block_group(cache);
f91e0d0c
QW
4799 free_extent_buffer(eb);
4800 return ret;
4801 }
be1a1d7a 4802 if (cache) {
d3e29967
NB
4803 /*
4804 * Implies write in zoned mode. Mark the last eb in a block group.
4805 */
be1a1d7a
NA
4806 if (cache->seq_zone && eb->start + eb->len == cache->zone_capacity)
4807 set_bit(EXTENT_BUFFER_ZONE_FINISH, &eb->bflags);
d3e29967 4808 btrfs_put_block_group(cache);
be1a1d7a 4809 }
f91e0d0c
QW
4810 ret = write_one_eb(eb, wbc, epd);
4811 free_extent_buffer(eb);
4812 if (ret < 0)
4813 return ret;
4814 return 1;
4815}
4816
0b32f4bb
JB
4817int btree_write_cache_pages(struct address_space *mapping,
4818 struct writeback_control *wbc)
4819{
f91e0d0c 4820 struct extent_buffer *eb_context = NULL;
0b32f4bb 4821 struct extent_page_data epd = {
390ed29b 4822 .bio_ctrl = { 0 },
0b32f4bb
JB
4823 .extent_locked = 0,
4824 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
4825 };
b3ff8f1d 4826 struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
0b32f4bb
JB
4827 int ret = 0;
4828 int done = 0;
4829 int nr_to_write_done = 0;
4830 struct pagevec pvec;
4831 int nr_pages;
4832 pgoff_t index;
4833 pgoff_t end; /* Inclusive */
4834 int scanned = 0;
10bbd235 4835 xa_mark_t tag;
0b32f4bb 4836
86679820 4837 pagevec_init(&pvec);
0b32f4bb
JB
4838 if (wbc->range_cyclic) {
4839 index = mapping->writeback_index; /* Start from prev offset */
4840 end = -1;
556755a8
JB
4841 /*
4842 * Start from the beginning does not need to cycle over the
4843 * range, mark it as scanned.
4844 */
4845 scanned = (index == 0);
0b32f4bb 4846 } else {
09cbfeaf
KS
4847 index = wbc->range_start >> PAGE_SHIFT;
4848 end = wbc->range_end >> PAGE_SHIFT;
0b32f4bb
JB
4849 scanned = 1;
4850 }
4851 if (wbc->sync_mode == WB_SYNC_ALL)
4852 tag = PAGECACHE_TAG_TOWRITE;
4853 else
4854 tag = PAGECACHE_TAG_DIRTY;
0bc09ca1 4855 btrfs_zoned_meta_io_lock(fs_info);
0b32f4bb
JB
4856retry:
4857 if (wbc->sync_mode == WB_SYNC_ALL)
4858 tag_pages_for_writeback(mapping, index, end);
4859 while (!done && !nr_to_write_done && (index <= end) &&
4006f437 4860 (nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
67fd707f 4861 tag))) {
0b32f4bb
JB
4862 unsigned i;
4863
0b32f4bb
JB
4864 for (i = 0; i < nr_pages; i++) {
4865 struct page *page = pvec.pages[i];
4866
f91e0d0c
QW
4867 ret = submit_eb_page(page, wbc, &epd, &eb_context);
4868 if (ret == 0)
0b32f4bb 4869 continue;
f91e0d0c 4870 if (ret < 0) {
0b32f4bb 4871 done = 1;
0b32f4bb
JB
4872 break;
4873 }
0b32f4bb
JB
4874
4875 /*
4876 * the filesystem may choose to bump up nr_to_write.
4877 * We have to make sure to honor the new nr_to_write
4878 * at any time
4879 */
4880 nr_to_write_done = wbc->nr_to_write <= 0;
4881 }
4882 pagevec_release(&pvec);
4883 cond_resched();
4884 }
4885 if (!scanned && !done) {
4886 /*
4887 * We hit the last page and there is more work to be done: wrap
4888 * back to the start of the file
4889 */
4890 scanned = 1;
4891 index = 0;
4892 goto retry;
4893 }
2b952eea
QW
4894 if (ret < 0) {
4895 end_write_bio(&epd, ret);
0bc09ca1 4896 goto out;
2b952eea 4897 }
b3ff8f1d
QW
4898 /*
4899 * If something went wrong, don't allow any metadata write bio to be
4900 * submitted.
4901 *
4902 * This would prevent use-after-free if we had dirty pages not
 4903 * cleaned up, which can still happen with fuzzed images.
4904 *
4905 * - Bad extent tree
4906 * Allowing existing tree block to be allocated for other trees.
4907 *
4908 * - Log tree operations
 4909 * Existing tree blocks get allocated to the log tree, which bumps their
 4910 * generation, then they get cleaned in tree re-balance.
4911 * Such tree block will not be written back, since it's clean,
4912 * thus no WRITTEN flag set.
4913 * And after log writes back, this tree block is not traced by
4914 * any dirty extent_io_tree.
4915 *
4916 * - Offending tree block gets re-dirtied from its original owner
4917 * Since it has bumped generation, no WRITTEN flag, it can be
4918 * reused without COWing. This tree block will not be traced
4919 * by btrfs_transaction::dirty_pages.
4920 *
4921 * Now such dirty tree block will not be cleaned by any dirty
4922 * extent io tree. Thus we don't want to submit such wild eb
4923 * if the fs already has error.
4924 */
84961539 4925 if (!BTRFS_FS_ERROR(fs_info)) {
b3ff8f1d
QW
4926 ret = flush_write_bio(&epd);
4927 } else {
fbabd4a3 4928 ret = -EROFS;
b3ff8f1d
QW
4929 end_write_bio(&epd, ret);
4930 }
0bc09ca1
NA
4931out:
4932 btrfs_zoned_meta_io_unlock(fs_info);
0b32f4bb
JB
4933 return ret;
4934}
4935
d1310b2e 4936/**
3bed2da1
NB
4937 * Walk the list of dirty pages of the given address space and write all of them.
4938 *
d1310b2e 4939 * @mapping: address space structure to write
3bed2da1
NB
4940 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
4941 * @epd: holds context for the write, namely the bio
d1310b2e
CM
4942 *
4943 * If a page is already under I/O, write_cache_pages() skips it, even
4944 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
4945 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
4946 * and msync() need to guarantee that all the data which was dirty at the time
4947 * the call was made get new I/O started against them. If wbc->sync_mode is
4948 * WB_SYNC_ALL then we were called for data integrity and we must wait for
4949 * existing IO to complete.
4950 */
4242b64a 4951static int extent_write_cache_pages(struct address_space *mapping,
4bef0848 4952 struct writeback_control *wbc,
aab6e9ed 4953 struct extent_page_data *epd)
d1310b2e 4954{
7fd1a3f7 4955 struct inode *inode = mapping->host;
d1310b2e
CM
4956 int ret = 0;
4957 int done = 0;
f85d7d6c 4958 int nr_to_write_done = 0;
d1310b2e
CM
4959 struct pagevec pvec;
4960 int nr_pages;
4961 pgoff_t index;
4962 pgoff_t end; /* Inclusive */
a9132667
LB
4963 pgoff_t done_index;
4964 int range_whole = 0;
d1310b2e 4965 int scanned = 0;
10bbd235 4966 xa_mark_t tag;
d1310b2e 4967
7fd1a3f7
JB
4968 /*
4969 * We have to hold onto the inode so that ordered extents can do their
4970 * work when the IO finishes. The alternative to this is failing to add
4971 * an ordered extent if the igrab() fails there and that is a huge pain
4972 * to deal with, so instead just hold onto the inode throughout the
4973 * writepages operation. If it fails here we are freeing up the inode
4974 * anyway and we'd rather not waste our time writing out stuff that is
4975 * going to be truncated anyway.
4976 */
4977 if (!igrab(inode))
4978 return 0;
4979
86679820 4980 pagevec_init(&pvec);
d1310b2e
CM
4981 if (wbc->range_cyclic) {
4982 index = mapping->writeback_index; /* Start from prev offset */
4983 end = -1;
556755a8
JB
4984 /*
 4985 * When we start from the very beginning there is no need to
 4986 * cycle back over the range later, so mark it as scanned.
4987 */
4988 scanned = (index == 0);
d1310b2e 4989 } else {
09cbfeaf
KS
4990 index = wbc->range_start >> PAGE_SHIFT;
4991 end = wbc->range_end >> PAGE_SHIFT;
a9132667
LB
4992 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
4993 range_whole = 1;
d1310b2e
CM
4994 scanned = 1;
4995 }
3cd24c69
EL
4996
4997 /*
4998 * We do the tagged writepage as long as the snapshot flush bit is set
 4999 * and we are the first one to do the filemap_flush() on this inode.
5000 *
5001 * The nr_to_write == LONG_MAX is needed to make sure other flushers do
5002 * not race in and drop the bit.
5003 */
5004 if (range_whole && wbc->nr_to_write == LONG_MAX &&
5005 test_and_clear_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
5006 &BTRFS_I(inode)->runtime_flags))
5007 wbc->tagged_writepages = 1;
5008
5009 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
f7aaa06b
JB
5010 tag = PAGECACHE_TAG_TOWRITE;
5011 else
5012 tag = PAGECACHE_TAG_DIRTY;
d1310b2e 5013retry:
3cd24c69 5014 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
f7aaa06b 5015 tag_pages_for_writeback(mapping, index, end);
a9132667 5016 done_index = index;
f85d7d6c 5017 while (!done && !nr_to_write_done && (index <= end) &&
67fd707f
JK
5018 (nr_pages = pagevec_lookup_range_tag(&pvec, mapping,
5019 &index, end, tag))) {
d1310b2e
CM
5020 unsigned i;
5021
d1310b2e
CM
5022 for (i = 0; i < nr_pages; i++) {
5023 struct page *page = pvec.pages[i];
5024
f7bddf1e 5025 done_index = page->index + 1;
d1310b2e 5026 /*
b93b0163
MW
5027 * At this point we hold neither the i_pages lock nor
5028 * the page lock: the page may be truncated or
5029 * invalidated (changing page->mapping to NULL),
5030 * or even swizzled back from swapper_space to
5031 * tmpfs file mapping
d1310b2e 5032 */
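			/*
			 * If the page is contended, flush the bio we have built
			 * up so far before blocking in lock_page(): the lock
			 * holder may itself be waiting for writeback of a page
			 * that sits in our unsubmitted bio.
			 */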
c8f2f24b 5033 if (!trylock_page(page)) {
f4340622
QW
5034 ret = flush_write_bio(epd);
5035 BUG_ON(ret < 0);
c8f2f24b 5036 lock_page(page);
01d658f2 5037 }
d1310b2e
CM
5038
5039 if (unlikely(page->mapping != mapping)) {
5040 unlock_page(page);
5041 continue;
5042 }
5043
d2c3f4f6 5044 if (wbc->sync_mode != WB_SYNC_NONE) {
f4340622
QW
5045 if (PageWriteback(page)) {
5046 ret = flush_write_bio(epd);
5047 BUG_ON(ret < 0);
5048 }
d1310b2e 5049 wait_on_page_writeback(page);
d2c3f4f6 5050 }
d1310b2e
CM
5051
5052 if (PageWriteback(page) ||
5053 !clear_page_dirty_for_io(page)) {
5054 unlock_page(page);
5055 continue;
5056 }
5057
aab6e9ed 5058 ret = __extent_writepage(page, wbc, epd);
a9132667 5059 if (ret < 0) {
a9132667
LB
5060 done = 1;
5061 break;
5062 }
f85d7d6c
CM
5063
5064 /*
5065 * the filesystem may choose to bump up nr_to_write.
5066 * We have to make sure to honor the new nr_to_write
5067 * at any time
5068 */
5069 nr_to_write_done = wbc->nr_to_write <= 0;
d1310b2e
CM
5070 }
5071 pagevec_release(&pvec);
5072 cond_resched();
5073 }
894b36e3 5074 if (!scanned && !done) {
d1310b2e
CM
5075 /*
5076 * We hit the last page and there is more work to be done: wrap
5077 * back to the start of the file
5078 */
5079 scanned = 1;
5080 index = 0;
42ffb0bf
JB
5081
5082 /*
5083 * If we're looping we could run into a page that is locked by a
5084 * writer and that writer could be waiting on writeback for a
5085 * page in our current bio, and thus deadlock, so flush the
5086 * write bio here.
5087 */
5088 ret = flush_write_bio(epd);
5089 if (!ret)
5090 goto retry;
d1310b2e 5091 }
a9132667
LB
5092
5093 if (wbc->range_cyclic || (wbc->nr_to_write > 0 && range_whole))
5094 mapping->writeback_index = done_index;
5095
7fd1a3f7 5096 btrfs_add_delayed_iput(inode);
894b36e3 5097 return ret;
d1310b2e 5098}
d1310b2e 5099
0a9b0e53 5100int extent_write_full_page(struct page *page, struct writeback_control *wbc)
d1310b2e
CM
5101{
5102 int ret;
d1310b2e 5103 struct extent_page_data epd = {
390ed29b 5104 .bio_ctrl = { 0 },
771ed689 5105 .extent_locked = 0,
ffbd517d 5106 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
d1310b2e 5107 };
d1310b2e 5108
d1310b2e 5109 ret = __extent_writepage(page, wbc, &epd);
3065976b
QW
5110 ASSERT(ret <= 0);
5111 if (ret < 0) {
5112 end_write_bio(&epd, ret);
5113 return ret;
5114 }
d1310b2e 5115
3065976b
QW
5116 ret = flush_write_bio(&epd);
5117 ASSERT(ret <= 0);
d1310b2e
CM
5118 return ret;
5119}
d1310b2e 5120
2bd0fc93
QW
5121/*
 5122 * Submit the pages in the range to the bio for call sites whose delalloc range
 5123 * has already been run (i.e. the ordered extent has been inserted) and whose
 5124 * pages are all still locked.
5125 */
5126int extent_write_locked_range(struct inode *inode, u64 start, u64 end)
771ed689 5127{
2bd0fc93
QW
5128 bool found_error = false;
5129 int first_error = 0;
771ed689
CM
5130 int ret = 0;
5131 struct address_space *mapping = inode->i_mapping;
5132 struct page *page;
2bd0fc93 5133 u64 cur = start;
66448b9d
QW
5134 unsigned long nr_pages;
5135 const u32 sectorsize = btrfs_sb(inode->i_sb)->sectorsize;
771ed689 5136 struct extent_page_data epd = {
390ed29b 5137 .bio_ctrl = { 0 },
771ed689 5138 .extent_locked = 1,
2bd0fc93 5139 .sync_io = 1,
771ed689
CM
5140 };
5141 struct writeback_control wbc_writepages = {
2bd0fc93 5142 .sync_mode = WB_SYNC_ALL,
771ed689
CM
5143 .range_start = start,
5144 .range_end = end + 1,
ec39f769
CM
5145 /* We're called from an async helper function */
5146 .punt_to_cgroup = 1,
5147 .no_cgroup_owner = 1,
771ed689
CM
5148 };
5149
66448b9d
QW
5150 ASSERT(IS_ALIGNED(start, sectorsize) && IS_ALIGNED(end + 1, sectorsize));
5151 nr_pages = (round_up(end, PAGE_SIZE) - round_down(start, PAGE_SIZE)) >>
5152 PAGE_SHIFT;
5153 wbc_writepages.nr_to_write = nr_pages * 2;
5154
dbb70bec 5155 wbc_attach_fdatawrite_inode(&wbc_writepages, inode);
2bd0fc93 5156 while (cur <= end) {
66448b9d
QW
5157 u64 cur_end = min(round_down(cur, PAGE_SIZE) + PAGE_SIZE - 1, end);
5158
2bd0fc93
QW
5159 page = find_get_page(mapping, cur >> PAGE_SHIFT);
5160 /*
5161 * All pages in the range are locked since
 5162 * btrfs_run_delalloc_range(), thus nobody else can have cleared
 5163 * the page dirty flag.
5164 */
66448b9d 5165 ASSERT(PageLocked(page));
2bd0fc93
QW
5166 ASSERT(PageDirty(page));
5167 clear_page_dirty_for_io(page);
5168 ret = __extent_writepage(page, &wbc_writepages, &epd);
5169 ASSERT(ret <= 0);
5170 if (ret < 0) {
5171 found_error = true;
5172 first_error = ret;
771ed689 5173 }
09cbfeaf 5174 put_page(page);
66448b9d 5175 cur = cur_end + 1;
771ed689
CM
5176 }
5177
2bd0fc93 5178 if (!found_error)
dbb70bec
CM
5179 ret = flush_write_bio(&epd);
5180 else
02c6db4f 5181 end_write_bio(&epd, ret);
dbb70bec
CM
5182
5183 wbc_detach_inode(&wbc_writepages);
2bd0fc93
QW
5184 if (found_error)
5185 return first_error;
771ed689
CM
5186 return ret;
5187}
d1310b2e 5188
8ae225a8 5189int extent_writepages(struct address_space *mapping,
d1310b2e
CM
5190 struct writeback_control *wbc)
5191{
35156d85 5192 struct inode *inode = mapping->host;
d1310b2e
CM
5193 int ret = 0;
5194 struct extent_page_data epd = {
390ed29b 5195 .bio_ctrl = { 0 },
771ed689 5196 .extent_locked = 0,
ffbd517d 5197 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
d1310b2e
CM
5198 };
5199
35156d85
JT
5200 /*
5201 * Allow only a single thread to do the reloc work in zoned mode to
5202 * protect the write pointer updates.
5203 */
869f4cdc 5204 btrfs_zoned_data_reloc_lock(BTRFS_I(inode));
935db853 5205 ret = extent_write_cache_pages(mapping, wbc, &epd);
869f4cdc 5206 btrfs_zoned_data_reloc_unlock(BTRFS_I(inode));
a2a72fbd
QW
5207 ASSERT(ret <= 0);
5208 if (ret < 0) {
5209 end_write_bio(&epd, ret);
5210 return ret;
5211 }
5212 ret = flush_write_bio(&epd);
d1310b2e
CM
5213 return ret;
5214}
d1310b2e 5215
ba206a02 5216void extent_readahead(struct readahead_control *rac)
d1310b2e 5217{
390ed29b 5218 struct btrfs_bio_ctrl bio_ctrl = { 0 };
67c9684f 5219 struct page *pagepool[16];
125bac01 5220 struct extent_map *em_cached = NULL;
808f80b4 5221 u64 prev_em_start = (u64)-1;
ba206a02 5222 int nr;
d1310b2e 5223
ba206a02 5224 while ((nr = readahead_page_batch(rac, pagepool))) {
32c0a6bc
MWO
5225 u64 contig_start = readahead_pos(rac);
5226 u64 contig_end = contig_start + readahead_batch_length(rac) - 1;
e65ef21e 5227
ba206a02 5228 contiguous_readpages(pagepool, nr, contig_start, contig_end,
390ed29b 5229 &em_cached, &bio_ctrl, &prev_em_start);
d1310b2e 5230 }
67c9684f 5231
125bac01
MX
5232 if (em_cached)
5233 free_extent_map(em_cached);
5234
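	/*
	 * Submit whatever bio the batches above have accumulated. Readahead
	 * is best effort, so a submission failure is simply ignored here.
	 */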
390ed29b
QW
5235 if (bio_ctrl.bio) {
5236 if (submit_one_bio(bio_ctrl.bio, 0, bio_ctrl.bio_flags))
ba206a02
MWO
5237 return;
5238 }
d1310b2e 5239}
d1310b2e
CM
5240
5241/*
895586eb
MWO
5242 * basic invalidate_folio code, this waits on any locked or writeback
5243 * ranges corresponding to the folio, and then deletes any extent state
d1310b2e
CM
5244 * records from the tree
5245 */
895586eb
MWO
5246int extent_invalidate_folio(struct extent_io_tree *tree,
5247 struct folio *folio, size_t offset)
d1310b2e 5248{
2ac55d41 5249 struct extent_state *cached_state = NULL;
895586eb
MWO
5250 u64 start = folio_pos(folio);
5251 u64 end = start + folio_size(folio) - 1;
5252 size_t blocksize = folio->mapping->host->i_sb->s_blocksize;
d1310b2e 5253
829ddec9
QW
5254 /* This function is only called for the btree inode */
5255 ASSERT(tree->owner == IO_TREE_BTREE_INODE_IO);
5256
fda2832f 5257 start += ALIGN(offset, blocksize);
d1310b2e
CM
5258 if (start > end)
5259 return 0;
5260
ff13db41 5261 lock_extent_bits(tree, start, end, &cached_state);
895586eb 5262 folio_wait_writeback(folio);
829ddec9
QW
5263
5264 /*
5265 * Currently for btree io tree, only EXTENT_LOCKED is utilized,
5266 * so here we only need to unlock the extent range to free any
5267 * existing extent state.
5268 */
5269 unlock_extent_cached(tree, start, end, &cached_state);
d1310b2e
CM
5270 return 0;
5271}
d1310b2e 5272
7b13b7b1
CM
5273/*
5274 * a helper for releasepage, this tests for areas of the page that
5275 * are locked or under IO and drops the related state bits if it is safe
5276 * to drop the page.
5277 */
29c68b2d 5278static int try_release_extent_state(struct extent_io_tree *tree,
48a3b636 5279 struct page *page, gfp_t mask)
7b13b7b1 5280{
4eee4fa4 5281 u64 start = page_offset(page);
09cbfeaf 5282 u64 end = start + PAGE_SIZE - 1;
7b13b7b1
CM
5283 int ret = 1;
5284
8882679e 5285 if (test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL)) {
7b13b7b1 5286 ret = 0;
8882679e 5287 } else {
11ef160f 5288 /*
2766ff61
FM
5289 * At this point we can safely clear everything except the
5290 * locked bit, the nodatasum bit and the delalloc new bit.
5291 * The delalloc new bit will be cleared by ordered extent
5292 * completion.
11ef160f 5293 */
66b0c887 5294 ret = __clear_extent_bit(tree, start, end,
2766ff61
FM
5295 ~(EXTENT_LOCKED | EXTENT_NODATASUM | EXTENT_DELALLOC_NEW),
5296 0, 0, NULL, mask, NULL);
e3f24cc5
CM
5297
 5298 /* if clear_extent_bit failed for ENOMEM reasons,
5299 * we can't allow the release to continue.
5300 */
5301 if (ret < 0)
5302 ret = 0;
5303 else
5304 ret = 1;
7b13b7b1
CM
5305 }
5306 return ret;
5307}
7b13b7b1 5308
d1310b2e
CM
5309/*
5310 * a helper for releasepage. As long as there are no locked extents
5311 * in the range corresponding to the page, both state records and extent
5312 * map records are removed
5313 */
477a30ba 5314int try_release_extent_mapping(struct page *page, gfp_t mask)
d1310b2e
CM
5315{
5316 struct extent_map *em;
4eee4fa4 5317 u64 start = page_offset(page);
09cbfeaf 5318 u64 end = start + PAGE_SIZE - 1;
bd3599a0
FM
5319 struct btrfs_inode *btrfs_inode = BTRFS_I(page->mapping->host);
5320 struct extent_io_tree *tree = &btrfs_inode->io_tree;
5321 struct extent_map_tree *map = &btrfs_inode->extent_tree;
7b13b7b1 5322
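	/*
	 * Only walk and prune extent maps when we are allowed to block and
	 * the file is reasonably large (over 16M); for smaller files the
	 * extra scanning is unlikely to pay off, so fall through to
	 * releasing only the extent state.
	 */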
d0164adc 5323 if (gfpflags_allow_blocking(mask) &&
ee22184b 5324 page->mapping->host->i_size > SZ_16M) {
39b5637f 5325 u64 len;
70dec807 5326 while (start <= end) {
fbc2bd7e
FM
5327 struct btrfs_fs_info *fs_info;
5328 u64 cur_gen;
5329
39b5637f 5330 len = end - start + 1;
890871be 5331 write_lock(&map->lock);
39b5637f 5332 em = lookup_extent_mapping(map, start, len);
285190d9 5333 if (!em) {
890871be 5334 write_unlock(&map->lock);
70dec807
CM
5335 break;
5336 }
7f3c74fb
CM
5337 if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
5338 em->start != start) {
890871be 5339 write_unlock(&map->lock);
70dec807
CM
5340 free_extent_map(em);
5341 break;
5342 }
3d6448e6
FM
5343 if (test_range_bit(tree, em->start,
5344 extent_map_end(em) - 1,
5345 EXTENT_LOCKED, 0, NULL))
5346 goto next;
5347 /*
5348 * If it's not in the list of modified extents, used
5349 * by a fast fsync, we can remove it. If it's being
5350 * logged we can safely remove it since fsync took an
5351 * extra reference on the em.
5352 */
5353 if (list_empty(&em->list) ||
fbc2bd7e
FM
5354 test_bit(EXTENT_FLAG_LOGGING, &em->flags))
5355 goto remove_em;
5356 /*
5357 * If it's in the list of modified extents, remove it
 5358 * only if its generation is older than the current one,
5359 * in which case we don't need it for a fast fsync.
5360 * Otherwise don't remove it, we could be racing with an
5361 * ongoing fast fsync that could miss the new extent.
5362 */
5363 fs_info = btrfs_inode->root->fs_info;
5364 spin_lock(&fs_info->trans_lock);
5365 cur_gen = fs_info->generation;
5366 spin_unlock(&fs_info->trans_lock);
5367 if (em->generation >= cur_gen)
5368 goto next;
5369remove_em:
5e548b32
FM
5370 /*
5371 * We only remove extent maps that are not in the list of
5372 * modified extents or that are in the list but with a
 5373 * generation lower than the current generation, so there
5374 * is no need to set the full fsync flag on the inode (it
5375 * hurts the fsync performance for workloads with a data
5376 * size that exceeds or is close to the system's memory).
5377 */
fbc2bd7e
FM
5378 remove_extent_mapping(map, em);
5379 /* once for the rb tree */
5380 free_extent_map(em);
3d6448e6 5381next:
70dec807 5382 start = extent_map_end(em);
890871be 5383 write_unlock(&map->lock);
70dec807
CM
5384
5385 /* once for us */
d1310b2e 5386 free_extent_map(em);
9f47eb54
PM
5387
5388 cond_resched(); /* Allow large-extent preemption. */
d1310b2e 5389 }
d1310b2e 5390 }
29c68b2d 5391 return try_release_extent_state(tree, page, mask);
d1310b2e 5392}
d1310b2e 5393
ec29ed5b
CM
5394/*
5395 * helper function for fiemap, which doesn't want to see any holes.
5396 * This maps until we find something past 'last'
5397 */
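/*
 * Returns the first non-hole extent map at or after @offset, NULL once only
 * holes remain before @last (or if @offset >= @last), or an ERR_PTR() if the
 * extent lookup itself fails.
 */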
f1bbde8d 5398static struct extent_map *get_extent_skip_holes(struct btrfs_inode *inode,
e3350e16 5399 u64 offset, u64 last)
ec29ed5b 5400{
f1bbde8d 5401 u64 sectorsize = btrfs_inode_sectorsize(inode);
ec29ed5b
CM
5402 struct extent_map *em;
5403 u64 len;
5404
5405 if (offset >= last)
5406 return NULL;
5407
67871254 5408 while (1) {
ec29ed5b
CM
5409 len = last - offset;
5410 if (len == 0)
5411 break;
fda2832f 5412 len = ALIGN(len, sectorsize);
f1bbde8d 5413 em = btrfs_get_extent_fiemap(inode, offset, len);
6b5b7a41 5414 if (IS_ERR(em))
ec29ed5b
CM
5415 return em;
5416
5417 /* if this isn't a hole return it */
4a2d25cd 5418 if (em->block_start != EXTENT_MAP_HOLE)
ec29ed5b 5419 return em;
ec29ed5b
CM
5420
5421 /* this is a hole, advance to the next extent */
5422 offset = extent_map_end(em);
5423 free_extent_map(em);
5424 if (offset >= last)
5425 break;
5426 }
5427 return NULL;
5428}
5429
4751832d
QW
5430/*
 5431 * Cache for the previous fiemap extent
 5432 *
 5433 * Will be used for merging fiemap extents
5434 */
5435struct fiemap_cache {
5436 u64 offset;
5437 u64 phys;
5438 u64 len;
5439 u32 flags;
5440 bool cached;
5441};
5442
5443/*
5444 * Helper to submit fiemap extent.
5445 *
 5446 * Will try to merge the current fiemap extent specified by @offset, @phys,
 5447 * @len and @flags with the cached one.
 5448 * Only when we fail to merge will the cached one be submitted as a
 5449 * fiemap extent.
5450 *
5451 * Return value is the same as fiemap_fill_next_extent().
5452 */
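/*
 * For example, a cached entry (offset 0, phys X, len 4K) followed by
 * (offset 4K, phys X + 4K, len 4K) with identical flags is merged into a
 * single 8K entry, while a following extent whose physical bytes are not
 * contiguous with the cached one (e.g. a separately compressed extent) is
 * emitted on its own.
 */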
5453static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
5454 struct fiemap_cache *cache,
5455 u64 offset, u64 phys, u64 len, u32 flags)
5456{
5457 int ret = 0;
5458
5459 if (!cache->cached)
5460 goto assign;
5461
5462 /*
 5463 * Sanity check, extent_fiemap() should have ensured that the new
 52042d8e 5464 * fiemap extent won't overlap with the cached one.
4751832d
QW
5465 * Not recoverable.
5466 *
5467 * NOTE: Physical address can overlap, due to compression
5468 */
5469 if (cache->offset + cache->len > offset) {
5470 WARN_ON(1);
5471 return -EINVAL;
5472 }
5473
5474 /*
 5475 * Only merge fiemap extents if
 5476 * 1) Their logical addresses are contiguous
 5477 *
 5478 * 2) Their physical addresses are contiguous
 5479 * So truly compressed (physical size smaller than logical size)
 5480 * extents won't get merged with each other
 5481 *
 5482 * 3) They share the same flags except FIEMAP_EXTENT_LAST
 5483 * So a regular extent won't get merged with a prealloc extent
5484 */
5485 if (cache->offset + cache->len == offset &&
5486 cache->phys + cache->len == phys &&
5487 (cache->flags & ~FIEMAP_EXTENT_LAST) ==
5488 (flags & ~FIEMAP_EXTENT_LAST)) {
5489 cache->len += len;
5490 cache->flags |= flags;
5491 goto try_submit_last;
5492 }
5493
5494 /* Not mergeable, need to submit cached one */
5495 ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
5496 cache->len, cache->flags);
5497 cache->cached = false;
5498 if (ret)
5499 return ret;
5500assign:
5501 cache->cached = true;
5502 cache->offset = offset;
5503 cache->phys = phys;
5504 cache->len = len;
5505 cache->flags = flags;
5506try_submit_last:
5507 if (cache->flags & FIEMAP_EXTENT_LAST) {
5508 ret = fiemap_fill_next_extent(fieinfo, cache->offset,
5509 cache->phys, cache->len, cache->flags);
5510 cache->cached = false;
5511 }
5512 return ret;
5513}
5514
5515/*
848c23b7 5516 * Emit last fiemap cache
4751832d 5517 *
848c23b7
QW
5518 * The last fiemap cache may still be cached in the following case:
5519 * 0 4k 8k
5520 * |<- Fiemap range ->|
5521 * |<------------ First extent ----------->|
5522 *
5523 * In this case, the first extent range will be cached but not emitted.
5524 * So we must emit it before ending extent_fiemap().
4751832d 5525 */
5c5aff98 5526static int emit_last_fiemap_cache(struct fiemap_extent_info *fieinfo,
848c23b7 5527 struct fiemap_cache *cache)
4751832d
QW
5528{
5529 int ret;
5530
5531 if (!cache->cached)
5532 return 0;
5533
4751832d
QW
5534 ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
5535 cache->len, cache->flags);
5536 cache->cached = false;
5537 if (ret > 0)
5538 ret = 0;
5539 return ret;
5540}
5541
facee0a0 5542int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
bab16e21 5543 u64 start, u64 len)
1506fcc8 5544{
975f84fe 5545 int ret = 0;
15c7745c 5546 u64 off;
1506fcc8
YS
5547 u64 max = start + len;
5548 u32 flags = 0;
975f84fe
JB
5549 u32 found_type;
5550 u64 last;
ec29ed5b 5551 u64 last_for_get_extent = 0;
1506fcc8 5552 u64 disko = 0;
facee0a0 5553 u64 isize = i_size_read(&inode->vfs_inode);
975f84fe 5554 struct btrfs_key found_key;
1506fcc8 5555 struct extent_map *em = NULL;
2ac55d41 5556 struct extent_state *cached_state = NULL;
975f84fe 5557 struct btrfs_path *path;
facee0a0 5558 struct btrfs_root *root = inode->root;
4751832d 5559 struct fiemap_cache cache = { 0 };
5911c8fe
DS
5560 struct ulist *roots;
5561 struct ulist *tmp_ulist;
1506fcc8 5562 int end = 0;
ec29ed5b
CM
5563 u64 em_start = 0;
5564 u64 em_len = 0;
5565 u64 em_end = 0;
1506fcc8
YS
5566
5567 if (len == 0)
5568 return -EINVAL;
5569
975f84fe
JB
5570 path = btrfs_alloc_path();
5571 if (!path)
5572 return -ENOMEM;
975f84fe 5573
5911c8fe
DS
5574 roots = ulist_alloc(GFP_KERNEL);
5575 tmp_ulist = ulist_alloc(GFP_KERNEL);
5576 if (!roots || !tmp_ulist) {
5577 ret = -ENOMEM;
5578 goto out_free_ulist;
5579 }
5580
15c7745c
BB
5581 /*
5582 * We can't initialize that to 'start' as this could miss extents due
5583 * to extent item merging
5584 */
5585 off = 0;
facee0a0
NB
5586 start = round_down(start, btrfs_inode_sectorsize(inode));
5587 len = round_up(max, btrfs_inode_sectorsize(inode)) - start;
4d479cf0 5588
ec29ed5b
CM
5589 /*
5590 * lookup the last file extent. We're not using i_size here
5591 * because there might be preallocation past i_size
5592 */
facee0a0
NB
5593 ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode), -1,
5594 0);
975f84fe 5595 if (ret < 0) {
5911c8fe 5596 goto out_free_ulist;
2d324f59
LB
5597 } else {
5598 WARN_ON(!ret);
5599 if (ret == 1)
5600 ret = 0;
975f84fe 5601 }
2d324f59 5602
975f84fe 5603 path->slots[0]--;
975f84fe 5604 btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
962a298f 5605 found_type = found_key.type;
975f84fe 5606
ec29ed5b 5607 /* No extents, but there might be delalloc bits */
facee0a0 5608 if (found_key.objectid != btrfs_ino(inode) ||
975f84fe 5609 found_type != BTRFS_EXTENT_DATA_KEY) {
ec29ed5b
CM
5610 /* have to trust i_size as the end */
5611 last = (u64)-1;
5612 last_for_get_extent = isize;
5613 } else {
5614 /*
5615 * remember the start of the last extent. There are a
5616 * bunch of different factors that go into the length of the
5617 * extent, so its much less complex to remember where it started
5618 */
5619 last = found_key.offset;
5620 last_for_get_extent = last + 1;
975f84fe 5621 }
fe09e16c 5622 btrfs_release_path(path);
975f84fe 5623
ec29ed5b
CM
5624 /*
5625 * we might have some extents allocated but more delalloc past those
5626 * extents. so, we trust isize unless the start of the last extent is
5627 * beyond isize
5628 */
5629 if (last < isize) {
5630 last = (u64)-1;
5631 last_for_get_extent = isize;
5632 }
5633
facee0a0 5634 lock_extent_bits(&inode->io_tree, start, start + len - 1,
d0082371 5635 &cached_state);
ec29ed5b 5636
facee0a0 5637 em = get_extent_skip_holes(inode, start, last_for_get_extent);
1506fcc8
YS
5638 if (!em)
5639 goto out;
5640 if (IS_ERR(em)) {
5641 ret = PTR_ERR(em);
5642 goto out;
5643 }
975f84fe 5644
1506fcc8 5645 while (!end) {
b76bb701 5646 u64 offset_in_extent = 0;
ea8efc74
CM
5647
5648 /* break if the extent we found is outside the range */
5649 if (em->start >= max || extent_map_end(em) < off)
5650 break;
5651
5652 /*
5653 * get_extent may return an extent that starts before our
5654 * requested range. We have to make sure the ranges
5655 * we return to fiemap always move forward and don't
5656 * overlap, so adjust the offsets here
5657 */
5658 em_start = max(em->start, off);
1506fcc8 5659
ea8efc74
CM
5660 /*
5661 * record the offset from the start of the extent
b76bb701
JB
5662 * for adjusting the disk offset below. Only do this if the
 5663 * extent isn't compressed since our in-RAM offset may be past
5664 * what we have actually allocated on disk.
ea8efc74 5665 */
b76bb701
JB
5666 if (!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
5667 offset_in_extent = em_start - em->start;
ec29ed5b 5668 em_end = extent_map_end(em);
ea8efc74 5669 em_len = em_end - em_start;
1506fcc8 5670 flags = 0;
f0986318
FM
5671 if (em->block_start < EXTENT_MAP_LAST_BYTE)
5672 disko = em->block_start + offset_in_extent;
5673 else
5674 disko = 0;
1506fcc8 5675
ea8efc74
CM
5676 /*
5677 * bump off for our next call to get_extent
5678 */
5679 off = extent_map_end(em);
5680 if (off >= max)
5681 end = 1;
5682
93dbfad7 5683 if (em->block_start == EXTENT_MAP_LAST_BYTE) {
1506fcc8
YS
5684 end = 1;
5685 flags |= FIEMAP_EXTENT_LAST;
93dbfad7 5686 } else if (em->block_start == EXTENT_MAP_INLINE) {
1506fcc8
YS
5687 flags |= (FIEMAP_EXTENT_DATA_INLINE |
5688 FIEMAP_EXTENT_NOT_ALIGNED);
93dbfad7 5689 } else if (em->block_start == EXTENT_MAP_DELALLOC) {
1506fcc8
YS
5690 flags |= (FIEMAP_EXTENT_DELALLOC |
5691 FIEMAP_EXTENT_UNKNOWN);
dc046b10
JB
5692 } else if (fieinfo->fi_extents_max) {
5693 u64 bytenr = em->block_start -
5694 (em->start - em->orig_start);
fe09e16c 5695
fe09e16c
LB
5696 /*
5697 * As btrfs supports shared space, this information
5698 * can be exported to userspace tools via
dc046b10
JB
5699 * flag FIEMAP_EXTENT_SHARED. If fi_extents_max == 0
5700 * then we're just getting a count and we can skip the
5701 * lookup stuff.
fe09e16c 5702 */
facee0a0 5703 ret = btrfs_check_shared(root, btrfs_ino(inode),
5911c8fe 5704 bytenr, roots, tmp_ulist);
dc046b10 5705 if (ret < 0)
fe09e16c 5706 goto out_free;
dc046b10 5707 if (ret)
fe09e16c 5708 flags |= FIEMAP_EXTENT_SHARED;
dc046b10 5709 ret = 0;
1506fcc8
YS
5710 }
5711 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
5712 flags |= FIEMAP_EXTENT_ENCODED;
0d2b2372
JB
5713 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
5714 flags |= FIEMAP_EXTENT_UNWRITTEN;
1506fcc8 5715
1506fcc8
YS
5716 free_extent_map(em);
5717 em = NULL;
ec29ed5b
CM
5718 if ((em_start >= last) || em_len == (u64)-1 ||
5719 (last == (u64)-1 && isize <= em_end)) {
1506fcc8
YS
5720 flags |= FIEMAP_EXTENT_LAST;
5721 end = 1;
5722 }
5723
ec29ed5b 5724 /* now scan forward to see if this is really the last extent. */
facee0a0 5725 em = get_extent_skip_holes(inode, off, last_for_get_extent);
ec29ed5b
CM
5726 if (IS_ERR(em)) {
5727 ret = PTR_ERR(em);
5728 goto out;
5729 }
5730 if (!em) {
975f84fe
JB
5731 flags |= FIEMAP_EXTENT_LAST;
5732 end = 1;
5733 }
4751832d
QW
5734 ret = emit_fiemap_extent(fieinfo, &cache, em_start, disko,
5735 em_len, flags);
26e726af
CS
5736 if (ret) {
5737 if (ret == 1)
5738 ret = 0;
ec29ed5b 5739 goto out_free;
26e726af 5740 }
1506fcc8
YS
5741 }
5742out_free:
4751832d 5743 if (!ret)
5c5aff98 5744 ret = emit_last_fiemap_cache(fieinfo, &cache);
1506fcc8
YS
5745 free_extent_map(em);
5746out:
facee0a0 5747 unlock_extent_cached(&inode->io_tree, start, start + len - 1,
e43bbe5e 5748 &cached_state);
5911c8fe
DS
5749
5750out_free_ulist:
e02d48ea 5751 btrfs_free_path(path);
5911c8fe
DS
5752 ulist_free(roots);
5753 ulist_free(tmp_ulist);
1506fcc8
YS
5754 return ret;
5755}
5756
727011e0
CM
5757static void __free_extent_buffer(struct extent_buffer *eb)
5758{
727011e0
CM
5759 kmem_cache_free(extent_buffer_cache, eb);
5760}
5761
2b48966a 5762int extent_buffer_under_io(const struct extent_buffer *eb)
db7f3436
JB
5763{
5764 return (atomic_read(&eb->io_pages) ||
5765 test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
5766 test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
5767}
5768
8ff8466d 5769static bool page_range_has_eb(struct btrfs_fs_info *fs_info, struct page *page)
db7f3436 5770{
8ff8466d 5771 struct btrfs_subpage *subpage;
db7f3436 5772
8ff8466d 5773 lockdep_assert_held(&page->mapping->private_lock);
db7f3436 5774
8ff8466d
QW
5775 if (PagePrivate(page)) {
5776 subpage = (struct btrfs_subpage *)page->private;
5777 if (atomic_read(&subpage->eb_refs))
5778 return true;
3d078efa
QW
5779 /*
 5780 * Even if there are no eb refs here, we may still have an
 5781 * end_page_read() call relying on page::private.
5782 */
5783 if (atomic_read(&subpage->readers))
5784 return true;
8ff8466d
QW
5785 }
5786 return false;
5787}
db7f3436 5788
8ff8466d
QW
5789static void detach_extent_buffer_page(struct extent_buffer *eb, struct page *page)
5790{
5791 struct btrfs_fs_info *fs_info = eb->fs_info;
5792 const bool mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
5793
5794 /*
5795 * For mapped eb, we're going to change the page private, which should
5796 * be done under the private_lock.
5797 */
5798 if (mapped)
5799 spin_lock(&page->mapping->private_lock);
5800
5801 if (!PagePrivate(page)) {
5d2361db 5802 if (mapped)
8ff8466d
QW
5803 spin_unlock(&page->mapping->private_lock);
5804 return;
5805 }
5806
5807 if (fs_info->sectorsize == PAGE_SIZE) {
5d2361db
FL
5808 /*
5809 * We do this since we'll remove the pages after we've
5810 * removed the eb from the radix tree, so we could race
5811 * and have this page now attached to the new eb. So
5812 * only clear page_private if it's still connected to
5813 * this eb.
5814 */
5815 if (PagePrivate(page) &&
5816 page->private == (unsigned long)eb) {
5817 BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
5818 BUG_ON(PageDirty(page));
5819 BUG_ON(PageWriteback(page));
db7f3436 5820 /*
5d2361db
FL
 5821 * We need to make sure we haven't been attached
5822 * to a new eb.
db7f3436 5823 */
d1b89bc0 5824 detach_page_private(page);
db7f3436 5825 }
5d2361db
FL
5826 if (mapped)
5827 spin_unlock(&page->mapping->private_lock);
8ff8466d
QW
5828 return;
5829 }
5830
5831 /*
 5832 * For subpage, we can have a dummy eb with page private. In this case,
 5833 * we can directly detach the private as such a page is only attached to
 5834 * one dummy eb, with no sharing.
5835 */
5836 if (!mapped) {
5837 btrfs_detach_subpage(fs_info, page);
5838 return;
5839 }
5840
5841 btrfs_page_dec_eb_refs(fs_info, page);
5842
5843 /*
5844 * We can only detach the page private if there are no other ebs in the
3d078efa 5845 * page range and no unfinished IO.
8ff8466d
QW
5846 */
5847 if (!page_range_has_eb(fs_info, page))
5848 btrfs_detach_subpage(fs_info, page);
5849
5850 spin_unlock(&page->mapping->private_lock);
5851}
5852
5853/* Release all pages attached to the extent buffer */
5854static void btrfs_release_extent_buffer_pages(struct extent_buffer *eb)
5855{
5856 int i;
5857 int num_pages;
5858
5859 ASSERT(!extent_buffer_under_io(eb));
5860
5861 num_pages = num_extent_pages(eb);
5862 for (i = 0; i < num_pages; i++) {
5863 struct page *page = eb->pages[i];
5864
5865 if (!page)
5866 continue;
5867
5868 detach_extent_buffer_page(eb, page);
5d2361db 5869
01327610 5870 /* One for when we allocated the page */
09cbfeaf 5871 put_page(page);
d64766fd 5872 }
db7f3436
JB
5873}
5874
5875/*
5876 * Helper for releasing the extent buffer.
5877 */
5878static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
5879{
55ac0139 5880 btrfs_release_extent_buffer_pages(eb);
8c38938c 5881 btrfs_leak_debug_del(&eb->fs_info->eb_leak_lock, &eb->leak_list);
db7f3436
JB
5882 __free_extent_buffer(eb);
5883}
5884
f28491e0
JB
5885static struct extent_buffer *
5886__alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
23d79d81 5887 unsigned long len)
d1310b2e
CM
5888{
5889 struct extent_buffer *eb = NULL;
5890
d1b5c567 5891 eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL);
d1310b2e
CM
5892 eb->start = start;
5893 eb->len = len;
f28491e0 5894 eb->fs_info = fs_info;
815a51c7 5895 eb->bflags = 0;
196d59ab 5896 init_rwsem(&eb->lock);
b4ce94de 5897
3fd63727
JB
5898 btrfs_leak_debug_add(&fs_info->eb_leak_lock, &eb->leak_list,
5899 &fs_info->allocated_ebs);
d3575156 5900 INIT_LIST_HEAD(&eb->release_list);
6d49ba1b 5901
3083ee2e 5902 spin_lock_init(&eb->refs_lock);
d1310b2e 5903 atomic_set(&eb->refs, 1);
0b32f4bb 5904 atomic_set(&eb->io_pages, 0);
727011e0 5905
deb67895 5906 ASSERT(len <= BTRFS_MAX_METADATA_BLOCKSIZE);
d1310b2e
CM
5907
5908 return eb;
5909}
5910
2b48966a 5911struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src)
815a51c7 5912{
cc5e31a4 5913 int i;
815a51c7
JS
5914 struct page *p;
5915 struct extent_buffer *new;
cc5e31a4 5916 int num_pages = num_extent_pages(src);
815a51c7 5917
3f556f78 5918 new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
815a51c7
JS
5919 if (new == NULL)
5920 return NULL;
5921
62c053fb
QW
5922 /*
5923 * Set UNMAPPED before calling btrfs_release_extent_buffer(), as
 5924 * btrfs_release_extent_buffer() has different behavior for
 5925 * UNMAPPED subpage extent buffers.
5926 */
5927 set_bit(EXTENT_BUFFER_UNMAPPED, &new->bflags);
5928
815a51c7 5929 for (i = 0; i < num_pages; i++) {
760f991f
QW
5930 int ret;
5931
9ec72677 5932 p = alloc_page(GFP_NOFS);
db7f3436
JB
5933 if (!p) {
5934 btrfs_release_extent_buffer(new);
5935 return NULL;
5936 }
760f991f
QW
5937 ret = attach_extent_buffer_page(new, p, NULL);
5938 if (ret < 0) {
5939 put_page(p);
5940 btrfs_release_extent_buffer(new);
5941 return NULL;
5942 }
815a51c7 5943 WARN_ON(PageDirty(p));
815a51c7 5944 new->pages[i] = p;
fba1acf9 5945 copy_page(page_address(p), page_address(src->pages[i]));
815a51c7 5946 }
92d83e94 5947 set_extent_buffer_uptodate(new);
815a51c7
JS
5948
5949 return new;
5950}
5951
0f331229
OS
5952struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
5953 u64 start, unsigned long len)
815a51c7
JS
5954{
5955 struct extent_buffer *eb;
cc5e31a4
DS
5956 int num_pages;
5957 int i;
815a51c7 5958
3f556f78 5959 eb = __alloc_extent_buffer(fs_info, start, len);
815a51c7
JS
5960 if (!eb)
5961 return NULL;
5962
65ad0104 5963 num_pages = num_extent_pages(eb);
815a51c7 5964 for (i = 0; i < num_pages; i++) {
09bc1f0f
QW
5965 int ret;
5966
9ec72677 5967 eb->pages[i] = alloc_page(GFP_NOFS);
815a51c7
JS
5968 if (!eb->pages[i])
5969 goto err;
09bc1f0f
QW
5970 ret = attach_extent_buffer_page(eb, eb->pages[i], NULL);
5971 if (ret < 0)
5972 goto err;
815a51c7
JS
5973 }
5974 set_extent_buffer_uptodate(eb);
5975 btrfs_set_header_nritems(eb, 0);
b0132a3b 5976 set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
815a51c7
JS
5977
5978 return eb;
5979err:
09bc1f0f
QW
5980 for (; i > 0; i--) {
5981 detach_extent_buffer_page(eb, eb->pages[i - 1]);
84167d19 5982 __free_page(eb->pages[i - 1]);
09bc1f0f 5983 }
815a51c7
JS
5984 __free_extent_buffer(eb);
5985 return NULL;
5986}
5987
0f331229 5988struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
da17066c 5989 u64 start)
0f331229 5990{
da17066c 5991 return __alloc_dummy_extent_buffer(fs_info, start, fs_info->nodesize);
0f331229
OS
5992}
5993
0b32f4bb
JB
5994static void check_buffer_tree_ref(struct extent_buffer *eb)
5995{
242e18c7 5996 int refs;
6bf9cd2e
BB
5997 /*
5998 * The TREE_REF bit is first set when the extent_buffer is added
5999 * to the radix tree. It is also reset, if unset, when a new reference
6000 * is created by find_extent_buffer.
0b32f4bb 6001 *
6bf9cd2e
BB
6002 * It is only cleared in two cases: freeing the last non-tree
6003 * reference to the extent_buffer when its STALE bit is set or
6004 * calling releasepage when the tree reference is the only reference.
0b32f4bb 6005 *
6bf9cd2e
BB
6006 * In both cases, care is taken to ensure that the extent_buffer's
6007 * pages are not under io. However, releasepage can be concurrently
6008 * called with creating new references, which is prone to race
6009 * conditions between the calls to check_buffer_tree_ref in those
6010 * codepaths and clearing TREE_REF in try_release_extent_buffer.
0b32f4bb 6011 *
6bf9cd2e
BB
6012 * The actual lifetime of the extent_buffer in the radix tree is
6013 * adequately protected by the refcount, but the TREE_REF bit and
6014 * its corresponding reference are not. To protect against this
6015 * class of races, we call check_buffer_tree_ref from the codepaths
6016 * which trigger io after they set eb->io_pages. Note that once io is
6017 * initiated, TREE_REF can no longer be cleared, so that is the
6018 * moment at which any such race is best fixed.
0b32f4bb 6019 */
242e18c7
CM
6020 refs = atomic_read(&eb->refs);
6021 if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
6022 return;
6023
594831c4
JB
6024 spin_lock(&eb->refs_lock);
6025 if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
0b32f4bb 6026 atomic_inc(&eb->refs);
594831c4 6027 spin_unlock(&eb->refs_lock);
0b32f4bb
JB
6028}
6029
2457aec6
MG
6030static void mark_extent_buffer_accessed(struct extent_buffer *eb,
6031 struct page *accessed)
5df4235e 6032{
cc5e31a4 6033 int num_pages, i;
5df4235e 6034
0b32f4bb
JB
6035 check_buffer_tree_ref(eb);
6036
65ad0104 6037 num_pages = num_extent_pages(eb);
5df4235e 6038 for (i = 0; i < num_pages; i++) {
fb85fc9a
DS
6039 struct page *p = eb->pages[i];
6040
2457aec6
MG
6041 if (p != accessed)
6042 mark_page_accessed(p);
5df4235e
JB
6043 }
6044}
6045
f28491e0
JB
6046struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
6047 u64 start)
452c75c3
CS
6048{
6049 struct extent_buffer *eb;
6050
2f3186d8
QW
6051 eb = find_extent_buffer_nolock(fs_info, start);
6052 if (!eb)
6053 return NULL;
6054 /*
6055 * Lock our eb's refs_lock to avoid races with free_extent_buffer().
6056 * When we get our eb it might be flagged with EXTENT_BUFFER_STALE and
6057 * another task running free_extent_buffer() might have seen that flag
6058 * set, eb->refs == 2, that the buffer isn't under IO (dirty and
6059 * writeback flags not set) and it's still in the tree (flag
6060 * EXTENT_BUFFER_TREE_REF set), therefore being in the process of
6061 * decrementing the extent buffer's reference count twice. So here we
6062 * could race and increment the eb's reference count, clear its stale
6063 * flag, mark it as dirty and drop our reference before the other task
6064 * finishes executing free_extent_buffer, which would later result in
6065 * an attempt to free an extent buffer that is dirty.
6066 */
6067 if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) {
6068 spin_lock(&eb->refs_lock);
6069 spin_unlock(&eb->refs_lock);
452c75c3 6070 }
2f3186d8
QW
6071 mark_extent_buffer_accessed(eb, NULL);
6072 return eb;
452c75c3
CS
6073}
6074
faa2dbf0
JB
6075#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
6076struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
da17066c 6077 u64 start)
faa2dbf0
JB
6078{
6079 struct extent_buffer *eb, *exists = NULL;
6080 int ret;
6081
6082 eb = find_extent_buffer(fs_info, start);
6083 if (eb)
6084 return eb;
da17066c 6085 eb = alloc_dummy_extent_buffer(fs_info, start);
faa2dbf0 6086 if (!eb)
b6293c82 6087 return ERR_PTR(-ENOMEM);
faa2dbf0
JB
6088 eb->fs_info = fs_info;
6089again:
e1860a77 6090 ret = radix_tree_preload(GFP_NOFS);
b6293c82
DC
6091 if (ret) {
6092 exists = ERR_PTR(ret);
faa2dbf0 6093 goto free_eb;
b6293c82 6094 }
faa2dbf0
JB
6095 spin_lock(&fs_info->buffer_lock);
6096 ret = radix_tree_insert(&fs_info->buffer_radix,
478ef886 6097 start >> fs_info->sectorsize_bits, eb);
faa2dbf0
JB
6098 spin_unlock(&fs_info->buffer_lock);
6099 radix_tree_preload_end();
6100 if (ret == -EEXIST) {
6101 exists = find_extent_buffer(fs_info, start);
6102 if (exists)
6103 goto free_eb;
6104 else
6105 goto again;
6106 }
6107 check_buffer_tree_ref(eb);
6108 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
6109
faa2dbf0
JB
6110 return eb;
6111free_eb:
6112 btrfs_release_extent_buffer(eb);
6113 return exists;
6114}
6115#endif
6116
81982210
QW
6117static struct extent_buffer *grab_extent_buffer(
6118 struct btrfs_fs_info *fs_info, struct page *page)
c0f0a9e7
QW
6119{
6120 struct extent_buffer *exists;
6121
81982210
QW
6122 /*
6123 * For subpage case, we completely rely on radix tree to ensure we
6124 * don't try to insert two ebs for the same bytenr. So here we always
6125 * return NULL and just continue.
6126 */
6127 if (fs_info->sectorsize < PAGE_SIZE)
6128 return NULL;
6129
c0f0a9e7
QW
6130 /* Page not yet attached to an extent buffer */
6131 if (!PagePrivate(page))
6132 return NULL;
6133
6134 /*
6135 * We could have already allocated an eb for this page and attached one
6136 * so lets see if we can get a ref on the existing eb, and if we can we
6137 * know it's good and we can just return that one, else we know we can
6138 * just overwrite page->private.
6139 */
6140 exists = (struct extent_buffer *)page->private;
6141 if (atomic_inc_not_zero(&exists->refs))
6142 return exists;
6143
6144 WARN_ON(PageDirty(page));
6145 detach_page_private(page);
6146 return NULL;
6147}
6148
f28491e0 6149struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
3fbaf258 6150 u64 start, u64 owner_root, int level)
d1310b2e 6151{
da17066c 6152 unsigned long len = fs_info->nodesize;
cc5e31a4
DS
6153 int num_pages;
6154 int i;
09cbfeaf 6155 unsigned long index = start >> PAGE_SHIFT;
d1310b2e 6156 struct extent_buffer *eb;
6af118ce 6157 struct extent_buffer *exists = NULL;
d1310b2e 6158 struct page *p;
f28491e0 6159 struct address_space *mapping = fs_info->btree_inode->i_mapping;
d1310b2e 6160 int uptodate = 1;
19fe0a8b 6161 int ret;
d1310b2e 6162
da17066c 6163 if (!IS_ALIGNED(start, fs_info->sectorsize)) {
c871b0f2
LB
6164 btrfs_err(fs_info, "bad tree block start %llu", start);
6165 return ERR_PTR(-EINVAL);
6166 }
6167
e9306ad4
QW
6168#if BITS_PER_LONG == 32
6169 if (start >= MAX_LFS_FILESIZE) {
6170 btrfs_err_rl(fs_info,
6171 "extent buffer %llu is beyond 32bit page cache limit", start);
6172 btrfs_err_32bit_limit(fs_info);
6173 return ERR_PTR(-EOVERFLOW);
6174 }
6175 if (start >= BTRFS_32BIT_EARLY_WARN_THRESHOLD)
6176 btrfs_warn_32bit_limit(fs_info);
6177#endif
6178
1aaac38c
QW
6179 if (fs_info->sectorsize < PAGE_SIZE &&
6180 offset_in_page(start) + len > PAGE_SIZE) {
6181 btrfs_err(fs_info,
6182 "tree block crosses page boundary, start %llu nodesize %lu",
6183 start, len);
6184 return ERR_PTR(-EINVAL);
6185 }
6186
f28491e0 6187 eb = find_extent_buffer(fs_info, start);
452c75c3 6188 if (eb)
6af118ce 6189 return eb;
6af118ce 6190
23d79d81 6191 eb = __alloc_extent_buffer(fs_info, start, len);
2b114d1d 6192 if (!eb)
c871b0f2 6193 return ERR_PTR(-ENOMEM);
e114c545 6194 btrfs_set_buffer_lockdep_class(owner_root, eb, level);
d1310b2e 6195
65ad0104 6196 num_pages = num_extent_pages(eb);
727011e0 6197 for (i = 0; i < num_pages; i++, index++) {
760f991f
QW
6198 struct btrfs_subpage *prealloc = NULL;
6199
d1b5c567 6200 p = find_or_create_page(mapping, index, GFP_NOFS|__GFP_NOFAIL);
c871b0f2
LB
6201 if (!p) {
6202 exists = ERR_PTR(-ENOMEM);
6af118ce 6203 goto free_eb;
c871b0f2 6204 }
4f2de97a 6205
760f991f
QW
6206 /*
 6207 * Preallocate page->private for the subpage case, so that we won't
 6208 * allocate memory with private_lock held. The memory will be
 6209 * freed by attach_extent_buffer_page() or freed manually if
 6210 * we exit earlier.
 6211 *
 6212 * Although we have ensured one subpage eb can only have one
 6213 * page, that may change in the future for 16K page size
 6214 * support, so we still preallocate the memory in the loop.
6215 */
fdf250db 6216 if (fs_info->sectorsize < PAGE_SIZE) {
651fb419
QW
6217 prealloc = btrfs_alloc_subpage(fs_info, BTRFS_SUBPAGE_METADATA);
6218 if (IS_ERR(prealloc)) {
6219 ret = PTR_ERR(prealloc);
fdf250db
QW
6220 unlock_page(p);
6221 put_page(p);
6222 exists = ERR_PTR(ret);
6223 goto free_eb;
6224 }
760f991f
QW
6225 }
6226
4f2de97a 6227 spin_lock(&mapping->private_lock);
81982210 6228 exists = grab_extent_buffer(fs_info, p);
c0f0a9e7
QW
6229 if (exists) {
6230 spin_unlock(&mapping->private_lock);
6231 unlock_page(p);
6232 put_page(p);
6233 mark_extent_buffer_accessed(exists, p);
760f991f 6234 btrfs_free_subpage(prealloc);
c0f0a9e7 6235 goto free_eb;
d1310b2e 6236 }
760f991f
QW
6237 /* Should not fail, as we have preallocated the memory */
6238 ret = attach_extent_buffer_page(eb, p, prealloc);
6239 ASSERT(!ret);
8ff8466d
QW
6240 /*
 6241 * To inform that we have an extra eb under allocation, so that
 6242 * detach_extent_buffer_page() won't release the page private
 6243 * when the eb hasn't yet been inserted into the radix tree.
 6244 *
 6245 * The ref will be decreased when the eb releases the page, in
 6246 * detach_extent_buffer_page().
 6247 * Thus no special handling is needed in the error path.
6248 */
6249 btrfs_page_inc_eb_refs(fs_info, p);
4f2de97a 6250 spin_unlock(&mapping->private_lock);
760f991f 6251
1e5eb3d6 6252 WARN_ON(btrfs_page_test_dirty(fs_info, p, eb->start, eb->len));
727011e0 6253 eb->pages[i] = p;
d1310b2e
CM
6254 if (!PageUptodate(p))
6255 uptodate = 0;
eb14ab8e
CM
6256
6257 /*
b16d011e
NB
6258 * We can't unlock the pages just yet since the extent buffer
 6259 * hasn't been properly inserted in the radix tree; this
6260 * opens a race with btree_releasepage which can free a page
6261 * while we are still filling in all pages for the buffer and
6262 * we could crash.
eb14ab8e 6263 */
d1310b2e
CM
6264 }
6265 if (uptodate)
b4ce94de 6266 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
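	/*
	 * Insert the new eb into the buffer radix tree. If another thread
	 * raced us and inserted an eb for the same bytenr first, try to take
	 * a reference on that one and return it instead; if that eb is
	 * already being freed, retry the insertion.
	 */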
115391d2 6267again:
e1860a77 6268 ret = radix_tree_preload(GFP_NOFS);
c871b0f2
LB
6269 if (ret) {
6270 exists = ERR_PTR(ret);
19fe0a8b 6271 goto free_eb;
c871b0f2 6272 }
19fe0a8b 6273
f28491e0
JB
6274 spin_lock(&fs_info->buffer_lock);
6275 ret = radix_tree_insert(&fs_info->buffer_radix,
478ef886 6276 start >> fs_info->sectorsize_bits, eb);
f28491e0 6277 spin_unlock(&fs_info->buffer_lock);
452c75c3 6278 radix_tree_preload_end();
19fe0a8b 6279 if (ret == -EEXIST) {
f28491e0 6280 exists = find_extent_buffer(fs_info, start);
452c75c3
CS
6281 if (exists)
6282 goto free_eb;
6283 else
115391d2 6284 goto again;
6af118ce 6285 }
6af118ce 6286 /* add one reference for the tree */
0b32f4bb 6287 check_buffer_tree_ref(eb);
34b41ace 6288 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
eb14ab8e
CM
6289
6290 /*
b16d011e
NB
6291 * Now it's safe to unlock the pages because any calls to
6292 * btree_releasepage will correctly detect that a page belongs to a
6293 * live buffer and won't free them prematurely.
eb14ab8e 6294 */
28187ae5
NB
6295 for (i = 0; i < num_pages; i++)
6296 unlock_page(eb->pages[i]);
d1310b2e
CM
6297 return eb;
6298
6af118ce 6299free_eb:
5ca64f45 6300 WARN_ON(!atomic_dec_and_test(&eb->refs));
727011e0
CM
6301 for (i = 0; i < num_pages; i++) {
6302 if (eb->pages[i])
6303 unlock_page(eb->pages[i]);
6304 }
eb14ab8e 6305
897ca6e9 6306 btrfs_release_extent_buffer(eb);
6af118ce 6307 return exists;
d1310b2e 6308}
d1310b2e 6309
3083ee2e
JB
6310static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
6311{
6312 struct extent_buffer *eb =
6313 container_of(head, struct extent_buffer, rcu_head);
6314
6315 __free_extent_buffer(eb);
6316}
6317
f7a52a40 6318static int release_extent_buffer(struct extent_buffer *eb)
5ce48d0f 6319 __releases(&eb->refs_lock)
3083ee2e 6320{
07e21c4d
NB
6321 lockdep_assert_held(&eb->refs_lock);
6322
3083ee2e
JB
6323 WARN_ON(atomic_read(&eb->refs) == 0);
6324 if (atomic_dec_and_test(&eb->refs)) {
34b41ace 6325 if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
f28491e0 6326 struct btrfs_fs_info *fs_info = eb->fs_info;
3083ee2e 6327
815a51c7 6328 spin_unlock(&eb->refs_lock);
3083ee2e 6329
f28491e0
JB
6330 spin_lock(&fs_info->buffer_lock);
6331 radix_tree_delete(&fs_info->buffer_radix,
478ef886 6332 eb->start >> fs_info->sectorsize_bits);
f28491e0 6333 spin_unlock(&fs_info->buffer_lock);
34b41ace
JB
6334 } else {
6335 spin_unlock(&eb->refs_lock);
815a51c7 6336 }
3083ee2e 6337
8c38938c 6338 btrfs_leak_debug_del(&eb->fs_info->eb_leak_lock, &eb->leak_list);
3083ee2e 6339 /* Should be safe to release our pages at this point */
55ac0139 6340 btrfs_release_extent_buffer_pages(eb);
bcb7e449 6341#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
b0132a3b 6342 if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) {
bcb7e449
JB
6343 __free_extent_buffer(eb);
6344 return 1;
6345 }
6346#endif
3083ee2e 6347 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
e64860aa 6348 return 1;
3083ee2e
JB
6349 }
6350 spin_unlock(&eb->refs_lock);
e64860aa
JB
6351
6352 return 0;
3083ee2e
JB
6353}
6354
d1310b2e
CM
6355void free_extent_buffer(struct extent_buffer *eb)
6356{
242e18c7
CM
6357 int refs;
6358 int old;
d1310b2e
CM
6359 if (!eb)
6360 return;
6361
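	/*
	 * Fast path: while the reference count is high enough that this put
	 * cannot be the one that needs the STALE/TREE_REF handling below,
	 * drop a reference with a lockless cmpxchg and return.
	 */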
242e18c7
CM
6362 while (1) {
6363 refs = atomic_read(&eb->refs);
46cc775e
NB
6364 if ((!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && refs <= 3)
6365 || (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) &&
6366 refs == 1))
242e18c7
CM
6367 break;
6368 old = atomic_cmpxchg(&eb->refs, refs, refs - 1);
6369 if (old == refs)
6370 return;
6371 }
6372
3083ee2e
JB
6373 spin_lock(&eb->refs_lock);
6374 if (atomic_read(&eb->refs) == 2 &&
6375 test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
0b32f4bb 6376 !extent_buffer_under_io(eb) &&
3083ee2e
JB
6377 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
6378 atomic_dec(&eb->refs);
6379
6380 /*
6381 * I know this is terrible, but it's temporary until we stop tracking
6382 * the uptodate bits and such for the extent buffers.
6383 */
f7a52a40 6384 release_extent_buffer(eb);
3083ee2e
JB
6385}
6386
6387void free_extent_buffer_stale(struct extent_buffer *eb)
6388{
6389 if (!eb)
d1310b2e
CM
6390 return;
6391
3083ee2e
JB
6392 spin_lock(&eb->refs_lock);
6393 set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
6394
0b32f4bb 6395 if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
3083ee2e
JB
6396 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
6397 atomic_dec(&eb->refs);
f7a52a40 6398 release_extent_buffer(eb);
d1310b2e 6399}
d1310b2e 6400
0d27797e
QW
6401static void btree_clear_page_dirty(struct page *page)
6402{
6403 ASSERT(PageDirty(page));
6404 ASSERT(PageLocked(page));
6405 clear_page_dirty_for_io(page);
6406 xa_lock_irq(&page->mapping->i_pages);
6407 if (!PageDirty(page))
6408 __xa_clear_mark(&page->mapping->i_pages,
6409 page_index(page), PAGECACHE_TAG_DIRTY);
6410 xa_unlock_irq(&page->mapping->i_pages);
6411}
6412
6413static void clear_subpage_extent_buffer_dirty(const struct extent_buffer *eb)
6414{
6415 struct btrfs_fs_info *fs_info = eb->fs_info;
6416 struct page *page = eb->pages[0];
6417 bool last;
6418
6419 /* btree_clear_page_dirty() needs page locked */
6420 lock_page(page);
6421 last = btrfs_subpage_clear_and_test_dirty(fs_info, page, eb->start,
6422 eb->len);
6423 if (last)
6424 btree_clear_page_dirty(page);
6425 unlock_page(page);
6426 WARN_ON(atomic_read(&eb->refs) == 0);
6427}
6428
2b48966a 6429void clear_extent_buffer_dirty(const struct extent_buffer *eb)
d1310b2e 6430{
cc5e31a4
DS
6431 int i;
6432 int num_pages;
d1310b2e
CM
6433 struct page *page;
6434
0d27797e
QW
6435 if (eb->fs_info->sectorsize < PAGE_SIZE)
6436 return clear_subpage_extent_buffer_dirty(eb);
6437
65ad0104 6438 num_pages = num_extent_pages(eb);
d1310b2e
CM
6439
6440 for (i = 0; i < num_pages; i++) {
fb85fc9a 6441 page = eb->pages[i];
b9473439 6442 if (!PageDirty(page))
d2c3f4f6 6443 continue;
a61e6f29 6444 lock_page(page);
0d27797e 6445 btree_clear_page_dirty(page);
bf0da8c1 6446 ClearPageError(page);
a61e6f29 6447 unlock_page(page);
d1310b2e 6448 }
0b32f4bb 6449 WARN_ON(atomic_read(&eb->refs) == 0);
d1310b2e 6450}
d1310b2e 6451
abb57ef3 6452bool set_extent_buffer_dirty(struct extent_buffer *eb)
d1310b2e 6453{
cc5e31a4
DS
6454 int i;
6455 int num_pages;
abb57ef3 6456 bool was_dirty;
d1310b2e 6457
0b32f4bb
JB
6458 check_buffer_tree_ref(eb);
6459
b9473439 6460 was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
0b32f4bb 6461
65ad0104 6462 num_pages = num_extent_pages(eb);
3083ee2e 6463 WARN_ON(atomic_read(&eb->refs) == 0);
0b32f4bb
JB
6464 WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
6465
0d27797e
QW
6466 if (!was_dirty) {
6467 bool subpage = eb->fs_info->sectorsize < PAGE_SIZE;
51995c39 6468
0d27797e
QW
6469 /*
6470 * For subpage case, we can have other extent buffers in the
6471 * same page, and in clear_subpage_extent_buffer_dirty() we
6472 * have to clear page dirty without subpage lock held.
 6473 * This can cause a race where the page dirty bit gets cleared
 6474 * right after we just set it.
 6475 *
 6476 * Thankfully, clear_subpage_extent_buffer_dirty() has locked
 6477 * its page for other reasons, so we can use the page lock to
 6478 * prevent the above race.
6479 */
6480 if (subpage)
6481 lock_page(eb->pages[0]);
6482 for (i = 0; i < num_pages; i++)
6483 btrfs_page_set_dirty(eb->fs_info, eb->pages[i],
6484 eb->start, eb->len);
6485 if (subpage)
6486 unlock_page(eb->pages[0]);
6487 }
51995c39
LB
6488#ifdef CONFIG_BTRFS_DEBUG
6489 for (i = 0; i < num_pages; i++)
6490 ASSERT(PageDirty(eb->pages[i]));
6491#endif
6492
b9473439 6493 return was_dirty;
d1310b2e 6494}
d1310b2e 6495
69ba3927 6496void clear_extent_buffer_uptodate(struct extent_buffer *eb)
1259ab75 6497{
251f2acc 6498 struct btrfs_fs_info *fs_info = eb->fs_info;
1259ab75 6499 struct page *page;
cc5e31a4 6500 int num_pages;
251f2acc 6501 int i;
1259ab75 6502
b4ce94de 6503 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
65ad0104 6504 num_pages = num_extent_pages(eb);
1259ab75 6505 for (i = 0; i < num_pages; i++) {
fb85fc9a 6506 page = eb->pages[i];
33958dc6 6507 if (page)
251f2acc
QW
6508 btrfs_page_clear_uptodate(fs_info, page,
6509 eb->start, eb->len);
1259ab75 6510 }
1259ab75
CM
6511}
6512
09c25a8c 6513void set_extent_buffer_uptodate(struct extent_buffer *eb)
d1310b2e 6514{
251f2acc 6515 struct btrfs_fs_info *fs_info = eb->fs_info;
d1310b2e 6516 struct page *page;
cc5e31a4 6517 int num_pages;
251f2acc 6518 int i;
d1310b2e 6519
0b32f4bb 6520 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
65ad0104 6521 num_pages = num_extent_pages(eb);
d1310b2e 6522 for (i = 0; i < num_pages; i++) {
fb85fc9a 6523 page = eb->pages[i];
251f2acc 6524 btrfs_page_set_uptodate(fs_info, page, eb->start, eb->len);
d1310b2e 6525 }
d1310b2e 6526}
d1310b2e 6527
4012daf7
QW
6528static int read_extent_buffer_subpage(struct extent_buffer *eb, int wait,
6529 int mirror_num)
6530{
6531 struct btrfs_fs_info *fs_info = eb->fs_info;
6532 struct extent_io_tree *io_tree;
6533 struct page *page = eb->pages[0];
390ed29b 6534 struct btrfs_bio_ctrl bio_ctrl = { 0 };
4012daf7
QW
6535 int ret = 0;
6536
6537 ASSERT(!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags));
6538 ASSERT(PagePrivate(page));
6539 io_tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
6540
6541 if (wait == WAIT_NONE) {
dc56219f
GR
6542 if (!try_lock_extent(io_tree, eb->start, eb->start + eb->len - 1))
6543 return -EAGAIN;
4012daf7
QW
6544 } else {
6545 ret = lock_extent(io_tree, eb->start, eb->start + eb->len - 1);
6546 if (ret < 0)
6547 return ret;
6548 }
6549
6550 ret = 0;
6551 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags) ||
6552 PageUptodate(page) ||
6553 btrfs_subpage_test_uptodate(fs_info, page, eb->start, eb->len)) {
6554 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
6555 unlock_extent(io_tree, eb->start, eb->start + eb->len - 1);
6556 return ret;
6557 }
6558
6559 clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
6560 eb->read_mirror = 0;
6561 atomic_set(&eb->io_pages, 1);
6562 check_buffer_tree_ref(eb);
6563 btrfs_subpage_clear_error(fs_info, page, eb->start, eb->len);
6564
3d078efa 6565 btrfs_subpage_start_reader(fs_info, page, eb->start, eb->len);
390ed29b
QW
6566 ret = submit_extent_page(REQ_OP_READ | REQ_META, NULL, &bio_ctrl,
6567 page, eb->start, eb->len,
6568 eb->start - page_offset(page),
6569 end_bio_extent_readpage, mirror_num, 0,
4012daf7
QW
6570 true);
6571 if (ret) {
6572 /*
6573 * In the endio function, if we hit something wrong we will
 6574 * increase the io_pages, so here we need to decrease it for the
 6575 * error path.
6576 */
6577 atomic_dec(&eb->io_pages);
6578 }
390ed29b 6579 if (bio_ctrl.bio) {
4012daf7
QW
6580 int tmp;
6581
390ed29b
QW
6582 tmp = submit_one_bio(bio_ctrl.bio, mirror_num, 0);
6583 bio_ctrl.bio = NULL;
4012daf7
QW
6584 if (tmp < 0)
6585 return tmp;
6586 }
6587 if (ret || wait != WAIT_COMPLETE)
6588 return ret;
6589
6590 wait_extent_bit(io_tree, eb->start, eb->start + eb->len - 1, EXTENT_LOCKED);
6591 if (!test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
6592 ret = -EIO;
6593 return ret;
6594}
6595
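/*
 * Read the backing pages of @eb from disk if they are not already uptodate.
 *
 * With WAIT_NONE the pages are only trylocked and the reads are submitted
 * without waiting (the readahead case); with WAIT_COMPLETE we wait for every
 * page and return -EIO if any of them fails to become uptodate.
 */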
c2ccfbc6 6596int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num)
d1310b2e 6597{
cc5e31a4 6598 int i;
d1310b2e
CM
6599 struct page *page;
6600 int err;
6601 int ret = 0;
ce9adaa5
CM
6602 int locked_pages = 0;
6603 int all_uptodate = 1;
cc5e31a4 6604 int num_pages;
727011e0 6605 unsigned long num_reads = 0;
390ed29b 6606 struct btrfs_bio_ctrl bio_ctrl = { 0 };
a86c12c7 6607
b4ce94de 6608 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
d1310b2e
CM
6609 return 0;
6610
651740a5
JB
6611 /*
6612 * We could have had EXTENT_BUFFER_UPTODATE cleared by the write
6613 * operation, which could potentially still be in flight. In this case
6614 * we simply want to return an error.
6615 */
6616 if (unlikely(test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)))
6617 return -EIO;
6618
4012daf7
QW
6619 if (eb->fs_info->sectorsize < PAGE_SIZE)
6620 return read_extent_buffer_subpage(eb, wait, mirror_num);
6621
65ad0104 6622 num_pages = num_extent_pages(eb);
8436ea91 6623 for (i = 0; i < num_pages; i++) {
fb85fc9a 6624 page = eb->pages[i];
bb82ab88 6625 if (wait == WAIT_NONE) {
2c4d8cb7
QW
6626 /*
6627 * WAIT_NONE is only utilized by readahead. If we can't
6628 * acquire the lock atomically it means either the eb
 6629 * is being read out or is under modification.
 6630 * Either way the eb will be or has been cached, so
 6631 * readahead can exit safely.
6632 */
2db04966 6633 if (!trylock_page(page))
ce9adaa5 6634 goto unlock_exit;
d1310b2e
CM
6635 } else {
6636 lock_page(page);
6637 }
ce9adaa5 6638 locked_pages++;
2571e739
LB
6639 }
6640 /*
 6641 * We need to lock all pages first to make sure that
6642 * the uptodate bit of our pages won't be affected by
6643 * clear_extent_buffer_uptodate().
6644 */
8436ea91 6645 for (i = 0; i < num_pages; i++) {
2571e739 6646 page = eb->pages[i];
727011e0
CM
6647 if (!PageUptodate(page)) {
6648 num_reads++;
ce9adaa5 6649 all_uptodate = 0;
727011e0 6650 }
ce9adaa5 6651 }
2571e739 6652
ce9adaa5 6653 if (all_uptodate) {
8436ea91 6654 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
ce9adaa5
CM
6655 goto unlock_exit;
6656 }
6657
656f30db 6658 clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
5cf1ab56 6659 eb->read_mirror = 0;
0b32f4bb 6660 atomic_set(&eb->io_pages, num_reads);
6bf9cd2e
BB
6661 /*
6662 * It is possible for releasepage to clear the TREE_REF bit before we
6663 * set io_pages. See check_buffer_tree_ref for a more detailed comment.
6664 */
6665 check_buffer_tree_ref(eb);
8436ea91 6666 for (i = 0; i < num_pages; i++) {
fb85fc9a 6667 page = eb->pages[i];
baf863b9 6668
ce9adaa5 6669 if (!PageUptodate(page)) {
baf863b9
LB
6670 if (ret) {
6671 atomic_dec(&eb->io_pages);
6672 unlock_page(page);
6673 continue;
6674 }
6675
f188591e 6676 ClearPageError(page);
0420177c 6677 err = submit_extent_page(REQ_OP_READ | REQ_META, NULL,
390ed29b
QW
6678 &bio_ctrl, page, page_offset(page),
6679 PAGE_SIZE, 0, end_bio_extent_readpage,
6680 mirror_num, 0, false);
baf863b9 6681 if (err) {
baf863b9 6682 /*
0420177c
NB
 6684				 * We failed to submit the bio, so it's the
 6685				 * caller's responsibility to perform cleanup,
 6686				 * i.e. unlock the page and set the error bit.
baf863b9 6686 */
0420177c
NB
6687 ret = err;
6688 SetPageError(page);
6689 unlock_page(page);
baf863b9
LB
6690 atomic_dec(&eb->io_pages);
6691 }
d1310b2e
CM
6692 } else {
6693 unlock_page(page);
6694 }
6695 }
6696
390ed29b
QW
6697 if (bio_ctrl.bio) {
6698 err = submit_one_bio(bio_ctrl.bio, mirror_num, bio_ctrl.bio_flags);
6699 bio_ctrl.bio = NULL;
79787eaa
JM
6700 if (err)
6701 return err;
355808c2 6702 }
a86c12c7 6703
bb82ab88 6704 if (ret || wait != WAIT_COMPLETE)
d1310b2e 6705 return ret;
d397712b 6706
8436ea91 6707 for (i = 0; i < num_pages; i++) {
fb85fc9a 6708 page = eb->pages[i];
d1310b2e 6709 wait_on_page_locked(page);
d397712b 6710 if (!PageUptodate(page))
d1310b2e 6711 ret = -EIO;
d1310b2e 6712 }
d397712b 6713
d1310b2e 6714 return ret;
ce9adaa5
CM
6715
6716unlock_exit:
d397712b 6717 while (locked_pages > 0) {
ce9adaa5 6718 locked_pages--;
8436ea91
JB
6719 page = eb->pages[locked_pages];
6720 unlock_page(page);
ce9adaa5
CM
6721 }
6722 return ret;
d1310b2e 6723}
d1310b2e 6724
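/*
 * A minimal usage sketch for read_extent_buffer_pages() above, assuming the
 * caller already holds a reference on @eb; the wrapper name is hypothetical
 * and only meant to illustrate the calling convention.  WAIT_COMPLETE blocks
 * until the read finishes, WAIT_NONE is the non-blocking readahead variant.
 */
static int read_eb_sketch(struct extent_buffer *eb, int mirror_num)
{
	int ret;

	/* Submit (and wait for) reads of every page backing the eb. */
	ret = read_extent_buffer_pages(eb, WAIT_COMPLETE, mirror_num);

	/*
	 * A negative return means the read could not be submitted or did not
	 * leave every page uptodate; zero means the read completed.
	 */
	return ret;
}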
f98b6215
QW
6725static bool report_eb_range(const struct extent_buffer *eb, unsigned long start,
6726 unsigned long len)
6727{
6728 btrfs_warn(eb->fs_info,
6729 "access to eb bytenr %llu len %lu out of range start %lu len %lu",
6730 eb->start, eb->len, start, len);
6731 WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
6732
6733 return true;
6734}
6735
6736/*
6737 * Check if the [start, start + len) range is valid before reading/writing
6738 * the eb.
 6739 * NOTE: @start and @len are offsets inside the eb, not logical addresses.
 6740 *
 6741 * The caller should not touch the dst/src memory if this function returns an error.
6742 */
6743static inline int check_eb_range(const struct extent_buffer *eb,
6744 unsigned long start, unsigned long len)
6745{
6746 unsigned long offset;
6747
6748 /* start, start + len should not go beyond eb->len nor overflow */
6749 if (unlikely(check_add_overflow(start, len, &offset) || offset > eb->len))
6750 return report_eb_range(eb, start, len);
6751
6752 return false;
6753}
6754
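/*
 * A self-contained userspace sketch of the overflow-safe range check done by
 * check_eb_range() above.  The kernel's check_add_overflow() wraps the same
 * __builtin_add_overflow() used here; the buffer length and sample ranges are
 * arbitrary example values.
 */
#include <stdbool.h>
#include <stdio.h>

static bool range_ok(unsigned long start, unsigned long len,
		     unsigned long buf_len)
{
	unsigned long end;

	/* Reject both arithmetic overflow and ranges past the buffer end. */
	if (__builtin_add_overflow(start, len, &end) || end > buf_len)
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", range_ok(16, 32, 4096));		/* 1: inside    */
	printf("%d\n", range_ok(4090, 32, 4096));	/* 0: past end  */
	printf("%d\n", range_ok(-1UL, 2, 4096));	/* 0: overflows */
	return 0;
}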
1cbb1f45
JM
6755void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
6756 unsigned long start, unsigned long len)
d1310b2e
CM
6757{
6758 size_t cur;
6759 size_t offset;
6760 struct page *page;
6761 char *kaddr;
6762 char *dst = (char *)dstv;
884b07d0 6763 unsigned long i = get_eb_page_index(start);
d1310b2e 6764
f98b6215 6765 if (check_eb_range(eb, start, len))
f716abd5 6766 return;
d1310b2e 6767
884b07d0 6768 offset = get_eb_offset_in_page(eb, start);
d1310b2e 6769
d397712b 6770 while (len > 0) {
fb85fc9a 6771 page = eb->pages[i];
d1310b2e 6772
09cbfeaf 6773 cur = min(len, (PAGE_SIZE - offset));
a6591715 6774 kaddr = page_address(page);
d1310b2e 6775 memcpy(dst, kaddr + offset, cur);
d1310b2e
CM
6776
6777 dst += cur;
6778 len -= cur;
6779 offset = 0;
6780 i++;
6781 }
6782}
d1310b2e 6783
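/*
 * A self-contained userspace sketch of the page-walking copy loop used by
 * read_extent_buffer() above: a logically contiguous range is copied out of
 * an array of fixed-size chunks, and only the first chunk is read from a
 * non-zero intra-chunk offset.  CHUNK_SIZE and the sample data are made up
 * for the example.
 */
#include <stdio.h>
#include <string.h>

#define CHUNK_SIZE 8UL

static void copy_from_chunks(char chunks[][CHUNK_SIZE], char *dst,
			     unsigned long start, unsigned long len)
{
	unsigned long i = start / CHUNK_SIZE;
	unsigned long offset = start % CHUNK_SIZE;

	while (len > 0) {
		/* Never copy past the end of the current chunk. */
		unsigned long cur = len < CHUNK_SIZE - offset ?
				    len : CHUNK_SIZE - offset;

		memcpy(dst, chunks[i] + offset, cur);
		dst += cur;
		len -= cur;
		offset = 0;	/* every later chunk starts at offset 0 */
		i++;
	}
}

int main(void)
{
	/* Each row is exactly CHUNK_SIZE bytes, no trailing NUL. */
	char chunks[2][CHUNK_SIZE] = { "abcdefgh", "ijklmnop" };
	char out[6] = { 0 };

	copy_from_chunks(chunks, out, 5, 5);	/* spans both chunks */
	printf("%.5s\n", out);			/* prints "fghij" */
	return 0;
}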
a48b73ec
JB
6784int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
6785 void __user *dstv,
6786 unsigned long start, unsigned long len)
550ac1d8
GH
6787{
6788 size_t cur;
6789 size_t offset;
6790 struct page *page;
6791 char *kaddr;
6792 char __user *dst = (char __user *)dstv;
884b07d0 6793 unsigned long i = get_eb_page_index(start);
550ac1d8
GH
6794 int ret = 0;
6795
6796 WARN_ON(start > eb->len);
6797 WARN_ON(start + len > eb->start + eb->len);
6798
884b07d0 6799 offset = get_eb_offset_in_page(eb, start);
550ac1d8
GH
6800
6801 while (len > 0) {
fb85fc9a 6802 page = eb->pages[i];
550ac1d8 6803
09cbfeaf 6804 cur = min(len, (PAGE_SIZE - offset));
550ac1d8 6805 kaddr = page_address(page);
a48b73ec 6806 if (copy_to_user_nofault(dst, kaddr + offset, cur)) {
550ac1d8
GH
6807 ret = -EFAULT;
6808 break;
6809 }
6810
6811 dst += cur;
6812 len -= cur;
6813 offset = 0;
6814 i++;
6815 }
6816
6817 return ret;
6818}
6819
1cbb1f45
JM
6820int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
6821 unsigned long start, unsigned long len)
d1310b2e
CM
6822{
6823 size_t cur;
6824 size_t offset;
6825 struct page *page;
6826 char *kaddr;
6827 char *ptr = (char *)ptrv;
884b07d0 6828 unsigned long i = get_eb_page_index(start);
d1310b2e
CM
6829 int ret = 0;
6830
f98b6215
QW
6831 if (check_eb_range(eb, start, len))
6832 return -EINVAL;
d1310b2e 6833
884b07d0 6834 offset = get_eb_offset_in_page(eb, start);
d1310b2e 6835
d397712b 6836 while (len > 0) {
fb85fc9a 6837 page = eb->pages[i];
d1310b2e 6838
09cbfeaf 6839 cur = min(len, (PAGE_SIZE - offset));
d1310b2e 6840
a6591715 6841 kaddr = page_address(page);
d1310b2e 6842 ret = memcmp(ptr, kaddr + offset, cur);
d1310b2e
CM
6843 if (ret)
6844 break;
6845
6846 ptr += cur;
6847 len -= cur;
6848 offset = 0;
6849 i++;
6850 }
6851 return ret;
6852}
d1310b2e 6853
b8f95771
QW
6854/*
6855 * Check that the extent buffer is uptodate.
6856 *
 6857 * For the regular sectorsize == PAGE_SIZE case, check if @page is uptodate.
 6858 * For the subpage case, check if the range covered by the eb has EXTENT_UPTODATE.
6859 */
6860static void assert_eb_page_uptodate(const struct extent_buffer *eb,
6861 struct page *page)
6862{
6863 struct btrfs_fs_info *fs_info = eb->fs_info;
6864
a50e1fcb
JB
6865 /*
 6866	 * If we are using the commit root we could potentially clear a page's
 6867	 * Uptodate bit while we're using the extent buffer that we've previously
 6868	 * looked up. We don't want to complain in this case, as the page was
 6869	 * valid before; we just didn't write it out. Instead we want to catch
6870 * the case where we didn't actually read the block properly, which
6871 * would have !PageUptodate && !PageError, as we clear PageError before
6872 * reading.
6873 */
b8f95771 6874 if (fs_info->sectorsize < PAGE_SIZE) {
a50e1fcb 6875 bool uptodate, error;
b8f95771
QW
6876
6877 uptodate = btrfs_subpage_test_uptodate(fs_info, page,
6878 eb->start, eb->len);
a50e1fcb
JB
6879 error = btrfs_subpage_test_error(fs_info, page, eb->start, eb->len);
6880 WARN_ON(!uptodate && !error);
b8f95771 6881 } else {
a50e1fcb 6882 WARN_ON(!PageUptodate(page) && !PageError(page));
b8f95771
QW
6883 }
6884}
6885
2b48966a 6886void write_extent_buffer_chunk_tree_uuid(const struct extent_buffer *eb,
f157bf76
DS
6887 const void *srcv)
6888{
6889 char *kaddr;
6890
b8f95771 6891 assert_eb_page_uptodate(eb, eb->pages[0]);
24880be5
DS
6892 kaddr = page_address(eb->pages[0]) +
6893 get_eb_offset_in_page(eb, offsetof(struct btrfs_header,
6894 chunk_tree_uuid));
6895 memcpy(kaddr, srcv, BTRFS_FSID_SIZE);
f157bf76
DS
6896}
6897
2b48966a 6898void write_extent_buffer_fsid(const struct extent_buffer *eb, const void *srcv)
f157bf76
DS
6899{
6900 char *kaddr;
6901
b8f95771 6902 assert_eb_page_uptodate(eb, eb->pages[0]);
24880be5
DS
6903 kaddr = page_address(eb->pages[0]) +
6904 get_eb_offset_in_page(eb, offsetof(struct btrfs_header, fsid));
6905 memcpy(kaddr, srcv, BTRFS_FSID_SIZE);
f157bf76
DS
6906}
6907
2b48966a 6908void write_extent_buffer(const struct extent_buffer *eb, const void *srcv,
d1310b2e
CM
6909 unsigned long start, unsigned long len)
6910{
6911 size_t cur;
6912 size_t offset;
6913 struct page *page;
6914 char *kaddr;
6915 char *src = (char *)srcv;
884b07d0 6916 unsigned long i = get_eb_page_index(start);
d1310b2e 6917
d3575156
NA
6918 WARN_ON(test_bit(EXTENT_BUFFER_NO_CHECK, &eb->bflags));
6919
f98b6215
QW
6920 if (check_eb_range(eb, start, len))
6921 return;
d1310b2e 6922
884b07d0 6923 offset = get_eb_offset_in_page(eb, start);
d1310b2e 6924
d397712b 6925 while (len > 0) {
fb85fc9a 6926 page = eb->pages[i];
b8f95771 6927 assert_eb_page_uptodate(eb, page);
d1310b2e 6928
09cbfeaf 6929 cur = min(len, PAGE_SIZE - offset);
a6591715 6930 kaddr = page_address(page);
d1310b2e 6931 memcpy(kaddr + offset, src, cur);
d1310b2e
CM
6932
6933 src += cur;
6934 len -= cur;
6935 offset = 0;
6936 i++;
6937 }
6938}
d1310b2e 6939
2b48966a 6940void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start,
b159fa28 6941 unsigned long len)
d1310b2e
CM
6942{
6943 size_t cur;
6944 size_t offset;
6945 struct page *page;
6946 char *kaddr;
884b07d0 6947 unsigned long i = get_eb_page_index(start);
d1310b2e 6948
f98b6215
QW
6949 if (check_eb_range(eb, start, len))
6950 return;
d1310b2e 6951
884b07d0 6952 offset = get_eb_offset_in_page(eb, start);
d1310b2e 6953
d397712b 6954 while (len > 0) {
fb85fc9a 6955 page = eb->pages[i];
b8f95771 6956 assert_eb_page_uptodate(eb, page);
d1310b2e 6957
09cbfeaf 6958 cur = min(len, PAGE_SIZE - offset);
a6591715 6959 kaddr = page_address(page);
b159fa28 6960 memset(kaddr + offset, 0, cur);
d1310b2e
CM
6961
6962 len -= cur;
6963 offset = 0;
6964 i++;
6965 }
6966}
d1310b2e 6967
2b48966a
DS
6968void copy_extent_buffer_full(const struct extent_buffer *dst,
6969 const struct extent_buffer *src)
58e8012c
DS
6970{
6971 int i;
cc5e31a4 6972 int num_pages;
58e8012c
DS
6973
6974 ASSERT(dst->len == src->len);
6975
884b07d0
QW
6976 if (dst->fs_info->sectorsize == PAGE_SIZE) {
6977 num_pages = num_extent_pages(dst);
6978 for (i = 0; i < num_pages; i++)
6979 copy_page(page_address(dst->pages[i]),
6980 page_address(src->pages[i]));
6981 } else {
6982 size_t src_offset = get_eb_offset_in_page(src, 0);
6983 size_t dst_offset = get_eb_offset_in_page(dst, 0);
6984
6985 ASSERT(src->fs_info->sectorsize < PAGE_SIZE);
6986 memcpy(page_address(dst->pages[0]) + dst_offset,
6987 page_address(src->pages[0]) + src_offset,
6988 src->len);
6989 }
58e8012c
DS
6990}
6991
2b48966a
DS
6992void copy_extent_buffer(const struct extent_buffer *dst,
6993 const struct extent_buffer *src,
d1310b2e
CM
6994 unsigned long dst_offset, unsigned long src_offset,
6995 unsigned long len)
6996{
6997 u64 dst_len = dst->len;
6998 size_t cur;
6999 size_t offset;
7000 struct page *page;
7001 char *kaddr;
884b07d0 7002 unsigned long i = get_eb_page_index(dst_offset);
d1310b2e 7003
f98b6215
QW
7004 if (check_eb_range(dst, dst_offset, len) ||
7005 check_eb_range(src, src_offset, len))
7006 return;
7007
d1310b2e
CM
7008 WARN_ON(src->len != dst_len);
7009
884b07d0 7010 offset = get_eb_offset_in_page(dst, dst_offset);
d1310b2e 7011
d397712b 7012 while (len > 0) {
fb85fc9a 7013 page = dst->pages[i];
b8f95771 7014 assert_eb_page_uptodate(dst, page);
d1310b2e 7015
09cbfeaf 7016 cur = min(len, (unsigned long)(PAGE_SIZE - offset));
d1310b2e 7017
a6591715 7018 kaddr = page_address(page);
d1310b2e 7019 read_extent_buffer(src, kaddr + offset, src_offset, cur);
d1310b2e
CM
7020
7021 src_offset += cur;
7022 len -= cur;
7023 offset = 0;
7024 i++;
7025 }
7026}
d1310b2e 7027
3e1e8bb7
OS
7028/*
7029 * eb_bitmap_offset() - calculate the page and offset of the byte containing the
7030 * given bit number
7031 * @eb: the extent buffer
7032 * @start: offset of the bitmap item in the extent buffer
7033 * @nr: bit number
7034 * @page_index: return index of the page in the extent buffer that contains the
7035 * given bit number
7036 * @page_offset: return offset into the page given by page_index
7037 *
7038 * This helper hides the ugliness of finding the byte in an extent buffer which
7039 * contains a given bit.
7040 */
2b48966a 7041static inline void eb_bitmap_offset(const struct extent_buffer *eb,
3e1e8bb7
OS
7042 unsigned long start, unsigned long nr,
7043 unsigned long *page_index,
7044 size_t *page_offset)
7045{
3e1e8bb7
OS
7046 size_t byte_offset = BIT_BYTE(nr);
7047 size_t offset;
7048
7049 /*
7050 * The byte we want is the offset of the extent buffer + the offset of
7051 * the bitmap item in the extent buffer + the offset of the byte in the
7052 * bitmap item.
7053 */
884b07d0 7054 offset = start + offset_in_page(eb->start) + byte_offset;
3e1e8bb7 7055
09cbfeaf 7056 *page_index = offset >> PAGE_SHIFT;
7073017a 7057 *page_offset = offset_in_page(offset);
3e1e8bb7
OS
7058}
7059
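/*
 * A self-contained userspace sketch of the arithmetic done by
 * eb_bitmap_offset() above: given the byte offset of the extent buffer within
 * its first page, the offset of the bitmap item inside the eb and a bit
 * number, work out which page and which byte within that page hold the bit.
 * PAGE_SZ and the sample values are made up for the example.
 */
#include <stdio.h>

#define PAGE_SZ		4096UL
#define BITS_PER_BYTE	8UL

static void bitmap_offset(unsigned long eb_in_page, unsigned long start,
			  unsigned long nr, unsigned long *page_index,
			  unsigned long *page_offset)
{
	/* byte inside the bitmap item that contains bit @nr */
	unsigned long byte_offset = nr / BITS_PER_BYTE;
	/* byte offset relative to the first page of the eb */
	unsigned long offset = eb_in_page + start + byte_offset;

	*page_index = offset / PAGE_SZ;
	*page_offset = offset % PAGE_SZ;
}

int main(void)
{
	unsigned long idx, off;

	/* eb starts 1024 bytes into page 0, bitmap item at eb offset 4000 */
	bitmap_offset(1024, 4000, 800, &idx, &off);
	/* prints "page 1, byte 1028, bit 0" */
	printf("page %lu, byte %lu, bit %lu\n", idx, off, 800 % BITS_PER_BYTE);
	return 0;
}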
7060/**
7061 * extent_buffer_test_bit - determine whether a bit in a bitmap item is set
7062 * @eb: the extent buffer
7063 * @start: offset of the bitmap item in the extent buffer
7064 * @nr: bit number to test
7065 */
2b48966a 7066int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start,
3e1e8bb7
OS
7067 unsigned long nr)
7068{
2fe1d551 7069 u8 *kaddr;
3e1e8bb7
OS
7070 struct page *page;
7071 unsigned long i;
7072 size_t offset;
7073
7074 eb_bitmap_offset(eb, start, nr, &i, &offset);
7075 page = eb->pages[i];
b8f95771 7076 assert_eb_page_uptodate(eb, page);
3e1e8bb7
OS
7077 kaddr = page_address(page);
7078 return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1)));
7079}
7080
7081/**
7082 * extent_buffer_bitmap_set - set an area of a bitmap
7083 * @eb: the extent buffer
7084 * @start: offset of the bitmap item in the extent buffer
7085 * @pos: bit number of the first bit
7086 * @len: number of bits to set
7087 */
2b48966a 7088void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start,
3e1e8bb7
OS
7089 unsigned long pos, unsigned long len)
7090{
2fe1d551 7091 u8 *kaddr;
3e1e8bb7
OS
7092 struct page *page;
7093 unsigned long i;
7094 size_t offset;
7095 const unsigned int size = pos + len;
7096 int bits_to_set = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
2fe1d551 7097 u8 mask_to_set = BITMAP_FIRST_BYTE_MASK(pos);
3e1e8bb7
OS
7098
7099 eb_bitmap_offset(eb, start, pos, &i, &offset);
7100 page = eb->pages[i];
b8f95771 7101 assert_eb_page_uptodate(eb, page);
3e1e8bb7
OS
7102 kaddr = page_address(page);
7103
7104 while (len >= bits_to_set) {
7105 kaddr[offset] |= mask_to_set;
7106 len -= bits_to_set;
7107 bits_to_set = BITS_PER_BYTE;
9c894696 7108 mask_to_set = ~0;
09cbfeaf 7109 if (++offset >= PAGE_SIZE && len > 0) {
3e1e8bb7
OS
7110 offset = 0;
7111 page = eb->pages[++i];
b8f95771 7112 assert_eb_page_uptodate(eb, page);
3e1e8bb7
OS
7113 kaddr = page_address(page);
7114 }
7115 }
7116 if (len) {
7117 mask_to_set &= BITMAP_LAST_BYTE_MASK(size);
7118 kaddr[offset] |= mask_to_set;
7119 }
7120}
7121
7122
7123/**
7124 * extent_buffer_bitmap_clear - clear an area of a bitmap
7125 * @eb: the extent buffer
7126 * @start: offset of the bitmap item in the extent buffer
7127 * @pos: bit number of the first bit
7128 * @len: number of bits to clear
7129 */
2b48966a
DS
7130void extent_buffer_bitmap_clear(const struct extent_buffer *eb,
7131 unsigned long start, unsigned long pos,
7132 unsigned long len)
3e1e8bb7 7133{
2fe1d551 7134 u8 *kaddr;
3e1e8bb7
OS
7135 struct page *page;
7136 unsigned long i;
7137 size_t offset;
7138 const unsigned int size = pos + len;
7139 int bits_to_clear = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
2fe1d551 7140 u8 mask_to_clear = BITMAP_FIRST_BYTE_MASK(pos);
3e1e8bb7
OS
7141
7142 eb_bitmap_offset(eb, start, pos, &i, &offset);
7143 page = eb->pages[i];
b8f95771 7144 assert_eb_page_uptodate(eb, page);
3e1e8bb7
OS
7145 kaddr = page_address(page);
7146
7147 while (len >= bits_to_clear) {
7148 kaddr[offset] &= ~mask_to_clear;
7149 len -= bits_to_clear;
7150 bits_to_clear = BITS_PER_BYTE;
9c894696 7151 mask_to_clear = ~0;
09cbfeaf 7152 if (++offset >= PAGE_SIZE && len > 0) {
3e1e8bb7
OS
7153 offset = 0;
7154 page = eb->pages[++i];
b8f95771 7155 assert_eb_page_uptodate(eb, page);
3e1e8bb7
OS
7156 kaddr = page_address(page);
7157 }
7158 }
7159 if (len) {
7160 mask_to_clear &= BITMAP_LAST_BYTE_MASK(size);
7161 kaddr[offset] &= ~mask_to_clear;
7162 }
7163}
7164
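/*
 * A self-contained userspace sketch of the first/last byte masking used by
 * extent_buffer_bitmap_set() and extent_buffer_bitmap_clear() above,
 * operating on a flat byte array instead of extent buffer pages.  The mask
 * macros mirror the BITMAP_FIRST_BYTE_MASK()/BITMAP_LAST_BYTE_MASK() usage
 * shown above for a little-endian bitmap; the sample range is arbitrary.
 */
#include <stdio.h>

#define BITS_PER_BYTE		8U
#define FIRST_BYTE_MASK(start)	(0xffU << ((start) & (BITS_PER_BYTE - 1)))
#define LAST_BYTE_MASK(nbits)	(0xffU >> (-(nbits) & (BITS_PER_BYTE - 1)))

static void bitmap_set_range(unsigned char *map, unsigned int pos,
			     unsigned int len)
{
	unsigned int size = pos + len;
	unsigned int bits_to_set = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
	unsigned char mask = FIRST_BYTE_MASK(pos);
	unsigned int byte = pos / BITS_PER_BYTE;

	while (len >= bits_to_set) {
		map[byte] |= mask;	/* whole remainder of this byte */
		len -= bits_to_set;
		bits_to_set = BITS_PER_BYTE;
		mask = 0xff;
		byte++;
	}
	if (len) {			/* partial final byte */
		mask &= LAST_BYTE_MASK(size);
		map[byte] |= mask;
	}
}

int main(void)
{
	unsigned char map[2] = { 0, 0 };

	bitmap_set_range(map, 5, 6);		/* set bits 5..10 */
	printf("%02x %02x\n", map[0], map[1]);	/* prints "e0 07" */
	return 0;
}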
3387206f
ST
7165static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
7166{
7167 unsigned long distance = (src > dst) ? src - dst : dst - src;
7168 return distance < len;
7169}
7170
d1310b2e
CM
7171static void copy_pages(struct page *dst_page, struct page *src_page,
7172 unsigned long dst_off, unsigned long src_off,
7173 unsigned long len)
7174{
a6591715 7175 char *dst_kaddr = page_address(dst_page);
d1310b2e 7176 char *src_kaddr;
727011e0 7177 int must_memmove = 0;
d1310b2e 7178
3387206f 7179 if (dst_page != src_page) {
a6591715 7180 src_kaddr = page_address(src_page);
3387206f 7181 } else {
d1310b2e 7182 src_kaddr = dst_kaddr;
727011e0
CM
7183 if (areas_overlap(src_off, dst_off, len))
7184 must_memmove = 1;
3387206f 7185 }
d1310b2e 7186
727011e0
CM
7187 if (must_memmove)
7188 memmove(dst_kaddr + dst_off, src_kaddr + src_off, len);
7189 else
7190 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
d1310b2e
CM
7191}
7192
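/*
 * A self-contained userspace sketch of the decision made by
 * areas_overlap()/copy_pages() above: when source and destination live in the
 * same buffer and the ranges overlap, memmove() must be used instead of
 * memcpy().  The buffer contents are arbitrary example data.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool ranges_overlap(unsigned long src, unsigned long dst,
			   unsigned long len)
{
	unsigned long distance = src > dst ? src - dst : dst - src;

	return distance < len;
}

static void copy_within(char *buf, unsigned long dst_off,
			unsigned long src_off, unsigned long len)
{
	if (ranges_overlap(src_off, dst_off, len))
		memmove(buf + dst_off, buf + src_off, len);
	else
		memcpy(buf + dst_off, buf + src_off, len);
}

int main(void)
{
	char buf[] = "0123456789";

	copy_within(buf, 2, 0, 5);	/* overlapping, so memmove is used */
	printf("%s\n", buf);		/* prints "0101234789" */
	return 0;
}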
2b48966a
DS
7193void memcpy_extent_buffer(const struct extent_buffer *dst,
7194 unsigned long dst_offset, unsigned long src_offset,
7195 unsigned long len)
d1310b2e
CM
7196{
7197 size_t cur;
7198 size_t dst_off_in_page;
7199 size_t src_off_in_page;
d1310b2e
CM
7200 unsigned long dst_i;
7201 unsigned long src_i;
7202
f98b6215
QW
7203 if (check_eb_range(dst, dst_offset, len) ||
7204 check_eb_range(dst, src_offset, len))
7205 return;
d1310b2e 7206
d397712b 7207 while (len > 0) {
884b07d0
QW
7208 dst_off_in_page = get_eb_offset_in_page(dst, dst_offset);
7209 src_off_in_page = get_eb_offset_in_page(dst, src_offset);
d1310b2e 7210
884b07d0
QW
7211 dst_i = get_eb_page_index(dst_offset);
7212 src_i = get_eb_page_index(src_offset);
d1310b2e 7213
09cbfeaf 7214 cur = min(len, (unsigned long)(PAGE_SIZE -
d1310b2e
CM
7215 src_off_in_page));
7216 cur = min_t(unsigned long, cur,
09cbfeaf 7217 (unsigned long)(PAGE_SIZE - dst_off_in_page));
d1310b2e 7218
fb85fc9a 7219 copy_pages(dst->pages[dst_i], dst->pages[src_i],
d1310b2e
CM
7220 dst_off_in_page, src_off_in_page, cur);
7221
7222 src_offset += cur;
7223 dst_offset += cur;
7224 len -= cur;
7225 }
7226}
d1310b2e 7227
2b48966a
DS
7228void memmove_extent_buffer(const struct extent_buffer *dst,
7229 unsigned long dst_offset, unsigned long src_offset,
7230 unsigned long len)
d1310b2e
CM
7231{
7232 size_t cur;
7233 size_t dst_off_in_page;
7234 size_t src_off_in_page;
7235 unsigned long dst_end = dst_offset + len - 1;
7236 unsigned long src_end = src_offset + len - 1;
d1310b2e
CM
7237 unsigned long dst_i;
7238 unsigned long src_i;
7239
f98b6215
QW
7240 if (check_eb_range(dst, dst_offset, len) ||
7241 check_eb_range(dst, src_offset, len))
7242 return;
727011e0 7243 if (dst_offset < src_offset) {
d1310b2e
CM
7244 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
7245 return;
7246 }
d397712b 7247 while (len > 0) {
884b07d0
QW
7248 dst_i = get_eb_page_index(dst_end);
7249 src_i = get_eb_page_index(src_end);
d1310b2e 7250
884b07d0
QW
7251 dst_off_in_page = get_eb_offset_in_page(dst, dst_end);
7252 src_off_in_page = get_eb_offset_in_page(dst, src_end);
d1310b2e
CM
7253
7254 cur = min_t(unsigned long, len, src_off_in_page + 1);
7255 cur = min(cur, dst_off_in_page + 1);
fb85fc9a 7256 copy_pages(dst->pages[dst_i], dst->pages[src_i],
d1310b2e
CM
7257 dst_off_in_page - cur + 1,
7258 src_off_in_page - cur + 1, cur);
7259
7260 dst_end -= cur;
7261 src_end -= cur;
7262 len -= cur;
7263 }
7264}
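/*
 * A self-contained userspace sketch of the backwards, chunk-by-chunk copy
 * done by memmove_extent_buffer() above when the destination is above the
 * source: working from the last byte down means no byte is overwritten before
 * it has been copied, even though each step only touches one chunk (page) at
 * a time.  CHUNK_SIZE and the sample data are made up.
 */
#include <stdio.h>
#include <string.h>

#define CHUNK_SIZE 4UL

static void move_up(char *buf, unsigned long dst_off, unsigned long src_off,
		    unsigned long len)
{
	unsigned long dst_end = dst_off + len - 1;
	unsigned long src_end = src_off + len - 1;

	while (len > 0) {
		/* Offsets of the end bytes within their chunks. */
		unsigned long dst_in_chunk = dst_end % CHUNK_SIZE;
		unsigned long src_in_chunk = src_end % CHUNK_SIZE;
		/* Copy at most back to the start of either chunk. */
		unsigned long cur = len < src_in_chunk + 1 ?
				    len : src_in_chunk + 1;

		if (cur > dst_in_chunk + 1)
			cur = dst_in_chunk + 1;

		memmove(buf + dst_end - cur + 1, buf + src_end - cur + 1, cur);
		dst_end -= cur;
		src_end -= cur;
		len -= cur;
	}
}

int main(void)
{
	char buf[] = "abcdefgh";

	move_up(buf, 3, 1, 4);	/* move bytes 1..4 ("bcde") to 3..6 */
	printf("%s\n", buf);	/* prints "abcbcdeh" */
	return 0;
}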
6af118ce 7265
72a69cd0 7266#define GANG_LOOKUP_SIZE 16
d1e86e3f
QW
7267static struct extent_buffer *get_next_extent_buffer(
7268 struct btrfs_fs_info *fs_info, struct page *page, u64 bytenr)
7269{
72a69cd0 7270 struct extent_buffer *gang[GANG_LOOKUP_SIZE];
d1e86e3f
QW
7271 struct extent_buffer *found = NULL;
7272 u64 page_start = page_offset(page);
72a69cd0 7273 u64 cur = page_start;
d1e86e3f
QW
7274
7275 ASSERT(in_range(bytenr, page_start, PAGE_SIZE));
d1e86e3f
QW
7276 lockdep_assert_held(&fs_info->buffer_lock);
7277
72a69cd0
QW
7278 while (cur < page_start + PAGE_SIZE) {
7279 int ret;
7280 int i;
7281
7282 ret = radix_tree_gang_lookup(&fs_info->buffer_radix,
7283 (void **)gang, cur >> fs_info->sectorsize_bits,
7284 min_t(unsigned int, GANG_LOOKUP_SIZE,
7285 PAGE_SIZE / fs_info->nodesize));
7286 if (ret == 0)
7287 goto out;
7288 for (i = 0; i < ret; i++) {
7289 /* Already beyond page end */
7290 if (gang[i]->start >= page_start + PAGE_SIZE)
7291 goto out;
7292 /* Found one */
7293 if (gang[i]->start >= bytenr) {
7294 found = gang[i];
7295 goto out;
7296 }
d1e86e3f 7297 }
72a69cd0 7298 cur = gang[ret - 1]->start + gang[ret - 1]->len;
d1e86e3f 7299 }
72a69cd0 7300out:
d1e86e3f
QW
7301 return found;
7302}
7303
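/*
 * A self-contained userspace sketch of the batched lookup pattern used by
 * get_next_extent_buffer() above: walk a sorted list of buffer start offsets
 * in fixed-size batches and return the first entry that is at or after
 * @bytenr but still inside the page.  The sorted array stands in for the
 * radix tree; BATCH, PAGE_SZ and the sample values are made up.
 */
#include <stdio.h>

#define BATCH	4
#define PAGE_SZ	4096UL

/* Returns the matching start offset, or 0 if none exists in the page. */
static unsigned long next_start(const unsigned long *starts, int nr,
				unsigned long page_start, unsigned long bytenr)
{
	int i = 0;

	while (i < nr) {
		int j, n = nr - i < BATCH ? nr - i : BATCH;

		for (j = 0; j < n; j++) {
			unsigned long cur = starts[i + j];

			if (cur >= page_start + PAGE_SZ)
				return 0;	/* already past page end */
			if (cur >= bytenr)
				return cur;	/* found one */
		}
		i += n;
	}
	return 0;
}

int main(void)
{
	/* Four 1 KiB "buffers" inside one 4 KiB page starting at 65536. */
	const unsigned long starts[] = { 65536, 66560, 67584, 68608 };

	printf("%lu\n", next_start(starts, 4, 65536, 67000));	/* 67584 */
	return 0;
}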
7304static int try_release_subpage_extent_buffer(struct page *page)
7305{
7306 struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
7307 u64 cur = page_offset(page);
7308 const u64 end = page_offset(page) + PAGE_SIZE;
7309 int ret;
7310
7311 while (cur < end) {
7312 struct extent_buffer *eb = NULL;
7313
7314 /*
 7315		 * Unlike try_release_extent_buffer(), which uses page->private
 7316		 * to grab the buffer, in the subpage case we rely on the radix
 7317		 * tree, thus we need to ensure radix tree consistency.
 7318		 *
 7319		 * We also want an atomic snapshot of the radix tree, thus we go
 7320		 * with a spinlock rather than RCU.
7321 */
7322 spin_lock(&fs_info->buffer_lock);
7323 eb = get_next_extent_buffer(fs_info, page, cur);
7324 if (!eb) {
 7325			/* No more ebs in the page range at or after cur */
7326 spin_unlock(&fs_info->buffer_lock);
7327 break;
7328 }
7329 cur = eb->start + eb->len;
7330
7331 /*
 7332		 * Do the same as try_release_extent_buffer() to ensure the eb
7333 * won't disappear out from under us.
7334 */
7335 spin_lock(&eb->refs_lock);
7336 if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
7337 spin_unlock(&eb->refs_lock);
7338 spin_unlock(&fs_info->buffer_lock);
7339 break;
7340 }
7341 spin_unlock(&fs_info->buffer_lock);
7342
7343 /*
 7344		 * If the tree ref isn't set then we know the ref on this eb is a
 7345		 * real ref, so just return; this eb will likely be freed soon
7346 * anyway.
7347 */
7348 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
7349 spin_unlock(&eb->refs_lock);
7350 break;
7351 }
7352
7353 /*
 7354		 * Here we don't care about the return value; we will always
 7355		 * check the page private at the end, and
7356 * release_extent_buffer() will release the refs_lock.
7357 */
7358 release_extent_buffer(eb);
7359 }
7360 /*
 7361	 * Finally check if we have cleared page private: if we have
 7362	 * released all ebs in the page, the page private should be cleared by now.
7363 */
7364 spin_lock(&page->mapping->private_lock);
7365 if (!PagePrivate(page))
7366 ret = 1;
7367 else
7368 ret = 0;
7369 spin_unlock(&page->mapping->private_lock);
7370 return ret;
7371
7372}
7373
f7a52a40 7374int try_release_extent_buffer(struct page *page)
19fe0a8b 7375{
6af118ce 7376 struct extent_buffer *eb;
6af118ce 7377
d1e86e3f
QW
7378 if (btrfs_sb(page->mapping->host->i_sb)->sectorsize < PAGE_SIZE)
7379 return try_release_subpage_extent_buffer(page);
7380
3083ee2e 7381 /*
d1e86e3f
QW
7382 * We need to make sure nobody is changing page->private, as we rely on
 7383	 * page->private as the pointer to the extent buffer.
3083ee2e
JB
7384 */
7385 spin_lock(&page->mapping->private_lock);
7386 if (!PagePrivate(page)) {
7387 spin_unlock(&page->mapping->private_lock);
4f2de97a 7388 return 1;
45f49bce 7389 }
6af118ce 7390
3083ee2e
JB
7391 eb = (struct extent_buffer *)page->private;
7392 BUG_ON(!eb);
19fe0a8b
MX
7393
7394 /*
3083ee2e
JB
 7395	 * This is a little awful but should be OK: we need to make sure that
7396 * the eb doesn't disappear out from under us while we're looking at
7397 * this page.
19fe0a8b 7398 */
3083ee2e 7399 spin_lock(&eb->refs_lock);
0b32f4bb 7400 if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
3083ee2e
JB
7401 spin_unlock(&eb->refs_lock);
7402 spin_unlock(&page->mapping->private_lock);
7403 return 0;
b9473439 7404 }
3083ee2e 7405 spin_unlock(&page->mapping->private_lock);
897ca6e9 7406
19fe0a8b 7407 /*
3083ee2e
JB
 7408	 * If the tree ref isn't set then we know the ref on this eb is a real ref,
 7409	 * so just return; this page will likely be freed soon anyway.
19fe0a8b 7410 */
3083ee2e
JB
7411 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
7412 spin_unlock(&eb->refs_lock);
7413 return 0;
b9473439 7414 }
19fe0a8b 7415
f7a52a40 7416 return release_extent_buffer(eb);
6af118ce 7417}
bfb484d9
JB
7418
7419/*
7420 * btrfs_readahead_tree_block - attempt to readahead a child block
7421 * @fs_info: the fs_info
7422 * @bytenr: bytenr to read
3fbaf258 7423 * @owner_root: objectid of the root that owns this eb
bfb484d9 7424 * @gen: generation for the uptodate check, can be 0
3fbaf258 7425 * @level: level for the eb
bfb484d9
JB
7426 *
7427 * Attempt to readahead a tree block at @bytenr. If @gen is 0 then we do a
7428 * normal uptodate check of the eb, without checking the generation. If we have
7429 * to read the block we will not block on anything.
7430 */
7431void btrfs_readahead_tree_block(struct btrfs_fs_info *fs_info,
3fbaf258 7432 u64 bytenr, u64 owner_root, u64 gen, int level)
bfb484d9
JB
7433{
7434 struct extent_buffer *eb;
7435 int ret;
7436
3fbaf258 7437 eb = btrfs_find_create_tree_block(fs_info, bytenr, owner_root, level);
bfb484d9
JB
7438 if (IS_ERR(eb))
7439 return;
7440
7441 if (btrfs_buffer_uptodate(eb, gen, 1)) {
7442 free_extent_buffer(eb);
7443 return;
7444 }
7445
7446 ret = read_extent_buffer_pages(eb, WAIT_NONE, 0);
7447 if (ret < 0)
7448 free_extent_buffer_stale(eb);
7449 else
7450 free_extent_buffer(eb);
7451}
7452
7453/*
7454 * btrfs_readahead_node_child - readahead a node's child block
7455 * @node: parent node we're reading from
7456 * @slot: slot in the parent node for the child we want to read
7457 *
 7458 * A helper for btrfs_readahead_tree_block(); we simply read the bytenr pointed at
 7459 * by the slot in the node provided.
7460 */
7461void btrfs_readahead_node_child(struct extent_buffer *node, int slot)
7462{
7463 btrfs_readahead_tree_block(node->fs_info,
7464 btrfs_node_blockptr(node, slot),
3fbaf258
JB
7465 btrfs_header_owner(node),
7466 btrfs_node_ptr_generation(node, slot),
7467 btrfs_header_level(node) - 1);
bfb484d9 7468}
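/*
 * A minimal sketch of driving btrfs_readahead_node_child() for every slot of
 * a node; the function name is hypothetical, and btrfs_header_nritems() is
 * assumed to be the accessor for the number of items in the node.
 */
static void readahead_all_children_sketch(struct extent_buffer *node)
{
	int nritems = btrfs_header_nritems(node);
	int slot;

	for (slot = 0; slot < nritems; slot++)
		btrfs_readahead_node_child(node, slot);
}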