Linux 6.10-rc2
fs/btrfs/extent_io.c
// SPDX-License-Identifier: GPL-2.0

#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/sched/mm.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/prefetch.h>
#include <linux/fsverity.h>
#include "extent_io.h"
#include "extent-io-tree.h"
#include "extent_map.h"
#include "ctree.h"
#include "btrfs_inode.h"
#include "bio.h"
#include "locking.h"
#include "backref.h"
#include "disk-io.h"
#include "subpage.h"
#include "zoned.h"
#include "block-group.h"
#include "compression.h"
#include "fs.h"
#include "accessors.h"
#include "file-item.h"
#include "file.h"
#include "dev-replace.h"
#include "super.h"
#include "transaction.h"
static struct kmem_cache *extent_buffer_cache;

#ifdef CONFIG_BTRFS_DEBUG
static inline void btrfs_leak_debug_add_eb(struct extent_buffer *eb)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	unsigned long flags;

	spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
	list_add(&eb->leak_list, &fs_info->allocated_ebs);
	spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
}

static inline void btrfs_leak_debug_del_eb(struct extent_buffer *eb)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	unsigned long flags;

	spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
	list_del(&eb->leak_list);
	spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
}

void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info)
{
	struct extent_buffer *eb;
	unsigned long flags;

	/*
	 * If we didn't get into open_ctree our allocated_ebs will not be
	 * initialized, so just skip this.
	 */
	if (!fs_info->allocated_ebs.next)
		return;

	WARN_ON(!list_empty(&fs_info->allocated_ebs));
	spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
	while (!list_empty(&fs_info->allocated_ebs)) {
		eb = list_first_entry(&fs_info->allocated_ebs,
				      struct extent_buffer, leak_list);
		pr_err(
	"BTRFS: buffer leak start %llu len %u refs %d bflags %lu owner %llu\n",
		       eb->start, eb->len, atomic_read(&eb->refs), eb->bflags,
		       btrfs_header_owner(eb));
		list_del(&eb->leak_list);
		WARN_ON_ONCE(1);
		kmem_cache_free(extent_buffer_cache, eb);
	}
	spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
}
#else
#define btrfs_leak_debug_add_eb(eb)			do {} while (0)
#define btrfs_leak_debug_del_eb(eb)			do {} while (0)
#endif
/*
 * Structure to record info about the bio being assembled, and other info like
 * how many bytes are there before stripe/ordered extent boundary.
 */
struct btrfs_bio_ctrl {
	struct btrfs_bio *bbio;
	enum btrfs_compression_type compress_type;
	u32 len_to_oe_boundary;
	blk_opf_t opf;
	btrfs_bio_end_io_t end_io_func;
	struct writeback_control *wbc;
};
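/*
 * Submit the bio currently assembled in @bio_ctrl, if any, and transfer
 * ownership of it to the end_io handler.  Compressed reads are routed
 * through the dedicated compressed-read submission path.
 */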
static void submit_one_bio(struct btrfs_bio_ctrl *bio_ctrl)
{
	struct btrfs_bio *bbio = bio_ctrl->bbio;

	if (!bbio)
		return;

	/* Caller should ensure the bio has at least some range added */
	ASSERT(bbio->bio.bi_iter.bi_size);

	if (btrfs_op(&bbio->bio) == BTRFS_MAP_READ &&
	    bio_ctrl->compress_type != BTRFS_COMPRESS_NONE)
		btrfs_submit_compressed_read(bbio);
	else
		btrfs_submit_bio(bbio, 0);

	/* The bbio is owned by the end_io handler now */
	bio_ctrl->bbio = NULL;
}
/*
 * Submit or fail the current bio in the bio_ctrl structure.
 */
static void submit_write_bio(struct btrfs_bio_ctrl *bio_ctrl, int ret)
{
	struct btrfs_bio *bbio = bio_ctrl->bbio;

	if (!bbio)
		return;

	if (ret) {
		ASSERT(ret < 0);
		btrfs_bio_end_io(bbio, errno_to_blk_status(ret));
		/* The bio is owned by the end_io handler now */
		bio_ctrl->bbio = NULL;
	} else {
		submit_one_bio(bio_ctrl);
	}
}
int __init extent_buffer_init_cachep(void)
{
	extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
						sizeof(struct extent_buffer), 0, 0,
						NULL);
	if (!extent_buffer_cache)
		return -ENOMEM;

	return 0;
}

void __cold extent_buffer_free_cachep(void)
{
	/*
	 * Make sure all delayed rcu free are flushed before we
	 * destroy caches.
	 */
	rcu_barrier();
	kmem_cache_destroy(extent_buffer_cache);
}
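/*
 * Clear the dirty-for-writeback flag on every page in the byte range
 * [start, end].  All pages are expected to be present in the page cache.
 */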
void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_SHIFT;
	unsigned long end_index = end >> PAGE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(inode->i_mapping, index);
		BUG_ON(!page); /* Pages should be in the extent_io_tree */
		clear_page_dirty_for_io(page);
		put_page(page);
		index++;
	}
}
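/*
 * Apply @page_ops (set ordered, start/end writeback, unlock) to the part of
 * @page that overlaps [start, end].  @locked_page is skipped for PAGE_UNLOCK,
 * as the caller is still holding its lock.
 */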
static void process_one_page(struct btrfs_fs_info *fs_info,
			     struct page *page, struct page *locked_page,
			     unsigned long page_ops, u64 start, u64 end)
{
	struct folio *folio = page_folio(page);
	u32 len;

	ASSERT(end + 1 - start != 0 && end + 1 - start < U32_MAX);
	len = end + 1 - start;

	if (page_ops & PAGE_SET_ORDERED)
		btrfs_folio_clamp_set_ordered(fs_info, folio, start, len);
	if (page_ops & PAGE_START_WRITEBACK) {
		btrfs_folio_clamp_clear_dirty(fs_info, folio, start, len);
		btrfs_folio_clamp_set_writeback(fs_info, folio, start, len);
	}
	if (page_ops & PAGE_END_WRITEBACK)
		btrfs_folio_clamp_clear_writeback(fs_info, folio, start, len);

	if (page != locked_page && (page_ops & PAGE_UNLOCK))
		btrfs_folio_end_writer_lock(fs_info, folio, start, len);
}
static void __process_pages_contig(struct address_space *mapping,
				   struct page *locked_page, u64 start, u64 end,
				   unsigned long page_ops)
{
	struct btrfs_fs_info *fs_info = inode_to_fs_info(mapping->host);
	pgoff_t start_index = start >> PAGE_SHIFT;
	pgoff_t end_index = end >> PAGE_SHIFT;
	pgoff_t index = start_index;
	struct folio_batch fbatch;
	int i;

	folio_batch_init(&fbatch);
	while (index <= end_index) {
		int found_folios;

		found_folios = filemap_get_folios_contig(mapping, &index,
				end_index, &fbatch);
		for (i = 0; i < found_folios; i++) {
			struct folio *folio = fbatch.folios[i];

			process_one_page(fs_info, &folio->page, locked_page,
					 page_ops, start, end);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}
}
static noinline void __unlock_for_delalloc(struct inode *inode,
					   struct page *locked_page,
					   u64 start, u64 end)
{
	unsigned long index = start >> PAGE_SHIFT;
	unsigned long end_index = end >> PAGE_SHIFT;

	ASSERT(locked_page);
	if (index == locked_page->index && end_index == index)
		return;

	__process_pages_contig(inode->i_mapping, locked_page, start, end,
			       PAGE_UNLOCK);
}
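/*
 * Lock every page in the byte range [start, end], except @locked_page which
 * the caller already holds.  On failure, all pages locked so far are
 * unlocked again and -EAGAIN is returned so the caller can retry with a
 * shorter range.
 */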
static noinline int lock_delalloc_pages(struct inode *inode,
					struct page *locked_page,
					u64 start,
					u64 end)
{
	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t start_index = start >> PAGE_SHIFT;
	pgoff_t end_index = end >> PAGE_SHIFT;
	pgoff_t index = start_index;
	u64 processed_end = start;
	struct folio_batch fbatch;

	if (index == locked_page->index && index == end_index)
		return 0;

	folio_batch_init(&fbatch);
	while (index <= end_index) {
		unsigned int found_folios, i;

		found_folios = filemap_get_folios_contig(mapping, &index,
				end_index, &fbatch);
		if (found_folios == 0)
			goto out;

		for (i = 0; i < found_folios; i++) {
			struct folio *folio = fbatch.folios[i];
			struct page *page = folio_page(folio, 0);
			u32 len = end + 1 - start;

			if (page == locked_page)
				continue;

			if (btrfs_folio_start_writer_lock(fs_info, folio, start,
							  len))
				goto out;

			if (!PageDirty(page) || page->mapping != mapping) {
				btrfs_folio_end_writer_lock(fs_info, folio, start,
							    len);
				goto out;
			}

			processed_end = page_offset(page) + PAGE_SIZE - 1;
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}

	return 0;
out:
	folio_batch_release(&fbatch);
	if (processed_end > start)
		__unlock_for_delalloc(inode, locked_page, start, processed_end);
	return -EAGAIN;
}
/*
 * Find and lock a contiguous range of bytes in the file marked as delalloc, no
 * more than @max_bytes.
 *
 * @start:	The original start bytenr to search.
 *		Will store the extent range start bytenr.
 * @end:	The original end bytenr of the search range
 *		Will store the extent range end bytenr.
 *
 * Return true if we find a delalloc range which starts inside the original
 * range, and @start/@end will store the delalloc range start/end.
 *
 * Return false if we can't find any delalloc range which starts inside the
 * original range, and @start/@end will be the non-delalloc range start/end.
 */
EXPORT_FOR_TESTS
noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
						 struct page *locked_page, u64 *start,
						 u64 *end)
{
	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
	const u64 orig_start = *start;
	const u64 orig_end = *end;
	/* The sanity tests may not set a valid fs_info. */
	u64 max_bytes = fs_info ? fs_info->max_extent_size : BTRFS_MAX_EXTENT_SIZE;
	u64 delalloc_start;
	u64 delalloc_end;
	bool found;
	struct extent_state *cached_state = NULL;
	int ret;
	int loops = 0;

	/* Caller should pass a valid @end to indicate the search range end */
	ASSERT(orig_end > orig_start);

	/* The range should at least cover part of the page */
	ASSERT(!(orig_start >= page_offset(locked_page) + PAGE_SIZE ||
		 orig_end <= page_offset(locked_page)));
again:
	/* step one, find a bunch of delalloc bytes starting at start */
	delalloc_start = *start;
	delalloc_end = 0;
	found = btrfs_find_delalloc_range(tree, &delalloc_start, &delalloc_end,
					  max_bytes, &cached_state);
	if (!found || delalloc_end <= *start || delalloc_start > orig_end) {
		*start = delalloc_start;

		/* @delalloc_end can be -1, never go beyond @orig_end */
		*end = min(delalloc_end, orig_end);
		free_extent_state(cached_state);
		return false;
	}

	/*
	 * start comes from the offset of locked_page.  We have to lock
	 * pages in order, so we can't process delalloc bytes before
	 * locked_page
	 */
	if (delalloc_start < *start)
		delalloc_start = *start;

	/*
	 * make sure to limit the number of pages we try to lock down
	 */
	if (delalloc_end + 1 - delalloc_start > max_bytes)
		delalloc_end = delalloc_start + max_bytes - 1;

	/* step two, lock all the pages after the page that has start */
	ret = lock_delalloc_pages(inode, locked_page,
				  delalloc_start, delalloc_end);
	ASSERT(!ret || ret == -EAGAIN);
	if (ret == -EAGAIN) {
		/*
		 * Some of the pages are gone, let's avoid looping by
		 * shortening the size of the delalloc range we're searching.
		 */
		free_extent_state(cached_state);
		cached_state = NULL;
		if (!loops) {
			max_bytes = PAGE_SIZE;
			loops = 1;
			goto again;
		} else {
			found = false;
			goto out_failed;
		}
	}

	/* step three, lock the state bits for the whole range */
	lock_extent(tree, delalloc_start, delalloc_end, &cached_state);

	/* then test to make sure it is all still delalloc */
	ret = test_range_bit(tree, delalloc_start, delalloc_end,
			     EXTENT_DELALLOC, cached_state);

	unlock_extent(tree, delalloc_start, delalloc_end, &cached_state);
	if (!ret) {
		__unlock_for_delalloc(inode, locked_page,
				      delalloc_start, delalloc_end);
		cond_resched();
		goto again;
	}
	*start = delalloc_start;
	*end = delalloc_end;
out_failed:
	return found;
}
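/*
 * Clear @clear_bits in the io tree for [start, end] and apply @page_ops to
 * every page in the range, e.g. to unlock and end writeback on all pages
 * after a delalloc run has finished or failed.
 */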
void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
				  struct page *locked_page,
				  struct extent_state **cached,
				  u32 clear_bits, unsigned long page_ops)
{
	clear_extent_bit(&inode->io_tree, start, end, clear_bits, cached);

	__process_pages_contig(inode->vfs_inode.i_mapping, locked_page,
			       start, end, page_ops);
}
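/*
 * Returns true if the page needs no fsverity check (the inode is not using
 * fsverity, the page is already uptodate, or the range is beyond i_size),
 * or if the fsverity verification succeeds.
 */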
static bool btrfs_verify_page(struct page *page, u64 start)
{
	if (!fsverity_active(page->mapping->host) ||
	    PageUptodate(page) ||
	    start >= i_size_read(page->mapping->host))
		return true;
	return fsverity_verify_page(page);
}
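/*
 * Finish reading the range [start, start + len) inside @page: set or clear
 * the uptodate flag depending on @uptodate and fsverity, then unlock the
 * page, or drop the subpage reader count for subpage filesystems.
 */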
static void end_page_read(struct page *page, bool uptodate, u64 start, u32 len)
{
	struct btrfs_fs_info *fs_info = page_to_fs_info(page);
	struct folio *folio = page_folio(page);

	ASSERT(page_offset(page) <= start &&
	       start + len <= page_offset(page) + PAGE_SIZE);

	if (uptodate && btrfs_verify_page(page, start))
		btrfs_folio_set_uptodate(fs_info, folio, start, len);
	else
		btrfs_folio_clear_uptodate(fs_info, folio, start, len);

	if (!btrfs_is_subpage(fs_info, page->mapping))
		unlock_page(page);
	else
		btrfs_subpage_end_reader(fs_info, folio, start, len);
}
/*
 * After a write IO is done, we need to:
 *
 * - clear the uptodate bits on error
 * - clear the writeback bits in the extent tree for the range
 * - folio_end_writeback() if there is no more pending io for the folio
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
static void end_bbio_data_write(struct btrfs_bio *bbio)
{
	struct btrfs_fs_info *fs_info = bbio->fs_info;
	struct bio *bio = &bbio->bio;
	int error = blk_status_to_errno(bio->bi_status);
	struct folio_iter fi;
	const u32 sectorsize = fs_info->sectorsize;

	ASSERT(!bio_flagged(bio, BIO_CLONED));
	bio_for_each_folio_all(fi, bio) {
		struct folio *folio = fi.folio;
		u64 start = folio_pos(folio) + fi.offset;
		u32 len = fi.length;

		/* Only order 0 (single page) folios are allowed for data. */
		ASSERT(folio_order(folio) == 0);

		/* Our read/write should always be sector aligned. */
		if (!IS_ALIGNED(fi.offset, sectorsize))
			btrfs_err(fs_info,
		"partial page write in btrfs with offset %zu and length %zu",
				  fi.offset, fi.length);
		else if (!IS_ALIGNED(fi.length, sectorsize))
			btrfs_info(fs_info,
		"incomplete page write with offset %zu and length %zu",
				   fi.offset, fi.length);

		btrfs_finish_ordered_extent(bbio->ordered,
				folio_page(folio, 0), start, len, !error);
		if (error)
			mapping_set_error(folio->mapping, error);
		btrfs_folio_clear_writeback(fs_info, folio, start, len);
	}

	bio_put(bio);
}
/*
 * Record previously processed extent range
 *
 * For endio_readpage_release_extent() to handle a full extent range, reducing
 * the extent io operations.
 */
struct processed_extent {
	struct btrfs_inode *inode;
	/* Start of the range in @inode */
	u64 start;
	/* End of the range in @inode */
	u64 end;
	bool uptodate;
};

/*
 * Try to release processed extent range
 *
 * May not release the extent range right now if the current range is
 * contiguous to processed extent.
 *
 * Will release processed extent when any of @inode, @uptodate, the range is
 * no longer contiguous to the processed range.
 *
 * Passing @inode == NULL will force processed extent to be released.
 */
static void endio_readpage_release_extent(struct processed_extent *processed,
			      struct btrfs_inode *inode, u64 start, u64 end,
			      bool uptodate)
{
	struct extent_state *cached = NULL;
	struct extent_io_tree *tree;

	/* The first extent, initialize @processed */
	if (!processed->inode)
		goto update;

	/*
	 * Contiguous to processed extent, just update the end.
	 *
	 * Several things to notice:
	 *
	 * - bio can be merged as long as on-disk bytenr is contiguous
	 *   This means we can have pages belonging to other inodes, thus need to
	 *   check if the inode still matches.
	 * - bvec can contain range beyond current page for multi-page bvec
	 *   Thus we need to do processed->end + 1 >= start check
	 */
	if (processed->inode == inode && processed->uptodate == uptodate &&
	    processed->end + 1 >= start && end >= processed->end) {
		processed->end = end;
		return;
	}

	tree = &processed->inode->io_tree;
	/*
	 * Now we don't have range contiguous to the processed range, release
	 * the processed range now.
	 */
	unlock_extent(tree, processed->start, processed->end, &cached);

update:
	/* Update processed to current range */
	processed->inode = inode;
	processed->start = start;
	processed->end = end;
	processed->uptodate = uptodate;
}
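/*
 * Prepare @page for read submission.  Only subpage filesystems need work
 * here: take the subpage reader count for the whole page so the page is
 * only unlocked once every sector read has completed.
 */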
static void begin_page_read(struct btrfs_fs_info *fs_info, struct page *page)
{
	struct folio *folio = page_folio(page);

	ASSERT(folio_test_locked(folio));
	if (!btrfs_is_subpage(fs_info, folio->mapping))
		return;

	ASSERT(folio_test_private(folio));
	btrfs_subpage_start_reader(fs_info, folio, page_offset(page), PAGE_SIZE);
}
/*
 * After a data read IO is done, we need to:
 *
 * - clear the uptodate bits on error
 * - set the uptodate bits if things worked
 * - set the folio up to date if all extents in the tree are uptodate
 * - clear the lock bit in the extent tree
 * - unlock the folio if there are no other extents locked for it
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
static void end_bbio_data_read(struct btrfs_bio *bbio)
{
	struct btrfs_fs_info *fs_info = bbio->fs_info;
	struct bio *bio = &bbio->bio;
	struct processed_extent processed = { 0 };
	struct folio_iter fi;
	const u32 sectorsize = fs_info->sectorsize;

	ASSERT(!bio_flagged(bio, BIO_CLONED));
	bio_for_each_folio_all(fi, &bbio->bio) {
		bool uptodate = !bio->bi_status;
		struct folio *folio = fi.folio;
		struct inode *inode = folio->mapping->host;
		u64 start;
		u64 end;
		u32 len;

		/* For now only order 0 folios are supported for data. */
		ASSERT(folio_order(folio) == 0);
		btrfs_debug(fs_info,
			    "%s: bi_sector=%llu, err=%d, mirror=%u",
			    __func__, bio->bi_iter.bi_sector, bio->bi_status,
			    bbio->mirror_num);

		/*
		 * We always issue full-sector reads, but if some block in a
		 * folio fails to read, blk_update_request() will advance
		 * bv_offset and adjust bv_len to compensate.  Print a warning
		 * for unaligned offsets, and an error if they don't add up to
		 * a full sector.
		 */
		if (!IS_ALIGNED(fi.offset, sectorsize))
			btrfs_err(fs_info,
		"partial page read in btrfs with offset %zu and length %zu",
				  fi.offset, fi.length);
		else if (!IS_ALIGNED(fi.offset + fi.length, sectorsize))
			btrfs_info(fs_info,
		"incomplete page read with offset %zu and length %zu",
				   fi.offset, fi.length);

		start = folio_pos(folio) + fi.offset;
		end = start + fi.length - 1;
		len = fi.length;

		if (likely(uptodate)) {
			loff_t i_size = i_size_read(inode);
			pgoff_t end_index = i_size >> folio_shift(folio);

			/*
			 * Zero out the remaining part if this range straddles
			 * i_size.
			 *
			 * Here we should only zero the range inside the folio,
			 * not touch anything else.
			 *
			 * NOTE: i_size is exclusive while end is inclusive.
			 */
			if (folio_index(folio) == end_index && i_size <= end) {
				u32 zero_start = max(offset_in_folio(folio, i_size),
						     offset_in_folio(folio, start));
				u32 zero_len = offset_in_folio(folio, end) + 1 -
					       zero_start;

				folio_zero_range(folio, zero_start, zero_len);
			}
		}

		/* Update page status and unlock. */
		end_page_read(folio_page(folio, 0), uptodate, start, len);
		endio_readpage_release_extent(&processed, BTRFS_I(inode),
					      start, end, uptodate);
	}
	/* Release the last extent */
	endio_readpage_release_extent(&processed, NULL, 0, 0, false);
	bio_put(bio);
}
/*
 * Populate every free slot in a provided array with folios.
 *
 * @nr_folios:   number of folios to allocate
 * @folio_array: the array to fill with folios; any existing non-NULL entries in
 *		 the array will be skipped
 * @extra_gfp:	 the extra GFP flags for the allocation
 *
 * Return: 0        if all folios were able to be allocated;
 *         -ENOMEM  otherwise, the partially allocated folios would be freed and
 *                  the array slots zeroed
 */
int btrfs_alloc_folio_array(unsigned int nr_folios, struct folio **folio_array,
			    gfp_t extra_gfp)
{
	for (int i = 0; i < nr_folios; i++) {
		if (folio_array[i])
			continue;
		folio_array[i] = folio_alloc(GFP_NOFS | extra_gfp, 0);
		if (!folio_array[i])
			goto error;
	}
	return 0;
error:
	for (int i = 0; i < nr_folios; i++) {
		if (folio_array[i])
			folio_put(folio_array[i]);
	}
	return -ENOMEM;
}
/*
 * Populate every free slot in a provided array with pages.
 *
 * @nr_pages:   number of pages to allocate
 * @page_array: the array to fill with pages; any existing non-null entries in
 *		the array will be skipped
 * @extra_gfp:	the extra GFP flags for the allocation.
 *
 * Return: 0        if all pages were able to be allocated;
 *         -ENOMEM  otherwise, the partially allocated pages would be freed and
 *                  the array slots zeroed
 */
int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array,
			   gfp_t extra_gfp)
{
	const gfp_t gfp = GFP_NOFS | extra_gfp;
	unsigned int allocated;

	for (allocated = 0; allocated < nr_pages;) {
		unsigned int last = allocated;

		allocated = alloc_pages_bulk_array(gfp, nr_pages, page_array);
		if (unlikely(allocated == last)) {
			/* No progress, fail and do cleanup. */
			for (int i = 0; i < allocated; i++) {
				__free_page(page_array[i]);
				page_array[i] = NULL;
			}
			return -ENOMEM;
		}
	}
	return 0;
}
/*
 * Populate needed folios for the extent buffer.
 *
 * For now, the folios populated are always in order 0 (aka, single page).
 */
static int alloc_eb_folio_array(struct extent_buffer *eb, gfp_t extra_gfp)
{
	struct page *page_array[INLINE_EXTENT_BUFFER_PAGES] = { 0 };
	int num_pages = num_extent_pages(eb);
	int ret;

	ret = btrfs_alloc_page_array(num_pages, page_array, extra_gfp);
	if (ret < 0)
		return ret;

	for (int i = 0; i < num_pages; i++)
		eb->folios[i] = page_folio(page_array[i]);
	eb->folio_size = PAGE_SIZE;
	eb->folio_shift = PAGE_SHIFT;
	return 0;
}
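/*
 * Check whether the range starting at @disk_bytenr/@pg_offset can simply be
 * appended to the bio under assembly: both the logical (disk) position and
 * the file offset must be contiguous with the last bvec.  Compressed bios
 * instead always point at the start bytenr of the compressed extent.
 */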
static bool btrfs_bio_is_contig(struct btrfs_bio_ctrl *bio_ctrl,
				struct page *page, u64 disk_bytenr,
				unsigned int pg_offset)
{
	struct bio *bio = &bio_ctrl->bbio->bio;
	struct bio_vec *bvec = bio_last_bvec_all(bio);
	const sector_t sector = disk_bytenr >> SECTOR_SHIFT;

	if (bio_ctrl->compress_type != BTRFS_COMPRESS_NONE) {
		/*
		 * For compression, all IO should have its logical bytenr set
		 * to the starting bytenr of the compressed extent.
		 */
		return bio->bi_iter.bi_sector == sector;
	}

	/*
	 * The contig check requires the following conditions to be met:
	 *
	 * 1) The pages belong to the same inode
	 *    This is implied by the call chain.
	 *
	 * 2) The range has adjacent logical bytenr
	 *
	 * 3) The range has adjacent file offset
	 *    This is required for the usage of btrfs_bio->file_offset.
	 */
	return bio_end_sector(bio) == sector &&
		page_offset(bvec->bv_page) + bvec->bv_offset + bvec->bv_len ==
		page_offset(page) + pg_offset;
}
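/*
 * Allocate a new bio in @bio_ctrl targeting @disk_bytenr for the file range
 * at @file_offset.  Data write bios additionally get capped at the ordered
 * extent boundary and wired up for cgroup writeback accounting.
 */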
static void alloc_new_bio(struct btrfs_inode *inode,
			  struct btrfs_bio_ctrl *bio_ctrl,
			  u64 disk_bytenr, u64 file_offset)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_bio *bbio;

	bbio = btrfs_bio_alloc(BIO_MAX_VECS, bio_ctrl->opf, fs_info,
			       bio_ctrl->end_io_func, NULL);
	bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
	bbio->inode = inode;
	bbio->file_offset = file_offset;
	bio_ctrl->bbio = bbio;
	bio_ctrl->len_to_oe_boundary = U32_MAX;

	/* Limit data write bios to the ordered boundary. */
	if (bio_ctrl->wbc) {
		struct btrfs_ordered_extent *ordered;

		ordered = btrfs_lookup_ordered_extent(inode, file_offset);
		if (ordered) {
			bio_ctrl->len_to_oe_boundary = min_t(u32, U32_MAX,
					ordered->file_offset +
					ordered->disk_num_bytes - file_offset);
			bbio->ordered = ordered;
		}

		/*
		 * Pick the last added device to support cgroup writeback. For
		 * multi-device file systems this means blk-cgroup policies have
		 * to always be set on the last added/replaced device.
		 * This is a bit odd but has been like that for a long time.
		 */
		bio_set_dev(&bbio->bio, fs_info->fs_devices->latest_dev->bdev);
		wbc_init_bio(bio_ctrl->wbc, &bbio->bio);
	}
}
/*
 * @disk_bytenr: logical bytenr where the write will be
 * @page:	 page to add to the bio
 * @size:	 portion of page that we want to write to
 * @pg_offset:	 offset of the new bio or to check whether we are adding
 *		 a contiguous page to the previous one
 *
 * This will either add the page to the existing @bio_ctrl->bbio, or allocate a
 * new one in @bio_ctrl->bbio.
 * The mirror number for this IO should already be initialized in
 * @bio_ctrl->mirror_num.
 */
static void submit_extent_page(struct btrfs_bio_ctrl *bio_ctrl,
			       u64 disk_bytenr, struct page *page,
			       size_t size, unsigned long pg_offset)
{
	struct btrfs_inode *inode = page_to_inode(page);

	ASSERT(pg_offset + size <= PAGE_SIZE);
	ASSERT(bio_ctrl->end_io_func);

	if (bio_ctrl->bbio &&
	    !btrfs_bio_is_contig(bio_ctrl, page, disk_bytenr, pg_offset))
		submit_one_bio(bio_ctrl);

	do {
		u32 len = size;

		/* Allocate new bio if needed */
		if (!bio_ctrl->bbio) {
			alloc_new_bio(inode, bio_ctrl, disk_bytenr,
				      page_offset(page) + pg_offset);
		}

		/* Cap to the current ordered extent boundary if there is one. */
		if (len > bio_ctrl->len_to_oe_boundary) {
			ASSERT(bio_ctrl->compress_type == BTRFS_COMPRESS_NONE);
			ASSERT(is_data_inode(&inode->vfs_inode));
			len = bio_ctrl->len_to_oe_boundary;
		}

		if (bio_add_page(&bio_ctrl->bbio->bio, page, len, pg_offset) != len) {
			/* bio full: move on to a new one */
			submit_one_bio(bio_ctrl);
			continue;
		}

		if (bio_ctrl->wbc)
			wbc_account_cgroup_owner(bio_ctrl->wbc, page, len);

		size -= len;
		pg_offset += len;
		disk_bytenr += len;

		/*
		 * len_to_oe_boundary defaults to U32_MAX, which isn't page or
		 * sector aligned.  alloc_new_bio() then sets it to the end of
		 * our ordered extent for writes into zoned devices.
		 *
		 * When len_to_oe_boundary is tracking an ordered extent, we
		 * trust the ordered extent code to align things properly, and
		 * the check above to cap our write to the ordered extent
		 * boundary is correct.
		 *
		 * When len_to_oe_boundary is U32_MAX, the cap above would
		 * result in a 4095 byte IO for the last page right before
		 * we hit the bio limit of UINT_MAX.  bio_add_page() has all
		 * the checks required to make sure we don't overflow the bio,
		 * and we should just ignore len_to_oe_boundary completely
		 * unless we're using it to track an ordered extent.
		 *
		 * It's pretty hard to make a bio sized U32_MAX, but it can
		 * happen when the page cache is able to feed us contiguous
		 * pages for large extents.
		 */
		if (bio_ctrl->len_to_oe_boundary != U32_MAX)
			bio_ctrl->len_to_oe_boundary -= len;

		/* Ordered extent boundary: move on to a new bio. */
		if (bio_ctrl->len_to_oe_boundary == 0)
			submit_one_bio(bio_ctrl);
	} while (size);
}
static int attach_extent_buffer_folio(struct extent_buffer *eb,
				      struct folio *folio,
				      struct btrfs_subpage *prealloc)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	int ret = 0;

	/*
	 * If the page is mapped to btree inode, we should hold the private
	 * lock to prevent race.
	 * For cloned or dummy extent buffers, their pages are not mapped and
	 * will not race with any other ebs.
	 */
	if (folio->mapping)
		lockdep_assert_held(&folio->mapping->i_private_lock);

	if (fs_info->nodesize >= PAGE_SIZE) {
		if (!folio_test_private(folio))
			folio_attach_private(folio, eb);
		else
			WARN_ON(folio_get_private(folio) != eb);
		return 0;
	}

	/* Already mapped, just free prealloc */
	if (folio_test_private(folio)) {
		btrfs_free_subpage(prealloc);
		return 0;
	}

	if (prealloc)
		/* Has preallocated memory for subpage */
		folio_attach_private(folio, prealloc);
	else
		/* Do new allocation to attach subpage */
		ret = btrfs_attach_subpage(fs_info, folio, BTRFS_SUBPAGE_METADATA);
	return ret;
}
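/*
 * Attach private data to the page/folio so the rest of extent IO can tell
 * it is managed by btrfs: a btrfs_subpage structure for subpage
 * filesystems, or the plain EXTENT_FOLIO_PRIVATE marker otherwise.
 */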
int set_page_extent_mapped(struct page *page)
{
	return set_folio_extent_mapped(page_folio(page));
}

int set_folio_extent_mapped(struct folio *folio)
{
	struct btrfs_fs_info *fs_info;

	ASSERT(folio->mapping);

	if (folio_test_private(folio))
		return 0;

	fs_info = folio_to_fs_info(folio);

	if (btrfs_is_subpage(fs_info, folio->mapping))
		return btrfs_attach_subpage(fs_info, folio, BTRFS_SUBPAGE_DATA);

	folio_attach_private(folio, (void *)EXTENT_FOLIO_PRIVATE);
	return 0;
}

void clear_page_extent_mapped(struct page *page)
{
	struct folio *folio = page_folio(page);
	struct btrfs_fs_info *fs_info;

	ASSERT(page->mapping);

	if (!folio_test_private(folio))
		return;

	fs_info = page_to_fs_info(page);
	if (btrfs_is_subpage(fs_info, page->mapping))
		return btrfs_detach_subpage(fs_info, folio);

	folio_detach_private(folio);
}
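/*
 * Cached lookup for the readpage loop: reuse *em_cached if it still covers
 * @start, otherwise drop it, look up a fresh extent map with
 * btrfs_get_extent() and cache that one for the next iteration.
 */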
static struct extent_map *__get_extent_map(struct inode *inode, struct page *page,
					   u64 start, u64 len, struct extent_map **em_cached)
{
	struct extent_map *em;

	ASSERT(em_cached);

	if (*em_cached) {
		em = *em_cached;
		if (extent_map_in_tree(em) && start >= em->start &&
		    start < extent_map_end(em)) {
			refcount_inc(&em->refs);
			return em;
		}

		free_extent_map(em);
		*em_cached = NULL;
	}

	em = btrfs_get_extent(BTRFS_I(inode), page, start, len);
	if (!IS_ERR(em)) {
		BUG_ON(*em_cached);
		refcount_inc(&em->refs);
		*em_cached = em;
	}
	return em;
}
/*
 * basic readpage implementation. Locked extent state structs are inserted
 * into the tree that are removed when the IO is done (by the end_io
 * handlers)
 * XXX JDM: This needs looking at to ensure proper page locking
 * return 0 on success, otherwise return error
 */
static int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
		      struct btrfs_bio_ctrl *bio_ctrl, u64 *prev_em_start)
{
	struct inode *inode = page->mapping->host;
	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
	u64 start = page_offset(page);
	const u64 end = start + PAGE_SIZE - 1;
	u64 cur = start;
	u64 extent_offset;
	u64 last_byte = i_size_read(inode);
	u64 block_start;
	struct extent_map *em;
	int ret = 0;
	size_t pg_offset = 0;
	size_t iosize;
	size_t blocksize = fs_info->sectorsize;
	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;

	ret = set_page_extent_mapped(page);
	if (ret < 0) {
		unlock_extent(tree, start, end, NULL);
		unlock_page(page);
		return ret;
	}

	if (page->index == last_byte >> PAGE_SHIFT) {
		size_t zero_offset = offset_in_page(last_byte);

		if (zero_offset) {
			iosize = PAGE_SIZE - zero_offset;
			memzero_page(page, zero_offset, iosize);
		}
	}
	bio_ctrl->end_io_func = end_bbio_data_read;
	begin_page_read(fs_info, page);
	while (cur <= end) {
		enum btrfs_compression_type compress_type = BTRFS_COMPRESS_NONE;
		bool force_bio_submit = false;
		u64 disk_bytenr;

		ASSERT(IS_ALIGNED(cur, fs_info->sectorsize));
		if (cur >= last_byte) {
			iosize = PAGE_SIZE - pg_offset;
			memzero_page(page, pg_offset, iosize);
			unlock_extent(tree, cur, cur + iosize - 1, NULL);
			end_page_read(page, true, cur, iosize);
			break;
		}
		em = __get_extent_map(inode, page, cur, end - cur + 1, em_cached);
		if (IS_ERR(em)) {
			unlock_extent(tree, cur, end, NULL);
			end_page_read(page, false, cur, end + 1 - cur);
			return PTR_ERR(em);
		}
		extent_offset = cur - em->start;
		BUG_ON(extent_map_end(em) <= cur);
		BUG_ON(end < cur);

		compress_type = extent_map_compression(em);

		iosize = min(extent_map_end(em) - cur, end - cur + 1);
		iosize = ALIGN(iosize, blocksize);
		if (compress_type != BTRFS_COMPRESS_NONE)
			disk_bytenr = em->block_start;
		else
			disk_bytenr = em->block_start + extent_offset;
		block_start = em->block_start;
		if (em->flags & EXTENT_FLAG_PREALLOC)
			block_start = EXTENT_MAP_HOLE;

		/*
		 * If we have a file range that points to a compressed extent
		 * and it's followed by a consecutive file range that points
		 * to the same compressed extent (possibly with a different
		 * offset and/or length, so it either points to the whole extent
		 * or only part of it), we must make sure we do not submit a
		 * single bio to populate the pages for the 2 ranges because
		 * this makes the compressed extent read zero out the pages
		 * belonging to the 2nd range. Imagine the following scenario:
		 *
		 *  File layout
		 *  [0 - 8K]                     [8K - 24K]
		 *    |                               |
		 *    |                               |
		 * points to extent X,         points to extent X,
		 * offset 4K, length of 8K     offset 0, length 16K
		 *
		 * [extent X, compressed length = 4K uncompressed length = 16K]
		 *
		 * If the bio to read the compressed extent covers both ranges,
		 * it will decompress extent X into the pages belonging to the
		 * first range and then it will stop, zeroing out the remaining
		 * pages that belong to the other range that points to extent X.
		 * So here we make sure we submit 2 bios, one for the first
		 * range and another one for the second range. Both will target
		 * the same physical extent from disk, but we can't currently
		 * make the compressed bio endio callback populate the pages
		 * for both ranges because each compressed bio is tightly
		 * coupled with a single extent map, and each range can have
		 * an extent map with a different offset value relative to the
		 * uncompressed data of our extent and different lengths. This
		 * is a corner case so we prioritize correctness over
		 * non-optimal behavior (submitting 2 bios for the same extent).
		 */
		if (compress_type != BTRFS_COMPRESS_NONE &&
		    prev_em_start && *prev_em_start != (u64)-1 &&
		    *prev_em_start != em->start)
			force_bio_submit = true;

		if (prev_em_start)
			*prev_em_start = em->start;

		free_extent_map(em);
		em = NULL;

		/* we've found a hole, just zero and go on */
		if (block_start == EXTENT_MAP_HOLE) {
			memzero_page(page, pg_offset, iosize);

			unlock_extent(tree, cur, cur + iosize - 1, NULL);
			end_page_read(page, true, cur, iosize);
			cur = cur + iosize;
			pg_offset += iosize;
			continue;
		}
		/* the get_extent function already copied into the page */
		if (block_start == EXTENT_MAP_INLINE) {
			unlock_extent(tree, cur, cur + iosize - 1, NULL);
			end_page_read(page, true, cur, iosize);
			cur = cur + iosize;
			pg_offset += iosize;
			continue;
		}

		if (bio_ctrl->compress_type != compress_type) {
			submit_one_bio(bio_ctrl);
			bio_ctrl->compress_type = compress_type;
		}

		if (force_bio_submit)
			submit_one_bio(bio_ctrl);
		submit_extent_page(bio_ctrl, disk_bytenr, page, iosize,
				   pg_offset);
		cur = cur + iosize;
		pg_offset += iosize;
	}

	return 0;
}
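/*
 * The ->read_folio address space operation.  Any ordered extents overlapping
 * the folio are flushed first, and the assembled bio is submitted even if
 * btrfs_do_readpage() failed, so that the end_io handler does the cleanup.
 */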
int btrfs_read_folio(struct file *file, struct folio *folio)
{
	struct page *page = &folio->page;
	struct btrfs_inode *inode = page_to_inode(page);
	u64 start = page_offset(page);
	u64 end = start + PAGE_SIZE - 1;
	struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ };
	struct extent_map *em_cached = NULL;
	int ret;

	btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);

	ret = btrfs_do_readpage(page, &em_cached, &bio_ctrl, NULL);
	free_extent_map(em_cached);

	/*
	 * If btrfs_do_readpage() failed we will want to submit the assembled
	 * bio to do the cleanup.
	 */
	submit_one_bio(&bio_ctrl);
	return ret;
}
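/*
 * Readahead helper: read a batch of pages covering the contiguous file
 * range [start, end], sharing one cached extent map and one bio across the
 * whole batch.
 */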
static inline void contiguous_readpages(struct page *pages[], int nr_pages,
					u64 start, u64 end,
					struct extent_map **em_cached,
					struct btrfs_bio_ctrl *bio_ctrl,
					u64 *prev_em_start)
{
	struct btrfs_inode *inode = page_to_inode(pages[0]);
	int index;

	ASSERT(em_cached);

	btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);

	for (index = 0; index < nr_pages; index++) {
		btrfs_do_readpage(pages[index], em_cached, bio_ctrl,
				  prev_em_start);
		put_page(pages[index]);
	}
}
/*
 * helper for __extent_writepage, doing all of the delayed allocation setup.
 *
 * This returns 1 if btrfs_run_delalloc_range function did all the work required
 * to write the page (copy into inline extent).  In this case the IO has
 * been started and the page is already unlocked.
 *
 * This returns 0 if all went well (page still locked)
 * This returns < 0 if there were errors (page still locked)
 */
static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
		struct page *page, struct writeback_control *wbc)
{
	const u64 page_start = page_offset(page);
	const u64 page_end = page_start + PAGE_SIZE - 1;
	u64 delalloc_start = page_start;
	u64 delalloc_end = page_end;
	u64 delalloc_to_write = 0;
	int ret = 0;

	while (delalloc_start < page_end) {
		delalloc_end = page_end;
		if (!find_lock_delalloc_range(&inode->vfs_inode, page,
					      &delalloc_start, &delalloc_end)) {
			delalloc_start = delalloc_end + 1;
			continue;
		}

		ret = btrfs_run_delalloc_range(inode, page, delalloc_start,
					       delalloc_end, wbc);
		if (ret < 0)
			return ret;

		delalloc_start = delalloc_end + 1;
	}

	/*
	 * delalloc_end is already one less than the total length, so
	 * we don't subtract one from PAGE_SIZE
	 */
	delalloc_to_write +=
		DIV_ROUND_UP(delalloc_end + 1 - page_start, PAGE_SIZE);

	/*
	 * If btrfs_run_delalloc_range() already started I/O and unlocked
	 * the pages, we just need to account for them here.
	 */
	if (ret == 1) {
		wbc->nr_to_write -= delalloc_to_write;
		return 1;
	}

	if (wbc->nr_to_write < delalloc_to_write) {
		int thresh = 8192;

		if (delalloc_to_write < thresh * 2)
			thresh = delalloc_to_write;
		wbc->nr_to_write = min_t(u64, delalloc_to_write,
					 thresh);
	}

	return 0;
}
/*
 * Find the first byte we need to write.
 *
 * For subpage, one page can contain several sectors, and
 * __extent_writepage_io() will just grab all extent maps in the page
 * range and try to submit all non-inline/non-compressed extents.
 *
 * This is a big problem for subpage, we shouldn't re-submit already written
 * data at all.
 * This function will look up the subpage dirty bit to find which range we
 * really need to submit.
 *
 * Return the next dirty range in [@start, @end).
 * If no dirty range is found, @start will be page_offset(page) + PAGE_SIZE.
 */
static void find_next_dirty_byte(struct btrfs_fs_info *fs_info,
				 struct page *page, u64 *start, u64 *end)
{
	struct folio *folio = page_folio(page);
	struct btrfs_subpage *subpage = folio_get_private(folio);
	struct btrfs_subpage_info *spi = fs_info->subpage_info;
	u64 orig_start = *start;
	/* Declare as unsigned long so we can use bitmap ops */
	unsigned long flags;
	int range_start_bit;
	int range_end_bit;

	/*
	 * For regular sector size == page size case, since one page only
	 * contains one sector, we return the page offset directly.
	 */
	if (!btrfs_is_subpage(fs_info, page->mapping)) {
		*start = page_offset(page);
		*end = page_offset(page) + PAGE_SIZE;
		return;
	}

	range_start_bit = spi->dirty_offset +
			  (offset_in_page(orig_start) >> fs_info->sectorsize_bits);

	/* We should have the page locked, but just in case */
	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_next_set_region(subpage->bitmaps, &range_start_bit, &range_end_bit,
			       spi->dirty_offset + spi->bitmap_nr_bits);
	spin_unlock_irqrestore(&subpage->lock, flags);

	range_start_bit -= spi->dirty_offset;
	range_end_bit -= spi->dirty_offset;

	*start = page_offset(page) + range_start_bit * fs_info->sectorsize;
	*end = page_offset(page) + range_end_bit * fs_info->sectorsize;
}
/*
 * helper for __extent_writepage.  This calls the writepage start hooks,
 * and does the loop to map the page into extents and bios.
 *
 * We return 1 if the IO is started and the page is unlocked,
 * 0 if all went well (page still locked)
 * < 0 if there were errors (page still locked)
 */
static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
				 struct page *page,
				 struct btrfs_bio_ctrl *bio_ctrl,
				 loff_t i_size,
				 int *nr_ret)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u64 cur = page_offset(page);
	u64 end = cur + PAGE_SIZE - 1;
	u64 extent_offset;
	u64 block_start;
	struct extent_map *em;
	int ret = 0;
	int nr = 0;

	ret = btrfs_writepage_cow_fixup(page);
	if (ret) {
		/* Fixup worker will requeue */
		redirty_page_for_writepage(bio_ctrl->wbc, page);
		unlock_page(page);
		return 1;
	}

	bio_ctrl->end_io_func = end_bbio_data_write;
	while (cur <= end) {
		u32 len = end - cur + 1;
		u64 disk_bytenr;
		u64 em_end;
		u64 dirty_range_start = cur;
		u64 dirty_range_end;
		u32 iosize;

		if (cur >= i_size) {
			btrfs_mark_ordered_io_finished(inode, page, cur, len,
						       true);
			/*
			 * This range is beyond i_size, thus we don't need to
			 * bother writing back.
			 * But we still need to clear the dirty subpage bit, or
			 * the next time the page gets dirtied, we will try to
			 * writeback the sectors with subpage dirty bits,
			 * causing writeback without ordered extent.
			 */
			btrfs_folio_clear_dirty(fs_info, page_folio(page), cur, len);
			break;
		}

		find_next_dirty_byte(fs_info, page, &dirty_range_start,
				     &dirty_range_end);
		if (cur < dirty_range_start) {
			cur = dirty_range_start;
			continue;
		}

		em = btrfs_get_extent(inode, NULL, cur, len);
		if (IS_ERR(em)) {
			ret = PTR_ERR_OR_ZERO(em);
			goto out_error;
		}

		extent_offset = cur - em->start;
		em_end = extent_map_end(em);
		ASSERT(cur <= em_end);
		ASSERT(cur < end);
		ASSERT(IS_ALIGNED(em->start, fs_info->sectorsize));
		ASSERT(IS_ALIGNED(em->len, fs_info->sectorsize));

		block_start = em->block_start;
		disk_bytenr = em->block_start + extent_offset;

		ASSERT(!extent_map_is_compressed(em));
		ASSERT(block_start != EXTENT_MAP_HOLE);
		ASSERT(block_start != EXTENT_MAP_INLINE);

		/*
		 * Note that em_end from extent_map_end() and dirty_range_end from
		 * find_next_dirty_byte() are all exclusive
		 */
		iosize = min(min(em_end, end + 1), dirty_range_end) - cur;
		free_extent_map(em);
		em = NULL;

		btrfs_set_range_writeback(inode, cur, cur + iosize - 1);
		if (!PageWriteback(page)) {
			btrfs_err(inode->root->fs_info,
				  "page %lu not writeback, cur %llu end %llu",
				  page->index, cur, end);
		}

		/*
		 * Although the PageDirty bit is cleared before entering this
		 * function, subpage dirty bit is not cleared.
		 * So clear subpage dirty bit here so next time we won't submit
		 * page for range already written to disk.
		 */
		btrfs_folio_clear_dirty(fs_info, page_folio(page), cur, iosize);

		submit_extent_page(bio_ctrl, disk_bytenr, page, iosize,
				   cur - page_offset(page));
		cur += iosize;
		nr++;
	}

	btrfs_folio_assert_not_dirty(fs_info, page_folio(page));
	*nr_ret = nr;
	return 0;

out_error:
	/*
	 * If we finish without problem, we should not only clear page dirty,
	 * but also empty subpage dirty bits
	 */
	*nr_ret = nr;
	return ret;
}
/*
 * the writepage semantics are similar to regular writepage.  extent
 * records are inserted to lock ranges in the tree, and as dirty areas
 * are found, they are marked writeback.  Then the lock bits are removed
 * and the end_io handler clears the writeback ranges
 *
 * Return 0 if everything goes well.
 * Return <0 for error.
 */
static int __extent_writepage(struct page *page, struct btrfs_bio_ctrl *bio_ctrl)
{
	struct folio *folio = page_folio(page);
	struct inode *inode = page->mapping->host;
	const u64 page_start = page_offset(page);
	int ret;
	int nr = 0;
	size_t pg_offset;
	loff_t i_size = i_size_read(inode);
	unsigned long end_index = i_size >> PAGE_SHIFT;

	trace___extent_writepage(page, inode, bio_ctrl->wbc);

	WARN_ON(!PageLocked(page));

	pg_offset = offset_in_page(i_size);
	if (page->index > end_index ||
	    (page->index == end_index && !pg_offset)) {
		folio_invalidate(folio, 0, folio_size(folio));
		folio_unlock(folio);
		return 0;
	}

	if (page->index == end_index)
		memzero_page(page, pg_offset, PAGE_SIZE - pg_offset);

	ret = set_page_extent_mapped(page);
	if (ret < 0)
		goto done;

	ret = writepage_delalloc(BTRFS_I(inode), page, bio_ctrl->wbc);
	if (ret == 1)
		return 0;
	if (ret)
		goto done;

	ret = __extent_writepage_io(BTRFS_I(inode), page, bio_ctrl, i_size, &nr);
	if (ret == 1)
		return 0;

	bio_ctrl->wbc->nr_to_write--;

done:
	if (nr == 0) {
		/* make sure the mapping tag for page dirty gets cleared */
		set_page_writeback(page);
		end_page_writeback(page);
	}
	if (ret) {
		btrfs_mark_ordered_io_finished(BTRFS_I(inode), page, page_start,
					       PAGE_SIZE, !ret);
		mapping_set_error(page->mapping, ret);
	}
	unlock_page(page);
	ASSERT(ret <= 0);
	return ret;
}
void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
{
	wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK,
		       TASK_UNINTERRUPTIBLE);
}
/*
 * Lock extent buffer status and pages for writeback.
 *
 * Return %false if the extent buffer doesn't need to be submitted (e.g. the
 * extent buffer is not dirty)
 * Return %true if the extent buffer is submitted to bio.
 */
static noinline_for_stack bool lock_extent_buffer_for_io(struct extent_buffer *eb,
			  struct writeback_control *wbc)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	bool ret = false;

	btrfs_tree_lock(eb);
	while (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
		btrfs_tree_unlock(eb);
		if (wbc->sync_mode != WB_SYNC_ALL)
			return false;
		wait_on_extent_buffer_writeback(eb);
		btrfs_tree_lock(eb);
	}

	/*
	 * We need to do this to prevent races in people who check if the eb is
	 * under IO since we can end up having no IO bits set for a short period
	 * of time.
	 */
	spin_lock(&eb->refs_lock);
	if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
		set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
		spin_unlock(&eb->refs_lock);
		btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
		percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
					 -eb->len,
					 fs_info->dirty_metadata_batch);
		ret = true;
	} else {
		spin_unlock(&eb->refs_lock);
	}
	btrfs_tree_unlock(eb);
	return ret;
}
cd88a4fd 1573static void set_btree_ioerr(struct extent_buffer *eb)
656f30db 1574{
5a2c6075 1575 struct btrfs_fs_info *fs_info = eb->fs_info;
656f30db 1576
cd88a4fd 1577 set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
656f30db 1578
c2e39305
JB
1579 /*
1580 * A read may stumble upon this buffer later, make sure that it gets an
1581 * error and knows there was an error.
1582 */
1583 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
1584
68b85589
JB
1585 /*
1586 * We need to set the mapping with the io error as well because a write
1587 * error will flip the file system readonly, and then syncfs() will
1588 * return a 0 because we are readonly if we don't modify the err seq for
1589 * the superblock.
1590 */
cd88a4fd 1591 mapping_set_error(eb->fs_info->btree_inode->i_mapping, -EIO);
68b85589 1592
656f30db
FM
1593 /*
1594 * If writeback for a btree extent that doesn't belong to a log tree
1595 * failed, increment the counter transaction->eb_write_errors.
1596 * We do this because while the transaction is running and before it's
1597 * committing (when we call filemap_fdata[write|wait]_range against
1598 * the btree inode), we might have
1599 * btree_inode->i_mapping->a_ops->writepages() called by the VM - if it
1600 * returns an error or an error happens during writeback, when we're
1601 * committing the transaction we wouldn't know about it, since the pages
1602 * can be no longer dirty nor marked anymore for writeback (if a
1603 * subsequent modification to the extent buffer didn't happen before the
1604 * transaction commit), which makes filemap_fdata[write|wait]_range not
bc00965d 1605 * able to find the pages which contain errors at transaction
656f30db
FM
1606 * commit time. So if this happens we must abort the transaction,
1607 * otherwise we commit a super block with btree roots that point to
1608 * btree nodes/leafs whose content on disk is invalid - either garbage
1609 * or the content of some node/leaf from a past generation that got
1610 * cowed or deleted and is no longer valid.
1611 *
1612 * Note: setting AS_EIO/AS_ENOSPC in the btree inode's i_mapping would
1613 * not be enough - we need to distinguish between log tree extents vs
1614 * non-log tree extents, and the next filemap_fdatawait_range() call
1615 * will catch and clear such errors in the mapping - and that call might
1616 * be from a log sync and not from a transaction commit. Also, checking
1617 * for the eb flag EXTENT_BUFFER_WRITE_ERR at transaction commit time is
1618 * not done and would not be reliable - the eb might have been released
1619 * from memory and reading it back again means that flag would not be
1620 * set (since it's a runtime flag, not persisted on disk).
1621 *
1622 * Using the flags below in the btree inode also covers the case where
1623 * writepages() returns success after having started writeback for all
1624 * dirty pages, but before filemap_fdatawait_range() is called the
1625 * writeback for all those pages had already finished with errors -
1626 * because we were not using AS_EIO/AS_ENOSPC,
1627 * filemap_fdatawait_range() would return success, as it could not know
1628 * that writeback errors happened (the pages were no longer tagged for
1629 * writeback).
1630 */
1631 switch (eb->log_index) {
1632 case -1:
5a2c6075 1633 set_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags);
656f30db
FM
1634 break;
1635 case 0:
5a2c6075 1636 set_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags);
656f30db
FM
1637 break;
1638 case 1:
5a2c6075 1639 set_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags);
656f30db
FM
1640 break;
1641 default:
1642 BUG(); /* unexpected, logic error */
1643 }
1644}
1645
2f3186d8
QW
1646/*
1647 * The endio specific version which won't touch any unsafe spinlock in endio
1648 * context.
1649 */
1650static struct extent_buffer *find_extent_buffer_nolock(
1651 struct btrfs_fs_info *fs_info, u64 start)
1652{
1653 struct extent_buffer *eb;
1654
1655 rcu_read_lock();
01cd3909
DS
1656 eb = radix_tree_lookup(&fs_info->buffer_radix,
1657 start >> fs_info->sectorsize_bits);
2f3186d8
QW
1658 if (eb && atomic_inc_not_zero(&eb->refs)) {
1659 rcu_read_unlock();
1660 return eb;
1661 }
1662 rcu_read_unlock();
1663 return NULL;
1664}
1665
a700ca5e 1666static void end_bbio_meta_write(struct btrfs_bio *bbio)
2f3186d8 1667{
cd88a4fd
CH
1668 struct extent_buffer *eb = bbio->private;
1669 struct btrfs_fs_info *fs_info = eb->fs_info;
1670 bool uptodate = !bbio->bio.bi_status;
a700ca5e 1671 struct folio_iter fi;
cd88a4fd 1672 u32 bio_offset = 0;
2f3186d8 1673
cd88a4fd
CH
1674 if (!uptodate)
1675 set_btree_ioerr(eb);
fa04c165 1676
a700ca5e 1677 bio_for_each_folio_all(fi, &bbio->bio) {
cd88a4fd 1678 u64 start = eb->start + bio_offset;
a700ca5e
QW
1679 struct folio *folio = fi.folio;
1680 u32 len = fi.length;
2f3186d8 1681
a700ca5e 1682 btrfs_folio_clear_writeback(fs_info, folio, start, len);
cd88a4fd 1683 bio_offset += len;
2f3186d8 1684 }
0b32f4bb 1685
cd88a4fd
CH
1686 clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
1687 smp_mb__after_atomic();
1688 wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
0b32f4bb 1689
cd88a4fd 1690 bio_put(&bbio->bio);
0b32f4bb
JB
1691}
1692
fa04c165
QW
1693static void prepare_eb_write(struct extent_buffer *eb)
1694{
1695 u32 nritems;
1696 unsigned long start;
1697 unsigned long end;
1698
1699 clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
fa04c165
QW
1700
1701 /* Zero the part of the btree block beyond nritems to avoid stale content */
1702 nritems = btrfs_header_nritems(eb);
1703 if (btrfs_header_level(eb) > 0) {
e23efd8e 1704 end = btrfs_node_key_ptr_offset(eb, nritems);
fa04c165
QW
1705 memzero_extent_buffer(eb, end, eb->len - end);
1706 } else {
1707 /*
1708 * Leaf:
1709 * header 0 1 2 .. N ... data_N .. data_2 data_1 data_0
1710 */
42c9419a 1711 start = btrfs_item_nr_offset(eb, nritems);
8009adf3 1712 end = btrfs_item_nr_offset(eb, 0);
3a3178c7
JB
1713 if (nritems == 0)
1714 end += BTRFS_LEAF_DATA_SIZE(eb->fs_info);
1715 else
1716 end += btrfs_item_offset(eb, nritems - 1);
fa04c165
QW
1717 memzero_extent_buffer(eb, start, end - start);
1718 }
1719}
1720
55173337 1721static noinline_for_stack void write_one_eb(struct extent_buffer *eb,
50b21d7a 1722 struct writeback_control *wbc)
0b32f4bb 1723{
46672a44 1724 struct btrfs_fs_info *fs_info = eb->fs_info;
b51e6b4b 1725 struct btrfs_bio *bbio;
0b32f4bb 1726
fa04c165 1727 prepare_eb_write(eb);
35b6ddfa 1728
b51e6b4b
CH
1729 bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
1730 REQ_OP_WRITE | REQ_META | wbc_to_write_flags(wbc),
a700ca5e 1731 eb->fs_info, end_bbio_meta_write, eb);
b51e6b4b 1732 bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT;
46672a44 1733 bio_set_dev(&bbio->bio, fs_info->fs_devices->latest_dev->bdev);
b51e6b4b
CH
1734 wbc_init_bio(wbc, &bbio->bio);
1735 bbio->inode = BTRFS_I(eb->fs_info->btree_inode);
1736 bbio->file_offset = eb->start;
46672a44 1737 if (fs_info->nodesize < PAGE_SIZE) {
55151ea9
QW
1738 struct folio *folio = eb->folios[0];
1739 bool ret;
0b32f4bb 1740
55151ea9
QW
1741 folio_lock(folio);
1742 btrfs_subpage_set_writeback(fs_info, folio, eb->start, eb->len);
1743 if (btrfs_subpage_clear_and_test_dirty(fs_info, folio, eb->start,
46672a44 1744 eb->len)) {
55151ea9 1745 folio_clear_dirty_for_io(folio);
46672a44
CH
1746 wbc->nr_to_write--;
1747 }
55151ea9
QW
1748 ret = bio_add_folio(&bbio->bio, folio, eb->len,
1749 eb->start - folio_pos(folio));
1750 ASSERT(ret);
1751 wbc_account_cgroup_owner(wbc, folio_page(folio, 0), eb->len);
1752 folio_unlock(folio);
46672a44 1753 } else {
13df3775
QW
1754 int num_folios = num_extent_folios(eb);
1755
1756 for (int i = 0; i < num_folios; i++) {
1757 struct folio *folio = eb->folios[i];
1758 bool ret;
1759
1760 folio_lock(folio);
1761 folio_clear_dirty_for_io(folio);
1762 folio_start_writeback(folio);
84cda1a6 1763 ret = bio_add_folio(&bbio->bio, folio, eb->folio_size, 0);
13df3775
QW
1764 ASSERT(ret);
1765 wbc_account_cgroup_owner(wbc, folio_page(folio, 0),
84cda1a6 1766 eb->folio_size);
13df3775
QW
1767 wbc->nr_to_write -= folio_nr_pages(folio);
1768 folio_unlock(folio);
46672a44 1769 }
0b32f4bb 1770 }
b51e6b4b 1771 btrfs_submit_bio(bbio, 0);
0b32f4bb
JB
1772}
1773
c4aec299
QW
1774/*
1775 * Submit one subpage btree page.
1776 *
1777 * The main difference to submit_eb_page() is:
1778 * - Page locking
1779 * For subpage, we don't rely on page locking at all.
1780 *
1781 * - Flush write bio
1782 * We only flush bio if we may be unable to fit current extent buffers into
1783 * current bio.
1784 *
1785 * Return >=0 for the number of submitted extent buffers.
1786 * Return <0 for fatal error.
1787 */
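/*
 * A worked example of the bitmap walk below, assuming the common subpage
 * setup of 64K pages, 4K sectorsize and 16K nodesize: the per-page dirty
 * bitmap has PAGE_SIZE / sectorsize = 16 bits and sectors_per_node is
 * nodesize / sectorsize = 4.  A clear bit advances bit_start by one sector,
 * while a set bit marks the start of a dirty extent buffer at
 * page_start + bit_start * sectorsize and advances bit_start by 4 sectors,
 * skipping the rest of that nodesize block.
 */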
50b21d7a 1788static int submit_eb_subpage(struct page *page, struct writeback_control *wbc)
c4aec299 1789{
b33d2e53 1790 struct btrfs_fs_info *fs_info = page_to_fs_info(page);
cfbf07e2 1791 struct folio *folio = page_folio(page);
c4aec299
QW
1792 int submitted = 0;
1793 u64 page_start = page_offset(page);
1794 int bit_start = 0;
c4aec299 1795 int sectors_per_node = fs_info->nodesize >> fs_info->sectorsize_bits;
c4aec299
QW
1796
1797 /* Lock and write each dirty extent buffer in the range */
72a69cd0 1798 while (bit_start < fs_info->subpage_info->bitmap_nr_bits) {
cfbf07e2 1799 struct btrfs_subpage *subpage = folio_get_private(folio);
c4aec299
QW
1800 struct extent_buffer *eb;
1801 unsigned long flags;
1802 u64 start;
1803
1804 /*
1805 * Take private lock to ensure the subpage won't be detached
1806 * in the meantime.
1807 */
600f111e 1808 spin_lock(&page->mapping->i_private_lock);
cfbf07e2 1809 if (!folio_test_private(folio)) {
600f111e 1810 spin_unlock(&page->mapping->i_private_lock);
c4aec299
QW
1811 break;
1812 }
1813 spin_lock_irqsave(&subpage->lock, flags);
72a69cd0
QW
1814 if (!test_bit(bit_start + fs_info->subpage_info->dirty_offset,
1815 subpage->bitmaps)) {
c4aec299 1816 spin_unlock_irqrestore(&subpage->lock, flags);
600f111e 1817 spin_unlock(&page->mapping->i_private_lock);
c4aec299
QW
1818 bit_start++;
1819 continue;
1820 }
1821
1822 start = page_start + bit_start * fs_info->sectorsize;
1823 bit_start += sectors_per_node;
1824
1825 /*
1826 * Here we just want to grab the eb without touching extra
1827 * spin locks, so call find_extent_buffer_nolock().
1828 */
1829 eb = find_extent_buffer_nolock(fs_info, start);
1830 spin_unlock_irqrestore(&subpage->lock, flags);
600f111e 1831 spin_unlock(&page->mapping->i_private_lock);
c4aec299
QW
1832
1833 /*
1834 * The eb has already reached 0 refs thus find_extent_buffer()
1835 * doesn't return it. We don't need to write back such eb
1836 * anyway.
1837 */
1838 if (!eb)
1839 continue;
1840
50b21d7a 1841 if (lock_extent_buffer_for_io(eb, wbc)) {
46672a44 1842 write_one_eb(eb, wbc);
9fdd1601 1843 submitted++;
c4aec299 1844 }
c4aec299 1845 free_extent_buffer(eb);
c4aec299
QW
1846 }
1847 return submitted;
c4aec299
QW
1848}
1849
f91e0d0c
QW
1850/*
1851 * Submit all page(s) of one extent buffer.
1852 *
1853 * @page: the page of one extent buffer
1854 * @ctx: the eb write context; ctx->eb tracks the extent buffer whose
1855 * pages were last submitted, so pages of that eb are not resubmitted
1856 *
1857 * The caller should pass each page in their bytenr order, and here we use
1858 * ctx->eb to determine if we have submitted pages of one extent buffer.
1859 *
1860 * If we have, we just skip until we hit a new page that doesn't belong to
1861 * the current ctx->eb.
1862 *
1863 * If not, we submit all the page(s) of the extent buffer.
1864 *
1865 * Return >0 if we have submitted the extent buffer successfully.
1866 * Return 0 if we don't need to submit the page, as it's already submitted by
1867 * previous call.
1868 * Return <0 for fatal error.
1869 */
861093ef 1870static int submit_eb_page(struct page *page, struct btrfs_eb_write_context *ctx)
f91e0d0c 1871{
861093ef 1872 struct writeback_control *wbc = ctx->wbc;
f91e0d0c 1873 struct address_space *mapping = page->mapping;
cfbf07e2 1874 struct folio *folio = page_folio(page);
f91e0d0c
QW
1875 struct extent_buffer *eb;
1876 int ret;
1877
cfbf07e2 1878 if (!folio_test_private(folio))
f91e0d0c
QW
1879 return 0;
1880
b33d2e53 1881 if (page_to_fs_info(page)->nodesize < PAGE_SIZE)
50b21d7a 1882 return submit_eb_subpage(page, wbc);
c4aec299 1883
600f111e 1884 spin_lock(&mapping->i_private_lock);
cfbf07e2 1885 if (!folio_test_private(folio)) {
600f111e 1886 spin_unlock(&mapping->i_private_lock);
f91e0d0c
QW
1887 return 0;
1888 }
1889
cfbf07e2 1890 eb = folio_get_private(folio);
f91e0d0c
QW
1891
1892 /*
1893 * Shouldn't happen and normally this would be a BUG_ON but no point
1894 * crashing the machine for something we can survive anyway.
1895 */
1896 if (WARN_ON(!eb)) {
600f111e 1897 spin_unlock(&mapping->i_private_lock);
f91e0d0c
QW
1898 return 0;
1899 }
1900
861093ef 1901 if (eb == ctx->eb) {
600f111e 1902 spin_unlock(&mapping->i_private_lock);
f91e0d0c
QW
1903 return 0;
1904 }
1905 ret = atomic_inc_not_zero(&eb->refs);
600f111e 1906 spin_unlock(&mapping->i_private_lock);
f91e0d0c
QW
1907 if (!ret)
1908 return 0;
1909
861093ef
NA
1910 ctx->eb = eb;
1911
2ad8c051
NA
1912 ret = btrfs_check_meta_write_pointer(eb->fs_info, ctx);
1913 if (ret) {
1914 if (ret == -EBUSY)
0bc09ca1
NA
1915 ret = 0;
1916 free_extent_buffer(eb);
1917 return ret;
1918 }
1919
50b21d7a 1920 if (!lock_extent_buffer_for_io(eb, wbc)) {
f91e0d0c 1921 free_extent_buffer(eb);
50b21d7a 1922 return 0;
f91e0d0c 1923 }
0356ad41 1924 /* Implies write in zoned mode. */
7db94301 1925 if (ctx->zoned_bg) {
0356ad41 1926 /* Mark the last eb in the block group. */
7db94301 1927 btrfs_schedule_zone_finish_bg(ctx->zoned_bg, eb);
0356ad41 1928 ctx->zoned_bg->meta_write_pointer += eb->len;
be1a1d7a 1929 }
50b21d7a 1930 write_one_eb(eb, wbc);
f91e0d0c 1931 free_extent_buffer(eb);
f91e0d0c
QW
1932 return 1;
1933}
1934
0b32f4bb
JB
1935int btree_write_cache_pages(struct address_space *mapping,
1936 struct writeback_control *wbc)
1937{
861093ef 1938 struct btrfs_eb_write_context ctx = { .wbc = wbc };
41044b41 1939 struct btrfs_fs_info *fs_info = inode_to_fs_info(mapping->host);
0b32f4bb
JB
1940 int ret = 0;
1941 int done = 0;
1942 int nr_to_write_done = 0;
51c5cd3b
VMO
1943 struct folio_batch fbatch;
1944 unsigned int nr_folios;
0b32f4bb
JB
1945 pgoff_t index;
1946 pgoff_t end; /* Inclusive */
1947 int scanned = 0;
10bbd235 1948 xa_mark_t tag;
0b32f4bb 1949
51c5cd3b 1950 folio_batch_init(&fbatch);
0b32f4bb
JB
1951 if (wbc->range_cyclic) {
1952 index = mapping->writeback_index; /* Start from prev offset */
1953 end = -1;
556755a8
JB
1954 /*
1955 * Start from the beginning does not need to cycle over the
1956 * range, mark it as scanned.
1957 */
1958 scanned = (index == 0);
0b32f4bb 1959 } else {
09cbfeaf
KS
1960 index = wbc->range_start >> PAGE_SHIFT;
1961 end = wbc->range_end >> PAGE_SHIFT;
0b32f4bb
JB
1962 scanned = 1;
1963 }
1964 if (wbc->sync_mode == WB_SYNC_ALL)
1965 tag = PAGECACHE_TAG_TOWRITE;
1966 else
1967 tag = PAGECACHE_TAG_DIRTY;
0bc09ca1 1968 btrfs_zoned_meta_io_lock(fs_info);
0b32f4bb
JB
1969retry:
1970 if (wbc->sync_mode == WB_SYNC_ALL)
1971 tag_pages_for_writeback(mapping, index, end);
1972 while (!done && !nr_to_write_done && (index <= end) &&
51c5cd3b
VMO
1973 (nr_folios = filemap_get_folios_tag(mapping, &index, end,
1974 tag, &fbatch))) {
0b32f4bb
JB
1975 unsigned i;
1976
51c5cd3b
VMO
1977 for (i = 0; i < nr_folios; i++) {
1978 struct folio *folio = fbatch.folios[i];
0b32f4bb 1979
861093ef 1980 ret = submit_eb_page(&folio->page, &ctx);
f91e0d0c 1981 if (ret == 0)
0b32f4bb 1982 continue;
f91e0d0c 1983 if (ret < 0) {
0b32f4bb 1984 done = 1;
0b32f4bb
JB
1985 break;
1986 }
0b32f4bb
JB
1987
1988 /*
1989 * The filesystem may choose to bump up nr_to_write.
1990 * We have to make sure to honor the new nr_to_write
1991 * at any time.
1992 */
1993 nr_to_write_done = wbc->nr_to_write <= 0;
1994 }
51c5cd3b 1995 folio_batch_release(&fbatch);
0b32f4bb
JB
1996 cond_resched();
1997 }
1998 if (!scanned && !done) {
1999 /*
2000 * We hit the last page and there is more work to be done: wrap
2001 * back to the start of the file
2002 */
2003 scanned = 1;
2004 index = 0;
2005 goto retry;
2006 }
b3ff8f1d
QW
2007 /*
2008 * If something went wrong, don't allow any metadata write bio to be
2009 * submitted.
2010 *
2011 * This would prevent use-after-free if we had dirty pages not
2012 * cleaned up, which can still happen with fuzzed images.
2013 *
2014 * - Bad extent tree
2015 * Allowing existing tree block to be allocated for other trees.
2016 *
2017 * - Log tree operations
2018 * Existing tree blocks get allocated to the log tree, which bumps
2019 * their generation, then they get cleaned in tree re-balance.
2020 * Such tree block will not be written back, since it's clean,
2021 * thus no WRITTEN flag set.
2022 * And after log writes back, this tree block is not traced by
2023 * any dirty extent_io_tree.
2024 *
2025 * - Offending tree block gets re-dirtied from its original owner
2026 * Since it has bumped generation, no WRITTEN flag, it can be
2027 * reused without COWing. This tree block will not be traced
2028 * by btrfs_transaction::dirty_pages.
2029 *
2030 * Now such dirty tree block will not be cleaned by any dirty
2031 * extent io tree. Thus we don't want to submit such wild eb
2032 * if the fs already has error.
9845e5dd 2033 *
c9583ada
QW
2034 * We can get ret > 0 from submit_eb_page() indicating how many ebs
2035 * were submitted. Reset it to 0 to avoid false alerts for the caller.
2036 */
2037 if (ret > 0)
2038 ret = 0;
9845e5dd
CH
2039 if (!ret && BTRFS_FS_ERROR(fs_info))
2040 ret = -EROFS;
7db94301
NA
2041
2042 if (ctx.zoned_bg)
2043 btrfs_put_block_group(ctx.zoned_bg);
9845e5dd 2044 btrfs_zoned_meta_io_unlock(fs_info);
0b32f4bb
JB
2045 return ret;
2046}
2047
43dd529a 2048/*
3bed2da1
NB
2049 * Walk the list of dirty pages of the given address space and write all of them.
2050 *
ee5f017d
DS
2051 * @mapping: address space structure to write
2052 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2053 * @bio_ctrl: holds context for the write, namely the bio
d1310b2e
CM
2054 *
2055 * If a page is already under I/O, write_cache_pages() skips it, even
2056 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
2057 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
2058 * and msync() need to guarantee that all the data which was dirty at the time
2059 * the call was made get new I/O started against them. If wbc->sync_mode is
2060 * WB_SYNC_ALL then we were called for data integrity and we must wait for
2061 * existing IO to complete.
2062 */
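/*
 * As a rough caller-side illustration (a hypothetical sketch): a data
 * integrity flush such as fsync() ends up here through btrfs_writepages()
 * with a writeback_control along the lines of
 *
 *	struct writeback_control wbc = {
 *		.sync_mode	= WB_SYNC_ALL,
 *		.nr_to_write	= LONG_MAX,
 *		.range_start	= 0,
 *		.range_end	= LLONG_MAX,
 *	};
 *
 * whereas background memory-cleaning writeback uses WB_SYNC_NONE with a
 * bounded nr_to_write, which is why pages already under IO may be skipped
 * in that case.
 */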
4242b64a 2063static int extent_write_cache_pages(struct address_space *mapping,
ee5f017d 2064 struct btrfs_bio_ctrl *bio_ctrl)
d1310b2e 2065{
72b505dc 2066 struct writeback_control *wbc = bio_ctrl->wbc;
7fd1a3f7 2067 struct inode *inode = mapping->host;
d1310b2e
CM
2068 int ret = 0;
2069 int done = 0;
f85d7d6c 2070 int nr_to_write_done = 0;
9f50fd2e
VMO
2071 struct folio_batch fbatch;
2072 unsigned int nr_folios;
d1310b2e
CM
2073 pgoff_t index;
2074 pgoff_t end; /* Inclusive */
a9132667
LB
2075 pgoff_t done_index;
2076 int range_whole = 0;
d1310b2e 2077 int scanned = 0;
10bbd235 2078 xa_mark_t tag;
d1310b2e 2079
7fd1a3f7
JB
2080 /*
2081 * We have to hold onto the inode so that ordered extents can do their
2082 * work when the IO finishes. The alternative to this is failing to add
2083 * an ordered extent if the igrab() fails there and that is a huge pain
2084 * to deal with, so instead just hold onto the inode throughout the
2085 * writepages operation. If it fails here we are freeing up the inode
2086 * anyway and we'd rather not waste our time writing out stuff that is
2087 * going to be truncated anyway.
2088 */
2089 if (!igrab(inode))
2090 return 0;
2091
9f50fd2e 2092 folio_batch_init(&fbatch);
d1310b2e
CM
2093 if (wbc->range_cyclic) {
2094 index = mapping->writeback_index; /* Start from prev offset */
2095 end = -1;
556755a8
JB
2096 /*
2097 * Start from the beginning does not need to cycle over the
2098 * range, mark it as scanned.
2099 */
2100 scanned = (index == 0);
d1310b2e 2101 } else {
09cbfeaf
KS
2102 index = wbc->range_start >> PAGE_SHIFT;
2103 end = wbc->range_end >> PAGE_SHIFT;
a9132667
LB
2104 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2105 range_whole = 1;
d1310b2e
CM
2106 scanned = 1;
2107 }
3cd24c69
EL
2108
2109 /*
2110 * We do the tagged writepage as long as the snapshot flush bit is set
2111 * and we are the first one who do the filemap_flush() on this inode.
2112 *
2113 * The nr_to_write == LONG_MAX is needed to make sure other flushers do
2114 * not race in and drop the bit.
2115 */
2116 if (range_whole && wbc->nr_to_write == LONG_MAX &&
2117 test_and_clear_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
2118 &BTRFS_I(inode)->runtime_flags))
2119 wbc->tagged_writepages = 1;
2120
2121 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
f7aaa06b
JB
2122 tag = PAGECACHE_TAG_TOWRITE;
2123 else
2124 tag = PAGECACHE_TAG_DIRTY;
d1310b2e 2125retry:
3cd24c69 2126 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
f7aaa06b 2127 tag_pages_for_writeback(mapping, index, end);
a9132667 2128 done_index = index;
f85d7d6c 2129 while (!done && !nr_to_write_done && (index <= end) &&
9f50fd2e
VMO
2130 (nr_folios = filemap_get_folios_tag(mapping, &index,
2131 end, tag, &fbatch))) {
d1310b2e
CM
2132 unsigned i;
2133
9f50fd2e
VMO
2134 for (i = 0; i < nr_folios; i++) {
2135 struct folio *folio = fbatch.folios[i];
d1310b2e 2136
7b365a2a 2137 done_index = folio_next_index(folio);
d1310b2e 2138 /*
b93b0163
MW
2139 * At this point we hold neither the i_pages lock nor
2140 * the page lock: the page may be truncated or
2141 * invalidated (changing page->mapping to NULL),
2142 * or even swizzled back from swapper_space to
2143 * tmpfs file mapping
d1310b2e 2144 */
9f50fd2e 2145 if (!folio_trylock(folio)) {
ee5f017d 2146 submit_write_bio(bio_ctrl, 0);
9f50fd2e 2147 folio_lock(folio);
01d658f2 2148 }
d1310b2e 2149
9f50fd2e
VMO
2150 if (unlikely(folio->mapping != mapping)) {
2151 folio_unlock(folio);
d1310b2e
CM
2152 continue;
2153 }
2154
5c256998
CH
2155 if (!folio_test_dirty(folio)) {
2156 /* Someone wrote it for us. */
2157 folio_unlock(folio);
2158 continue;
2159 }
2160
d2c3f4f6 2161 if (wbc->sync_mode != WB_SYNC_NONE) {
9f50fd2e 2162 if (folio_test_writeback(folio))
ee5f017d 2163 submit_write_bio(bio_ctrl, 0);
9f50fd2e 2164 folio_wait_writeback(folio);
d2c3f4f6 2165 }
d1310b2e 2166
9f50fd2e
VMO
2167 if (folio_test_writeback(folio) ||
2168 !folio_clear_dirty_for_io(folio)) {
2169 folio_unlock(folio);
d1310b2e
CM
2170 continue;
2171 }
2172
72b505dc 2173 ret = __extent_writepage(&folio->page, bio_ctrl);
a9132667 2174 if (ret < 0) {
a9132667
LB
2175 done = 1;
2176 break;
2177 }
f85d7d6c
CM
2178
2179 /*
effa24f6 2180 * The filesystem may choose to bump up nr_to_write.
f85d7d6c 2181 * We have to make sure to honor the new nr_to_write
effa24f6 2182 * at any time.
f85d7d6c 2183 */
effa24f6
CH
2184 nr_to_write_done = (wbc->sync_mode == WB_SYNC_NONE &&
2185 wbc->nr_to_write <= 0);
d1310b2e 2186 }
9f50fd2e 2187 folio_batch_release(&fbatch);
d1310b2e
CM
2188 cond_resched();
2189 }
894b36e3 2190 if (!scanned && !done) {
d1310b2e
CM
2191 /*
2192 * We hit the last page and there is more work to be done: wrap
2193 * back to the start of the file
2194 */
2195 scanned = 1;
2196 index = 0;
42ffb0bf
JB
2197
2198 /*
2199 * If we're looping we could run into a page that is locked by a
2200 * writer and that writer could be waiting on writeback for a
2201 * page in our current bio, and thus deadlock, so flush the
2202 * write bio here.
2203 */
ee5f017d 2204 submit_write_bio(bio_ctrl, 0);
c9583ada 2205 goto retry;
d1310b2e 2206 }
a9132667
LB
2207
2208 if (wbc->range_cyclic || (wbc->nr_to_write > 0 && range_whole))
2209 mapping->writeback_index = done_index;
2210
e55cf7ca 2211 btrfs_add_delayed_iput(BTRFS_I(inode));
894b36e3 2212 return ret;
d1310b2e 2213}
d1310b2e 2214
2bd0fc93
QW
2215/*
2216 * Submit the pages in the range to bio for call sites whose delalloc range
2217 * has already been run (aka, ordered extent inserted) and all pages are still
2218 * locked.
2219 */
778b8785
CH
2220void extent_write_locked_range(struct inode *inode, struct page *locked_page,
2221 u64 start, u64 end, struct writeback_control *wbc,
2222 bool pages_dirty)
771ed689 2223{
2bd0fc93 2224 bool found_error = false;
771ed689
CM
2225 int ret = 0;
2226 struct address_space *mapping = inode->i_mapping;
41044b41 2227 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
eb34dcea
CH
2228 const u32 sectorsize = fs_info->sectorsize;
2229 loff_t i_size = i_size_read(inode);
2bd0fc93 2230 u64 cur = start;
c000bc04 2231 struct btrfs_bio_ctrl bio_ctrl = {
7027f871
CH
2232 .wbc = wbc,
2233 .opf = REQ_OP_WRITE | wbc_to_write_flags(wbc),
c000bc04 2234 };
771ed689 2235
7027f871
CH
2236 if (wbc->no_cgroup_owner)
2237 bio_ctrl.opf |= REQ_BTRFS_CGROUP_PUNT;
2238
66448b9d 2239 ASSERT(IS_ALIGNED(start, sectorsize) && IS_ALIGNED(end + 1, sectorsize));
66448b9d 2240
2bd0fc93 2241 while (cur <= end) {
66448b9d 2242 u64 cur_end = min(round_down(cur, PAGE_SIZE) + PAGE_SIZE - 1, end);
9783e4de 2243 u32 cur_len = cur_end + 1 - cur;
eb34dcea
CH
2244 struct page *page;
2245 int nr = 0;
66448b9d 2246
2bd0fc93 2247 page = find_get_page(mapping, cur >> PAGE_SHIFT);
66448b9d 2248 ASSERT(PageLocked(page));
778b8785 2249 if (pages_dirty && page != locked_page) {
44962ca3
CH
2250 ASSERT(PageDirty(page));
2251 clear_page_dirty_for_io(page);
2252 }
eb34dcea
CH
2253
2254 ret = __extent_writepage_io(BTRFS_I(inode), page, &bio_ctrl,
2255 i_size, &nr);
2256 if (ret == 1)
2257 goto next_page;
2258
2259 /* Make sure the mapping tag for page dirty gets cleared. */
2260 if (nr == 0) {
2261 set_page_writeback(page);
2262 end_page_writeback(page);
2263 }
9783e4de
CH
2264 if (ret) {
2265 btrfs_mark_ordered_io_finished(BTRFS_I(inode), page,
2266 cur, cur_len, !ret);
9783e4de
CH
2267 mapping_set_error(page->mapping, ret);
2268 }
55151ea9 2269 btrfs_folio_unlock_writer(fs_info, page_folio(page), cur, cur_len);
0835d1e6 2270 if (ret < 0)
2bd0fc93 2271 found_error = true;
eb34dcea 2272next_page:
09cbfeaf 2273 put_page(page);
66448b9d 2274 cur = cur_end + 1;
771ed689
CM
2275 }
2276
ee5f017d 2277 submit_write_bio(&bio_ctrl, found_error ? ret : 0);
771ed689 2278}
d1310b2e 2279
c66f2afc 2280int btrfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
d1310b2e 2281{
35156d85 2282 struct inode *inode = mapping->host;
d1310b2e 2283 int ret = 0;
ee5f017d 2284 struct btrfs_bio_ctrl bio_ctrl = {
72b505dc 2285 .wbc = wbc,
c000bc04 2286 .opf = REQ_OP_WRITE | wbc_to_write_flags(wbc),
d1310b2e
CM
2287 };
2288
35156d85
JT
2289 /*
2290 * Allow only a single thread to do the reloc work in zoned mode to
2291 * protect the write pointer updates.
2292 */
869f4cdc 2293 btrfs_zoned_data_reloc_lock(BTRFS_I(inode));
72b505dc 2294 ret = extent_write_cache_pages(mapping, &bio_ctrl);
ee5f017d 2295 submit_write_bio(&bio_ctrl, ret);
19ab78ca 2296 btrfs_zoned_data_reloc_unlock(BTRFS_I(inode));
d1310b2e
CM
2297 return ret;
2298}
d1310b2e 2299
7938d38b 2300void btrfs_readahead(struct readahead_control *rac)
d1310b2e 2301{
c000bc04 2302 struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ | REQ_RAHEAD };
67c9684f 2303 struct page *pagepool[16];
125bac01 2304 struct extent_map *em_cached = NULL;
808f80b4 2305 u64 prev_em_start = (u64)-1;
ba206a02 2306 int nr;
d1310b2e 2307
ba206a02 2308 while ((nr = readahead_page_batch(rac, pagepool))) {
32c0a6bc
MWO
2309 u64 contig_start = readahead_pos(rac);
2310 u64 contig_end = contig_start + readahead_batch_length(rac) - 1;
e65ef21e 2311
ba206a02 2312 contiguous_readpages(pagepool, nr, contig_start, contig_end,
390ed29b 2313 &em_cached, &bio_ctrl, &prev_em_start);
d1310b2e 2314 }
67c9684f 2315
125bac01
MX
2316 if (em_cached)
2317 free_extent_map(em_cached);
722c82ac 2318 submit_one_bio(&bio_ctrl);
d1310b2e 2319}
d1310b2e
CM
2320
2321/*
895586eb
MWO
2322 * basic invalidate_folio code, this waits on any locked or writeback
2323 * ranges corresponding to the folio, and then deletes any extent state
d1310b2e
CM
2324 * records from the tree
2325 */
895586eb
MWO
2326int extent_invalidate_folio(struct extent_io_tree *tree,
2327 struct folio *folio, size_t offset)
d1310b2e 2328{
2ac55d41 2329 struct extent_state *cached_state = NULL;
895586eb
MWO
2330 u64 start = folio_pos(folio);
2331 u64 end = start + folio_size(folio) - 1;
b33d2e53 2332 size_t blocksize = folio_to_fs_info(folio)->sectorsize;
d1310b2e 2333
829ddec9
QW
2334 /* This function is only called for the btree inode */
2335 ASSERT(tree->owner == IO_TREE_BTREE_INODE_IO);
2336
fda2832f 2337 start += ALIGN(offset, blocksize);
d1310b2e
CM
2338 if (start > end)
2339 return 0;
2340
570eb97b 2341 lock_extent(tree, start, end, &cached_state);
895586eb 2342 folio_wait_writeback(folio);
829ddec9
QW
2343
2344 /*
2345 * Currently for btree io tree, only EXTENT_LOCKED is utilized,
2346 * so here we only need to unlock the extent range to free any
2347 * existing extent state.
2348 */
570eb97b 2349 unlock_extent(tree, start, end, &cached_state);
d1310b2e
CM
2350 return 0;
2351}
d1310b2e 2352
7b13b7b1 2353/*
f913cff3 2354 * a helper for release_folio, this tests for areas of the page that
7b13b7b1
CM
2355 * are locked or under IO and drops the related state bits if it is safe
2356 * to drop the page.
2357 */
de6f14e8 2358static bool try_release_extent_state(struct extent_io_tree *tree,
48a3b636 2359 struct page *page, gfp_t mask)
7b13b7b1 2360{
4eee4fa4 2361 u64 start = page_offset(page);
09cbfeaf 2362 u64 end = start + PAGE_SIZE - 1;
de6f14e8 2363 bool ret;
7b13b7b1 2364
99be1a66 2365 if (test_range_bit_exists(tree, start, end, EXTENT_LOCKED)) {
de6f14e8 2366 ret = false;
8882679e 2367 } else {
b71fb16b 2368 u32 clear_bits = ~(EXTENT_LOCKED | EXTENT_NODATASUM |
a8680550
BB
2369 EXTENT_DELALLOC_NEW | EXTENT_CTLBITS |
2370 EXTENT_QGROUP_RESERVED);
de6f14e8 2371 int ret2;
b71fb16b 2372
11ef160f 2373 /*
2766ff61
FM
2374 * At this point we can safely clear everything except the
2375 * locked bit, the nodatasum bit and the delalloc new bit.
2376 * The delalloc new bit will be cleared by ordered extent
2377 * completion.
11ef160f 2378 */
de6f14e8 2379 ret2 = __clear_extent_bit(tree, start, end, clear_bits, NULL, NULL);
e3f24cc5
CM
2380
2381 /* If clear_extent_bit failed for ENOMEM reasons,
2382 * we can't allow the release to continue.
2383 */
de6f14e8
FM
2384 if (ret2 < 0)
2385 ret = false;
e3f24cc5 2386 else
de6f14e8 2387 ret = true;
7b13b7b1
CM
2388 }
2389 return ret;
2390}
7b13b7b1 2391
d1310b2e 2392/*
f913cff3 2393 * a helper for release_folio. As long as there are no locked extents
d1310b2e
CM
2394 * in the range corresponding to the page, both state records and extent
2395 * map records are removed
2396 */
de6f14e8 2397bool try_release_extent_mapping(struct page *page, gfp_t mask)
d1310b2e 2398{
4eee4fa4 2399 u64 start = page_offset(page);
09cbfeaf 2400 u64 end = start + PAGE_SIZE - 1;
078b981a
FM
2401 struct btrfs_inode *inode = page_to_inode(page);
2402 struct extent_io_tree *io_tree = &inode->io_tree;
2e504418
FM
2403
2404 while (start <= end) {
2405 const u64 cur_gen = btrfs_get_fs_generation(inode->root->fs_info);
2406 const u64 len = end - start + 1;
2407 struct extent_map_tree *extent_tree = &inode->extent_tree;
2408 struct extent_map *em;
2409
2410 write_lock(&extent_tree->lock);
2411 em = lookup_extent_mapping(extent_tree, start, len);
2412 if (!em) {
2413 write_unlock(&extent_tree->lock);
2414 break;
2415 }
2416 if ((em->flags & EXTENT_FLAG_PINNED) || em->start != start) {
2417 write_unlock(&extent_tree->lock);
fbc2bd7e 2418 free_extent_map(em);
2e504418
FM
2419 break;
2420 }
2421 if (test_range_bit_exists(io_tree, em->start,
2422 extent_map_end(em) - 1, EXTENT_LOCKED))
2423 goto next;
2424 /*
2425 * If it's not in the list of modified extents, used by a fast
2426 * fsync, we can remove it. If it's being logged we can safely
2427 * remove it since fsync took an extra reference on the em.
2428 */
2429 if (list_empty(&em->list) || (em->flags & EXTENT_FLAG_LOGGING))
2430 goto remove_em;
2431 /*
2432 * If it's in the list of modified extents, remove it only if
2433 * its generation is older than the current one, in which case
2434 * we don't need it for a fast fsync. Otherwise don't remove it,
2435 * we could be racing with an ongoing fast fsync that could miss
2436 * the new extent.
2437 */
2438 if (em->generation >= cur_gen)
2439 goto next;
2440remove_em:
2441 /*
2442 * We only remove extent maps that are not in the list of
2443 * modified extents or that are in the list but with a
2444 * generation lower than the current generation, so there is no
2445 * need to set the full fsync flag on the inode (it hurts the
2446 * fsync performance for workloads with a data size that exceeds
2447 * or is close to the system's memory).
2448 */
2449 remove_extent_mapping(inode, em);
2450 /* Once for the inode's extent map tree. */
2451 free_extent_map(em);
3d6448e6 2452next:
2e504418
FM
2453 start = extent_map_end(em);
2454 write_unlock(&extent_tree->lock);
70dec807 2455
2e504418
FM
2456 /* Once for us, for the lookup_extent_mapping() reference. */
2457 free_extent_map(em);
2458
2459 if (need_resched()) {
2460 /*
2461 * If we need to resched but we can't block just exit
2462 * and leave any remaining extent maps.
2463 */
2464 if (!gfpflags_allow_blocking(mask))
2465 break;
9f47eb54 2466
2e504418 2467 cond_resched();
d1310b2e 2468 }
d1310b2e 2469 }
078b981a 2470 return try_release_extent_state(io_tree, page, mask);
d1310b2e 2471}
d1310b2e 2472
978b63f7
FM
2473struct btrfs_fiemap_entry {
2474 u64 offset;
2475 u64 phys;
2476 u64 len;
2477 u32 flags;
2478};
2479
4751832d 2480/*
978b63f7
FM
2481 * Indicate the caller of emit_fiemap_extent() that it needs to unlock the file
2482 * range from the inode's io tree, unlock the subvolume tree search path, flush
2483 * the fiemap cache and relock the file range and research the subvolume tree.
2484 * The value here is something negative that can't be confused with a valid
2485 * errno value and different from 1 because that's also a return value from
2486 * fiemap_fill_next_extent() and also it's often used to mean some btree search
2487 * did not find a key, so make it some distinct negative value.
2488 */
2489#define BTRFS_FIEMAP_FLUSH_CACHE (-(MAX_ERRNO + 1))
2490
2491/*
2492 * Used to:
2493 *
2494 * - Cache the next entry to be emitted to the fiemap buffer, so that we can
2495 * merge extents that are contiguous and can be grouped as a single one;
4751832d 2496 *
978b63f7
FM
2497 * - Store extents ready to be written to the fiemap buffer in an intermediary
2498 * buffer. This intermediary buffer is to ensure that in case the fiemap
2499 * buffer is memory mapped to the fiemap target file, we don't deadlock
2500 * during btrfs_page_mkwrite(). This is because during fiemap we are locking
2501 * an extent range in order to prevent races with delalloc flushing and
2502 * ordered extent completion, which is needed in order to reliably detect
2503 * delalloc in holes and prealloc extents. And this can lead to a deadlock
2504 * if the fiemap buffer is memory mapped to the file we are running fiemap
2505 * against (a silly, useless in practice scenario, but possible) because
2506 * btrfs_page_mkwrite() will try to lock the same extent range.
4751832d
QW
2507 */
2508struct fiemap_cache {
978b63f7
FM
2509 /* An array of ready fiemap entries. */
2510 struct btrfs_fiemap_entry *entries;
2511 /* Number of entries in the entries array. */
2512 int entries_size;
2513 /* Index of the next entry in the entries array to write to. */
2514 int entries_pos;
2515 /*
2516 * Once the entries array is full, this indicates what's the offset for
2517 * the next file extent item we must search for in the inode's subvolume
2518 * tree after unlocking the extent range in the inode's io tree and
2519 * releasing the search path.
2520 */
2521 u64 next_search_offset;
2522 /*
2523 * This matches struct fiemap_extent_info::fi_mapped_extents; we use it
2524 * to count emitted extents ourselves and stop instead of relying on
2525 * fiemap_fill_next_extent() because we buffer ready fiemap entries at
2526 * the @entries array, and we want to stop as soon as we hit the max
2527 * amount of extents to map, not just to save time but also to make the
2528 * logic at extent_fiemap() simpler.
2529 */
2530 unsigned int extents_mapped;
2531 /* Fields for the cached extent (unsubmitted, not ready, extent). */
4751832d
QW
2532 u64 offset;
2533 u64 phys;
2534 u64 len;
2535 u32 flags;
2536 bool cached;
2537};
2538
978b63f7
FM
2539static int flush_fiemap_cache(struct fiemap_extent_info *fieinfo,
2540 struct fiemap_cache *cache)
2541{
2542 for (int i = 0; i < cache->entries_pos; i++) {
2543 struct btrfs_fiemap_entry *entry = &cache->entries[i];
2544 int ret;
2545
2546 ret = fiemap_fill_next_extent(fieinfo, entry->offset,
2547 entry->phys, entry->len,
2548 entry->flags);
2549 /*
2550 * Ignore 1 (reached max entries) because we keep track of that
2551 * ourselves in emit_fiemap_extent().
2552 */
2553 if (ret < 0)
2554 return ret;
2555 }
2556 cache->entries_pos = 0;
2557
2558 return 0;
2559}
2560
4751832d
QW
2561/*
2562 * Helper to submit fiemap extent.
2563 *
2564 * Will try to merge current fiemap extent specified by @offset, @phys,
2565 * @len and @flags with cached one.
2566 * Only when we fail to merge will the cached one be submitted as a
2567 * fiemap extent.
2568 *
2569 * Return value is the same as fiemap_fill_next_extent().
2570 */
2571static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
2572 struct fiemap_cache *cache,
2573 u64 offset, u64 phys, u64 len, u32 flags)
2574{
978b63f7 2575 struct btrfs_fiemap_entry *entry;
a1a4a9ca 2576 u64 cache_end;
4751832d 2577
ac3c0d36
FM
2578 /* Set at the end of extent_fiemap(). */
2579 ASSERT((flags & FIEMAP_EXTENT_LAST) == 0);
2580
4751832d
QW
2581 if (!cache->cached)
2582 goto assign;
2583
2584 /*
a1a4a9ca
FM
2585 * When iterating the extents of the inode, at extent_fiemap(), we may
2586 * find an extent that starts at an offset behind the end offset of the
2587 * previous extent we processed. This happens if fiemap is called
2588 * without FIEMAP_FLAG_SYNC and there are ordered extents completing
978b63f7
FM
2589 * after we had to unlock the file range, release the search path, emit
2590 * the fiemap extents stored in the buffer (cache->entries array) and
2591 * then lock the remainder of the range and re-search the btree.
4751832d 2592 *
a1a4a9ca
FM
2593 * For example we are in leaf X processing its last item, which is the
2594 * file extent item for file range [512K, 1M[, and after
2595 * btrfs_next_leaf() releases the path, there's an ordered extent that
2596 * completes for the file range [768K, 2M[, and that results in trimming
2597 * the file extent item so that it now corresponds to the file range
2598 * [512K, 768K[ and a new file extent item is inserted for the file
2599 * range [768K, 2M[, which may end up as the last item of leaf X or as
2600 * the first item of the next leaf - in either case btrfs_next_leaf()
2601 * will leave us with a path pointing to the new extent item, for the
2602 * file range [768K, 2M[, since that's the first key that follows the
2603 * last one we processed. So in order not to report overlapping extents
2604 * to user space, we trim the length of the previously cached extent and
2605 * emit it.
2606 *
2607 * Upon calling btrfs_next_leaf() we may also find an extent with an
2608 * offset smaller than or equals to cache->offset, and this happens
2609 * when we had a hole or prealloc extent with several delalloc ranges in
2610 * it, but after btrfs_next_leaf() released the path, delalloc was
2611 * flushed and the resulting ordered extents were completed, so we can
2612 * now have found a file extent item for an offset that is smaller than
2613 * or equals to what we have in cache->offset. We deal with this as
2614 * described below.
4751832d 2615 */
a1a4a9ca
FM
2616 cache_end = cache->offset + cache->len;
2617 if (cache_end > offset) {
2618 if (offset == cache->offset) {
2619 /*
2620 * We cached a dealloc range (found in the io tree) for
2621 * a hole or prealloc extent and we have now found a
2622 * file extent item for the same offset. What we have
2623 * now is more recent and up to date, so discard what
2624 * we had in the cache and use what we have just found.
2625 */
2626 goto assign;
2627 } else if (offset > cache->offset) {
2628 /*
2629 * The extent range we previously found ends after the
2630 * offset of the file extent item we found and that
2631 * offset falls somewhere in the middle of that previous
2632 * extent range. So adjust the range we previously found
2633 * to end at the offset of the file extent item we have
2634 * just found, since this extent is more up to date.
2635 * Emit that adjusted range and cache the file extent
2636 * item we have just found. This corresponds to the case
2637 * where a previously found file extent item was split
2638 * due to an ordered extent completing.
2639 */
2640 cache->len = offset - cache->offset;
2641 goto emit;
2642 } else {
2643 const u64 range_end = offset + len;
2644
2645 /*
2646 * The offset of the file extent item we have just found
2647 * is behind the cached offset. This means we were
2648 * processing a hole or prealloc extent for which we
2649 * have found delalloc ranges (in the io tree), so what
2650 * we have in the cache is the last delalloc range we
2651 * found while the file extent item we found can be
2652 * either for a whole delalloc range we previously
2653 * emitted or only a part of that range.
2654 *
2655 * We have two cases here:
2656 *
2657 * 1) The file extent item's range ends at or behind the
2658 * cached extent's end. In this case just ignore the
2659 * current file extent item because we don't want to
2660 * overlap with previous ranges that may have been
2661 * emitted already;
2662 *
2663 * 2) The file extent item starts behind the currently
2664 * cached extent but its end offset goes beyond the
2665 * end offset of the cached extent. We don't want to
2666 * overlap with a previous range that may have been
2667 * emitted already, so we emit the currently cached
2668 * extent and then partially store the current file
2669 * extent item's range in the cache, for the subrange
2670 * going from the cached extent's end to the end of the
2671 * file extent item.
2672 */
2673 if (range_end <= cache_end)
2674 return 0;
2675
2676 if (!(flags & (FIEMAP_EXTENT_ENCODED | FIEMAP_EXTENT_DELALLOC)))
2677 phys += cache_end - offset;
2678
2679 offset = cache_end;
2680 len = range_end - cache_end;
2681 goto emit;
2682 }
4751832d
QW
2683 }
2684
2685 /*
2686 * Only merge fiemap extents if:
2687 * 1) Their logical addresses are continuous
2688 *
2689 * 2) Their physical addresses are continuous
2690 * So truly compressed (physical size smaller than logical size)
2691 * extents won't get merged with each other
2692 *
ac3c0d36 2693 * 3) Share same flags
4751832d
QW
2694 */
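	/*
	 * For example, with hypothetical values: a cached extent
	 * (offset 0, phys X, len 4K, flags F) followed by
	 * (offset 4K, phys X + 4K, len 4K, flags F) is merged into a single
	 * (offset 0, phys X, len 8K, flags F) entry, while an extent whose
	 * physical address does not continue at X + 4K (e.g. a compressed
	 * extent) is emitted separately.
	 */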
2695 if (cache->offset + cache->len == offset &&
2696 cache->phys + cache->len == phys &&
ac3c0d36 2697 cache->flags == flags) {
4751832d 2698 cache->len += len;
ac3c0d36 2699 return 0;
4751832d
QW
2700 }
2701
a1a4a9ca 2702emit:
4751832d 2703 /* Not mergeable, need to submit cached one */
978b63f7
FM
2704
2705 if (cache->entries_pos == cache->entries_size) {
2706 /*
2707 * We will need to research for the end offset of the last
2708 * stored extent and not from the current offset, because after
2709 * unlocking the range and releasing the path, if there's a hole
2710 * between that end offset and this current offset, a new extent
2711 * may have been inserted due to a new write, so we don't want
2712 * to miss it.
2713 */
2714 entry = &cache->entries[cache->entries_size - 1];
2715 cache->next_search_offset = entry->offset + entry->len;
2716 cache->cached = false;
2717
2718 return BTRFS_FIEMAP_FLUSH_CACHE;
2719 }
2720
2721 entry = &cache->entries[cache->entries_pos];
2722 entry->offset = cache->offset;
2723 entry->phys = cache->phys;
2724 entry->len = cache->len;
2725 entry->flags = cache->flags;
2726 cache->entries_pos++;
2727 cache->extents_mapped++;
2728
2729 if (cache->extents_mapped == fieinfo->fi_extents_max) {
2730 cache->cached = false;
2731 return 1;
2732 }
4751832d
QW
2733assign:
2734 cache->cached = true;
2735 cache->offset = offset;
2736 cache->phys = phys;
2737 cache->len = len;
2738 cache->flags = flags;
ac3c0d36
FM
2739
2740 return 0;
4751832d
QW
2741}
2742
2743/*
848c23b7 2744 * Emit last fiemap cache
4751832d 2745 *
848c23b7
QW
2746 * The last fiemap extent may still be cached and not emitted in this case:
2747 * 0                  4k                     8k
2748 * |<- Fiemap range ->|
2749 * |<------------ First extent ----------->|
2750 *
2751 * In this case, the first extent range will be cached but not emitted.
2752 * So we must emit it before ending extent_fiemap().
4751832d 2753 */
5c5aff98 2754static int emit_last_fiemap_cache(struct fiemap_extent_info *fieinfo,
848c23b7 2755 struct fiemap_cache *cache)
4751832d
QW
2756{
2757 int ret;
2758
2759 if (!cache->cached)
2760 return 0;
2761
4751832d
QW
2762 ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
2763 cache->len, cache->flags);
2764 cache->cached = false;
2765 if (ret > 0)
2766 ret = 0;
2767 return ret;
2768}
2769
ac3c0d36 2770static int fiemap_next_leaf_item(struct btrfs_inode *inode, struct btrfs_path *path)
1506fcc8 2771{
1cab1375 2772 struct extent_buffer *clone = path->nodes[0];
ac3c0d36
FM
2773 struct btrfs_key key;
2774 int slot;
2775 int ret;
2776
2777 path->slots[0]++;
2778 if (path->slots[0] < btrfs_header_nritems(path->nodes[0]))
2779 return 0;
2780
1cab1375
FM
2781 /*
2782 * Add a temporary extra ref to an already cloned extent buffer to
2783 * prevent btrfs_next_leaf() freeing it, we want to reuse it to avoid
2784 * the cost of allocating a new one.
2785 */
2786 ASSERT(test_bit(EXTENT_BUFFER_UNMAPPED, &clone->bflags));
2787 atomic_inc(&clone->refs);
2788
ac3c0d36
FM
2789 ret = btrfs_next_leaf(inode->root, path);
2790 if (ret != 0)
1cab1375 2791 goto out;
ac3c0d36
FM
2792
2793 /*
2794 * Don't bother with cloning if there are no more file extent items for
2795 * our inode.
2796 */
2797 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1cab1375
FM
2798 if (key.objectid != btrfs_ino(inode) || key.type != BTRFS_EXTENT_DATA_KEY) {
2799 ret = 1;
2800 goto out;
2801 }
ac3c0d36 2802
1cab1375
FM
2803 /*
2804 * Important to preserve the start field, for the optimizations when
2805 * checking if extents are shared (see extent_fiemap()).
53e24158
JB
2806 *
2807 * We must set ->start before calling copy_extent_buffer_full(). If we
2808 * are on sub-pagesize blocksize, we use ->start to determine the offset
2809 * into the folio where our eb exists, and if we update ->start after
2810 * the fact then any subsequent reads of the eb may read from a
2811 * different offset in the folio than where we originally copied into.
1cab1375
FM
2812 */
2813 clone->start = path->nodes[0]->start;
53e24158
JB
2814 /* See the comment at fiemap_search_slot() about why we clone. */
2815 copy_extent_buffer_full(clone, path->nodes[0]);
ac3c0d36
FM
2816
2817 slot = path->slots[0];
2818 btrfs_release_path(path);
2819 path->nodes[0] = clone;
2820 path->slots[0] = slot;
1cab1375
FM
2821out:
2822 if (ret)
2823 free_extent_buffer(clone);
ac3c0d36 2824
1cab1375 2825 return ret;
ac3c0d36
FM
2826}
2827
2828/*
2829 * Search for the first file extent item that starts at a given file offset or
2830 * the one that starts immediately before that offset.
2831 * Returns: 0 on success, < 0 on error, 1 if not found.
2832 */
2833static int fiemap_search_slot(struct btrfs_inode *inode, struct btrfs_path *path,
2834 u64 file_offset)
2835{
2836 const u64 ino = btrfs_ino(inode);
facee0a0 2837 struct btrfs_root *root = inode->root;
ac3c0d36
FM
2838 struct extent_buffer *clone;
2839 struct btrfs_key key;
2840 int slot;
2841 int ret;
1506fcc8 2842
ac3c0d36
FM
2843 key.objectid = ino;
2844 key.type = BTRFS_EXTENT_DATA_KEY;
2845 key.offset = file_offset;
2846
2847 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2848 if (ret < 0)
2849 return ret;
2850
2851 if (ret > 0 && path->slots[0] > 0) {
2852 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1);
2853 if (key.objectid == ino && key.type == BTRFS_EXTENT_DATA_KEY)
2854 path->slots[0]--;
2855 }
2856
2857 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
2858 ret = btrfs_next_leaf(root, path);
2859 if (ret != 0)
2860 return ret;
2861
2862 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2863 if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY)
2864 return 1;
5911c8fe
DS
2865 }
2866
15c7745c 2867 /*
ac3c0d36
FM
2868 * We clone the leaf and use it during fiemap. This is because while
2869 * using the leaf we do expensive things like checking if an extent is
2870 * shared, which can take a long time. In order to prevent blocking
2871 * other tasks for too long, we use a clone of the leaf. We have locked
2872 * the file range in the inode's io tree, so we know none of our file
2873 * extent items can change. This way we avoid blocking other tasks that
2874 * want to insert items for other inodes in the same leaf or b+tree
2875 * rebalance operations (triggered for example when someone is trying
2876 * to push items into this leaf when trying to insert an item in a
2877 * neighbour leaf).
2878 * We also need the private clone because holding a read lock on an
2879 * extent buffer of the subvolume's b+tree will make lockdep unhappy
978b63f7
FM
2880 * when we check if extents are shared, as backref walking may need to
2881 * lock the same leaf we are processing.
15c7745c 2882 */
ac3c0d36
FM
2883 clone = btrfs_clone_extent_buffer(path->nodes[0]);
2884 if (!clone)
2885 return -ENOMEM;
2886
2887 slot = path->slots[0];
2888 btrfs_release_path(path);
2889 path->nodes[0] = clone;
2890 path->slots[0] = slot;
2891
2892 return 0;
2893}
2894
2895/*
2896 * Process a range which is a hole or a prealloc extent in the inode's subvolume
2897 * btree. If @disk_bytenr is 0, we are dealing with a hole, otherwise a prealloc
2898 * extent. The end offset (@end) is inclusive.
2899 */
2900static int fiemap_process_hole(struct btrfs_inode *inode,
2901 struct fiemap_extent_info *fieinfo,
2902 struct fiemap_cache *cache,
b3e744fe 2903 struct extent_state **delalloc_cached_state,
61dbb952 2904 struct btrfs_backref_share_check_ctx *backref_ctx,
ac3c0d36
FM
2905 u64 disk_bytenr, u64 extent_offset,
2906 u64 extent_gen,
ac3c0d36
FM
2907 u64 start, u64 end)
2908{
2909 const u64 i_size = i_size_read(&inode->vfs_inode);
ac3c0d36
FM
2910 u64 cur_offset = start;
2911 u64 last_delalloc_end = 0;
2912 u32 prealloc_flags = FIEMAP_EXTENT_UNWRITTEN;
2913 bool checked_extent_shared = false;
2914 int ret;
4d479cf0 2915
ec29ed5b 2916 /*
ac3c0d36
FM
2917 * There can be no delalloc past i_size, so don't waste time looking for
2918 * it beyond i_size.
ec29ed5b 2919 */
ac3c0d36
FM
2920 while (cur_offset < end && cur_offset < i_size) {
2921 u64 delalloc_start;
2922 u64 delalloc_end;
2923 u64 prealloc_start;
2924 u64 prealloc_len = 0;
2925 bool delalloc;
2926
2927 delalloc = btrfs_find_delalloc_in_range(inode, cur_offset, end,
b3e744fe 2928 delalloc_cached_state,
ac3c0d36
FM
2929 &delalloc_start,
2930 &delalloc_end);
2931 if (!delalloc)
2932 break;
2d324f59 2933
ec29ed5b 2934 /*
ac3c0d36
FM
2935 * If this is a prealloc extent we have to report every section
2936 * of it that has no delalloc.
ec29ed5b 2937 */
ac3c0d36
FM
2938 if (disk_bytenr != 0) {
2939 if (last_delalloc_end == 0) {
2940 prealloc_start = start;
2941 prealloc_len = delalloc_start - start;
2942 } else {
2943 prealloc_start = last_delalloc_end + 1;
2944 prealloc_len = delalloc_start - prealloc_start;
2945 }
2946 }
2947
2948 if (prealloc_len > 0) {
2949 if (!checked_extent_shared && fieinfo->fi_extents_max) {
ceb707da 2950 ret = btrfs_is_data_extent_shared(inode,
84a7949d
FM
2951 disk_bytenr,
2952 extent_gen,
2953 backref_ctx);
ac3c0d36
FM
2954 if (ret < 0)
2955 return ret;
2956 else if (ret > 0)
2957 prealloc_flags |= FIEMAP_EXTENT_SHARED;
2958
2959 checked_extent_shared = true;
2960 }
2961 ret = emit_fiemap_extent(fieinfo, cache, prealloc_start,
2962 disk_bytenr + extent_offset,
2963 prealloc_len, prealloc_flags);
2964 if (ret)
2965 return ret;
2966 extent_offset += prealloc_len;
2967 }
2968
2969 ret = emit_fiemap_extent(fieinfo, cache, delalloc_start, 0,
2970 delalloc_end + 1 - delalloc_start,
2971 FIEMAP_EXTENT_DELALLOC |
2972 FIEMAP_EXTENT_UNKNOWN);
2973 if (ret)
2974 return ret;
2975
2976 last_delalloc_end = delalloc_end;
2977 cur_offset = delalloc_end + 1;
2978 extent_offset += cur_offset - delalloc_start;
2979 cond_resched();
2980 }
2981
2982 /*
2983 * Either we found no delalloc for the whole prealloc extent or we have
2984 * a prealloc extent that spans i_size or starts at or after i_size.
2985 */
2986 if (disk_bytenr != 0 && last_delalloc_end < end) {
2987 u64 prealloc_start;
2988 u64 prealloc_len;
2989
2990 if (last_delalloc_end == 0) {
2991 prealloc_start = start;
2992 prealloc_len = end + 1 - start;
2993 } else {
2994 prealloc_start = last_delalloc_end + 1;
2995 prealloc_len = end + 1 - prealloc_start;
2996 }
2997
2998 if (!checked_extent_shared && fieinfo->fi_extents_max) {
ceb707da
FM
2999 ret = btrfs_is_data_extent_shared(inode,
3000 disk_bytenr,
84a7949d 3001 extent_gen,
61dbb952 3002 backref_ctx);
ac3c0d36
FM
3003 if (ret < 0)
3004 return ret;
3005 else if (ret > 0)
3006 prealloc_flags |= FIEMAP_EXTENT_SHARED;
3007 }
3008 ret = emit_fiemap_extent(fieinfo, cache, prealloc_start,
3009 disk_bytenr + extent_offset,
3010 prealloc_len, prealloc_flags);
3011 if (ret)
3012 return ret;
3013 }
3014
3015 return 0;
3016}
3017
3018static int fiemap_find_last_extent_offset(struct btrfs_inode *inode,
3019 struct btrfs_path *path,
3020 u64 *last_extent_end_ret)
3021{
3022 const u64 ino = btrfs_ino(inode);
3023 struct btrfs_root *root = inode->root;
3024 struct extent_buffer *leaf;
3025 struct btrfs_file_extent_item *ei;
3026 struct btrfs_key key;
3027 u64 disk_bytenr;
3028 int ret;
3029
3030 /*
3031 * Lookup the last file extent. We're not using i_size here because
3032 * there might be preallocation past i_size.
3033 */
3034 ret = btrfs_lookup_file_extent(NULL, root, path, ino, (u64)-1, 0);
3035 /* There can't be a file extent item at offset (u64)-1 */
3036 ASSERT(ret != 0);
3037 if (ret < 0)
3038 return ret;
3039
3040 /*
3041 * For a non-existing key, btrfs_search_slot() always leaves us at a
3042 * slot > 0, except if the btree is empty, which is impossible because
3043 * at least it has the inode item for this inode and all the items for
3044 * the root inode 256.
3045 */
3046 ASSERT(path->slots[0] > 0);
3047 path->slots[0]--;
3048 leaf = path->nodes[0];
3049 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3050 if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY) {
3051 /* No file extent items in the subvolume tree. */
3052 *last_extent_end_ret = 0;
3053 return 0;
975f84fe 3054 }
975f84fe 3055
ec29ed5b 3056 /*
ac3c0d36
FM
3057 * For an inline extent, the disk_bytenr is where inline data starts at,
3058 * so first check if we have an inline extent item before checking if we
3059 * have an implicit hole (disk_bytenr == 0).
ec29ed5b 3060 */
ac3c0d36
FM
3061 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
3062 if (btrfs_file_extent_type(leaf, ei) == BTRFS_FILE_EXTENT_INLINE) {
3063 *last_extent_end_ret = btrfs_file_extent_end(path);
3064 return 0;
ec29ed5b
CM
3065 }
3066
ac3c0d36
FM
3067 /*
3068 * Find the last file extent item that is not a hole (when NO_HOLES is
3069 * not enabled). This should take at most 2 iterations in the worst
3070 * case: we have one hole file extent item at slot 0 of a leaf and
3071 * another hole file extent item as the last item in the previous leaf.
3072 * This is because we merge file extent items that represent holes.
3073 */
3074 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
3075 while (disk_bytenr == 0) {
3076 ret = btrfs_previous_item(root, path, ino, BTRFS_EXTENT_DATA_KEY);
3077 if (ret < 0) {
3078 return ret;
3079 } else if (ret > 0) {
3080 /* No file extent items that are not holes. */
3081 *last_extent_end_ret = 0;
3082 return 0;
3083 }
3084 leaf = path->nodes[0];
3085 ei = btrfs_item_ptr(leaf, path->slots[0],
3086 struct btrfs_file_extent_item);
3087 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
3088 }
ec29ed5b 3089
ac3c0d36
FM
3090 *last_extent_end_ret = btrfs_file_extent_end(path);
3091 return 0;
3092}
3093
3094int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
3095 u64 start, u64 len)
3096{
3097 const u64 ino = btrfs_ino(inode);
978b63f7 3098 struct extent_state *cached_state = NULL;
b3e744fe 3099 struct extent_state *delalloc_cached_state = NULL;
ac3c0d36 3100 struct btrfs_path *path;
ac3c0d36 3101 struct fiemap_cache cache = { 0 };
61dbb952 3102 struct btrfs_backref_share_check_ctx *backref_ctx;
ac3c0d36
FM
3103 u64 last_extent_end;
3104 u64 prev_extent_end;
b0ad381f
JB
3105 u64 range_start;
3106 u64 range_end;
3107 const u64 sectorsize = inode->root->fs_info->sectorsize;
ac3c0d36
FM
3108 bool stopped = false;
3109 int ret;
3110
978b63f7
FM
3111 cache.entries_size = PAGE_SIZE / sizeof(struct btrfs_fiemap_entry);
3112 cache.entries = kmalloc_array(cache.entries_size,
3113 sizeof(struct btrfs_fiemap_entry),
3114 GFP_KERNEL);
84a7949d 3115 backref_ctx = btrfs_alloc_backref_share_check_ctx();
ac3c0d36 3116 path = btrfs_alloc_path();
978b63f7 3117 if (!cache.entries || !backref_ctx || !path) {
ac3c0d36 3118 ret = -ENOMEM;
1506fcc8
YS
3119 goto out;
3120 }
975f84fe 3121
978b63f7 3122restart:
b0ad381f
JB
3123 range_start = round_down(start, sectorsize);
3124 range_end = round_up(start + len, sectorsize);
3125 prev_extent_end = range_start;
ea8efc74 3126
978b63f7
FM
3127 lock_extent(&inode->io_tree, range_start, range_end, &cached_state);
3128
ac3c0d36
FM
3129 ret = fiemap_find_last_extent_offset(inode, path, &last_extent_end);
3130 if (ret < 0)
978b63f7 3131 goto out_unlock;
ac3c0d36 3132 btrfs_release_path(path);
1506fcc8 3133
ac3c0d36 3134 path->reada = READA_FORWARD;
b0ad381f 3135 ret = fiemap_search_slot(inode, path, range_start);
ac3c0d36 3136 if (ret < 0) {
978b63f7 3137 goto out_unlock;
ac3c0d36 3138 } else if (ret > 0) {
ea8efc74 3139 /*
ac3c0d36
FM
3140 * No file extent item found, but we may have delalloc between
3141 * the current offset and i_size. So check for that.
ea8efc74 3142 */
ac3c0d36
FM
3143 ret = 0;
3144 goto check_eof_delalloc;
3145 }
3146
b0ad381f 3147 while (prev_extent_end < range_end) {
ac3c0d36
FM
3148 struct extent_buffer *leaf = path->nodes[0];
3149 struct btrfs_file_extent_item *ei;
3150 struct btrfs_key key;
3151 u64 extent_end;
3152 u64 extent_len;
3153 u64 extent_offset = 0;
3154 u64 extent_gen;
3155 u64 disk_bytenr = 0;
3156 u64 flags = 0;
3157 int extent_type;
3158 u8 compression;
3159
3160 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3161 if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY)
3162 break;
3163
3164 extent_end = btrfs_file_extent_end(path);
1506fcc8 3165
ea8efc74 3166 /*
ac3c0d36
FM
3167 * The first iteration can leave us at an extent item that ends
3168 * before our range's start. Move to the next item.
ea8efc74 3169 */
b0ad381f 3170 if (extent_end <= range_start)
ac3c0d36 3171 goto next_item;
fe09e16c 3172
877c1476
FM
3173 backref_ctx->curr_leaf_bytenr = leaf->start;
3174
ac3c0d36
FM
3175 /* We have an implicit hole (NO_HOLES feature enabled). */
3176 if (prev_extent_end < key.offset) {
b0ad381f 3177 const u64 hole_end = min(key.offset, range_end) - 1;
b8f164e3 3178
ac3c0d36 3179 ret = fiemap_process_hole(inode, fieinfo, &cache,
b3e744fe 3180 &delalloc_cached_state,
61dbb952 3181 backref_ctx, 0, 0, 0,
b0ad381f 3182 prev_extent_end, hole_end);
ac3c0d36 3183 if (ret < 0) {
978b63f7 3184 goto out_unlock;
ac3c0d36
FM
3185 } else if (ret > 0) {
3186 /* fiemap_fill_next_extent() told us to stop. */
3187 stopped = true;
3188 break;
3189 }
1506fcc8 3190
ac3c0d36 3191 /* We've reached the end of the fiemap range, stop. */
b0ad381f 3192 if (key.offset >= range_end) {
ac3c0d36
FM
3193 stopped = true;
3194 break;
3195 }
1506fcc8
YS
3196 }
3197
ac3c0d36
FM
3198 extent_len = extent_end - key.offset;
3199 ei = btrfs_item_ptr(leaf, path->slots[0],
3200 struct btrfs_file_extent_item);
3201 compression = btrfs_file_extent_compression(leaf, ei);
3202 extent_type = btrfs_file_extent_type(leaf, ei);
3203 extent_gen = btrfs_file_extent_generation(leaf, ei);
3204
3205 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
3206 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
3207 if (compression == BTRFS_COMPRESS_NONE)
3208 extent_offset = btrfs_file_extent_offset(leaf, ei);
ec29ed5b 3209 }
ac3c0d36
FM
3210
3211 if (compression != BTRFS_COMPRESS_NONE)
3212 flags |= FIEMAP_EXTENT_ENCODED;
3213
3214 if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
3215 flags |= FIEMAP_EXTENT_DATA_INLINE;
3216 flags |= FIEMAP_EXTENT_NOT_ALIGNED;
3217 ret = emit_fiemap_extent(fieinfo, &cache, key.offset, 0,
3218 extent_len, flags);
3219 } else if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
3220 ret = fiemap_process_hole(inode, fieinfo, &cache,
b3e744fe 3221 &delalloc_cached_state,
61dbb952 3222 backref_ctx,
ac3c0d36 3223 disk_bytenr, extent_offset,
84a7949d
FM
3224 extent_gen, key.offset,
3225 extent_end - 1);
ac3c0d36
FM
3226 } else if (disk_bytenr == 0) {
3227 /* We have an explicit hole. */
3228 ret = fiemap_process_hole(inode, fieinfo, &cache,
b3e744fe 3229 &delalloc_cached_state,
61dbb952 3230 backref_ctx, 0, 0, 0,
ac3c0d36
FM
3231 key.offset, extent_end - 1);
3232 } else {
3233 /* We have a regular extent. */
3234 if (fieinfo->fi_extents_max) {
ceb707da 3235 ret = btrfs_is_data_extent_shared(inode,
ac3c0d36
FM
3236 disk_bytenr,
3237 extent_gen,
61dbb952 3238 backref_ctx);
ac3c0d36 3239 if (ret < 0)
978b63f7 3240 goto out_unlock;
ac3c0d36
FM
3241 else if (ret > 0)
3242 flags |= FIEMAP_EXTENT_SHARED;
3243 }
3244
3245 ret = emit_fiemap_extent(fieinfo, &cache, key.offset,
3246 disk_bytenr + extent_offset,
3247 extent_len, flags);
975f84fe 3248 }
ac3c0d36
FM
3249
3250 if (ret < 0) {
978b63f7 3251 goto out_unlock;
ac3c0d36 3252 } else if (ret > 0) {
978b63f7 3253 /* emit_fiemap_extent() told us to stop. */
ac3c0d36
FM
3254 stopped = true;
3255 break;
26e726af 3256 }
09fbc1c8 3257
ac3c0d36
FM
3258 prev_extent_end = extent_end;
3259next_item:
09fbc1c8
FM
3260 if (fatal_signal_pending(current)) {
3261 ret = -EINTR;
978b63f7 3262 goto out_unlock;
09fbc1c8 3263 }
ac3c0d36
FM
3264
3265 ret = fiemap_next_leaf_item(inode, path);
3266 if (ret < 0) {
978b63f7 3267 goto out_unlock;
ac3c0d36
FM
3268 } else if (ret > 0) {
3269 /* No more file extent items for this inode. */
3270 break;
3271 }
3272 cond_resched();
1506fcc8 3273 }
5911c8fe 3274
ac3c0d36 3275check_eof_delalloc:
b0ad381f 3276 if (!stopped && prev_extent_end < range_end) {
b3e744fe
FM
3277 ret = fiemap_process_hole(inode, fieinfo, &cache,
3278 &delalloc_cached_state, backref_ctx,
b0ad381f 3279 0, 0, 0, prev_extent_end, range_end - 1);
ac3c0d36 3280 if (ret < 0)
978b63f7 3281 goto out_unlock;
b0ad381f 3282 prev_extent_end = range_end;
ac3c0d36
FM
3283 }
3284
3285 if (cache.cached && cache.offset + cache.len >= last_extent_end) {
3286 const u64 i_size = i_size_read(&inode->vfs_inode);
3287
3288 if (prev_extent_end < i_size) {
3289 u64 delalloc_start;
3290 u64 delalloc_end;
3291 bool delalloc;
3292
3293 delalloc = btrfs_find_delalloc_in_range(inode,
3294 prev_extent_end,
3295 i_size - 1,
b3e744fe 3296 &delalloc_cached_state,
ac3c0d36
FM
3297 &delalloc_start,
3298 &delalloc_end);
3299 if (!delalloc)
3300 cache.flags |= FIEMAP_EXTENT_LAST;
3301 } else {
3302 cache.flags |= FIEMAP_EXTENT_LAST;
3303 }
3304 }
3305
978b63f7
FM
3306out_unlock:
3307 unlock_extent(&inode->io_tree, range_start, range_end, &cached_state);
3308
3309 if (ret == BTRFS_FIEMAP_FLUSH_CACHE) {
3310 btrfs_release_path(path);
3311 ret = flush_fiemap_cache(fieinfo, &cache);
3312 if (ret)
3313 goto out;
3314 len -= cache.next_search_offset - start;
3315 start = cache.next_search_offset;
3316 goto restart;
3317 } else if (ret < 0) {
3318 goto out;
3319 }
3320
3321 /*
3322 * Must free the path before emitting to the fiemap buffer because we
3323 * may have a non-cloned leaf and if the fiemap buffer is memory mapped
3324 * to a file, a write into it (through btrfs_page_mkwrite()) may trigger
3325 * waiting for an ordered extent that needs to modify that leaf in
3326 * order to complete, leading to a deadlock.
3327 */
3328 btrfs_free_path(path);
3329 path = NULL;
3330
3331 ret = flush_fiemap_cache(fieinfo, &cache);
3332 if (ret)
3333 goto out;
3334
ac3c0d36 3335 ret = emit_last_fiemap_cache(fieinfo, &cache);
ac3c0d36 3336out:
b3e744fe 3337 free_extent_state(delalloc_cached_state);
978b63f7 3338 kfree(cache.entries);
84a7949d 3339 btrfs_free_backref_share_ctx(backref_ctx);
e02d48ea 3340 btrfs_free_path(path);
1506fcc8
YS
3341 return ret;
3342}
3343
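/*
 * Usage sketch, not taken from this file: extent_fiemap() backs the
 * FIEMAP ioctl, and a wrapper along these lines (the wrapper name is
 * made up for illustration) is expected to validate the request first:
 *
 *	static int example_fiemap(struct inode *inode,
 *				  struct fiemap_extent_info *fieinfo,
 *				  u64 start, u64 len)
 *	{
 *		int ret = fiemap_prep(inode, fieinfo, start, &len, 0);
 *
 *		if (ret)
 *			return ret;
 *		return extent_fiemap(BTRFS_I(inode), fieinfo, start, len);
 *	}
 */
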
727011e0
CM
3344static void __free_extent_buffer(struct extent_buffer *eb)
3345{
727011e0
CM
3346 kmem_cache_free(extent_buffer_cache, eb);
3347}
3348
7f26fb1c 3349static int extent_buffer_under_io(const struct extent_buffer *eb)
db7f3436 3350{
113fa05c 3351 return (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
db7f3436
JB
3352 test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
3353}
3354
13df3775 3355static bool folio_range_has_eb(struct btrfs_fs_info *fs_info, struct folio *folio)
db7f3436 3356{
8ff8466d 3357 struct btrfs_subpage *subpage;
db7f3436 3358
affc5af3 3359 lockdep_assert_held(&folio->mapping->i_private_lock);
db7f3436 3360
cfbf07e2
QW
3361 if (folio_test_private(folio)) {
3362 subpage = folio_get_private(folio);
8ff8466d
QW
3363 if (atomic_read(&subpage->eb_refs))
3364 return true;
3d078efa
QW
3365 /*
3366 * Even if there are no eb refs here, we may still have an
3367 * end_page_read() call relying on page::private.
3368 */
3369 if (atomic_read(&subpage->readers))
3370 return true;
8ff8466d
QW
3371 }
3372 return false;
3373}
db7f3436 3374
13df3775 3375static void detach_extent_buffer_folio(struct extent_buffer *eb, struct folio *folio)
8ff8466d
QW
3376{
3377 struct btrfs_fs_info *fs_info = eb->fs_info;
3378 const bool mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
3379
3380 /*
cfbf07e2 3381 * For mapped eb, we're going to change the folio private, which should
600f111e 3382 * be done under the i_private_lock.
8ff8466d
QW
3383 */
3384 if (mapped)
affc5af3 3385 spin_lock(&folio->mapping->i_private_lock);
8ff8466d 3386
cfbf07e2 3387 if (!folio_test_private(folio)) {
5d2361db 3388 if (mapped)
affc5af3 3389 spin_unlock(&folio->mapping->i_private_lock);
8ff8466d
QW
3390 return;
3391 }
3392
fbca46eb 3393 if (fs_info->nodesize >= PAGE_SIZE) {
5d2361db
FL
3394 /*
3395 * We do this since we'll remove the pages after we've
3396 * removed the eb from the radix tree, so we could race
3397 * and have this page now attached to the new eb. So
cfbf07e2 3398 * only clear the folio private if it's still connected to
5d2361db
FL
3399 * this eb.
3400 */
cfbf07e2 3401 if (folio_test_private(folio) && folio_get_private(folio) == eb) {
5d2361db 3402 BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
13df3775
QW
3403 BUG_ON(folio_test_dirty(folio));
3404 BUG_ON(folio_test_writeback(folio));
cfbf07e2
QW
3405 /* We need to make sure we haven't been attached to a new eb. */
3406 folio_detach_private(folio);
db7f3436 3407 }
5d2361db 3408 if (mapped)
affc5af3 3409 spin_unlock(&folio->mapping->i_private_lock);
8ff8466d
QW
3410 return;
3411 }
3412
3413 /*
cfbf07e2
QW
3414 * For subpage, we can have dummy eb with folio private attached. In
3415 * this case, we can directly detach the private as such folio is only
3416 * attached to one dummy eb, no sharing.
8ff8466d
QW
3417 */
3418 if (!mapped) {
55151ea9 3419 btrfs_detach_subpage(fs_info, folio);
8ff8466d
QW
3420 return;
3421 }
3422
13df3775 3423 btrfs_folio_dec_eb_refs(fs_info, folio);
8ff8466d
QW
3424
3425 /*
cfbf07e2 3426 * We can only detach the folio private if there are no other ebs in the
3d078efa 3427 * page range and no unfinished IO.
8ff8466d 3428 */
13df3775 3429 if (!folio_range_has_eb(fs_info, folio))
55151ea9 3430 btrfs_detach_subpage(fs_info, folio);
8ff8466d 3431
affc5af3 3432 spin_unlock(&folio->mapping->i_private_lock);
8ff8466d
QW
3433}
3434
3435/* Release all pages attached to the extent buffer */
3436static void btrfs_release_extent_buffer_pages(struct extent_buffer *eb)
3437{
8ff8466d
QW
3438 ASSERT(!extent_buffer_under_io(eb));
3439
4a565c80 3440 for (int i = 0; i < INLINE_EXTENT_BUFFER_PAGES; i++) {
13df3775 3441 struct folio *folio = eb->folios[i];
8ff8466d 3442
13df3775 3443 if (!folio)
8ff8466d
QW
3444 continue;
3445
13df3775 3446 detach_extent_buffer_folio(eb, folio);
5d2361db 3447
13df3775
QW
3448 /* One for when we allocated the folio. */
3449 folio_put(folio);
d64766fd 3450 }
db7f3436
JB
3451}
3452
3453/*
3454 * Helper for releasing the extent buffer.
3455 */
3456static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
3457{
55ac0139 3458 btrfs_release_extent_buffer_pages(eb);
a40246e8 3459 btrfs_leak_debug_del_eb(eb);
db7f3436
JB
3460 __free_extent_buffer(eb);
3461}
3462
f28491e0
JB
3463static struct extent_buffer *
3464__alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
23d79d81 3465 unsigned long len)
d1310b2e
CM
3466{
3467 struct extent_buffer *eb = NULL;
3468
d1b5c567 3469 eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL);
d1310b2e
CM
3470 eb->start = start;
3471 eb->len = len;
f28491e0 3472 eb->fs_info = fs_info;
196d59ab 3473 init_rwsem(&eb->lock);
b4ce94de 3474
a40246e8 3475 btrfs_leak_debug_add_eb(eb);
6d49ba1b 3476
3083ee2e 3477 spin_lock_init(&eb->refs_lock);
d1310b2e 3478 atomic_set(&eb->refs, 1);
727011e0 3479
deb67895 3480 ASSERT(len <= BTRFS_MAX_METADATA_BLOCKSIZE);
d1310b2e
CM
3481
3482 return eb;
3483}
3484
2b48966a 3485struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src)
815a51c7 3486{
815a51c7 3487 struct extent_buffer *new;
13df3775 3488 int num_folios = num_extent_folios(src);
dd137dd1 3489 int ret;
815a51c7 3490
3f556f78 3491 new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
815a51c7
JS
3492 if (new == NULL)
3493 return NULL;
3494
62c053fb
QW
3495 /*
3496 * Set UNMAPPED before calling btrfs_release_extent_buffer(), as
3497 * btrfs_release_extent_buffer() has different behavior for
3498 * UNMAPPED subpage extent buffers.
3499 */
3500 set_bit(EXTENT_BUFFER_UNMAPPED, &new->bflags);
3501
082d5bb9 3502 ret = alloc_eb_folio_array(new, 0);
dd137dd1
STD
3503 if (ret) {
3504 btrfs_release_extent_buffer(new);
3505 return NULL;
3506 }
3507
13df3775
QW
3508 for (int i = 0; i < num_folios; i++) {
3509 struct folio *folio = new->folios[i];
760f991f
QW
3510 int ret;
3511
13df3775 3512 ret = attach_extent_buffer_folio(new, folio, NULL);
760f991f 3513 if (ret < 0) {
760f991f
QW
3514 btrfs_release_extent_buffer(new);
3515 return NULL;
3516 }
13df3775 3517 WARN_ON(folio_test_dirty(folio));
815a51c7 3518 }
682a0bc5 3519 copy_extent_buffer_full(new, src);
92d83e94 3520 set_extent_buffer_uptodate(new);
815a51c7
JS
3521
3522 return new;
3523}
3524
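/*
 * Illustrative use of btrfs_clone_extent_buffer(), a sketch rather
 * than a caller from this file: clone a leaf so it can be inspected
 * without holding tree locks, then drop the private copy:
 *
 *	struct extent_buffer *clone;
 *
 *	clone = btrfs_clone_extent_buffer(path->nodes[0]);
 *	if (!clone)
 *		return -ENOMEM;
 *	... read-only accesses on @clone ...
 *	free_extent_buffer(clone);
 */
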
0f331229
OS
3525struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
3526 u64 start, unsigned long len)
815a51c7
JS
3527{
3528 struct extent_buffer *eb;
13df3775 3529 int num_folios = 0;
dd137dd1 3530 int ret;
815a51c7 3531
3f556f78 3532 eb = __alloc_extent_buffer(fs_info, start, len);
815a51c7
JS
3533 if (!eb)
3534 return NULL;
3535
082d5bb9 3536 ret = alloc_eb_folio_array(eb, 0);
dd137dd1
STD
3537 if (ret)
3538 goto err;
3539
13df3775
QW
3540 num_folios = num_extent_folios(eb);
3541 for (int i = 0; i < num_folios; i++) {
3542 ret = attach_extent_buffer_folio(eb, eb->folios[i], NULL);
09bc1f0f
QW
3543 if (ret < 0)
3544 goto err;
815a51c7 3545 }
dd137dd1 3546
815a51c7
JS
3547 set_extent_buffer_uptodate(eb);
3548 btrfs_set_header_nritems(eb, 0);
b0132a3b 3549 set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
815a51c7
JS
3550
3551 return eb;
3552err:
13df3775 3553 for (int i = 0; i < num_folios; i++) {
082d5bb9 3554 if (eb->folios[i]) {
13df3775
QW
3555 detach_extent_buffer_folio(eb, eb->folios[i]);
3556 __folio_put(eb->folios[i]);
dd137dd1 3557 }
09bc1f0f 3558 }
815a51c7
JS
3559 __free_extent_buffer(eb);
3560 return NULL;
3561}
3562
0f331229 3563struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
da17066c 3564 u64 start)
0f331229 3565{
da17066c 3566 return __alloc_dummy_extent_buffer(fs_info, start, fs_info->nodesize);
0f331229
OS
3567}
3568
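/*
 * Hedged example of a dummy (UNMAPPED) extent buffer, e.g. for
 * self-tests or scratch metadata that never reaches disk (error
 * handling trimmed for brevity):
 *
 *	eb = alloc_dummy_extent_buffer(fs_info, 0);
 *	if (!eb)
 *		return -ENOMEM;
 *	write_extent_buffer(eb, buf, 0, len);
 *	free_extent_buffer(eb);
 */
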
0b32f4bb
JB
3569static void check_buffer_tree_ref(struct extent_buffer *eb)
3570{
242e18c7 3571 int refs;
6bf9cd2e
BB
3572 /*
3573 * The TREE_REF bit is first set when the extent_buffer is added
3574 * to the radix tree. It is also reset, if unset, when a new reference
3575 * is created by find_extent_buffer.
0b32f4bb 3576 *
6bf9cd2e
BB
3577 * It is only cleared in two cases: freeing the last non-tree
3578 * reference to the extent_buffer when its STALE bit is set or
f913cff3 3579 * calling release_folio when the tree reference is the only reference.
0b32f4bb 3580 *
6bf9cd2e 3581 * In both cases, care is taken to ensure that the extent_buffer's
f913cff3 3582 * pages are not under io. However, release_folio can be concurrently
6bf9cd2e
BB
3583 * called with creating new references, which is prone to race
3584 * conditions between the calls to check_buffer_tree_ref in those
3585 * codepaths and clearing TREE_REF in try_release_extent_buffer.
0b32f4bb 3586 *
6bf9cd2e
BB
3587 * The actual lifetime of the extent_buffer in the radix tree is
3588 * adequately protected by the refcount, but the TREE_REF bit and
3589 * its corresponding reference are not. To protect against this
3590 * class of races, we call check_buffer_tree_ref from the codepaths
113fa05c
CH
3591 * which trigger io. Note that once io is initiated, TREE_REF can no
3592 * longer be cleared, so that is the moment at which any such race is
3593 * best fixed.
0b32f4bb 3594 */
242e18c7
CM
3595 refs = atomic_read(&eb->refs);
3596 if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
3597 return;
3598
594831c4
JB
3599 spin_lock(&eb->refs_lock);
3600 if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
0b32f4bb 3601 atomic_inc(&eb->refs);
594831c4 3602 spin_unlock(&eb->refs_lock);
0b32f4bb
JB
3603}
3604
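/*
 * To make the accounting above concrete, the usual counter states,
 * derived from the comments in this file (not normative):
 *
 *	refs == 1, TREE_REF set: only the radix tree holds the eb
 *	refs == 2, TREE_REF set: radix tree plus one user reference
 *	refs >= 2, TREE_REF set: extra users; the fast path above can
 *				 skip taking refs_lock entirely
 */
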
13df3775 3605static void mark_extent_buffer_accessed(struct extent_buffer *eb)
5df4235e 3606{
13df3775 3607 int num_folios = num_extent_folios(eb);
5df4235e 3608
0b32f4bb
JB
3609 check_buffer_tree_ref(eb);
3610
13df3775
QW
3611 for (int i = 0; i < num_folios; i++)
3612 folio_mark_accessed(eb->folios[i]);
5df4235e
JB
3613}
3614
f28491e0
JB
3615struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
3616 u64 start)
452c75c3
CS
3617{
3618 struct extent_buffer *eb;
3619
2f3186d8
QW
3620 eb = find_extent_buffer_nolock(fs_info, start);
3621 if (!eb)
3622 return NULL;
3623 /*
3624 * Lock our eb's refs_lock to avoid races with free_extent_buffer().
3625 * When we get our eb it might be flagged with EXTENT_BUFFER_STALE and
3626 * another task running free_extent_buffer() might have seen that flag
3627 * set, eb->refs == 2, that the buffer isn't under IO (dirty and
3628 * writeback flags not set) and it's still in the tree (flag
3629 * EXTENT_BUFFER_TREE_REF set), therefore being in the process of
3630 * decrementing the extent buffer's reference count twice. So here we
3631 * could race and increment the eb's reference count, clear its stale
3632 * flag, mark it as dirty and drop our reference before the other task
3633 * finishes executing free_extent_buffer, which would later result in
3634 * an attempt to free an extent buffer that is dirty.
3635 */
3636 if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) {
3637 spin_lock(&eb->refs_lock);
3638 spin_unlock(&eb->refs_lock);
452c75c3 3639 }
13df3775 3640 mark_extent_buffer_accessed(eb);
2f3186d8 3641 return eb;
452c75c3
CS
3642}
3643
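/*
 * Typical lookup-then-create pattern built on this helper (a sketch;
 * alloc_extent_buffer() below starts with the same lookup):
 *
 *	eb = find_extent_buffer(fs_info, start);
 *	if (!eb)
 *		eb = alloc_extent_buffer(fs_info, start, owner_root, level);
 *	if (IS_ERR(eb))
 *		return PTR_ERR(eb);
 */
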
faa2dbf0
JB
3644#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
3645struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
da17066c 3646 u64 start)
faa2dbf0
JB
3647{
3648 struct extent_buffer *eb, *exists = NULL;
3649 int ret;
3650
3651 eb = find_extent_buffer(fs_info, start);
3652 if (eb)
3653 return eb;
da17066c 3654 eb = alloc_dummy_extent_buffer(fs_info, start);
faa2dbf0 3655 if (!eb)
b6293c82 3656 return ERR_PTR(-ENOMEM);
faa2dbf0 3657 eb->fs_info = fs_info;
01cd3909
DS
3658again:
3659 ret = radix_tree_preload(GFP_NOFS);
3660 if (ret) {
3661 exists = ERR_PTR(ret);
3662 goto free_eb;
3663 }
3664 spin_lock(&fs_info->buffer_lock);
3665 ret = radix_tree_insert(&fs_info->buffer_radix,
3666 start >> fs_info->sectorsize_bits, eb);
3667 spin_unlock(&fs_info->buffer_lock);
3668 radix_tree_preload_end();
3669 if (ret == -EEXIST) {
3670 exists = find_extent_buffer(fs_info, start);
3671 if (exists)
faa2dbf0 3672 goto free_eb;
01cd3909
DS
3673 else
3674 goto again;
3675 }
faa2dbf0
JB
3676 check_buffer_tree_ref(eb);
3677 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
3678
faa2dbf0
JB
3679 return eb;
3680free_eb:
3681 btrfs_release_extent_buffer(eb);
3682 return exists;
3683}
3684#endif
3685
81982210
QW
3686static struct extent_buffer *grab_extent_buffer(
3687 struct btrfs_fs_info *fs_info, struct page *page)
c0f0a9e7 3688{
cfbf07e2 3689 struct folio *folio = page_folio(page);
c0f0a9e7
QW
3690 struct extent_buffer *exists;
3691
81982210
QW
3692 /*
3693 * For subpage case, we completely rely on radix tree to ensure we
3694 * don't try to insert two ebs for the same bytenr. So here we always
3695 * return NULL and just continue.
3696 */
fbca46eb 3697 if (fs_info->nodesize < PAGE_SIZE)
81982210
QW
3698 return NULL;
3699
c0f0a9e7 3700 /* Page not yet attached to an extent buffer */
cfbf07e2 3701 if (!folio_test_private(folio))
c0f0a9e7
QW
3702 return NULL;
3703
3704 /*
3705 * We could have already allocated an eb for this page and attached one,
3706 * so let's see if we can get a ref on the existing eb. If we can, we
3707 * know it's good and we can just return that one; otherwise we know we
cfbf07e2 3708 * can just overwrite the folio private.
c0f0a9e7 3709 */
cfbf07e2 3710 exists = folio_get_private(folio);
c0f0a9e7
QW
3711 if (atomic_inc_not_zero(&exists->refs))
3712 return exists;
3713
3714 WARN_ON(PageDirty(page));
cfbf07e2 3715 folio_detach_private(folio);
c0f0a9e7
QW
3716 return NULL;
3717}
3718
fbca46eb
QW
3719static int check_eb_alignment(struct btrfs_fs_info *fs_info, u64 start)
3720{
3721 if (!IS_ALIGNED(start, fs_info->sectorsize)) {
3722 btrfs_err(fs_info, "bad tree block start %llu", start);
3723 return -EINVAL;
3724 }
3725
3726 if (fs_info->nodesize < PAGE_SIZE &&
3727 offset_in_page(start) + fs_info->nodesize > PAGE_SIZE) {
3728 btrfs_err(fs_info,
3729 "tree block crosses page boundary, start %llu nodesize %u",
3730 start, fs_info->nodesize);
3731 return -EINVAL;
3732 }
3733 if (fs_info->nodesize >= PAGE_SIZE &&
1280d2d1 3734 !PAGE_ALIGNED(start)) {
fbca46eb
QW
3735 btrfs_err(fs_info,
3736 "tree block is not page aligned, start %llu nodesize %u",
3737 start, fs_info->nodesize);
3738 return -EINVAL;
3739 }
6d3a6194
QW
3740 if (!IS_ALIGNED(start, fs_info->nodesize) &&
3741 !test_and_set_bit(BTRFS_FS_UNALIGNED_TREE_BLOCK, &fs_info->flags)) {
3742 btrfs_warn(fs_info,
3743"tree block not nodesize aligned, start %llu nodesize %u, can be resolved by a full metadata balance",
3744 start, fs_info->nodesize);
3745 }
fbca46eb
QW
3746 return 0;
3747}
3748
09e6cef1
QW
3749
3750/*
082d5bb9
QW
3751 * Return 0 if eb->folios[i] is attached to btree inode successfully.
3752 * Return >0 if there is already another extent buffer for the range,
09e6cef1 3753 * and @found_eb_ret will be updated.
13df3775
QW
3754 * Return -EAGAIN if the filemap has an existing folio but with different size
3755 * than @eb.
3756 * The caller needs to free the existing folios and retry using the same order.
09e6cef1 3757 */
13df3775
QW
3758static int attach_eb_folio_to_filemap(struct extent_buffer *eb, int i,
3759 struct extent_buffer **found_eb_ret)
09e6cef1
QW
3760{
3761
3762 struct btrfs_fs_info *fs_info = eb->fs_info;
3763 struct address_space *mapping = fs_info->btree_inode->i_mapping;
3764 const unsigned long index = eb->start >> PAGE_SHIFT;
3765 struct folio *existing_folio;
3766 int ret;
3767
3768 ASSERT(found_eb_ret);
3769
082d5bb9
QW
3770 /* Caller should ensure the folio exists. */
3771 ASSERT(eb->folios[i]);
09e6cef1
QW
3772
3773retry:
082d5bb9 3774 ret = filemap_add_folio(mapping, eb->folios[i], index + i,
09e6cef1
QW
3775 GFP_NOFS | __GFP_NOFAIL);
3776 if (!ret)
3777 return 0;
3778
3779 existing_folio = filemap_lock_folio(mapping, index + i);
3780 /* The page cache only exists for a very short time, just retry. */
3781 if (IS_ERR(existing_folio))
3782 goto retry;
3783
3784 /* For now, we should only have single-page folios for btree inode. */
3785 ASSERT(folio_nr_pages(existing_folio) == 1);
3786
84cda1a6 3787 if (folio_size(existing_folio) != eb->folio_size) {
13df3775
QW
3788 folio_unlock(existing_folio);
3789 folio_put(existing_folio);
3790 return -EAGAIN;
3791 }
3792
09e6cef1
QW
3793 if (fs_info->nodesize < PAGE_SIZE) {
3794 /*
3795 * We're going to reuse the existing page, can drop our page
3796 * and subpage structure now.
3797 */
082d5bb9
QW
3798 __free_page(folio_page(eb->folios[i], 0));
3799 eb->folios[i] = existing_folio;
09e6cef1
QW
3800 } else {
3801 struct extent_buffer *existing_eb;
3802
3803 existing_eb = grab_extent_buffer(fs_info,
3804 folio_page(existing_folio, 0));
3805 if (existing_eb) {
3806 /* The extent buffer still exists, we can use it directly. */
3807 *found_eb_ret = existing_eb;
3808 folio_unlock(existing_folio);
3809 folio_put(existing_folio);
3810 return 1;
3811 }
3812 /* The extent buffer no longer exists, we can reuse the folio. */
082d5bb9
QW
3813 __free_page(folio_page(eb->folios[i], 0));
3814 eb->folios[i] = existing_folio;
09e6cef1
QW
3815 }
3816 return 0;
3817}
3818
f28491e0 3819struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
3fbaf258 3820 u64 start, u64 owner_root, int level)
d1310b2e 3821{
da17066c 3822 unsigned long len = fs_info->nodesize;
13df3775 3823 int num_folios;
09e6cef1 3824 int attached = 0;
d1310b2e 3825 struct extent_buffer *eb;
09e6cef1 3826 struct extent_buffer *existing_eb = NULL;
f28491e0 3827 struct address_space *mapping = fs_info->btree_inode->i_mapping;
52ea5bfb 3828 struct btrfs_subpage *prealloc = NULL;
b40130b2 3829 u64 lockdep_owner = owner_root;
397239ed 3830 bool page_contig = true;
d1310b2e 3831 int uptodate = 1;
19fe0a8b 3832 int ret;
d1310b2e 3833
fbca46eb 3834 if (check_eb_alignment(fs_info, start))
c871b0f2 3835 return ERR_PTR(-EINVAL);
c871b0f2 3836
e9306ad4
QW
3837#if BITS_PER_LONG == 32
3838 if (start >= MAX_LFS_FILESIZE) {
3839 btrfs_err_rl(fs_info,
3840 "extent buffer %llu is beyond 32bit page cache limit", start);
3841 btrfs_err_32bit_limit(fs_info);
3842 return ERR_PTR(-EOVERFLOW);
3843 }
3844 if (start >= BTRFS_32BIT_EARLY_WARN_THRESHOLD)
3845 btrfs_warn_32bit_limit(fs_info);
3846#endif
3847
f28491e0 3848 eb = find_extent_buffer(fs_info, start);
452c75c3 3849 if (eb)
6af118ce 3850 return eb;
6af118ce 3851
23d79d81 3852 eb = __alloc_extent_buffer(fs_info, start, len);
2b114d1d 3853 if (!eb)
c871b0f2 3854 return ERR_PTR(-ENOMEM);
b40130b2
JB
3855
3856 /*
3857 * The reloc trees are just snapshots, so we need them to appear to be
3858 * just like any other fs tree WRT lockdep.
3859 */
3860 if (lockdep_owner == BTRFS_TREE_RELOC_OBJECTID)
3861 lockdep_owner = BTRFS_FS_TREE_OBJECTID;
3862
3863 btrfs_set_buffer_lockdep_class(lockdep_owner, eb, level);
d1310b2e 3864
52ea5bfb 3865 /*
cfbf07e2 3866 * Preallocate folio private for subpage case, so that we won't
600f111e 3867 * allocate memory with i_private_lock nor page lock hold.
52ea5bfb
QW
3868 *
3869 * The memory will be freed by attach_extent_buffer_page() or freed
3870 * manually if we exit earlier.
3871 */
3872 if (fs_info->nodesize < PAGE_SIZE) {
3873 prealloc = btrfs_alloc_subpage(fs_info, BTRFS_SUBPAGE_METADATA);
3874 if (IS_ERR(prealloc)) {
09e6cef1
QW
3875 ret = PTR_ERR(prealloc);
3876 goto out;
52ea5bfb
QW
3877 }
3878 }
3879
13df3775 3880reallocate:
09e6cef1 3881 /* Allocate all pages first. */
082d5bb9 3882 ret = alloc_eb_folio_array(eb, __GFP_NOFAIL);
09e6cef1
QW
3883 if (ret < 0) {
3884 btrfs_free_subpage(prealloc);
3885 goto out;
3886 }
3887
13df3775 3888 num_folios = num_extent_folios(eb);
09e6cef1 3889 /* Attach all pages to the filemap. */
13df3775
QW
3890 for (int i = 0; i < num_folios; i++) {
3891 struct folio *folio;
09e6cef1 3892
13df3775 3893 ret = attach_eb_folio_to_filemap(eb, i, &existing_eb);
09e6cef1
QW
3894 if (ret > 0) {
3895 ASSERT(existing_eb);
3896 goto out;
c871b0f2 3897 }
4f2de97a 3898
13df3775
QW
3899 /*
3900 * TODO: Special handling for a corner case where the order of
3901 * folios mismatches between the new eb and the filemap.
3902 *
3903 * This happens when:
3904 *
3905 * - the new eb is using higher order folio
3906 *
3907 * - the filemap is still using 0-order folios for the range
3908 * This can happen at the previous eb allocation, and we didn't
3909 * have a higher order folio for that call.
3910 *
3911 * - the existing eb has already been freed
3912 *
3913 * In this case, we have to free the existing folios first, and
3914 * re-allocate using the same order.
3915 * Thankfully this is not going to happen yet, as we're still
3916 * using 0-order folios.
3917 */
3918 if (unlikely(ret == -EAGAIN)) {
3919 ASSERT(0);
3920 goto reallocate;
d1310b2e 3921 }
09e6cef1 3922 attached++;
4f2de97a 3923
09e6cef1 3924 /*
13df3775 3925 * Only after attach_eb_folio_to_filemap(), eb->folios[] is
09e6cef1
QW
3926 * reliable, as we may choose to reuse the existing page cache
3927 * and free the allocated page.
3928 */
13df3775 3929 folio = eb->folios[i];
84cda1a6
QW
3930 eb->folio_size = folio_size(folio);
3931 eb->folio_shift = folio_shift(folio);
affc5af3 3932 spin_lock(&mapping->i_private_lock);
760f991f 3933 /* Should not fail, as we have preallocated the memory */
13df3775 3934 ret = attach_extent_buffer_folio(eb, folio, prealloc);
760f991f 3935 ASSERT(!ret);
8ff8466d
QW
3936 /*
3937 * To inform that we have an extra eb under allocation, so that
cfbf07e2 3938 * detach_extent_buffer_page() won't release the folio private
8ff8466d
QW
3939 * when the eb hasn't yet been inserted into radix tree.
3940 *
3941 * The ref will be decreased when the eb releases the page, in
3942 * detach_extent_buffer_page().
3943 * Thus it needs no special handling in the error path.
3944 */
13df3775 3945 btrfs_folio_inc_eb_refs(fs_info, folio);
600f111e 3946 spin_unlock(&mapping->i_private_lock);
760f991f 3947
55151ea9 3948 WARN_ON(btrfs_folio_test_dirty(fs_info, folio, eb->start, eb->len));
397239ed
QW
3949
3950 /*
3951 * Check if the current page is physically contiguous with previous eb
3952 * page.
13df3775
QW
3953 * At this stage, either we allocated a large folio, thus @i
3954 * would only be 0, or we fall back to per-page allocation.
397239ed 3955 */
13df3775 3956 if (i && folio_page(eb->folios[i - 1], 0) + 1 != folio_page(folio, 0))
397239ed
QW
3957 page_contig = false;
3958
55151ea9 3959 if (!btrfs_folio_test_uptodate(fs_info, folio, eb->start, eb->len))
d1310b2e 3960 uptodate = 0;
eb14ab8e
CM
3961
3962 /*
b16d011e
NB
3963 * We can't unlock the pages just yet since the extent buffer
3964 * hasn't been properly inserted in the radix tree, this
f913cff3 3965 * opens a race with btree_release_folio which can free a page
b16d011e
NB
3966 * while we are still filling in all pages for the buffer and
3967 * we could crash.
eb14ab8e 3968 */
d1310b2e
CM
3969 }
3970 if (uptodate)
b4ce94de 3971 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
397239ed
QW
3972 /* All pages are physically contiguous, can skip cross page handling. */
3973 if (page_contig)
082d5bb9 3974 eb->addr = folio_address(eb->folios[0]) + offset_in_page(eb->start);
01cd3909
DS
3975again:
3976 ret = radix_tree_preload(GFP_NOFS);
09e6cef1
QW
3977 if (ret)
3978 goto out;
01cd3909
DS
3979
3980 spin_lock(&fs_info->buffer_lock);
3981 ret = radix_tree_insert(&fs_info->buffer_radix,
3982 start >> fs_info->sectorsize_bits, eb);
3983 spin_unlock(&fs_info->buffer_lock);
3984 radix_tree_preload_end();
3985 if (ret == -EEXIST) {
09e6cef1
QW
3986 ret = 0;
3987 existing_eb = find_extent_buffer(fs_info, start);
3988 if (existing_eb)
3989 goto out;
01cd3909
DS
3990 else
3991 goto again;
3992 }
6af118ce 3993 /* add one reference for the tree */
0b32f4bb 3994 check_buffer_tree_ref(eb);
34b41ace 3995 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
eb14ab8e
CM
3996
3997 /*
b16d011e 3998 * Now it's safe to unlock the pages because any calls to
f913cff3 3999 * btree_release_folio will correctly detect that a page belongs to a
b16d011e 4000 * live buffer and won't free them prematurely.
eb14ab8e 4001 */
13df3775 4002 for (int i = 0; i < num_folios; i++)
082d5bb9 4003 unlock_page(folio_page(eb->folios[i], 0));
d1310b2e
CM
4004 return eb;
4005
09e6cef1 4006out:
5ca64f45 4007 WARN_ON(!atomic_dec_and_test(&eb->refs));
4a565c80
JB
4008
4009 /*
4010 * Any attached folios need to be detached before we unlock them. This
4011 * is because when we insert our new folios into the mapping, we then
4012 * attach our eb to each folio. If we fail to insert a folio, we'll
4013 * look up the folio for that index and grab its eb. We do not want
4014 * that lookup to grab this eb, as we're getting ready to free it. So we
4015 * have to detach it first and then unlock it.
4016 *
4017 * We have to drop our reference and NULL it out here because in the
4018 * subpage case detaching does a btrfs_folio_dec_eb_refs() for our eb.
4019 * Below when we call btrfs_release_extent_buffer() we will call
4020 * detach_extent_buffer_folio() on our remaining pages in the !subpage
4021 * case. If we left eb->folios[i] populated in the subpage case we'd
4022 * double put our reference and be super sad.
4023 */
09e6cef1 4024 for (int i = 0; i < attached; i++) {
082d5bb9 4025 ASSERT(eb->folios[i]);
13df3775 4026 detach_extent_buffer_folio(eb, eb->folios[i]);
082d5bb9 4027 unlock_page(folio_page(eb->folios[i], 0));
4a565c80
JB
4028 folio_put(eb->folios[i]);
4029 eb->folios[i] = NULL;
727011e0 4030 }
09e6cef1
QW
4031 /*
4032 * Now all pages of that extent buffer are unmapped, set the UNMAPPED
4033 * flag, so it can be cleaned up without utilizing page->mapping.
4034 */
4035 set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
eb14ab8e 4036
897ca6e9 4037 btrfs_release_extent_buffer(eb);
09e6cef1
QW
4038 if (ret < 0)
4039 return ERR_PTR(ret);
4040 ASSERT(existing_eb);
4041 return existing_eb;
d1310b2e 4042}
d1310b2e 4043
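/*
 * Illustrative call sequence for a metadata read (variable names are
 * assumptions; see read_extent_buffer_pages() below for the wait):
 *
 *	eb = alloc_extent_buffer(fs_info, bytenr, owner_root, level);
 *	if (IS_ERR(eb))
 *		return eb;
 *	ret = read_extent_buffer_pages(eb, WAIT_COMPLETE, 0, &check);
 *	if (ret) {
 *		free_extent_buffer(eb);
 *		return ERR_PTR(ret);
 *	}
 */
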
3083ee2e
JB
4044static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
4045{
4046 struct extent_buffer *eb =
4047 container_of(head, struct extent_buffer, rcu_head);
4048
4049 __free_extent_buffer(eb);
4050}
4051
f7a52a40 4052static int release_extent_buffer(struct extent_buffer *eb)
5ce48d0f 4053 __releases(&eb->refs_lock)
3083ee2e 4054{
07e21c4d
NB
4055 lockdep_assert_held(&eb->refs_lock);
4056
3083ee2e
JB
4057 WARN_ON(atomic_read(&eb->refs) == 0);
4058 if (atomic_dec_and_test(&eb->refs)) {
34b41ace 4059 if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
f28491e0 4060 struct btrfs_fs_info *fs_info = eb->fs_info;
3083ee2e 4061
815a51c7 4062 spin_unlock(&eb->refs_lock);
3083ee2e 4063
01cd3909
DS
4064 spin_lock(&fs_info->buffer_lock);
4065 radix_tree_delete(&fs_info->buffer_radix,
4066 eb->start >> fs_info->sectorsize_bits);
4067 spin_unlock(&fs_info->buffer_lock);
34b41ace
JB
4068 } else {
4069 spin_unlock(&eb->refs_lock);
815a51c7 4070 }
3083ee2e 4071
a40246e8 4072 btrfs_leak_debug_del_eb(eb);
3083ee2e 4073 /* Should be safe to release our pages at this point */
55ac0139 4074 btrfs_release_extent_buffer_pages(eb);
bcb7e449 4075#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
b0132a3b 4076 if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) {
bcb7e449
JB
4077 __free_extent_buffer(eb);
4078 return 1;
4079 }
4080#endif
3083ee2e 4081 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
e64860aa 4082 return 1;
3083ee2e
JB
4083 }
4084 spin_unlock(&eb->refs_lock);
e64860aa
JB
4085
4086 return 0;
3083ee2e
JB
4087}
4088
d1310b2e
CM
4089void free_extent_buffer(struct extent_buffer *eb)
4090{
242e18c7 4091 int refs;
d1310b2e
CM
4092 if (!eb)
4093 return;
4094
e5677f05 4095 refs = atomic_read(&eb->refs);
242e18c7 4096 while (1) {
46cc775e
NB
4097 if ((!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && refs <= 3)
4098 || (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) &&
4099 refs == 1))
242e18c7 4100 break;
e5677f05 4101 if (atomic_try_cmpxchg(&eb->refs, &refs, refs - 1))
242e18c7
CM
4102 return;
4103 }
4104
3083ee2e
JB
4105 spin_lock(&eb->refs_lock);
4106 if (atomic_read(&eb->refs) == 2 &&
4107 test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
0b32f4bb 4108 !extent_buffer_under_io(eb) &&
3083ee2e
JB
4109 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4110 atomic_dec(&eb->refs);
4111
4112 /*
4113 * I know this is terrible, but it's temporary until we stop tracking
4114 * the uptodate bits and such for the extent buffers.
4115 */
f7a52a40 4116 release_extent_buffer(eb);
3083ee2e
JB
4117}
4118
4119void free_extent_buffer_stale(struct extent_buffer *eb)
4120{
4121 if (!eb)
d1310b2e
CM
4122 return;
4123
3083ee2e
JB
4124 spin_lock(&eb->refs_lock);
4125 set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
4126
0b32f4bb 4127 if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
3083ee2e
JB
4128 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4129 atomic_dec(&eb->refs);
f7a52a40 4130 release_extent_buffer(eb);
d1310b2e 4131}
d1310b2e 4132
13df3775 4133static void btree_clear_folio_dirty(struct folio *folio)
0d27797e 4134{
13df3775
QW
4135 ASSERT(folio_test_dirty(folio));
4136 ASSERT(folio_test_locked(folio));
4137 folio_clear_dirty_for_io(folio);
4138 xa_lock_irq(&folio->mapping->i_pages);
4139 if (!folio_test_dirty(folio))
4140 __xa_clear_mark(&folio->mapping->i_pages,
4141 folio_index(folio), PAGECACHE_TAG_DIRTY);
4142 xa_unlock_irq(&folio->mapping->i_pages);
0d27797e
QW
4143}
4144
4145static void clear_subpage_extent_buffer_dirty(const struct extent_buffer *eb)
4146{
4147 struct btrfs_fs_info *fs_info = eb->fs_info;
13df3775 4148 struct folio *folio = eb->folios[0];
0d27797e
QW
4149 bool last;
4150
13df3775
QW
4151 /* btree_clear_folio_dirty() needs page locked. */
4152 folio_lock(folio);
55151ea9 4153 last = btrfs_subpage_clear_and_test_dirty(fs_info, folio, eb->start, eb->len);
0d27797e 4154 if (last)
13df3775
QW
4155 btree_clear_folio_dirty(folio);
4156 folio_unlock(folio);
0d27797e
QW
4157 WARN_ON(atomic_read(&eb->refs) == 0);
4158}
4159
98c8d683
JB
4160void btrfs_clear_buffer_dirty(struct btrfs_trans_handle *trans,
4161 struct extent_buffer *eb)
d1310b2e 4162{
98c8d683 4163 struct btrfs_fs_info *fs_info = eb->fs_info;
13df3775 4164 int num_folios;
d1310b2e 4165
98c8d683
JB
4166 btrfs_assert_tree_write_locked(eb);
4167
4168 if (trans && btrfs_header_generation(eb) != trans->transid)
4169 return;
4170
aa6313e6
JT
4171 /*
4172 * Instead of clearing the dirty flag off of the buffer, mark it as
4173 * EXTENT_BUFFER_ZONED_ZEROOUT. This allows us to preserve
4174 * write-ordering in zoned mode, without the need to later re-dirty
4175 * the extent_buffer.
4176 *
4177 * The actual zeroout of the buffer will happen later in
4178 * btree_csum_one_bio.
4179 */
68879386 4180 if (btrfs_is_zoned(fs_info) && test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
aa6313e6
JT
4181 set_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags);
4182 return;
4183 }
4184
98c8d683
JB
4185 if (!test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags))
4186 return;
4187
4188 percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, -eb->len,
4189 fs_info->dirty_metadata_batch);
4190
fbca46eb 4191 if (eb->fs_info->nodesize < PAGE_SIZE)
0d27797e
QW
4192 return clear_subpage_extent_buffer_dirty(eb);
4193
13df3775
QW
4194 num_folios = num_extent_folios(eb);
4195 for (int i = 0; i < num_folios; i++) {
4196 struct folio *folio = eb->folios[i];
d1310b2e 4197
13df3775 4198 if (!folio_test_dirty(folio))
d2c3f4f6 4199 continue;
13df3775
QW
4200 folio_lock(folio);
4201 btree_clear_folio_dirty(folio);
4202 folio_unlock(folio);
d1310b2e 4203 }
0b32f4bb 4204 WARN_ON(atomic_read(&eb->refs) == 0);
d1310b2e 4205}
d1310b2e 4206
f18cc978 4207void set_extent_buffer_dirty(struct extent_buffer *eb)
d1310b2e 4208{
13df3775 4209 int num_folios;
abb57ef3 4210 bool was_dirty;
d1310b2e 4211
0b32f4bb
JB
4212 check_buffer_tree_ref(eb);
4213
b9473439 4214 was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
0b32f4bb 4215
13df3775 4216 num_folios = num_extent_folios(eb);
3083ee2e 4217 WARN_ON(atomic_read(&eb->refs) == 0);
0b32f4bb 4218 WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
073bda7a 4219 WARN_ON(test_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags));
0b32f4bb 4220
0d27797e 4221 if (!was_dirty) {
fbca46eb 4222 bool subpage = eb->fs_info->nodesize < PAGE_SIZE;
51995c39 4223
0d27797e
QW
4224 /*
4225 * For subpage case, we can have other extent buffers in the
4226 * same page, and in clear_subpage_extent_buffer_dirty() we
4227 * have to clear page dirty without subpage lock held.
4228 * This can cause a race where our page's dirty bit gets cleared
4229 * right after we set it.
4230 *
4231 * Thankfully, clear_subpage_extent_buffer_dirty() has locked
4232 * its page for other reasons, so we can use the page lock to
4233 * prevent the above race.
4234 */
4235 if (subpage)
082d5bb9 4236 lock_page(folio_page(eb->folios[0], 0));
13df3775 4237 for (int i = 0; i < num_folios; i++)
55151ea9
QW
4238 btrfs_folio_set_dirty(eb->fs_info, eb->folios[i],
4239 eb->start, eb->len);
0d27797e 4240 if (subpage)
082d5bb9 4241 unlock_page(folio_page(eb->folios[0], 0));
f18cc978
CH
4242 percpu_counter_add_batch(&eb->fs_info->dirty_metadata_bytes,
4243 eb->len,
4244 eb->fs_info->dirty_metadata_batch);
0d27797e 4245 }
51995c39 4246#ifdef CONFIG_BTRFS_DEBUG
13df3775
QW
4247 for (int i = 0; i < num_folios; i++)
4248 ASSERT(folio_test_dirty(eb->folios[i]));
51995c39 4249#endif
d1310b2e 4250}
d1310b2e 4251
69ba3927 4252void clear_extent_buffer_uptodate(struct extent_buffer *eb)
1259ab75 4253{
251f2acc 4254 struct btrfs_fs_info *fs_info = eb->fs_info;
13df3775 4255 int num_folios = num_extent_folios(eb);
1259ab75 4256
b4ce94de 4257 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
13df3775
QW
4258 for (int i = 0; i < num_folios; i++) {
4259 struct folio *folio = eb->folios[i];
4260
4261 if (!folio)
fbca46eb
QW
4262 continue;
4263
4264 /*
4265 * This is special handling for metadata subpage, as regular
4266 * btrfs_is_subpage() can not handle cloned/dummy metadata.
4267 */
4268 if (fs_info->nodesize >= PAGE_SIZE)
13df3775 4269 folio_clear_uptodate(folio);
fbca46eb 4270 else
55151ea9 4271 btrfs_subpage_clear_uptodate(fs_info, folio,
13df3775 4272 eb->start, eb->len);
1259ab75 4273 }
1259ab75
CM
4274}
4275
09c25a8c 4276void set_extent_buffer_uptodate(struct extent_buffer *eb)
d1310b2e 4277{
251f2acc 4278 struct btrfs_fs_info *fs_info = eb->fs_info;
13df3775 4279 int num_folios = num_extent_folios(eb);
d1310b2e 4280
0b32f4bb 4281 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
13df3775
QW
4282 for (int i = 0; i < num_folios; i++) {
4283 struct folio *folio = eb->folios[i];
fbca46eb
QW
4284
4285 /*
4286 * This is special handling for metadata subpage, as regular
4287 * btrfs_is_subpage() can not handle cloned/dummy metadata.
4288 */
4289 if (fs_info->nodesize >= PAGE_SIZE)
13df3775 4290 folio_mark_uptodate(folio);
fbca46eb 4291 else
55151ea9 4292 btrfs_subpage_set_uptodate(fs_info, folio,
13df3775 4293 eb->start, eb->len);
d1310b2e 4294 }
d1310b2e 4295}
d1310b2e 4296
1e2d1837
TB
4297static void clear_extent_buffer_reading(struct extent_buffer *eb)
4298{
4299 clear_bit(EXTENT_BUFFER_READING, &eb->bflags);
4300 smp_mb__after_atomic();
4301 wake_up_bit(&eb->bflags, EXTENT_BUFFER_READING);
4302}
4303
a700ca5e 4304static void end_bbio_meta_read(struct btrfs_bio *bbio)
046b562b
CH
4305{
4306 struct extent_buffer *eb = bbio->private;
d7172f52 4307 struct btrfs_fs_info *fs_info = eb->fs_info;
046b562b 4308 bool uptodate = !bbio->bio.bi_status;
a700ca5e 4309 struct folio_iter fi;
046b562b
CH
4310 u32 bio_offset = 0;
4311
f32f20e2
TB
4312 /*
4313 * If the extent buffer is marked UPTODATE before the read operation
4314 * completes, other calls to read_extent_buffer_pages() will return
4315 * early without waiting for the read to finish, causing data races.
4316 */
4317 WARN_ON(test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags));
4318
046b562b
CH
4319 eb->read_mirror = bbio->mirror_num;
4320
4321 if (uptodate &&
4322 btrfs_validate_extent_buffer(eb, &bbio->parent_check) < 0)
4323 uptodate = false;
4324
4325 if (uptodate) {
4326 set_extent_buffer_uptodate(eb);
4327 } else {
4328 clear_extent_buffer_uptodate(eb);
4329 set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
4330 }
4331
a700ca5e
QW
4332 bio_for_each_folio_all(fi, &bbio->bio) {
4333 struct folio *folio = fi.folio;
d7172f52 4334 u64 start = eb->start + bio_offset;
a700ca5e 4335 u32 len = fi.length;
046b562b 4336
d7172f52 4337 if (uptodate)
a700ca5e 4338 btrfs_folio_set_uptodate(fs_info, folio, start, len);
d7172f52 4339 else
a700ca5e 4340 btrfs_folio_clear_uptodate(fs_info, folio, start, len);
d7172f52
CH
4341
4342 bio_offset += len;
3d66b4b2 4343 }
d7172f52 4344
1e2d1837 4345 clear_extent_buffer_reading(eb);
046b562b
CH
4346 free_extent_buffer(eb);
4347
4348 bio_put(&bbio->bio);
4349}
4350
d7172f52
CH
4351int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num,
4352 struct btrfs_tree_parent_check *check)
b78b98e0 4353{
b78b98e0 4354 struct btrfs_bio *bbio;
13df3775 4355 bool ret;
b78b98e0 4356
d7172f52
CH
4357 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
4358 return 0;
4359
4360 /*
4361 * We could have had EXTENT_BUFFER_UPTODATE cleared by the write
4362 * operation, which could potentially still be in flight. In this case
4363 * we simply want to return an error.
4364 */
4365 if (unlikely(test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)))
4366 return -EIO;
4367
4368 /* Someone else is already reading the buffer, just wait for it. */
4369 if (test_and_set_bit(EXTENT_BUFFER_READING, &eb->bflags))
4370 goto done;
4371
ef1e6823
TB
4372 /*
4373 * Between the initial test_bit(EXTENT_BUFFER_UPTODATE) and the above
4374 * test_and_set_bit(EXTENT_BUFFER_READING), someone else could have
4375 * started and finished reading the same eb. In this case, UPTODATE
4376 * will now be set, and we shouldn't read it in again.
4377 */
4378 if (unlikely(test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))) {
1e2d1837 4379 clear_extent_buffer_reading(eb);
ef1e6823
TB
4380 return 0;
4381 }
4382
b78b98e0
CH
4383 clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
4384 eb->read_mirror = 0;
b78b98e0 4385 check_buffer_tree_ref(eb);
113fa05c 4386 atomic_inc(&eb->refs);
b78b98e0
CH
4387
4388 bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
4389 REQ_OP_READ | REQ_META, eb->fs_info,
a700ca5e 4390 end_bbio_meta_read, eb);
b78b98e0
CH
4391 bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT;
4392 bbio->inode = BTRFS_I(eb->fs_info->btree_inode);
4393 bbio->file_offset = eb->start;
4394 memcpy(&bbio->parent_check, check, sizeof(*check));
4395 if (eb->fs_info->nodesize < PAGE_SIZE) {
13df3775
QW
4396 ret = bio_add_folio(&bbio->bio, eb->folios[0], eb->len,
4397 eb->start - folio_pos(eb->folios[0]));
4398 ASSERT(ret);
b78b98e0 4399 } else {
13df3775
QW
4400 int num_folios = num_extent_folios(eb);
4401
4402 for (int i = 0; i < num_folios; i++) {
4403 struct folio *folio = eb->folios[i];
4404
84cda1a6 4405 ret = bio_add_folio(&bbio->bio, folio, eb->folio_size, 0);
13df3775
QW
4406 ASSERT(ret);
4407 }
b78b98e0
CH
4408 }
4409 btrfs_submit_bio(bbio, mirror_num);
b78b98e0 4410
d7172f52
CH
4411done:
4412 if (wait == WAIT_COMPLETE) {
4413 wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_READING, TASK_UNINTERRUPTIBLE);
4414 if (!test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
55173337 4415 return -EIO;
d1310b2e 4416 }
d397712b 4417
55173337 4418 return 0;
d1310b2e 4419}
d1310b2e 4420
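/*
 * Sketch of a caller; the parent check fields chosen here are
 * assumptions for illustration (see struct btrfs_tree_parent_check
 * for the full set):
 *
 *	struct btrfs_tree_parent_check check = {
 *		.owner_root = owner_root,
 *		.transid = parent_transid,
 *		.level = level,
 *	};
 *
 *	ret = read_extent_buffer_pages(eb, WAIT_COMPLETE, 0, &check);
 *	if (ret < 0)
 *		return ret;
 */
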
f98b6215
QW
4421static bool report_eb_range(const struct extent_buffer *eb, unsigned long start,
4422 unsigned long len)
4423{
4424 btrfs_warn(eb->fs_info,
84cda1a6 4425 "access to eb bytenr %llu len %u out of range start %lu len %lu",
f98b6215
QW
4426 eb->start, eb->len, start, len);
4427 WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
4428
4429 return true;
4430}
4431
4432/*
4433 * Check if the [start, start + len) range is valid before reading/writing
4434 * the eb.
4435 * NOTE: @start and @len are offset inside the eb, not logical address.
4436 *
4437 * Caller should not touch the dst/src memory if this function returns error.
4438 */
4439static inline int check_eb_range(const struct extent_buffer *eb,
4440 unsigned long start, unsigned long len)
4441{
4442 unsigned long offset;
4443
4444 /* start, start + len should not go beyond eb->len nor overflow */
4445 if (unlikely(check_add_overflow(start, len, &offset) || offset > eb->len))
4446 return report_eb_range(eb, start, len);
4447
4448 return false;
4449}
4450
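/*
 * Worked example of the guard above, assuming eb->len == 4096:
 * start == 4092 with len == 8 gives start + len == 4100 > eb->len and
 * the range is reported and rejected, while start == 4092 with
 * len == 4 is accepted. The check_add_overflow() also rejects
 * start + len wrapping past ULONG_MAX.
 */
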
1cbb1f45
JM
4451void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
4452 unsigned long start, unsigned long len)
d1310b2e 4453{
84cda1a6 4454 const int unit_size = eb->folio_size;
d1310b2e
CM
4455 size_t cur;
4456 size_t offset;
d1310b2e 4457 char *dst = (char *)dstv;
8d993618 4458 unsigned long i = get_eb_folio_index(eb, start);
d1310b2e 4459
74ee7914
QW
4460 if (check_eb_range(eb, start, len)) {
4461 /*
4462 * Invalid range hit, reset the memory, so callers won't get
eefaf0a1 4463 * some random garbage for their uninitialized memory.
74ee7914
QW
4464 */
4465 memset(dstv, 0, len);
f716abd5 4466 return;
74ee7914 4467 }
d1310b2e 4468
397239ed
QW
4469 if (eb->addr) {
4470 memcpy(dstv, eb->addr + start, len);
4471 return;
4472 }
4473
8d993618 4474 offset = get_eb_offset_in_folio(eb, start);
d1310b2e 4475
d397712b 4476 while (len > 0) {
8d993618 4477 char *kaddr;
d1310b2e 4478
8d993618
QW
4479 cur = min(len, unit_size - offset);
4480 kaddr = folio_address(eb->folios[i]);
d1310b2e 4481 memcpy(dst, kaddr + offset, cur);
d1310b2e
CM
4482
4483 dst += cur;
4484 len -= cur;
4485 offset = 0;
4486 i++;
4487 }
4488}
d1310b2e 4489
a48b73ec
JB
4490int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
4491 void __user *dstv,
4492 unsigned long start, unsigned long len)
550ac1d8 4493{
84cda1a6 4494 const int unit_size = eb->folio_size;
550ac1d8
GH
4495 size_t cur;
4496 size_t offset;
550ac1d8 4497 char __user *dst = (char __user *)dstv;
8d993618 4498 unsigned long i = get_eb_folio_index(eb, start);
550ac1d8
GH
4499 int ret = 0;
4500
4501 WARN_ON(start > eb->len);
4502 WARN_ON(start + len > eb->start + eb->len);
4503
397239ed
QW
4504 if (eb->addr) {
4505 if (copy_to_user_nofault(dstv, eb->addr + start, len))
4506 ret = -EFAULT;
4507 return ret;
4508 }
4509
8d993618 4510 offset = get_eb_offset_in_folio(eb, start);
550ac1d8
GH
4511
4512 while (len > 0) {
8d993618 4513 char *kaddr;
550ac1d8 4514
8d993618
QW
4515 cur = min(len, unit_size - offset);
4516 kaddr = folio_address(eb->folios[i]);
a48b73ec 4517 if (copy_to_user_nofault(dst, kaddr + offset, cur)) {
550ac1d8
GH
4518 ret = -EFAULT;
4519 break;
4520 }
4521
4522 dst += cur;
4523 len -= cur;
4524 offset = 0;
4525 i++;
4526 }
4527
4528 return ret;
4529}
4530
1cbb1f45
JM
4531int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
4532 unsigned long start, unsigned long len)
d1310b2e 4533{
84cda1a6 4534 const int unit_size = eb->folio_size;
d1310b2e
CM
4535 size_t cur;
4536 size_t offset;
d1310b2e
CM
4537 char *kaddr;
4538 char *ptr = (char *)ptrv;
8d993618 4539 unsigned long i = get_eb_folio_index(eb, start);
d1310b2e
CM
4540 int ret = 0;
4541
f98b6215
QW
4542 if (check_eb_range(eb, start, len))
4543 return -EINVAL;
d1310b2e 4544
397239ed
QW
4545 if (eb->addr)
4546 return memcmp(ptrv, eb->addr + start, len);
d1310b2e 4547
8d993618 4548 offset = get_eb_offset_in_folio(eb, start);
d1310b2e 4549
d397712b 4550 while (len > 0) {
8d993618
QW
4551 cur = min(len, unit_size - offset);
4552 kaddr = folio_address(eb->folios[i]);
d1310b2e 4553 ret = memcmp(ptr, kaddr + offset, cur);
d1310b2e
CM
4554 if (ret)
4555 break;
4556
4557 ptr += cur;
4558 len -= cur;
4559 offset = 0;
4560 i++;
4561 }
4562 return ret;
4563}
d1310b2e 4564
b8f95771
QW
4565/*
4566 * Check that the extent buffer is uptodate.
4567 *
4568 * For regular sector size == PAGE_SIZE case, check if @page is uptodate.
4569 * For subpage case, check if the range covered by the eb has EXTENT_UPTODATE.
4570 */
8d993618 4571static void assert_eb_folio_uptodate(const struct extent_buffer *eb, int i)
b8f95771
QW
4572{
4573 struct btrfs_fs_info *fs_info = eb->fs_info;
8d993618
QW
4574 struct folio *folio = eb->folios[i];
4575
4576 ASSERT(folio);
b8f95771 4577
a50e1fcb
JB
4578 /*
4579 * If we are using the commit root we could potentially clear a page
4580 * Uptodate while we're using the extent buffer that we've previously
4581 * looked up. We don't want to complain in this case, as the page was
4582 * valid before, we just didn't write it out. Instead we want to catch
4583 * the case where we didn't actually read the block properly, which
011134f4 4584 * would have !PageUptodate and !EXTENT_BUFFER_WRITE_ERR.
a50e1fcb 4585 */
011134f4
CH
4586 if (test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
4587 return;
b8f95771 4588
011134f4 4589 if (fs_info->nodesize < PAGE_SIZE) {
55151ea9 4590 struct folio *folio = eb->folios[0];
8d993618 4591
55151ea9
QW
4592 ASSERT(i == 0);
4593 if (WARN_ON(!btrfs_subpage_test_uptodate(fs_info, folio,
75258f20 4594 eb->start, eb->len)))
55151ea9 4595 btrfs_subpage_dump_bitmap(fs_info, folio, eb->start, eb->len);
b8f95771 4596 } else {
8d993618 4597 WARN_ON(!folio_test_uptodate(folio));
b8f95771
QW
4598 }
4599}
4600
13840f3f
QW
4601static void __write_extent_buffer(const struct extent_buffer *eb,
4602 const void *srcv, unsigned long start,
4603 unsigned long len, bool use_memmove)
d1310b2e 4604{
84cda1a6 4605 const int unit_size = eb->folio_size;
d1310b2e
CM
4606 size_t cur;
4607 size_t offset;
d1310b2e
CM
4608 char *kaddr;
4609 char *src = (char *)srcv;
8d993618 4610 unsigned long i = get_eb_folio_index(eb, start);
13840f3f
QW
4611 /* For unmapped (dummy) ebs, no need to check their uptodate status. */
4612 const bool check_uptodate = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
d1310b2e 4613
f98b6215
QW
4614 if (check_eb_range(eb, start, len))
4615 return;
d1310b2e 4616
397239ed
QW
4617 if (eb->addr) {
4618 if (use_memmove)
4619 memmove(eb->addr + start, srcv, len);
4620 else
4621 memcpy(eb->addr + start, srcv, len);
4622 return;
4623 }
4624
8d993618 4625 offset = get_eb_offset_in_folio(eb, start);
d1310b2e 4626
d397712b 4627 while (len > 0) {
13840f3f 4628 if (check_uptodate)
8d993618 4629 assert_eb_folio_uptodate(eb, i);
d1310b2e 4630
8d993618
QW
4631 cur = min(len, unit_size - offset);
4632 kaddr = folio_address(eb->folios[i]);
13840f3f
QW
4633 if (use_memmove)
4634 memmove(kaddr + offset, src, cur);
4635 else
4636 memcpy(kaddr + offset, src, cur);
d1310b2e
CM
4637
4638 src += cur;
4639 len -= cur;
4640 offset = 0;
4641 i++;
4642 }
4643}
d1310b2e 4644
13840f3f
QW
4645void write_extent_buffer(const struct extent_buffer *eb, const void *srcv,
4646 unsigned long start, unsigned long len)
4647{
4648 return __write_extent_buffer(eb, srcv, start, len, false);
4649}
4650
cb22964f
QW
4651static void memset_extent_buffer(const struct extent_buffer *eb, int c,
4652 unsigned long start, unsigned long len)
d1310b2e 4653{
84cda1a6 4654 const int unit_size = eb->folio_size;
cb22964f 4655 unsigned long cur = start;
d1310b2e 4656
397239ed
QW
4657 if (eb->addr) {
4658 memset(eb->addr + start, c, len);
4659 return;
4660 }
4661
cb22964f 4662 while (cur < start + len) {
8d993618
QW
4663 unsigned long index = get_eb_folio_index(eb, cur);
4664 unsigned int offset = get_eb_offset_in_folio(eb, cur);
4665 unsigned int cur_len = min(start + len - cur, unit_size - offset);
d1310b2e 4666
8d993618
QW
4667 assert_eb_folio_uptodate(eb, index);
4668 memset(folio_address(eb->folios[index]) + offset, c, cur_len);
d1310b2e 4669
cb22964f 4670 cur += cur_len;
d1310b2e
CM
4671 }
4672}
d1310b2e 4673
cb22964f
QW
4674void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start,
4675 unsigned long len)
4676{
4677 if (check_eb_range(eb, start, len))
4678 return;
4679 return memset_extent_buffer(eb, 0, start, len);
4680}
4681
2b48966a
DS
4682void copy_extent_buffer_full(const struct extent_buffer *dst,
4683 const struct extent_buffer *src)
58e8012c 4684{
84cda1a6 4685 const int unit_size = src->folio_size;
54948681 4686 unsigned long cur = 0;
58e8012c
DS
4687
4688 ASSERT(dst->len == src->len);
4689
54948681 4690 while (cur < src->len) {
8d993618
QW
4691 unsigned long index = get_eb_folio_index(src, cur);
4692 unsigned long offset = get_eb_offset_in_folio(src, cur);
4693 unsigned long cur_len = min(src->len, unit_size - offset);
082d5bb9 4694 void *addr = folio_address(src->folios[index]) + offset;
54948681
QW
4695
4696 write_extent_buffer(dst, addr, cur, cur_len);
884b07d0 4697
54948681 4698 cur += cur_len;
884b07d0 4699 }
58e8012c
DS
4700}

void copy_extent_buffer(const struct extent_buffer *dst,
			const struct extent_buffer *src,
			unsigned long dst_offset, unsigned long src_offset,
			unsigned long len)
{
	const int unit_size = dst->folio_size;
	u64 dst_len = dst->len;
	size_t cur;
	size_t offset;
	char *kaddr;
	unsigned long i = get_eb_folio_index(dst, dst_offset);

	if (check_eb_range(dst, dst_offset, len) ||
	    check_eb_range(src, src_offset, len))
		return;

	WARN_ON(src->len != dst_len);

	offset = get_eb_offset_in_folio(dst, dst_offset);

	while (len > 0) {
		assert_eb_folio_uptodate(dst, i);

		cur = min(len, (unsigned long)(unit_size - offset));

		kaddr = folio_address(dst->folios[i]);
		read_extent_buffer(src, kaddr + offset, src_offset, cur);

		src_offset += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}

/*
 * Calculate the folio and offset of the byte containing the given bit number.
 *
 * @eb: the extent buffer
 * @start: offset of the bitmap item in the extent buffer
 * @nr: bit number
 * @folio_index: return index of the folio in the extent buffer that contains
 *               the given bit number
 * @folio_offset: return offset into the folio given by folio_index
 *
 * This helper hides the ugliness of finding the byte in an extent buffer which
 * contains a given bit.
 */
static inline void eb_bitmap_offset(const struct extent_buffer *eb,
				    unsigned long start, unsigned long nr,
				    unsigned long *folio_index,
				    size_t *folio_offset)
{
	size_t byte_offset = BIT_BYTE(nr);
	size_t offset;

	/*
	 * The byte we want is the offset of the extent buffer + the offset of
	 * the bitmap item in the extent buffer + the offset of the byte in the
	 * bitmap item.
	 */
	offset = start + offset_in_eb_folio(eb, eb->start) + byte_offset;

	*folio_index = offset >> eb->folio_shift;
	*folio_offset = offset_in_eb_folio(eb, offset);
}

/*
 * Determine whether a bit in a bitmap item is set.
 *
 * @eb: the extent buffer
 * @start: offset of the bitmap item in the extent buffer
 * @nr: bit number to test
 */
int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start,
			   unsigned long nr)
{
	unsigned long i;
	size_t offset;
	u8 *kaddr;

	eb_bitmap_offset(eb, start, nr, &i, &offset);
	assert_eb_folio_uptodate(eb, i);
	kaddr = folio_address(eb->folios[i]);
	return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1)));
}

static u8 *extent_buffer_get_byte(const struct extent_buffer *eb, unsigned long bytenr)
{
	unsigned long index = get_eb_folio_index(eb, bytenr);

	if (check_eb_range(eb, bytenr, 1))
		return NULL;
	return folio_address(eb->folios[index]) + get_eb_offset_in_folio(eb, bytenr);
}

/*
 * Set an area of a bitmap to 1.
 *
 * @eb: the extent buffer
 * @start: offset of the bitmap item in the extent buffer
 * @pos: bit number of the first bit
 * @len: number of bits to set
 */
void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start,
			      unsigned long pos, unsigned long len)
{
	unsigned int first_byte = start + BIT_BYTE(pos);
	unsigned int last_byte = start + BIT_BYTE(pos + len - 1);
	const bool same_byte = (first_byte == last_byte);
	u8 mask = BITMAP_FIRST_BYTE_MASK(pos);
	u8 *kaddr;

	if (same_byte)
		mask &= BITMAP_LAST_BYTE_MASK(pos + len);

	/* Handle the first byte. */
	kaddr = extent_buffer_get_byte(eb, first_byte);
	*kaddr |= mask;
	if (same_byte)
		return;

	/* Handle the byte aligned part. */
	ASSERT(first_byte + 1 <= last_byte);
	memset_extent_buffer(eb, 0xff, first_byte + 1, last_byte - first_byte - 1);

	/* Handle the last byte. */
	kaddr = extent_buffer_get_byte(eb, last_byte);
	*kaddr |= BITMAP_LAST_BYTE_MASK(pos + len);
}
/*
 * Clear an area of a bitmap.
 *
 * @eb: the extent buffer
 * @start: offset of the bitmap item in the extent buffer
 * @pos: bit number of the first bit
 * @len: number of bits to clear
 */
void extent_buffer_bitmap_clear(const struct extent_buffer *eb,
				unsigned long start, unsigned long pos,
				unsigned long len)
{
	unsigned int first_byte = start + BIT_BYTE(pos);
	unsigned int last_byte = start + BIT_BYTE(pos + len - 1);
	const bool same_byte = (first_byte == last_byte);
	u8 mask = BITMAP_FIRST_BYTE_MASK(pos);
	u8 *kaddr;

	if (same_byte)
		mask &= BITMAP_LAST_BYTE_MASK(pos + len);

	/* Handle the first byte. */
	kaddr = extent_buffer_get_byte(eb, first_byte);
	*kaddr &= ~mask;
	if (same_byte)
		return;

	/* Handle the byte aligned part. */
	ASSERT(first_byte + 1 <= last_byte);
	memset_extent_buffer(eb, 0, first_byte + 1, last_byte - first_byte - 1);

	/* Handle the last byte. */
	kaddr = extent_buffer_get_byte(eb, last_byte);
	*kaddr &= ~BITMAP_LAST_BYTE_MASK(pos + len);
}

static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
{
	unsigned long distance = (src > dst) ? src - dst : dst - src;

	return distance < len;
}

void memcpy_extent_buffer(const struct extent_buffer *dst,
			  unsigned long dst_offset, unsigned long src_offset,
			  unsigned long len)
{
	const int unit_size = dst->folio_size;
	unsigned long cur_off = 0;

	if (check_eb_range(dst, dst_offset, len) ||
	    check_eb_range(dst, src_offset, len))
		return;

	if (dst->addr) {
		const bool use_memmove = areas_overlap(src_offset, dst_offset, len);

		if (use_memmove)
			memmove(dst->addr + dst_offset, dst->addr + src_offset, len);
		else
			memcpy(dst->addr + dst_offset, dst->addr + src_offset, len);
		return;
	}

	while (cur_off < len) {
		unsigned long cur_src = cur_off + src_offset;
		unsigned long folio_index = get_eb_folio_index(dst, cur_src);
		unsigned long folio_off = get_eb_offset_in_folio(dst, cur_src);
		unsigned long cur_len = min(src_offset + len - cur_src,
					    unit_size - folio_off);
		void *src_addr = folio_address(dst->folios[folio_index]) + folio_off;
		const bool use_memmove = areas_overlap(src_offset + cur_off,
						       dst_offset + cur_off, cur_len);

		__write_extent_buffer(dst, src_addr, dst_offset + cur_off, cur_len,
				      use_memmove);
		cur_off += cur_len;
	}
}

void memmove_extent_buffer(const struct extent_buffer *dst,
			   unsigned long dst_offset, unsigned long src_offset,
			   unsigned long len)
{
	unsigned long dst_end = dst_offset + len - 1;
	unsigned long src_end = src_offset + len - 1;

	if (check_eb_range(dst, dst_offset, len) ||
	    check_eb_range(dst, src_offset, len))
		return;

	if (dst_offset < src_offset) {
		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
		return;
	}

	if (dst->addr) {
		memmove(dst->addr + dst_offset, dst->addr + src_offset, len);
		return;
	}

	/*
	 * dst_offset >= src_offset here, so walk backwards from the end of the
	 * ranges and copy chunk by chunk to avoid clobbering not-yet-copied
	 * source bytes.
	 */
	while (len > 0) {
		unsigned long src_i;
		size_t cur;
		size_t dst_off_in_folio;
		size_t src_off_in_folio;
		void *src_addr;
		bool use_memmove;

		src_i = get_eb_folio_index(dst, src_end);

		dst_off_in_folio = get_eb_offset_in_folio(dst, dst_end);
		src_off_in_folio = get_eb_offset_in_folio(dst, src_end);

		cur = min_t(unsigned long, len, src_off_in_folio + 1);
		cur = min(cur, dst_off_in_folio + 1);

		src_addr = folio_address(dst->folios[src_i]) + src_off_in_folio -
			   cur + 1;
		use_memmove = areas_overlap(src_end - cur + 1, dst_end - cur + 1,
					    cur);

		__write_extent_buffer(dst, src_addr, dst_end - cur + 1, cur,
				      use_memmove);

		dst_end -= cur;
		src_end -= cur;
		len -= cur;
	}
}

#define GANG_LOOKUP_SIZE 16
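/*
 * get_next_extent_buffer() returns the first eb on @page whose start is at or
 * after @bytenr, or NULL if the rest of the page holds none.  Sizing note:
 * subpage ebs exist only when nodesize < PAGE_SIZE, so a page holds at most
 * PAGE_SIZE / nodesize buffers (e.g. 64K / 4K == 16), which is what the
 * min_t() below clamps each lookup batch to.
 */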
static struct extent_buffer *get_next_extent_buffer(
		struct btrfs_fs_info *fs_info, struct page *page, u64 bytenr)
{
	struct extent_buffer *gang[GANG_LOOKUP_SIZE];
	struct extent_buffer *found = NULL;
	u64 page_start = page_offset(page);
	u64 cur = page_start;

	ASSERT(in_range(bytenr, page_start, PAGE_SIZE));
	lockdep_assert_held(&fs_info->buffer_lock);

	while (cur < page_start + PAGE_SIZE) {
		int ret;
		int i;

		ret = radix_tree_gang_lookup(&fs_info->buffer_radix,
				(void **)gang, cur >> fs_info->sectorsize_bits,
				min_t(unsigned int, GANG_LOOKUP_SIZE,
				      PAGE_SIZE / fs_info->nodesize));
		if (ret == 0)
			goto out;
		for (i = 0; i < ret; i++) {
			/* Already beyond page end */
			if (gang[i]->start >= page_start + PAGE_SIZE)
				goto out;
			/* Found one */
			if (gang[i]->start >= bytenr) {
				found = gang[i];
				goto out;
			}
		}
		cur = gang[ret - 1]->start + gang[ret - 1]->len;
	}
out:
	return found;
}

static int try_release_subpage_extent_buffer(struct page *page)
{
	struct btrfs_fs_info *fs_info = page_to_fs_info(page);
	u64 cur = page_offset(page);
	const u64 end = page_offset(page) + PAGE_SIZE;
	int ret;

	while (cur < end) {
		struct extent_buffer *eb = NULL;

		/*
		 * Unlike try_release_extent_buffer() which grabs the buffer
		 * via folio private, in the subpage case we rely on the radix
		 * tree, thus we need to ensure its consistency.
		 *
		 * We also want an atomic snapshot of the radix tree, thus go
		 * with spinlock rather than RCU.
		 */
		spin_lock(&fs_info->buffer_lock);
		eb = get_next_extent_buffer(fs_info, page, cur);
		if (!eb) {
			/* No more eb in the page range after or at cur */
			spin_unlock(&fs_info->buffer_lock);
			break;
		}
		cur = eb->start + eb->len;

		/*
		 * The same as try_release_extent_buffer(), to ensure the eb
		 * won't disappear out from under us.
		 */
		spin_lock(&eb->refs_lock);
		if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
			spin_unlock(&eb->refs_lock);
			spin_unlock(&fs_info->buffer_lock);
			break;
		}
		spin_unlock(&fs_info->buffer_lock);

		/*
		 * If tree ref isn't set then we know the ref on this eb is a
		 * real ref, so just return, this eb will likely be freed soon
		 * anyway.
		 */
		if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
			spin_unlock(&eb->refs_lock);
			break;
		}

		/*
		 * Here we don't care about the return value, we will always
		 * check the folio private at the end, and
		 * release_extent_buffer() will release the refs_lock.
		 */
		release_extent_buffer(eb);
	}
	/*
	 * Finally check whether folio private has been cleared: if we released
	 * every eb in the page, it should be clear by now.
	 */
	spin_lock(&page->mapping->i_private_lock);
	if (!folio_test_private(page_folio(page)))
		ret = 1;
	else
		ret = 0;
	spin_unlock(&page->mapping->i_private_lock);
	return ret;
}

int try_release_extent_buffer(struct page *page)
{
	struct folio *folio = page_folio(page);
	struct extent_buffer *eb;

	if (page_to_fs_info(page)->nodesize < PAGE_SIZE)
		return try_release_subpage_extent_buffer(page);

	/*
	 * We need to make sure nobody is changing folio private, as we rely on
	 * folio private as the pointer to the extent buffer.
	 */
	spin_lock(&page->mapping->i_private_lock);
	if (!folio_test_private(folio)) {
		spin_unlock(&page->mapping->i_private_lock);
		return 1;
	}

	eb = folio_get_private(folio);
	BUG_ON(!eb);

	/*
	 * This is a little awful but should be ok: we need to make sure that
	 * the eb doesn't disappear out from under us while we're looking at
	 * this page.
	 */
	spin_lock(&eb->refs_lock);
	if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
		spin_unlock(&eb->refs_lock);
		spin_unlock(&page->mapping->i_private_lock);
		return 0;
	}
	spin_unlock(&page->mapping->i_private_lock);

	/*
	 * If tree ref isn't set then we know the ref on this eb is a real ref,
	 * so just return, this page will likely be freed soon anyway.
	 */
	if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
		spin_unlock(&eb->refs_lock);
		return 0;
	}

	return release_extent_buffer(eb);
}
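
/*
 * Note on the function above: a nonzero return tells the caller the page can
 * be released.  This is reached from the btree address space's release_folio
 * hook when the VM tries to free metadata pages, so returning 0 keeps the
 * page (and its eb) alive.
 */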

/*
 * Attempt to readahead a child block.
 *
 * @fs_info: the fs_info
 * @bytenr: bytenr to read
 * @owner_root: objectid of the root that owns this eb
 * @gen: generation for the uptodate check, can be 0
 * @level: level for the eb
 *
 * Attempt to readahead a tree block at @bytenr.  If @gen is 0 then we do a
 * normal uptodate check of the eb, without checking the generation.  If we
 * have to read the block we will not block on anything.
 */
void btrfs_readahead_tree_block(struct btrfs_fs_info *fs_info,
				u64 bytenr, u64 owner_root, u64 gen, int level)
{
	struct btrfs_tree_parent_check check = {
		.has_first_key = 0,
		.level = level,
		.transid = gen
	};
	struct extent_buffer *eb;
	int ret;

	eb = btrfs_find_create_tree_block(fs_info, bytenr, owner_root, level);
	if (IS_ERR(eb))
		return;

	if (btrfs_buffer_uptodate(eb, gen, 1)) {
		free_extent_buffer(eb);
		return;
	}

	ret = read_extent_buffer_pages(eb, WAIT_NONE, 0, &check);
	if (ret < 0)
		free_extent_buffer_stale(eb);
	else
		free_extent_buffer(eb);
}

/*
 * Readahead a node's child block.
 *
 * @node: parent node we're reading from
 * @slot: slot in the parent node for the child we want to read
 *
 * A helper for btrfs_readahead_tree_block, we simply read the bytenr pointed
 * at the slot in the node provided.
 */
void btrfs_readahead_node_child(struct extent_buffer *node, int slot)
{
	btrfs_readahead_tree_block(node->fs_info,
				   btrfs_node_blockptr(node, slot),
				   btrfs_header_owner(node),
				   btrfs_node_ptr_generation(node, slot),
				   btrfs_header_level(node) - 1);
}
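
/*
 * A minimal sketch (hypothetical helper, not part of btrfs) of how the two
 * readahead functions above combine: walk every slot of an internal node and
 * kick off non-blocking reads for all of its children.
 */
static void __maybe_unused demo_readahead_all_children(struct extent_buffer *node)
{
	int nritems = btrfs_header_nritems(node);
	int slot;

	/* Leaves have no child block pointers to read ahead. */
	if (btrfs_header_level(node) == 0)
		return;

	for (slot = 0; slot < nritems; slot++)
		btrfs_readahead_node_child(node, slot);
}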