// SPDX-License-Identifier: GPL-2.0

#include <linux/slab.h>
#include "ctree.h"
#include "subpage.h"

/*
 * Subpage (sectorsize < PAGE_SIZE) support overview:
 *
 * Limitations:
 *
 * - Only support 64K page size for now
 *   This is to make metadata handling easier, as a 64K page ensures that
 *   every nodesize fits inside one page, so we don't need to handle
 *   cases where a tree block crosses several pages.
 *
 * - Only metadata read-write for now
 *   The data read-write part is in development.
 *
 * - Metadata can't cross 64K page boundary
 *   btrfs-progs and kernel have ensured that for a while, thus only ancient
 *   filesystems could have such a problem.  For such a case, do a graceful
 *   rejection.
 *
 * Special behavior:
 *
 * - Metadata
 *   Metadata read is fully supported.
 *   This means reading one tree block will only trigger the read for the
 *   needed range; other unrelated ranges in the same page will not be touched.
 *
 *   Metadata write support is partial.
 *   The writeback is still for the full page, but we will only submit
 *   the dirty extent buffers in the page.
 *
 *   This means, if we have a metadata page like this:
 *
 *	Page offset
 *	0         16K         32K         48K        64K
 *	|/////////|           |///////////|
 *	     \- Tree block A       \- Tree block B
 *
 *   Even if we just want to write back tree block A, we will also write back
 *   tree block B if it's also dirty.
 *
 *   This may cause extra metadata writeback, which results in more COW.
 *
 * Implementation:
 *
 * - Common
 *   Both metadata and data will use a new structure, btrfs_subpage, to
 *   record the status of each sector inside a page.  This provides the extra
 *   granularity needed.
 *
 * - Metadata
 *   Since we have multiple tree blocks inside one page, we can't rely on page
 *   locking anymore, or we will have greatly reduced concurrency or even
 *   deadlocks (hold one tree lock while trying to lock another tree lock in
 *   the same page).
 *
 *   Thus for metadata locking, subpage support relies on io_tree locking only.
 *   This means a slightly higher tree locking latency.
 */

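/*
 * For reference, the btrfs_subpage structure used throughout this file looks
 * roughly like the sketch below.  This is an abridged illustration of the
 * definition in subpage.h, not the authoritative declaration; see the header
 * for the exact layout:
 *
 *	struct btrfs_subpage {
 *		spinlock_t lock;
 *		u16 uptodate_bitmap;
 *		u16 error_bitmap;
 *		u16 dirty_bitmap;
 *		u16 writeback_bitmap;
 *		union {
 *			atomic_t eb_refs;	(metadata pages only)
 *			atomic_t readers;	(data pages only)
 *		};
 *	};
 *
 * One bit per sector: with 4K sectors in a 64K page, each u16 bitmap covers
 * all 16 sectors of the page.
 */
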
int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
			 struct page *page, enum btrfs_subpage_type type)
{
	struct btrfs_subpage *subpage = NULL;
	int ret;

	/*
	 * We have cases like a dummy extent buffer page, which is not mapped
	 * and doesn't need to be locked.
	 */
	if (page->mapping)
		ASSERT(PageLocked(page));
	/* Either not subpage, or the page already has private attached */
	if (fs_info->sectorsize == PAGE_SIZE || PagePrivate(page))
		return 0;

	ret = btrfs_alloc_subpage(fs_info, &subpage, type);
	if (ret < 0)
		return ret;
	attach_page_private(page, subpage);
	return 0;
}

void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info,
			  struct page *page)
{
	struct btrfs_subpage *subpage;

	/* Either not subpage, or already detached */
	if (fs_info->sectorsize == PAGE_SIZE || !PagePrivate(page))
		return;

	subpage = (struct btrfs_subpage *)detach_page_private(page);
	ASSERT(subpage);
	btrfs_free_subpage(subpage);
}

int btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info,
			struct btrfs_subpage **ret,
			enum btrfs_subpage_type type)
{
	if (fs_info->sectorsize == PAGE_SIZE)
		return 0;

	*ret = kzalloc(sizeof(struct btrfs_subpage), GFP_NOFS);
	if (!*ret)
		return -ENOMEM;
	spin_lock_init(&(*ret)->lock);
	if (type == BTRFS_SUBPAGE_METADATA)
		atomic_set(&(*ret)->eb_refs, 0);
	else
		atomic_set(&(*ret)->readers, 0);
	return 0;
}

void btrfs_free_subpage(struct btrfs_subpage *subpage)
{
	kfree(subpage);
}

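/*
 * A hedged sketch of the typical lifecycle (the surrounding steps are
 * illustrative, not taken from this file):
 *
 *	btrfs_attach_subpage(fs_info, page, BTRFS_SUBPAGE_METADATA);
 *	... per-sector bitmap operations on the page ...
 *	btrfs_detach_subpage(fs_info, page);
 *
 * On sectorsize == PAGE_SIZE filesystems all of these helpers are no-ops,
 * so callers don't need to special-case the regular page size.
 */
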
/*
 * Increase the eb_refs of current subpage.
 *
 * This is important for eb allocation, to prevent a race with the freeing of
 * the last eb in the same page.
 * With eb_refs increased before the eb is inserted into the radix tree,
 * detach_extent_buffer_page() won't detach the page private while we're still
 * allocating the extent buffer.
 */
void btrfs_page_inc_eb_refs(const struct btrfs_fs_info *fs_info,
			    struct page *page)
{
	struct btrfs_subpage *subpage;

	if (fs_info->sectorsize == PAGE_SIZE)
		return;

	ASSERT(PagePrivate(page) && page->mapping);
	lockdep_assert_held(&page->mapping->private_lock);

	subpage = (struct btrfs_subpage *)page->private;
	atomic_inc(&subpage->eb_refs);
}

void btrfs_page_dec_eb_refs(const struct btrfs_fs_info *fs_info,
			    struct page *page)
{
	struct btrfs_subpage *subpage;

	if (fs_info->sectorsize == PAGE_SIZE)
		return;

	ASSERT(PagePrivate(page) && page->mapping);
	lockdep_assert_held(&page->mapping->private_lock);

	subpage = (struct btrfs_subpage *)page->private;
	ASSERT(atomic_read(&subpage->eb_refs));
	atomic_dec(&subpage->eb_refs);
}

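/*
 * A hedged sketch of the ordering these two helpers rely on (the
 * allocation-path steps are illustrative):
 *
 *	1) during eb allocation, under mapping->private_lock:
 *		btrfs_page_inc_eb_refs(fs_info, page);
 *	2) insert the eb into the radix tree
 *	3) on eb release:
 *		btrfs_page_dec_eb_refs(fs_info, page);
 *
 * Because the reference is taken before the eb becomes visible in the radix
 * tree, detach_extent_buffer_page() sees a non-zero eb_refs and will not
 * detach the page private while the eb is still being set up.
 */
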
static void btrfs_subpage_assert(const struct btrfs_fs_info *fs_info,
				 struct page *page, u64 start, u32 len)
{
	/* Basic checks */
	ASSERT(PagePrivate(page) && page->private);
	ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
	       IS_ALIGNED(len, fs_info->sectorsize));
	/*
	 * The range check only works for mapped pages, as we can still have
	 * unmapped pages like dummy extent buffer pages.
	 */
	if (page->mapping)
		ASSERT(page_offset(page) <= start &&
		       start + len <= page_offset(page) + PAGE_SIZE);
}

void btrfs_subpage_start_reader(const struct btrfs_fs_info *fs_info,
				struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	const int nbits = len >> fs_info->sectorsize_bits;
	int ret;

	btrfs_subpage_assert(fs_info, page, start, len);

	ret = atomic_add_return(nbits, &subpage->readers);
	ASSERT(ret == nbits);
}

void btrfs_subpage_end_reader(const struct btrfs_fs_info *fs_info,
			      struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	const int nbits = len >> fs_info->sectorsize_bits;

	btrfs_subpage_assert(fs_info, page, start, len);
	ASSERT(atomic_read(&subpage->readers) >= nbits);
	if (atomic_sub_and_test(nbits, &subpage->readers))
		unlock_page(page);
}

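/*
 * A hedged sketch of how the reader count pairs with the page lock in the
 * read path (the surrounding steps are illustrative):
 *
 *	lock_page(page);
 *	btrfs_subpage_start_reader(fs_info, page, start, len);
 *	submit the read bio(s) for the range
 *	then, in the read endio handler:
 *		btrfs_subpage_end_reader(fs_info, page, start, len);
 *
 * The page stays locked until the last reader finishes;
 * btrfs_subpage_end_reader() calls unlock_page() when readers drops to zero.
 */
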
/*
 * Convert the [start, start + len) range into a u16 bitmap
 *
 * For example: if start == page_offset() + 16K, len = 16K, we get 0x00f0.
 */
static u16 btrfs_subpage_calc_bitmap(const struct btrfs_fs_info *fs_info,
				     struct page *page, u64 start, u32 len)
{
	const int bit_start = offset_in_page(start) >> fs_info->sectorsize_bits;
	const int nbits = len >> fs_info->sectorsize_bits;

	btrfs_subpage_assert(fs_info, page, start, len);

	/*
	 * Here nbits can be 16, thus can go beyond u16 range. We make the
	 * first left shift be calculated in unsigned long (at least u32),
	 * then truncate the result to u16.
	 */
	return (u16)(((1UL << nbits) - 1) << bit_start);
}

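/*
 * Working through the example above, assuming a 4K sectorsize on a 64K page
 * (so sectorsize_bits == 12 and 16 sectors per page):
 *
 *	bit_start = 16K >> 12 = 4
 *	nbits     = 16K >> 12 = 4
 *	result    = ((1UL << 4) - 1) << 4 = 0xf << 4 = 0x00f0
 *
 * i.e. bits 4-7 are set, one bit per 4K sector in the range.
 */
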
void btrfs_subpage_set_uptodate(const struct btrfs_fs_info *fs_info,
				struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	const u16 tmp = btrfs_subpage_calc_bitmap(fs_info, page, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	subpage->uptodate_bitmap |= tmp;
	/* The full page is uptodate only when all of its sectors are */
	if (subpage->uptodate_bitmap == U16_MAX)
		SetPageUptodate(page);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_uptodate(const struct btrfs_fs_info *fs_info,
				  struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	const u16 tmp = btrfs_subpage_calc_bitmap(fs_info, page, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	subpage->uptodate_bitmap &= ~tmp;
	/* Clearing any sector means the page is no longer fully uptodate */
	ClearPageUptodate(page);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_set_error(const struct btrfs_fs_info *fs_info,
			     struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	const u16 tmp = btrfs_subpage_calc_bitmap(fs_info, page, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	subpage->error_bitmap |= tmp;
	SetPageError(page);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_error(const struct btrfs_fs_info *fs_info,
			       struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	const u16 tmp = btrfs_subpage_calc_bitmap(fs_info, page, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	subpage->error_bitmap &= ~tmp;
	if (subpage->error_bitmap == 0)
		ClearPageError(page);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_set_dirty(const struct btrfs_fs_info *fs_info,
			     struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	u16 tmp = btrfs_subpage_calc_bitmap(fs_info, page, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	subpage->dirty_bitmap |= tmp;
	spin_unlock_irqrestore(&subpage->lock, flags);
	set_page_dirty(page);
}

/*
 * Extra clear_and_test function for subpage dirty bitmap.
 *
 * Return true if the cleared bits were the last dirty bits in the
 * dirty_bitmap; return false otherwise.
 *
 * NOTE: Callers should manually clear page dirty in the true case, as we
 * have extra handling for tree blocks.
 */
bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info,
					struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	u16 tmp = btrfs_subpage_calc_bitmap(fs_info, page, start, len);
	unsigned long flags;
	bool last = false;

	spin_lock_irqsave(&subpage->lock, flags);
	subpage->dirty_bitmap &= ~tmp;
	if (subpage->dirty_bitmap == 0)
		last = true;
	spin_unlock_irqrestore(&subpage->lock, flags);
	return last;
}

void btrfs_subpage_clear_dirty(const struct btrfs_fs_info *fs_info,
			       struct page *page, u64 start, u32 len)
{
	bool last;

	last = btrfs_subpage_clear_and_test_dirty(fs_info, page, start, len);
	if (last)
		clear_page_dirty_for_io(page);
}

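/*
 * A hedged sketch of the caller pattern the NOTE above has in mind (the
 * tree-block specific step is illustrative):
 *
 *	last = btrfs_subpage_clear_and_test_dirty(fs_info, page, start, len);
 *	if (last) {
 *		extra tree-block specific handling
 *		clear_page_dirty_for_io(page);
 *	}
 *
 * btrfs_subpage_clear_dirty() above is the plain version of this pattern,
 * without the extra handling.
 */
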
void btrfs_subpage_set_writeback(const struct btrfs_fs_info *fs_info,
				 struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	u16 tmp = btrfs_subpage_calc_bitmap(fs_info, page, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	subpage->writeback_bitmap |= tmp;
	set_page_writeback(page);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_writeback(const struct btrfs_fs_info *fs_info,
				   struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	u16 tmp = btrfs_subpage_calc_bitmap(fs_info, page, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	subpage->writeback_bitmap &= ~tmp;
	if (subpage->writeback_bitmap == 0)
		end_page_writeback(page);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

/*
 * Unlike set/clear, which depends on each page's status, all test operations
 * check the bits in the same way.
 */
#define IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(name)				\
bool btrfs_subpage_test_##name(const struct btrfs_fs_info *fs_info,	\
		struct page *page, u64 start, u32 len)			\
{									\
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private; \
	const u16 tmp = btrfs_subpage_calc_bitmap(fs_info, page, start, len); \
	unsigned long flags;						\
	bool ret;							\
									\
	spin_lock_irqsave(&subpage->lock, flags);			\
	ret = ((subpage->name##_bitmap & tmp) == tmp);			\
	spin_unlock_irqrestore(&subpage->lock, flags);			\
	return ret;							\
}

IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(uptodate);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(error);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(dirty);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(writeback);

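/*
 * For illustration, IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(dirty) expands to a
 * function equivalent to:
 *
 *	bool btrfs_subpage_test_dirty(const struct btrfs_fs_info *fs_info,
 *			struct page *page, u64 start, u32 len)
 *	{
 *		...
 *		ret = ((subpage->dirty_bitmap & tmp) == tmp);
 *		...
 *	}
 *
 * i.e. the range is considered dirty only if every sector in it has its bit
 * set in dirty_bitmap.
 */
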
/*
 * Note that, in selftests (extent-io-tests), we can have a NULL fs_info
 * passed in.  We only test sectorsize == PAGE_SIZE cases so far, thus we can
 * fall back to the regular sectorsize branch.
 */
#define IMPLEMENT_BTRFS_PAGE_OPS(name, set_page_func, clear_page_func,	\
				 test_page_func)			\
void btrfs_page_set_##name(const struct btrfs_fs_info *fs_info,	\
		struct page *page, u64 start, u32 len)			\
{									\
	if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE) {	\
		set_page_func(page);					\
		return;							\
	}								\
	btrfs_subpage_set_##name(fs_info, page, start, len);		\
}									\
void btrfs_page_clear_##name(const struct btrfs_fs_info *fs_info,	\
		struct page *page, u64 start, u32 len)			\
{									\
	if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE) {	\
		clear_page_func(page);					\
		return;							\
	}								\
	btrfs_subpage_clear_##name(fs_info, page, start, len);		\
}									\
bool btrfs_page_test_##name(const struct btrfs_fs_info *fs_info,	\
		struct page *page, u64 start, u32 len)			\
{									\
	if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE)	\
		return test_page_func(page);				\
	return btrfs_subpage_test_##name(fs_info, page, start, len);	\
}

IMPLEMENT_BTRFS_PAGE_OPS(uptodate, SetPageUptodate, ClearPageUptodate,
			 PageUptodate);
IMPLEMENT_BTRFS_PAGE_OPS(error, SetPageError, ClearPageError, PageError);
IMPLEMENT_BTRFS_PAGE_OPS(dirty, set_page_dirty, clear_page_dirty_for_io,
			 PageDirty);
IMPLEMENT_BTRFS_PAGE_OPS(writeback, set_page_writeback, end_page_writeback,
			 PageWriteback);
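
/*
 * A hedged usage sketch: callers can use the btrfs_page_* wrappers without
 * checking the sectorsize themselves.  For example (values illustrative):
 *
 *	btrfs_page_set_uptodate(fs_info, page, start, fs_info->sectorsize);
 *
 * On sectorsize == PAGE_SIZE (or a NULL fs_info from selftests) this falls
 * back to SetPageUptodate(page); on subpage filesystems it updates only the
 * sectors covered by [start, start + len).
 */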