fs/nilfs2/page.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Buffer/page management specific to NILFS
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Ryusuke Konishi and Seiji Kihara.
 */

#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/page-flags.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/gfp.h>
#include "nilfs.h"
#include "page.h"
#include "mdt.h"

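/*
 * Buffer state bits that are carried over when buffer contents are
 * duplicated between caches; see nilfs_copy_buffer() and
 * nilfs_copy_folio() below.
 */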
#define NILFS_BUFFER_INHERENT_BITS					\
	(BIT(BH_Uptodate) | BIT(BH_Mapped) | BIT(BH_NILFS_Node) |	\
	 BIT(BH_NILFS_Volatile) | BIT(BH_NILFS_Checked))

static struct buffer_head *__nilfs_get_folio_block(struct folio *folio,
		unsigned long block, pgoff_t index, int blkbits,
		unsigned long b_state)
{
	unsigned long first_block;
	struct buffer_head *bh = folio_buffers(folio);

	if (!bh)
		bh = create_empty_buffers(folio, 1 << blkbits, b_state);

	first_block = (unsigned long)index << (PAGE_SHIFT - blkbits);
	bh = get_nth_bh(bh, block - first_block);

	touch_buffer(bh);
	wait_on_buffer(bh);
	return bh;
}

struct buffer_head *nilfs_grab_buffer(struct inode *inode,
				      struct address_space *mapping,
				      unsigned long blkoff,
				      unsigned long b_state)
{
	int blkbits = inode->i_blkbits;
	pgoff_t index = blkoff >> (PAGE_SHIFT - blkbits);
	struct folio *folio;
	struct buffer_head *bh;

	folio = filemap_grab_folio(mapping, index);
	if (IS_ERR(folio))
		return NULL;

	bh = __nilfs_get_folio_block(folio, blkoff, index, blkbits, b_state);
	if (unlikely(!bh)) {
		folio_unlock(folio);
		folio_put(folio);
		return NULL;
	}
	return bh;
}
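
/*
 * Usage sketch (editorial, not part of the original source): the folio
 * backing the returned buffer head is left locked and referenced, so a
 * caller releases the folio lock and reference in addition to the buffer
 * reference, roughly:
 *
 *	struct buffer_head *bh;
 *
 *	bh = nilfs_grab_buffer(inode, inode->i_mapping, blkoff, 0);
 *	if (unlikely(!bh))
 *		return -ENOMEM;
 *	... initialize bh->b_data and mark the buffer dirty ...
 *	folio_unlock(bh->b_folio);
 *	folio_put(bh->b_folio);
 *	brelse(bh);
 */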

/**
 * nilfs_forget_buffer - discard dirty state
 * @bh: buffer head of the buffer to be discarded
 */
void nilfs_forget_buffer(struct buffer_head *bh)
{
	struct folio *folio = bh->b_folio;
	const unsigned long clear_bits =
		(BIT(BH_Uptodate) | BIT(BH_Dirty) | BIT(BH_Mapped) |
		 BIT(BH_Async_Write) | BIT(BH_NILFS_Volatile) |
		 BIT(BH_NILFS_Checked) | BIT(BH_NILFS_Redirected));

	lock_buffer(bh);
	set_mask_bits(&bh->b_state, clear_bits, 0);
	if (nilfs_folio_buffers_clean(folio))
		__nilfs_clear_folio_dirty(folio);

	bh->b_blocknr = -1;
	folio_clear_uptodate(folio);
	folio_clear_mappedtodisk(folio);
	unlock_buffer(bh);
	brelse(bh);
}
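
/*
 * Editorial note: nilfs_forget_buffer() drops one buffer reference via
 * the final brelse(), so a discard sequence is simply (hypothetical
 * caller):
 *
 *	nilfs_forget_buffer(bh);
 *	bh = NULL;	(must not be dereferenced afterwards)
 */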

/**
 * nilfs_copy_buffer -- copy buffer data and flags
 * @dbh: destination buffer
 * @sbh: source buffer
 */
void nilfs_copy_buffer(struct buffer_head *dbh, struct buffer_head *sbh)
{
	void *kaddr0, *kaddr1;
	unsigned long bits;
	struct page *spage = sbh->b_page, *dpage = dbh->b_page;
	struct buffer_head *bh;

	kaddr0 = kmap_local_page(spage);
	kaddr1 = kmap_local_page(dpage);
	memcpy(kaddr1 + bh_offset(dbh), kaddr0 + bh_offset(sbh), sbh->b_size);
	kunmap_local(kaddr1);
	kunmap_local(kaddr0);

	dbh->b_state = sbh->b_state & NILFS_BUFFER_INHERENT_BITS;
	dbh->b_blocknr = sbh->b_blocknr;
	dbh->b_bdev = sbh->b_bdev;

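	/*
	 * The page-level Uptodate/MappedToDisk flags are only valid if
	 * every buffer in the destination page has the corresponding bit,
	 * so AND the copied buffer's state with that of all other buffers
	 * in the page before updating the page flags below.
	 */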
	bh = dbh;
	bits = sbh->b_state & (BIT(BH_Uptodate) | BIT(BH_Mapped));
	while ((bh = bh->b_this_page) != dbh) {
		lock_buffer(bh);
		bits &= bh->b_state;
		unlock_buffer(bh);
	}
	if (bits & BIT(BH_Uptodate))
		SetPageUptodate(dpage);
	else
		ClearPageUptodate(dpage);
	if (bits & BIT(BH_Mapped))
		SetPageMappedToDisk(dpage);
	else
		ClearPageMappedToDisk(dpage);
}

/**
 * nilfs_folio_buffers_clean - check if a folio has dirty buffers
 * @folio: Folio to be checked.
 *
 * nilfs_folio_buffers_clean() returns false if the folio has dirty buffers.
 * Otherwise, it returns true.
 */
bool nilfs_folio_buffers_clean(struct folio *folio)
{
	struct buffer_head *bh, *head;

	bh = head = folio_buffers(folio);
	do {
		if (buffer_dirty(bh))
			return false;
		bh = bh->b_this_page;
	} while (bh != head);
	return true;
}

void nilfs_folio_bug(struct folio *folio)
{
	struct buffer_head *bh, *head;
	struct address_space *m;
	unsigned long ino;

	if (unlikely(!folio)) {
		printk(KERN_CRIT "NILFS_FOLIO_BUG(NULL)\n");
		return;
	}

	m = folio->mapping;
	ino = m ? m->host->i_ino : 0;

	printk(KERN_CRIT "NILFS_FOLIO_BUG(%p): cnt=%d index#=%llu flags=0x%lx "
	       "mapping=%p ino=%lu\n",
	       folio, folio_ref_count(folio),
	       (unsigned long long)folio->index, folio->flags, m, ino);

	head = folio_buffers(folio);
	if (head) {
		int i = 0;

		bh = head;
		do {
			printk(KERN_CRIT
			       " BH[%d] %p: cnt=%d block#=%llu state=0x%lx\n",
			       i++, bh, atomic_read(&bh->b_count),
			       (unsigned long long)bh->b_blocknr, bh->b_state);
			bh = bh->b_this_page;
		} while (bh != head);
	}
}

/**
 * nilfs_copy_folio -- copy the folio with buffers
 * @dst: destination folio
 * @src: source folio
 * @copy_dirty: whether to copy the dirty state of the folio's buffer heads
 *
 * This function is used for both data folios and btnode folios.  The
 * folio-level dirty flag must be handled by the caller.  The folio must
 * not be under I/O.  Both @src and @dst must be locked.
 */
static void nilfs_copy_folio(struct folio *dst, struct folio *src,
			     bool copy_dirty)
{
	struct buffer_head *dbh, *dbufs, *sbh;
	unsigned long mask = NILFS_BUFFER_INHERENT_BITS;

	BUG_ON(folio_test_writeback(dst));

	sbh = folio_buffers(src);
	dbh = folio_buffers(dst);
	if (!dbh)
		dbh = create_empty_buffers(dst, sbh->b_size, 0);

	if (copy_dirty)
		mask |= BIT(BH_Dirty);

	dbufs = dbh;
	do {
		lock_buffer(sbh);
		lock_buffer(dbh);
		dbh->b_state = sbh->b_state & mask;
		dbh->b_blocknr = sbh->b_blocknr;
		dbh->b_bdev = sbh->b_bdev;
		sbh = sbh->b_this_page;
		dbh = dbh->b_this_page;
	} while (dbh != dbufs);

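	/*
	 * All buffers of both folios remain locked across the data copy,
	 * so neither buffer state nor folio contents can change under us;
	 * they are unlocked only after the folio flags are fixed up below.
	 */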
	folio_copy(dst, src);

	if (folio_test_uptodate(src) && !folio_test_uptodate(dst))
		folio_mark_uptodate(dst);
	else if (!folio_test_uptodate(src) && folio_test_uptodate(dst))
		folio_clear_uptodate(dst);
	if (folio_test_mappedtodisk(src) && !folio_test_mappedtodisk(dst))
		folio_set_mappedtodisk(dst);
	else if (!folio_test_mappedtodisk(src) && folio_test_mappedtodisk(dst))
		folio_clear_mappedtodisk(dst);

	do {
		unlock_buffer(sbh);
		unlock_buffer(dbh);
		sbh = sbh->b_this_page;
		dbh = dbh->b_this_page;
	} while (dbh != dbufs);
}

int nilfs_copy_dirty_pages(struct address_space *dmap,
			   struct address_space *smap)
{
	struct folio_batch fbatch;
	unsigned int i;
	pgoff_t index = 0;
	int err = 0;

	folio_batch_init(&fbatch);
repeat:
	if (!filemap_get_folios_tag(smap, &index, (pgoff_t)-1,
				    PAGECACHE_TAG_DIRTY, &fbatch))
		return 0;

	for (i = 0; i < folio_batch_count(&fbatch); i++) {
		struct folio *folio = fbatch.folios[i], *dfolio;

		folio_lock(folio);
		if (unlikely(!folio_test_dirty(folio)))
			NILFS_FOLIO_BUG(folio, "inconsistent dirty state");

		dfolio = filemap_grab_folio(dmap, folio->index);
		if (IS_ERR(dfolio)) {
			/* No empty folio was added to the page cache */
			folio_unlock(folio);
			err = PTR_ERR(dfolio);
			break;
		}
		if (unlikely(!folio_buffers(folio)))
			NILFS_FOLIO_BUG(folio,
					"found empty page in dat page cache");

		nilfs_copy_folio(dfolio, folio, true);
		filemap_dirty_folio(folio_mapping(dfolio), dfolio);

		folio_unlock(dfolio);
		folio_put(dfolio);
		folio_unlock(folio);
	}
	folio_batch_release(&fbatch);
	cond_resched();

	if (likely(!err))
		goto repeat;
	return err;
}
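
/*
 * Usage sketch (editorial): the shadow-cache save path in mdt.c uses this
 * to duplicate dirty metadata folios into a shadow mapping, roughly:
 *
 *	err = nilfs_copy_dirty_pages(shadow->inode->i_mapping,
 *				     inode->i_mapping);
 *
 * where "shadow" names the shadow map of the metadata inode; treat the
 * exact identifiers as illustrative rather than verbatim.
 */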

/**
 * nilfs_copy_back_pages -- copy back pages to original cache from shadow cache
 * @dmap: destination page cache
 * @smap: source page cache
 *
 * No pages must be added to the cache during this process.
 * This must be ensured by the caller.
 */
void nilfs_copy_back_pages(struct address_space *dmap,
			   struct address_space *smap)
{
	struct folio_batch fbatch;
	unsigned int i, n;
	pgoff_t start = 0;

	folio_batch_init(&fbatch);
repeat:
	n = filemap_get_folios(smap, &start, ~0UL, &fbatch);
	if (!n)
		return;

	for (i = 0; i < folio_batch_count(&fbatch); i++) {
		struct folio *folio = fbatch.folios[i], *dfolio;
		pgoff_t index = folio->index;

		folio_lock(folio);
		dfolio = filemap_lock_folio(dmap, index);
		if (!IS_ERR(dfolio)) {
			/* overwrite existing folio in the destination cache */
			WARN_ON(folio_test_dirty(dfolio));
			nilfs_copy_folio(dfolio, folio, false);
			folio_unlock(dfolio);
			folio_put(dfolio);
			/* Do we not need to remove folio from smap here? */
		} else {
			struct folio *f;

			/* move the folio to the destination cache */
			xa_lock_irq(&smap->i_pages);
			f = __xa_erase(&smap->i_pages, index);
			WARN_ON(folio != f);
			smap->nrpages--;
			xa_unlock_irq(&smap->i_pages);

			xa_lock_irq(&dmap->i_pages);
			f = __xa_store(&dmap->i_pages, index, folio, GFP_NOFS);
			if (unlikely(f)) {
				/* Probably -ENOMEM */
				folio->mapping = NULL;
				folio_put(folio);
			} else {
				folio->mapping = dmap;
				dmap->nrpages++;
				if (folio_test_dirty(folio))
					__xa_set_mark(&dmap->i_pages, index,
						      PAGECACHE_TAG_DIRTY);
			}
			xa_unlock_irq(&dmap->i_pages);
		}
		folio_unlock(folio);
	}
	folio_batch_release(&fbatch);
	cond_resched();

	goto repeat;
}
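
/*
 * Editorial note: on the restore path in mdt.c, folios flow in the
 * opposite direction, back from the shadow cache into the original
 * cache.  A non-NULL return from __xa_store() above means the store did
 * not take effect as expected (most likely an xa_err()-encoded
 * allocation failure, per the "Probably -ENOMEM" comment), which is why
 * the folio reference is dropped in that branch.
 */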

/**
 * nilfs_clear_dirty_pages - discard dirty pages in address space
 * @mapping: address space with dirty pages for discarding
 */
void nilfs_clear_dirty_pages(struct address_space *mapping)
{
	struct folio_batch fbatch;
	unsigned int i;
	pgoff_t index = 0;

	folio_batch_init(&fbatch);

	while (filemap_get_folios_tag(mapping, &index, (pgoff_t)-1,
				      PAGECACHE_TAG_DIRTY, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			struct folio *folio = fbatch.folios[i];

			folio_lock(folio);

			/*
			 * This folio may have been removed from the address
			 * space by truncation or invalidation when the lock
			 * was acquired.  Skip processing in that case.
			 */
			if (likely(folio->mapping == mapping))
				nilfs_clear_folio_dirty(folio);

			folio_unlock(folio);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}
}
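
/*
 * Usage sketch (editorial): discarding every dirty folio of a metadata
 * inode's cache, e.g. when uncommitted changes are rolled back:
 *
 *	nilfs_clear_dirty_pages(inode->i_mapping);
 */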

/**
 * nilfs_clear_folio_dirty - discard dirty folio
 * @folio: dirty folio that will be discarded
 */
void nilfs_clear_folio_dirty(struct folio *folio)
{
	struct buffer_head *bh, *head;

	BUG_ON(!folio_test_locked(folio));

	folio_clear_uptodate(folio);
	folio_clear_mappedtodisk(folio);

	head = folio_buffers(folio);
	if (head) {
		const unsigned long clear_bits =
			(BIT(BH_Uptodate) | BIT(BH_Dirty) | BIT(BH_Mapped) |
			 BIT(BH_Async_Write) | BIT(BH_NILFS_Volatile) |
			 BIT(BH_NILFS_Checked) | BIT(BH_NILFS_Redirected));

		bh = head;
		do {
			lock_buffer(bh);
			set_mask_bits(&bh->b_state, clear_bits, 0);
			unlock_buffer(bh);
		} while (bh = bh->b_this_page, bh != head);
	}

	__nilfs_clear_folio_dirty(folio);
}

unsigned int nilfs_page_count_clean_buffers(struct page *page,
					    unsigned int from, unsigned int to)
{
	unsigned int block_start, block_end;
	struct buffer_head *bh, *head;
	unsigned int nc = 0;

	for (bh = head = page_buffers(page), block_start = 0;
	     bh != head || !block_start;
	     block_start = block_end, bh = bh->b_this_page) {
		block_end = block_start + bh->b_size;
		if (block_end > from && block_start < to && !buffer_dirty(bh))
			nc++;
	}
	return nc;
}
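
/*
 * Worked example (editorial): with a 4 KiB page carrying four 1 KiB
 * buffers, of which only the second is dirty,
 *
 *	nilfs_page_count_clean_buffers(page, 0, 2048)
 *
 * inspects the buffers overlapping bytes [0, 2048), i.e. the first two,
 * and returns 1, since only the first of those is clean.
 */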

/*
 * NILFS2 needs __nilfs_clear_folio_dirty() in the following two cases:
 *
 * 1) For B-tree node folios and data folios of the DAT file, NILFS2 clears
 *    the dirty flag when it copies back folios from the shadow cache to
 *    the original cache.
 *
 * 2) Some B-tree operations like insertion or deletion may dispose buffers
 *    in dirty state, and this needs to cancel the dirty state of their
 *    folios.
 */
void __nilfs_clear_folio_dirty(struct folio *folio)
{
	struct address_space *mapping = folio->mapping;

	if (mapping) {
		xa_lock_irq(&mapping->i_pages);
		if (folio_test_dirty(folio)) {
			__xa_clear_mark(&mapping->i_pages, folio->index,
					PAGECACHE_TAG_DIRTY);
			xa_unlock_irq(&mapping->i_pages);
			folio_clear_dirty_for_io(folio);
			return;
		}
		xa_unlock_irq(&mapping->i_pages);
		return;
	}
	folio_clear_dirty(folio);
}

/**
 * nilfs_find_uncommitted_extent - find extent of uncommitted data
 * @inode: inode
 * @start_blk: start block offset (in)
 * @blkoff: start offset of the found extent (out)
 *
 * This function searches for an extent of buffers marked "delayed" that
 * starts at a block offset equal to or larger than @start_blk.  If such
 * an extent is found, its start offset is stored in @blkoff and its
 * length in blocks is returned.  Otherwise, zero is returned.
 */
unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
					    sector_t start_blk,
					    sector_t *blkoff)
{
	unsigned int i, nr_folios;
	pgoff_t index;
	unsigned long length = 0;
	struct folio_batch fbatch;
	struct folio *folio;

	if (inode->i_mapping->nrpages == 0)
		return 0;

	index = start_blk >> (PAGE_SHIFT - inode->i_blkbits);

	folio_batch_init(&fbatch);

repeat:
	nr_folios = filemap_get_folios_contig(inode->i_mapping, &index,
					      ULONG_MAX, &fbatch);
	if (nr_folios == 0)
		return length;

	i = 0;
	do {
		folio = fbatch.folios[i];

		folio_lock(folio);
		if (folio_buffers(folio)) {
			struct buffer_head *bh, *head;
			sector_t b;

			b = folio->index << (PAGE_SHIFT - inode->i_blkbits);
			bh = head = folio_buffers(folio);
			do {
				if (b < start_blk)
					continue;
				if (buffer_delay(bh)) {
					if (length == 0)
						*blkoff = b;
					length++;
				} else if (length > 0) {
					goto out_locked;
				}
			} while (++b, bh = bh->b_this_page, bh != head);
		} else {
			if (length > 0)
				goto out_locked;
		}
		folio_unlock(folio);

	} while (++i < nr_folios);

	folio_batch_release(&fbatch);
	cond_resched();
	goto repeat;

out_locked:
	folio_unlock(folio);
	folio_batch_release(&fbatch);
	return length;
}
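
/*
 * Usage sketch (editorial): the fiemap path in inode.c uses this helper
 * to report delayed-allocation extents, scanning forward in a loop along
 * these lines (identifiers are illustrative):
 *
 *	sector_t blkoff;
 *	unsigned long n;
 *
 *	n = nilfs_find_uncommitted_extent(inode, start_blk, &blkoff);
 *	if (n > 0)
 *		... report n blocks starting at blkoff as
 *		    FIEMAP_EXTENT_DELALLOC ...
 */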