// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/fs/buffer.c
 *
 *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
 */

/*
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
 *
 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */

#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>
#include <linux/pagevec.h>
#include <linux/sched/mm.h>
#include <trace/events/block.h>
#include <linux/fscrypt.h>
#include <linux/fsverity.h>
#include <linux/sched/isolation.h>

#include "internal.h"

static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
                          enum rw_hint hint, struct writeback_control *wbc);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

inline void touch_buffer(struct buffer_head *bh)
{
        trace_block_touch_buffer(bh);
        folio_mark_accessed(bh->b_folio);
}
EXPORT_SYMBOL(touch_buffer);

void __lock_buffer(struct buffer_head *bh)
{
        wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

void unlock_buffer(struct buffer_head *bh)
{
        clear_bit_unlock(BH_Lock, &bh->b_state);
        smp_mb__after_atomic();
        wake_up_bit(&bh->b_state, BH_Lock);
}
EXPORT_SYMBOL(unlock_buffer);
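/*
 * Illustrative sketch (not part of this file): callers normally pair
 * lock_buffer()/unlock_buffer() around an update of the buffer's data,
 * dirtying it before dropping the lock.  "update_block" is a hypothetical
 * helper:
 *
 *      lock_buffer(bh);
 *      update_block(bh->b_data, bh->b_size);
 *      mark_buffer_dirty(bh);
 *      unlock_buffer(bh);
 */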

/*
 * Returns whether the folio has dirty or writeback buffers.  If all the
 * buffers are unlocked and clean then the folio_test_dirty information
 * is stale.  If any of the buffers are locked, it is assumed they are
 * locked for IO.
 */
void buffer_check_dirty_writeback(struct folio *folio,
                                  bool *dirty, bool *writeback)
{
        struct buffer_head *head, *bh;
        *dirty = false;
        *writeback = false;

        BUG_ON(!folio_test_locked(folio));

        head = folio_buffers(folio);
        if (!head)
                return;

        if (folio_test_writeback(folio))
                *writeback = true;

        bh = head;
        do {
                if (buffer_locked(bh))
                        *writeback = true;

                if (buffer_dirty(bh))
                        *dirty = true;

                bh = bh->b_this_page;
        } while (bh != head);
}

/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head *bh)
{
        wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__wait_on_buffer);
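/*
 * Illustrative sketch (not part of this file): the usual pattern after
 * submitting I/O is to wait for the buffer and then check the result,
 * as osync_buffers_list() below does:
 *
 *      wait_on_buffer(bh);
 *      if (!buffer_uptodate(bh))
 *              err = -EIO;
 */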

static void buffer_io_error(struct buffer_head *bh, char *msg)
{
        if (!test_bit(BH_Quiet, &bh->b_state))
                printk_ratelimited(KERN_ERR
                        "Buffer I/O error on dev %pg, logical block %llu%s\n",
                        bh->b_bdev, (unsigned long long)bh->b_blocknr, msg);
}

/*
 * End-of-IO handler helper function which does not touch the bh after
 * unlocking it.
 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 * a race there is benign: unlock_buffer() only uses the bh's address for
 * hashing after unlocking the buffer, so it doesn't actually touch the bh
 * itself.
 */
static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
{
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                /* This happens, due to failed read-ahead attempts. */
                clear_buffer_uptodate(bh);
        }
        unlock_buffer(bh);
}

/*
 * Default synchronous end-of-IO handler.  Just mark it up-to-date and
 * unlock the buffer.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
        __end_buffer_read_notouch(bh, uptodate);
        put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_read_sync);

void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                buffer_io_error(bh, ", lost sync page write");
                mark_buffer_write_io_error(bh);
                clear_buffer_uptodate(bh);
        }
        unlock_buffer(bh);
        put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_write_sync);
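/*
 * Illustrative sketch (not part of this file): end_buffer_write_sync()
 * is the completion handler for a simple synchronous write, roughly the
 * pattern used by sync_dirty_buffer():
 *
 *      lock_buffer(bh);
 *      if (test_clear_buffer_dirty(bh)) {
 *              get_bh(bh);
 *              bh->b_end_io = end_buffer_write_sync;
 *              submit_bh(REQ_OP_WRITE, bh);
 *              wait_on_buffer(bh);
 *      } else {
 *              unlock_buffer(bh);
 *      }
 */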

/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers.  To get around this,
 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 * i_private_lock.
 *
 * Hack idea: for the blockdev mapping, i_private_lock contention
 * may be quite high.  This code could TryLock the page, and if that
 * succeeds, there is no need to take i_private_lock.
 */
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block)
{
        struct address_space *bd_mapping = bdev->bd_mapping;
        const int blkbits = bd_mapping->host->i_blkbits;
        struct buffer_head *ret = NULL;
        pgoff_t index;
        struct buffer_head *bh;
        struct buffer_head *head;
        struct folio *folio;
        int all_mapped = 1;
        static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1);

        index = ((loff_t)block << blkbits) / PAGE_SIZE;
        folio = __filemap_get_folio(bd_mapping, index, FGP_ACCESSED, 0);
        if (IS_ERR(folio))
                goto out;

        spin_lock(&bd_mapping->i_private_lock);
        head = folio_buffers(folio);
        if (!head)
                goto out_unlock;
        bh = head;
        do {
                if (!buffer_mapped(bh))
                        all_mapped = 0;
                else if (bh->b_blocknr == block) {
                        ret = bh;
                        get_bh(bh);
                        goto out_unlock;
                }
                bh = bh->b_this_page;
        } while (bh != head);

        /* we might be here because some of the buffers on this page are
         * not mapped.  This is due to various races between
         * file io on the block device and getblk.  It gets dealt with
         * elsewhere, don't buffer_error if we had some unmapped buffers
         */
        ratelimit_set_flags(&last_warned, RATELIMIT_MSG_ON_RELEASE);
        if (all_mapped && __ratelimit(&last_warned)) {
                printk("__find_get_block_slow() failed. block=%llu, "
                       "b_blocknr=%llu, b_state=0x%08lx, b_size=%zu, "
                       "device %pg blocksize: %d\n",
                       (unsigned long long)block,
                       (unsigned long long)bh->b_blocknr,
                       bh->b_state, bh->b_size, bdev,
                       1 << blkbits);
        }
out_unlock:
        spin_unlock(&bd_mapping->i_private_lock);
        folio_put(folio);
out:
        return ret;
}

static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
        unsigned long flags;
        struct buffer_head *first;
        struct buffer_head *tmp;
        struct folio *folio;
        int folio_uptodate = 1;

        BUG_ON(!buffer_async_read(bh));

        folio = bh->b_folio;
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                clear_buffer_uptodate(bh);
                buffer_io_error(bh, ", async page read");
        }

        /*
         * Be _very_ careful from here on. Bad things can happen if
         * two buffer heads end IO at almost the same time and both
         * decide that the page is now completely done.
         */
        first = folio_buffers(folio);
        spin_lock_irqsave(&first->b_uptodate_lock, flags);
        clear_buffer_async_read(bh);
        unlock_buffer(bh);
        tmp = bh;
        do {
                if (!buffer_uptodate(tmp))
                        folio_uptodate = 0;
                if (buffer_async_read(tmp)) {
                        BUG_ON(!buffer_locked(tmp));
                        goto still_busy;
                }
                tmp = tmp->b_this_page;
        } while (tmp != bh);
        spin_unlock_irqrestore(&first->b_uptodate_lock, flags);

        folio_end_read(folio, folio_uptodate);
        return;

still_busy:
        spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
        return;
}

struct postprocess_bh_ctx {
        struct work_struct work;
        struct buffer_head *bh;
};

static void verify_bh(struct work_struct *work)
{
        struct postprocess_bh_ctx *ctx =
                container_of(work, struct postprocess_bh_ctx, work);
        struct buffer_head *bh = ctx->bh;
        bool valid;

        valid = fsverity_verify_blocks(bh->b_folio, bh->b_size, bh_offset(bh));
        end_buffer_async_read(bh, valid);
        kfree(ctx);
}

static bool need_fsverity(struct buffer_head *bh)
{
        struct folio *folio = bh->b_folio;
        struct inode *inode = folio->mapping->host;

        return fsverity_active(inode) &&
                /* needed by ext4 */
                folio->index < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
}

static void decrypt_bh(struct work_struct *work)
{
        struct postprocess_bh_ctx *ctx =
                container_of(work, struct postprocess_bh_ctx, work);
        struct buffer_head *bh = ctx->bh;
        int err;

        err = fscrypt_decrypt_pagecache_blocks(bh->b_folio, bh->b_size,
                                               bh_offset(bh));
        if (err == 0 && need_fsverity(bh)) {
                /*
                 * We use different work queues for decryption and for verity
                 * because verity may require reading metadata pages that need
                 * decryption, and we shouldn't recurse to the same workqueue.
                 */
                INIT_WORK(&ctx->work, verify_bh);
                fsverity_enqueue_verify_work(&ctx->work);
                return;
        }
        end_buffer_async_read(bh, err == 0);
        kfree(ctx);
}

/*
 * I/O completion handler for block_read_full_folio() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate)
{
        struct inode *inode = bh->b_folio->mapping->host;
        bool decrypt = fscrypt_inode_uses_fs_layer_crypto(inode);
        bool verify = need_fsverity(bh);

        /* Decrypt (with fscrypt) and/or verify (with fsverity) if needed. */
        if (uptodate && (decrypt || verify)) {
                struct postprocess_bh_ctx *ctx =
                        kmalloc(sizeof(*ctx), GFP_ATOMIC);

                if (ctx) {
                        ctx->bh = bh;
                        if (decrypt) {
                                INIT_WORK(&ctx->work, decrypt_bh);
                                fscrypt_enqueue_decrypt_work(&ctx->work);
                        } else {
                                INIT_WORK(&ctx->work, verify_bh);
                                fsverity_enqueue_verify_work(&ctx->work);
                        }
                        return;
                }
                uptodate = 0;
        }
        end_buffer_async_read(bh, uptodate);
}

/*
 * Completion handler for block_write_full_folio() - folios which are unlocked
 * during I/O, and which have the writeback flag cleared upon I/O completion.
 */
static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
        unsigned long flags;
        struct buffer_head *first;
        struct buffer_head *tmp;
        struct folio *folio;

        BUG_ON(!buffer_async_write(bh));

        folio = bh->b_folio;
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                buffer_io_error(bh, ", lost async page write");
                mark_buffer_write_io_error(bh);
                clear_buffer_uptodate(bh);
        }

        first = folio_buffers(folio);
        spin_lock_irqsave(&first->b_uptodate_lock, flags);

        clear_buffer_async_write(bh);
        unlock_buffer(bh);
        tmp = bh->b_this_page;
        while (tmp != bh) {
                if (buffer_async_write(tmp)) {
                        BUG_ON(!buffer_locked(tmp));
                        goto still_busy;
                }
                tmp = tmp->b_this_page;
        }
        spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
        folio_end_writeback(folio);
        return;

still_busy:
        spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
        return;
}

/*
 * If a page's buffers are under async read-in (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone from starting new async I/O reads against
 * any of the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
{
        bh->b_end_io = end_buffer_async_read_io;
        set_buffer_async_read(bh);
}

static void mark_buffer_async_write_endio(struct buffer_head *bh,
                                          bh_end_io_t *handler)
{
        bh->b_end_io = handler;
        set_buffer_async_write(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
        mark_buffer_async_write_endio(bh, end_buffer_async_write);
}
EXPORT_SYMBOL(mark_buffer_async_write);

/*
 * fs/buffer.c contains helper functions for buffer-backed address space's
 * fsync functions.  A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync().  For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_dirty_inode(), fsync_inode_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->i_private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed.  But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for i_private_list is via the i_private_lock in the
 * address_space which backs the buffers.  Which is different from the
 * address_space against which the buffers are listed.  So for a particular
 * address_space, mapping->i_private_lock does *not* protect
 * mapping->i_private_list!  In fact, mapping->i_private_list will always be
 * protected by the backing blockdev's ->i_private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->i_private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->i_private_list via these
 * utility functions are free to use i_private_lock and i_private_list for
 * whatever they want.  The only requirement is that list_empty(i_private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 * filesystems should do that.  invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 * take an address_space, not an inode.  And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * queued up.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list.  Because if the buffer is on a list,
 * it *must* already be on the right one.  If not, the filesystem is being
 * silly.  This will save a ton of locking.  But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate).  That requires careful auditing of all
 * filesystems (do it inside bforget()).  It could also be done by bringing
 * b_inode back.
 */
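
/*
 * Illustrative sketch (not part of this file): an ext2-style filesystem
 * queues its indirect blocks for fsync with mark_buffer_dirty_inode()
 * and later writes them back via sync_mapping_buffers().  "ind_bh" is a
 * hypothetical indirect-block buffer belonging to the given inode:
 *
 *      mark_buffer_dirty_inode(ind_bh, inode);
 *      ...
 *      err = sync_mapping_buffers(inode->i_mapping);
 */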

/*
 * The buffer's backing address_space's i_private_lock must be held
 */
static void __remove_assoc_queue(struct buffer_head *bh)
{
        list_del_init(&bh->b_assoc_buffers);
        WARN_ON(!bh->b_assoc_map);
        bh->b_assoc_map = NULL;
}

int inode_has_buffers(struct inode *inode)
{
        return !list_empty(&inode->i_data.i_private_list);
}

/*
 * osync is designed to support O_SYNC io.  It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with write_dirty_buffer
 * as you dirty the buffers, and then use osync_inode_buffers to wait for
 * completion.  Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
        struct buffer_head *bh;
        struct list_head *p;
        int err = 0;

        spin_lock(lock);
repeat:
        list_for_each_prev(p, list) {
                bh = BH_ENTRY(p);
                if (buffer_locked(bh)) {
                        get_bh(bh);
                        spin_unlock(lock);
                        wait_on_buffer(bh);
                        if (!buffer_uptodate(bh))
                                err = -EIO;
                        brelse(bh);
                        spin_lock(lock);
                        goto repeat;
                }
        }
        spin_unlock(lock);
        return err;
}

/**
 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->i_private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 */
int sync_mapping_buffers(struct address_space *mapping)
{
        struct address_space *buffer_mapping = mapping->i_private_data;

        if (buffer_mapping == NULL || list_empty(&mapping->i_private_list))
                return 0;

        return fsync_buffers_list(&buffer_mapping->i_private_lock,
                                        &mapping->i_private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);

/**
 * generic_buffers_fsync_noflush - generic buffer fsync implementation
 * for simple filesystems with no inode lock
 *
 * @file:       file to synchronize
 * @start:      start offset in bytes
 * @end:        end offset in bytes (inclusive)
 * @datasync:   only synchronize essential metadata if true
 *
 * This is a generic implementation of the fsync method for simple
 * filesystems which track all non-inode metadata in the buffers list
 * hanging off the address_space structure.
 */
int generic_buffers_fsync_noflush(struct file *file, loff_t start, loff_t end,
                                  bool datasync)
{
        struct inode *inode = file->f_mapping->host;
        int err;
        int ret;

        err = file_write_and_wait_range(file, start, end);
        if (err)
                return err;

        ret = sync_mapping_buffers(inode->i_mapping);
        if (!(inode->i_state & I_DIRTY_ALL))
                goto out;
        if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
                goto out;

        err = sync_inode_metadata(inode, 1);
        if (ret == 0)
                ret = err;

out:
        /* check and advance again to catch errors after syncing out buffers */
        err = file_check_and_advance_wb_err(file);
        if (ret == 0)
                ret = err;
        return ret;
}
EXPORT_SYMBOL(generic_buffers_fsync_noflush);

/**
 * generic_buffers_fsync - generic buffer fsync implementation
 * for simple filesystems with no inode lock
 *
 * @file:       file to synchronize
 * @start:      start offset in bytes
 * @end:        end offset in bytes (inclusive)
 * @datasync:   only synchronize essential metadata if true
 *
 * This is a generic implementation of the fsync method for simple
 * filesystems which track all non-inode metadata in the buffers list
 * hanging off the address_space structure. This also makes sure that
 * a device cache flush operation is called at the end.
 */
int generic_buffers_fsync(struct file *file, loff_t start, loff_t end,
                          bool datasync)
{
        struct inode *inode = file->f_mapping->host;
        int ret;

        ret = generic_buffers_fsync_noflush(file, start, end, datasync);
        if (!ret)
                ret = blkdev_issue_flush(inode->i_sb->s_bdev);
        return ret;
}
EXPORT_SYMBOL(generic_buffers_fsync);
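
/*
 * Illustrative sketch (not part of this file): a simple filesystem can
 * use this helper directly as its fsync method.  "myfs" is hypothetical:
 *
 *      static int myfs_fsync(struct file *file, loff_t start, loff_t end,
 *                            int datasync)
 *      {
 *              return generic_buffers_fsync(file, start, end, datasync);
 *      }
 *
 *      const struct file_operations myfs_file_operations = {
 *              ...
 *              .fsync  = myfs_fsync,
 *      };
 */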

/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
                        sector_t bblock, unsigned blocksize)
{
        struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
        if (bh) {
                if (buffer_dirty(bh))
                        write_dirty_buffer(bh, 0);
                put_bh(bh);
        }
}

void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
        struct address_space *mapping = inode->i_mapping;
        struct address_space *buffer_mapping = bh->b_folio->mapping;

        mark_buffer_dirty(bh);
        if (!mapping->i_private_data) {
                mapping->i_private_data = buffer_mapping;
        } else {
                BUG_ON(mapping->i_private_data != buffer_mapping);
        }
        if (!bh->b_assoc_map) {
                spin_lock(&buffer_mapping->i_private_lock);
                list_move_tail(&bh->b_assoc_buffers,
                                &mapping->i_private_list);
                bh->b_assoc_map = mapping;
                spin_unlock(&buffer_mapping->i_private_lock);
        }
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);

/**
 * block_dirty_folio - Mark a folio as dirty.
 * @mapping: The address space containing this folio.
 * @folio: The folio to mark dirty.
 *
 * Filesystems which use buffer_heads can use this function as their
 * ->dirty_folio implementation.  Some filesystems need to do a little
 * work before calling this function.  Filesystems which do not use
 * buffer_heads should call filemap_dirty_folio() instead.
 *
 * If the folio has buffers, the uptodate buffers are set dirty, to
 * preserve dirty-state coherency between the folio and the buffers.
 * Buffers added to a dirty folio are created dirty.
 *
 * The buffers are dirtied before the folio is dirtied.  There's a small
 * race window in which writeback may see the folio cleanness but not the
 * buffer dirtiness.  That's fine.  If this code were to set the folio
 * dirty before the buffers, writeback could clear the folio dirty flag,
 * see a bunch of clean buffers and we'd end up with dirty buffers/clean
 * folio on the dirty folio list.
 *
 * We use i_private_lock to lock against try_to_free_buffers() while
 * using the folio's buffer list.  This also prevents clean buffers
 * being added to the folio after it was set dirty.
 *
 * Context: May only be called from process context.  Does not sleep.
 * Caller must ensure that @folio cannot be truncated during this call,
 * typically by holding the folio lock or having a page in the folio
 * mapped and holding the page table lock.
 *
 * Return: True if the folio was dirtied; false if it was already dirtied.
 */
bool block_dirty_folio(struct address_space *mapping, struct folio *folio)
{
        struct buffer_head *head;
        bool newly_dirty;

        spin_lock(&mapping->i_private_lock);
        head = folio_buffers(folio);
        if (head) {
                struct buffer_head *bh = head;

                do {
                        set_buffer_dirty(bh);
                        bh = bh->b_this_page;
                } while (bh != head);
        }
        /*
         * Lock out page's memcg migration to keep PageDirty
         * synchronized with per-memcg dirty page counters.
         */
        folio_memcg_lock(folio);
        newly_dirty = !folio_test_set_dirty(folio);
        spin_unlock(&mapping->i_private_lock);

        if (newly_dirty)
                __folio_mark_dirty(folio, mapping, 1);

        folio_memcg_unlock(folio);

        if (newly_dirty)
                __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

        return newly_dirty;
}
EXPORT_SYMBOL(block_dirty_folio);
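
/*
 * Illustrative sketch (not part of this file): buffer-head based
 * filesystems typically plug block_dirty_folio() straight into their
 * address_space_operations.  "myfs" is hypothetical:
 *
 *      const struct address_space_operations myfs_aops = {
 *              .dirty_folio      = block_dirty_folio,
 *              .invalidate_folio = block_invalidate_folio,
 *              ...
 *      };
 */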

/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go.  Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list.  So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
        struct buffer_head *bh;
        struct address_space *mapping;
        int err = 0, err2;
        struct blk_plug plug;
        LIST_HEAD(tmp);

        blk_start_plug(&plug);

        spin_lock(lock);
        while (!list_empty(list)) {
                bh = BH_ENTRY(list->next);
                mapping = bh->b_assoc_map;
                __remove_assoc_queue(bh);
                /* Avoid race with mark_buffer_dirty_inode() which does
                 * a lockless check and we rely on seeing the dirty bit */
                smp_mb();
                if (buffer_dirty(bh) || buffer_locked(bh)) {
                        list_add(&bh->b_assoc_buffers, &tmp);
                        bh->b_assoc_map = mapping;
                        if (buffer_dirty(bh)) {
                                get_bh(bh);
                                spin_unlock(lock);
                                /*
                                 * Ensure any pending I/O completes so that
                                 * write_dirty_buffer() actually writes the
                                 * current contents - it is a noop if I/O is
                                 * still in flight on potentially older
                                 * contents.
                                 */
                                write_dirty_buffer(bh, REQ_SYNC);

                                /*
                                 * Kick off IO for the previous mapping. Note
                                 * that we will not run the very last mapping,
                                 * wait_on_buffer() will do that for us
                                 * through sync_buffer().
                                 */
                                brelse(bh);
                                spin_lock(lock);
                        }
                }
        }

        spin_unlock(lock);
        blk_finish_plug(&plug);
        spin_lock(lock);

        while (!list_empty(&tmp)) {
                bh = BH_ENTRY(tmp.prev);
                get_bh(bh);
                mapping = bh->b_assoc_map;
                __remove_assoc_queue(bh);
                /* Avoid race with mark_buffer_dirty_inode() which does
                 * a lockless check and we rely on seeing the dirty bit */
                smp_mb();
                if (buffer_dirty(bh)) {
                        list_add(&bh->b_assoc_buffers,
                                 &mapping->i_private_list);
                        bh->b_assoc_map = mapping;
                }
                spin_unlock(lock);
                wait_on_buffer(bh);
                if (!buffer_uptodate(bh))
                        err = -EIO;
                brelse(bh);
                spin_lock(lock);
        }

        spin_unlock(lock);
        err2 = osync_buffers_list(lock, list);
        if (err)
                return err;
        else
                return err2;
}

/*
 * Invalidate any and all dirty buffers on a given inode.  We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync().  Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's i_private_lock.  Which
 * assumes that all the buffers are against the blockdev.  Not true
 * for reiserfs.
 */
void invalidate_inode_buffers(struct inode *inode)
{
        if (inode_has_buffers(inode)) {
                struct address_space *mapping = &inode->i_data;
                struct list_head *list = &mapping->i_private_list;
                struct address_space *buffer_mapping = mapping->i_private_data;

                spin_lock(&buffer_mapping->i_private_lock);
                while (!list_empty(list))
                        __remove_assoc_queue(BH_ENTRY(list->next));
                spin_unlock(&buffer_mapping->i_private_lock);
        }
}
EXPORT_SYMBOL(invalidate_inode_buffers);

/*
 * Remove any clean buffers from the inode's buffer list.  This is called
 * when we're trying to free the inode itself.  Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
{
        int ret = 1;

        if (inode_has_buffers(inode)) {
                struct address_space *mapping = &inode->i_data;
                struct list_head *list = &mapping->i_private_list;
                struct address_space *buffer_mapping = mapping->i_private_data;

                spin_lock(&buffer_mapping->i_private_lock);
                while (!list_empty(list)) {
                        struct buffer_head *bh = BH_ENTRY(list->next);
                        if (buffer_dirty(bh)) {
                                ret = 0;
                                break;
                        }
                        __remove_assoc_queue(bh);
                }
                spin_unlock(&buffer_mapping->i_private_lock);
        }
        return ret;
}

/*
 * Create the appropriate buffers when given a folio for data area and
 * the size of each buffer.  Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping)
 * which may not fail from ordinary buffer allocations.
 */
struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size,
                                        gfp_t gfp)
{
        struct buffer_head *bh, *head;
        long offset;
        struct mem_cgroup *memcg, *old_memcg;

        /* The folio lock pins the memcg */
        memcg = folio_memcg(folio);
        old_memcg = set_active_memcg(memcg);

        head = NULL;
        offset = folio_size(folio);
        while ((offset -= size) >= 0) {
                bh = alloc_buffer_head(gfp);
                if (!bh)
                        goto no_grow;

                bh->b_this_page = head;
                bh->b_blocknr = -1;
                head = bh;

                bh->b_size = size;

                /* Link the buffer to its folio */
                folio_set_bh(bh, folio, offset);
        }
out:
        set_active_memcg(old_memcg);
        return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
        if (head) {
                do {
                        bh = head;
                        head = head->b_this_page;
                        free_buffer_head(bh);
                } while (head);
        }

        goto out;
}
EXPORT_SYMBOL_GPL(folio_alloc_buffers);

struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size)
{
        gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT;

        return folio_alloc_buffers(page_folio(page), size, gfp);
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);

static inline void link_dev_buffers(struct folio *folio,
                struct buffer_head *head)
{
        struct buffer_head *bh, *tail;

        bh = head;
        do {
                tail = bh;
                bh = bh->b_this_page;
        } while (bh);
        tail->b_this_page = head;
        folio_attach_private(folio, head);
}

static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size)
{
        sector_t retval = ~((sector_t)0);
        loff_t sz = bdev_nr_bytes(bdev);

        if (sz) {
                unsigned int sizebits = blksize_bits(size);
                retval = (sz >> sizebits);
        }
        return retval;
}

/*
 * Initialise the state of a blockdev folio's buffers.
 */
static sector_t folio_init_buffers(struct folio *folio,
                struct block_device *bdev, unsigned size)
{
        struct buffer_head *head = folio_buffers(folio);
        struct buffer_head *bh = head;
        bool uptodate = folio_test_uptodate(folio);
        sector_t block = div_u64(folio_pos(folio), size);
        sector_t end_block = blkdev_max_block(bdev, size);

        do {
                if (!buffer_mapped(bh)) {
                        bh->b_end_io = NULL;
                        bh->b_private = NULL;
                        bh->b_bdev = bdev;
                        bh->b_blocknr = block;
                        if (uptodate)
                                set_buffer_uptodate(bh);
                        if (block < end_block)
                                set_buffer_mapped(bh);
                }
                block++;
                bh = bh->b_this_page;
        } while (bh != head);

        /*
         * Caller needs to validate requested block against end of device.
         */
        return end_block;
}

/*
 * Create the page-cache folio that contains the requested block.
 *
 * This is used purely for blockdev mappings.
 *
 * Returns false if we have a failure which cannot be cured by retrying
 * without sleeping.  Returns true if we succeeded, or the caller should retry.
 */
static bool grow_dev_folio(struct block_device *bdev, sector_t block,
                pgoff_t index, unsigned size, gfp_t gfp)
{
        struct address_space *mapping = bdev->bd_mapping;
        struct folio *folio;
        struct buffer_head *bh;
        sector_t end_block = 0;

        folio = __filemap_get_folio(mapping, index,
                        FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
        if (IS_ERR(folio))
                return false;

        bh = folio_buffers(folio);
        if (bh) {
                if (bh->b_size == size) {
                        end_block = folio_init_buffers(folio, bdev, size);
                        goto unlock;
                }

                /*
                 * Retrying may succeed; for example the folio may finish
                 * writeback, or buffers may be cleaned.  This should not
                 * happen very often; maybe we have old buffers attached to
                 * this blockdev's page cache and we're trying to change
                 * the block size?
                 */
                if (!try_to_free_buffers(folio)) {
                        end_block = ~0ULL;
                        goto unlock;
                }
        }

        bh = folio_alloc_buffers(folio, size, gfp | __GFP_ACCOUNT);
        if (!bh)
                goto unlock;

        /*
         * Link the folio to the buffers and initialise them.  Take the
         * lock to be atomic wrt __find_get_block(), which does not
         * run under the folio lock.
         */
        spin_lock(&mapping->i_private_lock);
        link_dev_buffers(folio, bh);
        end_block = folio_init_buffers(folio, bdev, size);
        spin_unlock(&mapping->i_private_lock);
unlock:
        folio_unlock(folio);
        folio_put(folio);
        return block < end_block;
}

/*
 * Create buffers for the specified block device block's folio.  If
 * that folio was dirty, the buffers are set dirty also.  Returns false
 * if we've hit a permanent error.
 */
static bool grow_buffers(struct block_device *bdev, sector_t block,
                unsigned size, gfp_t gfp)
{
        loff_t pos;

        /*
         * Check for a block which lies outside our maximum possible
         * pagecache index.
         */
        if (check_mul_overflow(block, (sector_t)size, &pos) || pos > MAX_LFS_FILESIZE) {
                printk(KERN_ERR "%s: requested out-of-range block %llu for device %pg\n",
                        __func__, (unsigned long long)block,
                        bdev);
                return false;
        }

        /* Create a folio with the proper size buffers */
        return grow_dev_folio(bdev, block, pos / PAGE_SIZE, size, gfp);
}

static struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block,
             unsigned size, gfp_t gfp)
{
        /* Size must be multiple of hard sectorsize */
        if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
                        (size < 512 || size > PAGE_SIZE))) {
                printk(KERN_ERR "getblk(): invalid block size %d requested\n",
                                        size);
                printk(KERN_ERR "logical block size: %d\n",
                                        bdev_logical_block_size(bdev));

                dump_stack();
                return NULL;
        }

        for (;;) {
                struct buffer_head *bh;

                bh = __find_get_block(bdev, block, size);
                if (bh)
                        return bh;

                if (!grow_buffers(bdev, block, size, gfp))
                        return NULL;
        }
}

/*
 * The relationship between dirty buffers and dirty pages:
 *
 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
 * the page is tagged dirty in the page cache.
 *
 * At all times, the dirtiness of the buffers represents the dirtiness of
 * subsections of the page.  If the page has buffers, the page dirty bit is
 * merely a hint about the true dirty state.
 *
 * When a page is set dirty in its entirety, all its buffers are marked dirty
 * (if the page has buffers).
 *
 * When a buffer is marked dirty, its page is dirtied, but the page's other
 * buffers are not.
 *
 * Also.  When blockdev buffers are explicitly read with bread(), they
 * individually become uptodate.  But their backing page remains not
 * uptodate - even if all of its buffers are uptodate.  A subsequent
 * block_read_full_folio() against that folio will discover all the uptodate
 * buffers, will set the folio uptodate and will perform no I/O.
 */

/**
 * mark_buffer_dirty - mark a buffer_head as needing writeout
 * @bh: the buffer_head to mark dirty
 *
 * mark_buffer_dirty() will set the dirty bit against the buffer, then set
 * its backing page dirty, then tag the page as dirty in the page cache
 * and then attach the address_space's inode to its superblock's dirty
 * inode list.
 *
 * mark_buffer_dirty() is atomic.  It takes bh->b_folio->mapping->i_private_lock,
 * i_pages lock and mapping->host->i_lock.
 */
void mark_buffer_dirty(struct buffer_head *bh)
{
        WARN_ON_ONCE(!buffer_uptodate(bh));

        trace_block_dirty_buffer(bh);

        /*
         * Very *carefully* optimize the it-is-already-dirty case.
         *
         * Don't let the final "is it dirty" escape to before we
         * perhaps modified the buffer.
         */
        if (buffer_dirty(bh)) {
                smp_mb();
                if (buffer_dirty(bh))
                        return;
        }

        if (!test_set_buffer_dirty(bh)) {
                struct folio *folio = bh->b_folio;
                struct address_space *mapping = NULL;

                folio_memcg_lock(folio);
                if (!folio_test_set_dirty(folio)) {
                        mapping = folio->mapping;
                        if (mapping)
                                __folio_mark_dirty(folio, mapping, 0);
                }
                folio_memcg_unlock(folio);
                if (mapping)
                        __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
        }
}
EXPORT_SYMBOL(mark_buffer_dirty);

void mark_buffer_write_io_error(struct buffer_head *bh)
{
        set_buffer_write_io_error(bh);
        /* FIXME: do we need to set this in both places? */
        if (bh->b_folio && bh->b_folio->mapping)
                mapping_set_error(bh->b_folio->mapping, -EIO);
        if (bh->b_assoc_map) {
                mapping_set_error(bh->b_assoc_map, -EIO);
                errseq_set(&bh->b_assoc_map->host->i_sb->s_wb_err, -EIO);
        }
}
EXPORT_SYMBOL(mark_buffer_write_io_error);

/**
 * __brelse - Release a buffer.
 * @bh: The buffer to release.
 *
 * This variant of brelse() can be called if @bh is guaranteed to not be NULL.
 */
void __brelse(struct buffer_head *bh)
{
        if (atomic_read(&bh->b_count)) {
                put_bh(bh);
                return;
        }
        WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
}
EXPORT_SYMBOL(__brelse);

/**
 * __bforget - Discard any dirty data in a buffer.
 * @bh: The buffer to forget.
 *
 * This variant of bforget() can be called if @bh is guaranteed to not
 * be NULL.
 */
void __bforget(struct buffer_head *bh)
{
        clear_buffer_dirty(bh);
        if (bh->b_assoc_map) {
                struct address_space *buffer_mapping = bh->b_folio->mapping;

                spin_lock(&buffer_mapping->i_private_lock);
                list_del_init(&bh->b_assoc_buffers);
                bh->b_assoc_map = NULL;
                spin_unlock(&buffer_mapping->i_private_lock);
        }
        __brelse(bh);
}
EXPORT_SYMBOL(__bforget);

static struct buffer_head *__bread_slow(struct buffer_head *bh)
{
        lock_buffer(bh);
        if (buffer_uptodate(bh)) {
                unlock_buffer(bh);
                return bh;
        } else {
                get_bh(bh);
                bh->b_end_io = end_buffer_read_sync;
                submit_bh(REQ_OP_READ, bh);
                wait_on_buffer(bh);
                if (buffer_uptodate(bh))
                        return bh;
        }
        brelse(bh);
        return NULL;
}
/*
 * Per-cpu buffer LRU implementation, to reduce the cost of __find_get_block().
 * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
 * refcount elevated by one when they're in an LRU.  A buffer can only appear
 * once in a particular CPU's LRU.  A single buffer can be present in multiple
 * CPUs' LRUs at the same time.
 *
 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
 * sb_find_get_block().
 *
 * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
 * a local interrupt disable for that.
 */

#define BH_LRU_SIZE     16

struct bh_lru {
        struct buffer_head *bhs[BH_LRU_SIZE];
};

static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};

#ifdef CONFIG_SMP
#define bh_lru_lock()   local_irq_disable()
#define bh_lru_unlock() local_irq_enable()
#else
#define bh_lru_lock()   preempt_disable()
#define bh_lru_unlock() preempt_enable()
#endif

static inline void check_irqs_on(void)
{
#ifdef irqs_disabled
        BUG_ON(irqs_disabled());
#endif
}

/*
 * Install a buffer_head into this cpu's LRU.  If not already in the LRU, it is
 * inserted at the front, and the buffer_head at the back if any is evicted.
 * Or, if already in the LRU it is moved to the front.
 */
static void bh_lru_install(struct buffer_head *bh)
{
        struct buffer_head *evictee = bh;
        struct bh_lru *b;
        int i;

        check_irqs_on();
        bh_lru_lock();

        /*
         * The refcount of a buffer_head in the bh_lru pins its attached
         * page (i.e., defeats try_to_free_buffers()), which can make page
         * migration fail.  Skip putting the upcoming bh into the bh_lru
         * until migration is done.
         */
        if (lru_cache_disabled() || cpu_is_isolated(smp_processor_id())) {
                bh_lru_unlock();
                return;
        }

        b = this_cpu_ptr(&bh_lrus);
        for (i = 0; i < BH_LRU_SIZE; i++) {
                swap(evictee, b->bhs[i]);
                if (evictee == bh) {
                        bh_lru_unlock();
                        return;
                }
        }

        get_bh(bh);
        bh_lru_unlock();
        brelse(evictee);
}

/*
 * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
 */
static struct buffer_head *
lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
{
        struct buffer_head *ret = NULL;
        unsigned int i;

        check_irqs_on();
        bh_lru_lock();
        if (cpu_is_isolated(smp_processor_id())) {
                bh_lru_unlock();
                return NULL;
        }
        for (i = 0; i < BH_LRU_SIZE; i++) {
                struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);

                if (bh && bh->b_blocknr == block && bh->b_bdev == bdev &&
                    bh->b_size == size) {
                        if (i) {
                                while (i) {
                                        __this_cpu_write(bh_lrus.bhs[i],
                                                __this_cpu_read(bh_lrus.bhs[i - 1]));
                                        i--;
                                }
                                __this_cpu_write(bh_lrus.bhs[0], bh);
                        }
                        get_bh(bh);
                        ret = bh;
                        break;
                }
        }
        bh_lru_unlock();
        return ret;
}

/*
 * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
 * it in the LRU and mark it as accessed.  If it is not present then return
 * NULL.
 */
struct buffer_head *
__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
{
        struct buffer_head *bh = lookup_bh_lru(bdev, block, size);

        if (bh == NULL) {
                /* __find_get_block_slow will mark the page accessed */
                bh = __find_get_block_slow(bdev, block);
                if (bh)
                        bh_lru_install(bh);
        } else
                touch_buffer(bh);

        return bh;
}
EXPORT_SYMBOL(__find_get_block);

/**
 * bdev_getblk - Get a buffer_head in a block device's buffer cache.
 * @bdev: The block device.
 * @block: The block number.
 * @size: The size of buffer_heads for this @bdev.
 * @gfp: The memory allocation flags to use.
 *
 * The returned buffer head has its reference count incremented, but is
 * not locked.  The caller should call brelse() when it has finished
 * with the buffer.  The buffer may not be uptodate.  If needed, the
 * caller can bring it uptodate either by reading it or overwriting it.
 *
 * Return: The buffer head, or NULL if memory could not be allocated.
 */
struct buffer_head *bdev_getblk(struct block_device *bdev, sector_t block,
                unsigned size, gfp_t gfp)
{
        struct buffer_head *bh = __find_get_block(bdev, block, size);

        might_alloc(gfp);
        if (bh)
                return bh;

        return __getblk_slow(bdev, block, size, gfp);
}
EXPORT_SYMBOL(bdev_getblk);
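
/*
 * Illustrative sketch (not part of this file): a caller that needs the
 * block's current contents brings the buffer uptodate itself, e.g. with
 * bh_read() from <linux/buffer_head.h>; the gfp flags here are just an
 * example:
 *
 *      bh = bdev_getblk(bdev, block, size, GFP_NOFS | __GFP_MOVABLE);
 *      if (bh && !buffer_uptodate(bh) && bh_read(bh, 0) < 0) {
 *              brelse(bh);
 *              bh = NULL;
 *      }
 */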

/*
 * Do async read-ahead on a buffer.
 */
void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
{
        struct buffer_head *bh = bdev_getblk(bdev, block, size,
                        GFP_NOWAIT | __GFP_MOVABLE);

        if (likely(bh)) {
                bh_readahead(bh, REQ_RAHEAD);
                brelse(bh);
        }
}
EXPORT_SYMBOL(__breadahead);
1455
1456 /**
1457  * __bread_gfp() - Read a block.
1458  * @bdev: The block device to read from.
1459  * @block: Block number in units of block size.
1460  * @size: The block size of this device in bytes.
1461  * @gfp: Not page allocation flags; see below.
1462  *
1463  * You are not expected to call this function.  You should use one of
1464  * sb_bread(), sb_bread_unmovable() or __bread().
1465  *
1466  * Read a specified block, and return the buffer head that refers to it.
1467  * If @gfp is 0, the memory will be allocated using the block device's
1468  * default GFP flags.  If @gfp is __GFP_MOVABLE, the memory may be
1469  * allocated from a movable area.  Do not pass in a complete set of
1470  * GFP flags.
1471  *
1472  * The returned buffer head has its refcount increased.  The caller should
1473  * call brelse() when it has finished with the buffer.
1474  *
1475  * Context: May sleep waiting for I/O.
1476  * Return: NULL if the block was unreadable.
1477  */
1478 struct buffer_head *__bread_gfp(struct block_device *bdev, sector_t block,
1479                 unsigned size, gfp_t gfp)
1480 {
1481         struct buffer_head *bh;
1482
1483         gfp |= mapping_gfp_constraint(bdev->bd_mapping, ~__GFP_FS);
1484
1485         /*
1486          * Prefer looping in the allocator rather than here, at least that
1487          * code knows what it's doing.
1488          */
1489         gfp |= __GFP_NOFAIL;
1490
1491         bh = bdev_getblk(bdev, block, size, gfp);
1492
1493         if (likely(bh) && !buffer_uptodate(bh))
1494                 bh = __bread_slow(bh);
1495         return bh;
1496 }
1497 EXPORT_SYMBOL(__bread_gfp);
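
/*
 * Illustrative sketch (not part of this file): most filesystems reach this
 * through sb_bread().  A typical metadata read, with hypothetical names
 * (block_nr, offset, disk_inode), looks like:
 *
 *	struct buffer_head *bh = sb_bread(sb, block_nr);
 *
 *	if (!bh)
 *		return -EIO;
 *	memcpy(&disk_inode, bh->b_data + offset, sizeof(disk_inode));
 *	brelse(bh);
 */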
1498
1499 static void __invalidate_bh_lrus(struct bh_lru *b)
1500 {
1501         int i;
1502
1503         for (i = 0; i < BH_LRU_SIZE; i++) {
1504                 brelse(b->bhs[i]);
1505                 b->bhs[i] = NULL;
1506         }
1507 }
1508 /*
1509  * invalidate_bh_lrus() is called rarely - but not only at unmount.
1510  * This doesn't race because it runs on each CPU either in IRQ context
1511  * or with preemption disabled.
1512  */
1513 static void invalidate_bh_lru(void *arg)
1514 {
1515         struct bh_lru *b = &get_cpu_var(bh_lrus);
1516
1517         __invalidate_bh_lrus(b);
1518         put_cpu_var(bh_lrus);
1519 }
1520
1521 bool has_bh_in_lru(int cpu, void *dummy)
1522 {
1523         struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
1524         int i;
1525
1526         for (i = 0; i < BH_LRU_SIZE; i++) {
1527                 if (b->bhs[i])
1528                         return true;
1529         }
1530
1531         return false;
1532 }
1533
1534 void invalidate_bh_lrus(void)
1535 {
1536         on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1);
1537 }
1538 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
1539
1540 /*
1541  * This is called from workqueue context, so we need bh_lru_lock to close
1542  * the race with preemption/IRQ.
1543  */
1544 void invalidate_bh_lrus_cpu(void)
1545 {
1546         struct bh_lru *b;
1547
1548         bh_lru_lock();
1549         b = this_cpu_ptr(&bh_lrus);
1550         __invalidate_bh_lrus(b);
1551         bh_lru_unlock();
1552 }
1553
1554 void folio_set_bh(struct buffer_head *bh, struct folio *folio,
1555                   unsigned long offset)
1556 {
1557         bh->b_folio = folio;
1558         BUG_ON(offset >= folio_size(folio));
1559         if (folio_test_highmem(folio))
1560                 /*
1561                  * This catches illegal uses and preserves the offset:
1562                  */
1563                 bh->b_data = (char *)(0 + offset);
1564         else
1565                 bh->b_data = folio_address(folio) + offset;
1566 }
1567 EXPORT_SYMBOL(folio_set_bh);
1568
1569 /*
1570  * Called when a buffer is completely truncated out of its folio.
1571  */
1572
1573 /* Bits that are cleared during an invalidate */
1574 #define BUFFER_FLAGS_DISCARD \
1575         (1 << BH_Mapped | 1 << BH_New | 1 << BH_Req | \
1576          1 << BH_Delay | 1 << BH_Unwritten)
1577
1578 static void discard_buffer(struct buffer_head * bh)
1579 {
1580         unsigned long b_state;
1581
1582         lock_buffer(bh);
1583         clear_buffer_dirty(bh);
1584         bh->b_bdev = NULL;
1585         b_state = READ_ONCE(bh->b_state);
1586         do {
1587         } while (!try_cmpxchg(&bh->b_state, &b_state,
1588                               b_state & ~BUFFER_FLAGS_DISCARD));
1589         unlock_buffer(bh);
1590 }
1591
1592 /**
1593  * block_invalidate_folio - Invalidate part or all of a buffer-backed folio.
1594  * @folio: The folio which is affected.
1595  * @offset: start of the range to invalidate
1596  * @length: length of the range to invalidate
1597  *
1598  * block_invalidate_folio() is called when all or part of the folio has been
1599  * invalidated by a truncate operation.
1600  *
1601  * block_invalidate_folio() does not have to release all buffers, but it must
1602  * ensure that no dirty buffer is left outside @offset and that no I/O
1603  * is underway against any of the blocks which are outside the truncation
1604  * point, because the caller is about to free (and possibly reuse) those
1605  * blocks on-disk.
1606  */
1607 void block_invalidate_folio(struct folio *folio, size_t offset, size_t length)
1608 {
1609         struct buffer_head *head, *bh, *next;
1610         size_t curr_off = 0;
1611         size_t stop = length + offset;
1612
1613         BUG_ON(!folio_test_locked(folio));
1614
1615         /*
1616          * Check for overflow
1617          */
1618         BUG_ON(stop > folio_size(folio) || stop < length);
1619
1620         head = folio_buffers(folio);
1621         if (!head)
1622                 return;
1623
1624         bh = head;
1625         do {
1626                 size_t next_off = curr_off + bh->b_size;
1627                 next = bh->b_this_page;
1628
1629                 /*
1630                  * Are we still fully in range?
1631                  */
1632                 if (next_off > stop)
1633                         goto out;
1634
1635                 /*
1636                  * Is this block fully invalidated?
1637                  */
1638                 if (offset <= curr_off)
1639                         discard_buffer(bh);
1640                 curr_off = next_off;
1641                 bh = next;
1642         } while (bh != head);
1643
1644         /*
1645          * We release buffers only if the entire folio is being invalidated.
1646          * The get_block cached value has been unconditionally invalidated,
1647          * so real IO is not possible anymore.
1648          */
1649         if (length == folio_size(folio))
1650                 filemap_release_folio(folio, 0);
1651 out:
1652         return;
1653 }
1654 EXPORT_SYMBOL(block_invalidate_folio);
1655
1656 /*
1657  * We attach and possibly dirty the buffers atomically wrt
1658  * block_dirty_folio() via i_private_lock.  try_to_free_buffers
1659  * is already excluded via the folio lock.
1660  */
1661 struct buffer_head *create_empty_buffers(struct folio *folio,
1662                 unsigned long blocksize, unsigned long b_state)
1663 {
1664         struct buffer_head *bh, *head, *tail;
1665         gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT | __GFP_NOFAIL;
1666
1667         head = folio_alloc_buffers(folio, blocksize, gfp);
1668         bh = head;
1669         do {
1670                 bh->b_state |= b_state;
1671                 tail = bh;
1672                 bh = bh->b_this_page;
1673         } while (bh);
1674         tail->b_this_page = head;
1675
1676         spin_lock(&folio->mapping->i_private_lock);
1677         if (folio_test_uptodate(folio) || folio_test_dirty(folio)) {
1678                 bh = head;
1679                 do {
1680                         if (folio_test_dirty(folio))
1681                                 set_buffer_dirty(bh);
1682                         if (folio_test_uptodate(folio))
1683                                 set_buffer_uptodate(bh);
1684                         bh = bh->b_this_page;
1685                 } while (bh != head);
1686         }
1687         folio_attach_private(folio, head);
1688         spin_unlock(&folio->mapping->i_private_lock);
1689
1690         return head;
1691 }
1692 EXPORT_SYMBOL(create_empty_buffers);
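
/*
 * Illustrative sketch (not part of this file): a filesystem that wants
 * per-block state on a folio attaches buffers once, then walks the
 * circular b_this_page list:
 *
 *	struct buffer_head *head, *bh;
 *
 *	head = folio_buffers(folio);
 *	if (!head)
 *		head = create_empty_buffers(folio, i_blocksize(inode), 0);
 *	bh = head;
 *	do {
 *		... examine or map this block's buffer ...
 *		bh = bh->b_this_page;
 *	} while (bh != head);
 */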
1693
1694 /**
1695  * clean_bdev_aliases: clean a range of buffers in block device
1696  * @bdev: Block device to clean buffers in
1697  * @block: Start of a range of blocks to clean
1698  * @len: Number of blocks to clean
1699  *
1700  * We are taking a range of blocks for data and we don't want writeback of any
1701  * buffer-cache aliases starting from the return of this function until the
1702  * moment when something explicitly marks the buffer dirty (hopefully that
1703  * will not happen until we free that block ;-)  We don't even need to mark
1704  * it not-uptodate - nobody can expect anything from a newly allocated buffer
1705  * anyway. We used to use unmap_buffer() for such invalidation, but that was
1706  * wrong. We definitely don't want to mark the alias unmapped, for example - it
1707  * would confuse anyone who might pick it up with bread() afterwards...
1708  *
1709  * Also note that bforget() doesn't lock the buffer.  So there can be
1710  * writeout I/O going on against recently-freed buffers.  We don't wait on that
1711  * I/O in bforget() - it's more efficient to wait on the I/O only if we really
1712  * need to.  That happens here.
1713  */
1714 void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
1715 {
1716         struct address_space *bd_mapping = bdev->bd_mapping;
1717         const int blkbits = bd_mapping->host->i_blkbits;
1718         struct folio_batch fbatch;
1719         pgoff_t index = ((loff_t)block << blkbits) / PAGE_SIZE;
1720         pgoff_t end;
1721         int i, count;
1722         struct buffer_head *bh;
1723         struct buffer_head *head;
1724
1725         end = ((loff_t)(block + len - 1) << blkbits) / PAGE_SIZE;
1726         folio_batch_init(&fbatch);
1727         while (filemap_get_folios(bd_mapping, &index, end, &fbatch)) {
1728                 count = folio_batch_count(&fbatch);
1729                 for (i = 0; i < count; i++) {
1730                         struct folio *folio = fbatch.folios[i];
1731
1732                         if (!folio_buffers(folio))
1733                                 continue;
1734                         /*
1735                          * We use folio lock instead of bd_mapping->i_private_lock
1736                          * to pin buffers here since we can afford to sleep and
1737                          * it scales better than a global spinlock.
1738                          */
1739                         folio_lock(folio);
1740                         /* Recheck when the folio is locked which pins bhs */
1741                         head = folio_buffers(folio);
1742                         if (!head)
1743                                 goto unlock_page;
1744                         bh = head;
1745                         do {
1746                                 if (!buffer_mapped(bh) || (bh->b_blocknr < block))
1747                                         goto next;
1748                                 if (bh->b_blocknr >= block + len)
1749                                         break;
1750                                 clear_buffer_dirty(bh);
1751                                 wait_on_buffer(bh);
1752                                 clear_buffer_req(bh);
1753 next:
1754                                 bh = bh->b_this_page;
1755                         } while (bh != head);
1756 unlock_page:
1757                         folio_unlock(folio);
1758                 }
1759                 folio_batch_release(&fbatch);
1760                 cond_resched();
1761                 /* End of range already reached? */
1762                 if (index > end || !index)
1763                         break;
1764         }
1765 }
1766 EXPORT_SYMBOL(clean_bdev_aliases);
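
/*
 * Illustrative sketch (not part of this file): the common route here is
 * clean_bdev_bh_alias() (which passes a length of one block) after a
 * get_block callback has allocated a block and set BH_New:
 *
 *	err = get_block(inode, block, bh, 1);
 *	if (!err && buffer_new(bh))
 *		clean_bdev_bh_alias(bh);
 */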
1767
1768 static struct buffer_head *folio_create_buffers(struct folio *folio,
1769                                                 struct inode *inode,
1770                                                 unsigned int b_state)
1771 {
1772         struct buffer_head *bh;
1773
1774         BUG_ON(!folio_test_locked(folio));
1775
1776         bh = folio_buffers(folio);
1777         if (!bh)
1778                 bh = create_empty_buffers(folio,
1779                                 1 << READ_ONCE(inode->i_blkbits), b_state);
1780         return bh;
1781 }
1782
1783 /*
1784  * NOTE! All mapped/uptodate combinations are valid:
1785  *
1786  *      Mapped  Uptodate        Meaning
1787  *
1788  *      No      No              "unknown" - must do get_block()
1789  *      No      Yes             "hole" - zero-filled
1790  *      Yes     No              "allocated" - allocated on disk, not read in
1791  *      Yes     Yes             "valid" - allocated and up-to-date in memory.
1792  *
1793  * "Dirty" is valid only with the last case (mapped+uptodate).
1794  */
1795
1796 /*
1797  * While block_write_full_folio is writing back the dirty buffers under
1798  * the page lock, whoever dirtied the buffers may decide to clean them
1799  * again at any time.  We handle that by only looking at the buffer
1800  * state inside lock_buffer().
1801  *
1802  * If block_write_full_folio() is called for regular writeback
1803  * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1804  * locked buffer.  This can only happen if someone has written the buffer
1805  * directly, with submit_bh().  At the address_space level PageWriteback
1806  * prevents this contention from occurring.
1807  *
1808  * If block_write_full_folio() is called with wbc->sync_mode ==
1809  * WB_SYNC_ALL, the writes are posted using REQ_SYNC; this
1810  * causes the writes to be flagged as synchronous writes.
1811  */
1812 int __block_write_full_folio(struct inode *inode, struct folio *folio,
1813                         get_block_t *get_block, struct writeback_control *wbc)
1814 {
1815         int err;
1816         sector_t block;
1817         sector_t last_block;
1818         struct buffer_head *bh, *head;
1819         size_t blocksize;
1820         int nr_underway = 0;
1821         blk_opf_t write_flags = wbc_to_write_flags(wbc);
1822
1823         head = folio_create_buffers(folio, inode,
1824                                     (1 << BH_Dirty) | (1 << BH_Uptodate));
1825
1826         /*
1827          * Be very careful.  We have no exclusion from block_dirty_folio
1828          * here, and the (potentially unmapped) buffers may become dirty at
1829          * any time.  If a buffer becomes dirty here after we've inspected it
1830          * then we just miss that fact, and the folio stays dirty.
1831          *
1832          * Buffers outside i_size may be dirtied by block_dirty_folio;
1833          * handle that here by just cleaning them.
1834          */
1835
1836         bh = head;
1837         blocksize = bh->b_size;
1838
1839         block = div_u64(folio_pos(folio), blocksize);
1840         last_block = div_u64(i_size_read(inode) - 1, blocksize);
1841
1842         /*
1843          * Get all the dirty buffers mapped to disk addresses and
1844          * handle any aliases from the underlying blockdev's mapping.
1845          */
1846         do {
1847                 if (block > last_block) {
1848                         /*
1849                          * Mapped buffers outside i_size will occur, because
1850                          * this folio can be outside i_size when there is a
1851                          * truncate in progress.  The buffer was zeroed by
1852                          * block_write_full_folio().
1853                          */
1856                         clear_buffer_dirty(bh);
1857                         set_buffer_uptodate(bh);
1858                 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1859                            buffer_dirty(bh)) {
1860                         WARN_ON(bh->b_size != blocksize);
1861                         err = get_block(inode, block, bh, 1);
1862                         if (err)
1863                                 goto recover;
1864                         clear_buffer_delay(bh);
1865                         if (buffer_new(bh)) {
1866                                 /* blockdev mappings never come here */
1867                                 clear_buffer_new(bh);
1868                                 clean_bdev_bh_alias(bh);
1869                         }
1870                 }
1871                 bh = bh->b_this_page;
1872                 block++;
1873         } while (bh != head);
1874
1875         do {
1876                 if (!buffer_mapped(bh))
1877                         continue;
1878                 /*
1879                  * If it's a fully non-blocking write attempt and we cannot
1880                  * lock the buffer then redirty the folio.  Note that this can
1881                  * potentially cause a busy-wait loop from writeback threads
1882                  * and kswapd activity, but those code paths have their own
1883                  * higher-level throttling.
1884                  */
1885                 if (wbc->sync_mode != WB_SYNC_NONE) {
1886                         lock_buffer(bh);
1887                 } else if (!trylock_buffer(bh)) {
1888                         folio_redirty_for_writepage(wbc, folio);
1889                         continue;
1890                 }
1891                 if (test_clear_buffer_dirty(bh)) {
1892                         mark_buffer_async_write_endio(bh,
1893                                 end_buffer_async_write);
1894                 } else {
1895                         unlock_buffer(bh);
1896                 }
1897         } while ((bh = bh->b_this_page) != head);
1898
1899         /*
1900          * The folio and its buffers are protected by the writeback flag,
1901          * so we can drop the bh refcounts early.
1902          */
1903         BUG_ON(folio_test_writeback(folio));
1904         folio_start_writeback(folio);
1905
1906         do {
1907                 struct buffer_head *next = bh->b_this_page;
1908                 if (buffer_async_write(bh)) {
1909                         submit_bh_wbc(REQ_OP_WRITE | write_flags, bh,
1910                                       inode->i_write_hint, wbc);
1911                         nr_underway++;
1912                 }
1913                 bh = next;
1914         } while (bh != head);
1915         folio_unlock(folio);
1916
1917         err = 0;
1918 done:
1919         if (nr_underway == 0) {
1920                 /*
1921                  * The folio was marked dirty, but the buffers were
1922                  * clean.  Someone wrote them back by hand with
1923                  * write_dirty_buffer/submit_bh.  A rare case.
1924                  */
1925                 folio_end_writeback(folio);
1926
1927                 /*
1928                  * The folio and buffer_heads can be released at any time from
1929                  * here on.
1930                  */
1931         }
1932         return err;
1933
1934 recover:
1935         /*
1936          * ENOSPC, or some other error.  We may already have added some
1937          * blocks to the file, so we need to write these out to avoid
1938          * exposing stale data.
1939          * The folio is currently locked and not marked for writeback
1940          */
1941         bh = head;
1942         /* Recovery: lock and submit the mapped buffers */
1943         do {
1944                 if (buffer_mapped(bh) && buffer_dirty(bh) &&
1945                     !buffer_delay(bh)) {
1946                         lock_buffer(bh);
1947                         mark_buffer_async_write_endio(bh,
1948                                 end_buffer_async_write);
1949                 } else {
1950                         /*
1951                          * The buffer may have been set dirty during
1952                          * attachment to a dirty folio.
1953                          */
1954                         clear_buffer_dirty(bh);
1955                 }
1956         } while ((bh = bh->b_this_page) != head);
1957         BUG_ON(folio_test_writeback(folio));
1958         mapping_set_error(folio->mapping, err);
1959         folio_start_writeback(folio);
1960         do {
1961                 struct buffer_head *next = bh->b_this_page;
1962                 if (buffer_async_write(bh)) {
1963                         clear_buffer_dirty(bh);
1964                         submit_bh_wbc(REQ_OP_WRITE | write_flags, bh,
1965                                       inode->i_write_hint, wbc);
1966                         nr_underway++;
1967                 }
1968                 bh = next;
1969         } while (bh != head);
1970         folio_unlock(folio);
1971         goto done;
1972 }
1973 EXPORT_SYMBOL(__block_write_full_folio);
1974
1975 /*
1976  * If a folio has any new buffers, zero them out here, and mark them uptodate
1977  * and dirty so they'll be written out (in order to prevent uninitialised
1978  * block data from leaking). And clear the new bit.
1979  */
1980 void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to)
1981 {
1982         size_t block_start, block_end;
1983         struct buffer_head *head, *bh;
1984
1985         BUG_ON(!folio_test_locked(folio));
1986         head = folio_buffers(folio);
1987         if (!head)
1988                 return;
1989
1990         bh = head;
1991         block_start = 0;
1992         do {
1993                 block_end = block_start + bh->b_size;
1994
1995                 if (buffer_new(bh)) {
1996                         if (block_end > from && block_start < to) {
1997                                 if (!folio_test_uptodate(folio)) {
1998                                         size_t start, xend;
1999
2000                                         start = max(from, block_start);
2001                                         xend = min(to, block_end);
2002
2003                                         folio_zero_segment(folio, start, xend);
2004                                         set_buffer_uptodate(bh);
2005                                 }
2006
2007                                 clear_buffer_new(bh);
2008                                 mark_buffer_dirty(bh);
2009                         }
2010                 }
2011
2012                 block_start = block_end;
2013                 bh = bh->b_this_page;
2014         } while (bh != head);
2015 }
2016 EXPORT_SYMBOL(folio_zero_new_buffers);
2017
2018 static int
2019 iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
2020                 const struct iomap *iomap)
2021 {
2022         loff_t offset = (loff_t)block << inode->i_blkbits;
2023
2024         bh->b_bdev = iomap->bdev;
2025
2026         /*
2027          * Block points to the offset in the file we need to map; the iomap
2028          * contains the offset at which the map starts. If the map ends before the
2029          * current block, then do not map the buffer and let the caller
2030          * handle it.
2031          */
2032         if (offset >= iomap->offset + iomap->length)
2033                 return -EIO;
2034
2035         switch (iomap->type) {
2036         case IOMAP_HOLE:
2037                 /*
2038                  * If the buffer is not up to date or beyond the current EOF,
2039                  * we need to mark it as new to ensure sub-block zeroing is
2040                  * executed if necessary.
2041                  */
2042                 if (!buffer_uptodate(bh) ||
2043                     (offset >= i_size_read(inode)))
2044                         set_buffer_new(bh);
2045                 return 0;
2046         case IOMAP_DELALLOC:
2047                 if (!buffer_uptodate(bh) ||
2048                     (offset >= i_size_read(inode)))
2049                         set_buffer_new(bh);
2050                 set_buffer_uptodate(bh);
2051                 set_buffer_mapped(bh);
2052                 set_buffer_delay(bh);
2053                 return 0;
2054         case IOMAP_UNWRITTEN:
2055                 /*
2056                  * For unwritten regions, we always need to ensure that regions
2057                  * in the block we are not writing to are zeroed. Mark the
2058                  * buffer as new to ensure this.
2059                  */
2060                 set_buffer_new(bh);
2061                 set_buffer_unwritten(bh);
2062                 fallthrough;
2063         case IOMAP_MAPPED:
2064                 if ((iomap->flags & IOMAP_F_NEW) ||
2065                     offset >= i_size_read(inode)) {
2066                         /*
2067                          * This can happen if truncating the block device races
2068                          * with the check in the caller as i_size updates on
2069                          * block devices aren't synchronized by i_rwsem for
2070                          * block devices.
2071                          */
2072                         if (S_ISBLK(inode->i_mode))
2073                                 return -EIO;
2074                         set_buffer_new(bh);
2075                 }
2076                 bh->b_blocknr = (iomap->addr + offset - iomap->offset) >>
2077                                 inode->i_blkbits;
2078                 set_buffer_mapped(bh);
2079                 return 0;
2080         default:
2081                 WARN_ON_ONCE(1);
2082                 return -EIO;
2083         }
2084 }
2085
2086 int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
2087                 get_block_t *get_block, const struct iomap *iomap)
2088 {
2089         size_t from = offset_in_folio(folio, pos);
2090         size_t to = from + len;
2091         struct inode *inode = folio->mapping->host;
2092         size_t block_start, block_end;
2093         sector_t block;
2094         int err = 0;
2095         size_t blocksize;
2096         struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
2097
2098         BUG_ON(!folio_test_locked(folio));
2099         BUG_ON(to > folio_size(folio));
2100         BUG_ON(from > to);
2101
2102         head = folio_create_buffers(folio, inode, 0);
2103         blocksize = head->b_size;
2104         block = div_u64(folio_pos(folio), blocksize);
2105
2106         for (bh = head, block_start = 0; bh != head || !block_start;
2107             block++, block_start = block_end, bh = bh->b_this_page) {
2108                 block_end = block_start + blocksize;
2109                 if (block_end <= from || block_start >= to) {
2110                         if (folio_test_uptodate(folio)) {
2111                                 if (!buffer_uptodate(bh))
2112                                         set_buffer_uptodate(bh);
2113                         }
2114                         continue;
2115                 }
2116                 if (buffer_new(bh))
2117                         clear_buffer_new(bh);
2118                 if (!buffer_mapped(bh)) {
2119                         WARN_ON(bh->b_size != blocksize);
2120                         if (get_block)
2121                                 err = get_block(inode, block, bh, 1);
2122                         else
2123                                 err = iomap_to_bh(inode, block, bh, iomap);
2124                         if (err)
2125                                 break;
2126
2127                         if (buffer_new(bh)) {
2128                                 clean_bdev_bh_alias(bh);
2129                                 if (folio_test_uptodate(folio)) {
2130                                         clear_buffer_new(bh);
2131                                         set_buffer_uptodate(bh);
2132                                         mark_buffer_dirty(bh);
2133                                         continue;
2134                                 }
2135                                 if (block_end > to || block_start < from)
2136                                         folio_zero_segments(folio,
2137                                                 to, block_end,
2138                                                 block_start, from);
2139                                 continue;
2140                         }
2141                 }
2142                 if (folio_test_uptodate(folio)) {
2143                         if (!buffer_uptodate(bh))
2144                                 set_buffer_uptodate(bh);
2145                         continue;
2146                 }
2147                 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
2148                     !buffer_unwritten(bh) &&
2149                      (block_start < from || block_end > to)) {
2150                         bh_read_nowait(bh, 0);
2151                         *wait_bh++ = bh;
2152                 }
2153         }
2154         /*
2155          * If we issued read requests - let them complete.
2156          */
2157         while (wait_bh > wait) {
2158                 wait_on_buffer(*--wait_bh);
2159                 if (!buffer_uptodate(*wait_bh))
2160                         err = -EIO;
2161         }
2162         if (unlikely(err))
2163                 folio_zero_new_buffers(folio, from, to);
2164         return err;
2165 }
2166
2167 int __block_write_begin(struct folio *folio, loff_t pos, unsigned len,
2168                 get_block_t *get_block)
2169 {
2170         return __block_write_begin_int(folio, pos, len, get_block, NULL);
2171 }
2172 EXPORT_SYMBOL(__block_write_begin);
2173
2174 static void __block_commit_write(struct folio *folio, size_t from, size_t to)
2175 {
2176         size_t block_start, block_end;
2177         bool partial = false;
2178         unsigned blocksize;
2179         struct buffer_head *bh, *head;
2180
2181         bh = head = folio_buffers(folio);
2182         if (!bh)
2183                 return;
2184         blocksize = bh->b_size;
2185
2186         block_start = 0;
2187         do {
2188                 block_end = block_start + blocksize;
2189                 if (block_end <= from || block_start >= to) {
2190                         if (!buffer_uptodate(bh))
2191                                 partial = true;
2192                 } else {
2193                         set_buffer_uptodate(bh);
2194                         mark_buffer_dirty(bh);
2195                 }
2196                 if (buffer_new(bh))
2197                         clear_buffer_new(bh);
2198
2199                 block_start = block_end;
2200                 bh = bh->b_this_page;
2201         } while (bh != head);
2202
2203         /*
2204          * If this is a partial write which happened to make all buffers
2205          * uptodate then we can optimize away a bogus read_folio() for
2206          * the next read(). Here we 'discover' whether the folio went
2207          * uptodate as a result of this (potentially partial) write.
2208          */
2209         if (!partial)
2210                 folio_mark_uptodate(folio);
2211 }
2212
2213 /*
2214  * block_write_begin takes care of the basic task of block allocation and
2215  * bringing partial write blocks uptodate first.
2216  *
2217  * The filesystem needs to handle block truncation upon failure.
2218  */
2219 int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
2220                 struct folio **foliop, get_block_t *get_block)
2221 {
2222         pgoff_t index = pos >> PAGE_SHIFT;
2223         struct folio *folio;
2224         int status;
2225
2226         folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
2227                         mapping_gfp_mask(mapping));
2228         if (IS_ERR(folio))
2229                 return PTR_ERR(folio);
2230
2231         status = __block_write_begin_int(folio, pos, len, get_block, NULL);
2232         if (unlikely(status)) {
2233                 folio_unlock(folio);
2234                 folio_put(folio);
2235                 folio = NULL;
2236         }
2237
2238         *foliop = folio;
2239         return status;
2240 }
2241 EXPORT_SYMBOL(block_write_begin);
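
/*
 * Illustrative sketch (not part of this file): a minimal ->write_begin
 * built on block_write_begin().  myfs_get_block and myfs_write_failed are
 * hypothetical; the latter stands in for the block truncation on failure
 * mentioned above.
 *
 *	static int myfs_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, struct folio **foliop, void **fsdata)
 *	{
 *		int ret;
 *
 *		ret = block_write_begin(mapping, pos, len, foliop,
 *					myfs_get_block);
 *		if (ret)
 *			myfs_write_failed(mapping, pos + len);
 *		return ret;
 *	}
 */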
2242
2243 int block_write_end(struct file *file, struct address_space *mapping,
2244                         loff_t pos, unsigned len, unsigned copied,
2245                         struct folio *folio, void *fsdata)
2246 {
2247         size_t start = pos - folio_pos(folio);
2248
2249         if (unlikely(copied < len)) {
2250                 /*
2251                  * The buffers that were written will now be uptodate, so
2252                  * we don't have to worry about a read_folio reading them
2253                  * and overwriting a partial write. However if we have
2254                  * encountered a short write and only partially written
2255                  * into a buffer, it will not be marked uptodate, so a
2256                  * read_folio might come in and destroy our partial write.
2257                  *
2258                  * Do the simplest thing, and just treat any short write to a
2259                  * non uptodate folio as a zero-length write, and force the
2260                  * caller to redo the whole thing.
2261                  */
2262                 if (!folio_test_uptodate(folio))
2263                         copied = 0;
2264
2265                 folio_zero_new_buffers(folio, start+copied, start+len);
2266         }
2267         flush_dcache_folio(folio);
2268
2269         /* This could be a short (even 0-length) commit */
2270         __block_commit_write(folio, start, start + copied);
2271
2272         return copied;
2273 }
2274 EXPORT_SYMBOL(block_write_end);
2275
2276 int generic_write_end(struct file *file, struct address_space *mapping,
2277                         loff_t pos, unsigned len, unsigned copied,
2278                         struct folio *folio, void *fsdata)
2279 {
2280         struct inode *inode = mapping->host;
2281         loff_t old_size = inode->i_size;
2282         bool i_size_changed = false;
2283
2284         copied = block_write_end(file, mapping, pos, len, copied, folio, fsdata);
2285
2286         /*
2287          * No need to use i_size_read() here, the i_size cannot change under us
2288          * because we hold i_rwsem.
2289          *
2290          * But it's important to update i_size while still holding folio lock:
2291          * page writeout could otherwise come in and zero beyond i_size.
2292          */
2293         if (pos + copied > inode->i_size) {
2294                 i_size_write(inode, pos + copied);
2295                 i_size_changed = true;
2296         }
2297
2298         folio_unlock(folio);
2299         folio_put(folio);
2300
2301         if (old_size < pos)
2302                 pagecache_isize_extended(inode, old_size, pos);
2303         /*
2304          * Don't mark the inode dirty under page lock. First, it unnecessarily
2305          * makes the holding time of page lock longer. Second, it forces lock
2306          * ordering of page lock and transaction start for journaling
2307          * filesystems.
2308          */
2309         if (i_size_changed)
2310                 mark_inode_dirty(inode);
2311         return copied;
2312 }
2313 EXPORT_SYMBOL(generic_write_end);
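
/*
 * Illustrative sketch (not part of this file): how the helpers in this file
 * typically slot into a buffer-backed filesystem's aops.  The myfs_*
 * entries are hypothetical wrappers around the generic code:
 *
 *	const struct address_space_operations myfs_aops = {
 *		.dirty_folio		= block_dirty_folio,
 *		.invalidate_folio	= block_invalidate_folio,
 *		.read_folio		= myfs_read_folio,
 *		.write_begin		= myfs_write_begin,
 *		.write_end		= generic_write_end,
 *	};
 */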
2314
2315 /*
2316  * block_is_partially_uptodate checks whether buffers within a folio are
2317  * uptodate or not.
2318  *
2319  * Returns true if all buffers which correspond to the specified part
2320  * of the folio are uptodate.
2321  */
2322 bool block_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
2323 {
2324         unsigned block_start, block_end, blocksize;
2325         unsigned to;
2326         struct buffer_head *bh, *head;
2327         bool ret = true;
2328
2329         head = folio_buffers(folio);
2330         if (!head)
2331                 return false;
2332         blocksize = head->b_size;
2333         to = min_t(unsigned, folio_size(folio) - from, count);
2334         to = from + to;
2335         if (from < blocksize && to > folio_size(folio) - blocksize)
2336                 return false;
2337
2338         bh = head;
2339         block_start = 0;
2340         do {
2341                 block_end = block_start + blocksize;
2342                 if (block_end > from && block_start < to) {
2343                         if (!buffer_uptodate(bh)) {
2344                                 ret = false;
2345                                 break;
2346                         }
2347                         if (block_end >= to)
2348                                 break;
2349                 }
2350                 block_start = block_end;
2351                 bh = bh->b_this_page;
2352         } while (bh != head);
2353
2354         return ret;
2355 }
2356 EXPORT_SYMBOL(block_is_partially_uptodate);
2357
2358 /*
2359  * Generic "read_folio" function for block devices that have the normal
2360  * get_block functionality. This is most of the block device filesystems.
2361  * Reads the folio asynchronously --- the unlock_buffer() and
2362  * set/clear_buffer_uptodate() functions propagate buffer state into the
2363  * folio once IO has completed.
2364  */
2365 int block_read_full_folio(struct folio *folio, get_block_t *get_block)
2366 {
2367         struct inode *inode = folio->mapping->host;
2368         sector_t iblock, lblock;
2369         struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2370         size_t blocksize;
2371         int nr, i;
2372         int fully_mapped = 1;
2373         bool page_error = false;
2374         loff_t limit = i_size_read(inode);
2375
2376         /* fs-verity may store Merkle tree data past i_size; ext4 needs this. */
2377         if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode))
2378                 limit = inode->i_sb->s_maxbytes;
2379
2380         VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
2381
2382         head = folio_create_buffers(folio, inode, 0);
2383         blocksize = head->b_size;
2384
2385         iblock = div_u64(folio_pos(folio), blocksize);
2386         lblock = div_u64(limit + blocksize - 1, blocksize);
2387         bh = head;
2388         nr = 0;
2389         i = 0;
2390
2391         do {
2392                 if (buffer_uptodate(bh))
2393                         continue;
2394
2395                 if (!buffer_mapped(bh)) {
2396                         int err = 0;
2397
2398                         fully_mapped = 0;
2399                         if (iblock < lblock) {
2400                                 WARN_ON(bh->b_size != blocksize);
2401                                 err = get_block(inode, iblock, bh, 0);
2402                                 if (err)
2403                                         page_error = true;
2404                         }
2405                         if (!buffer_mapped(bh)) {
2406                                 folio_zero_range(folio, i * blocksize,
2407                                                 blocksize);
2408                                 if (!err)
2409                                         set_buffer_uptodate(bh);
2410                                 continue;
2411                         }
2412                         /*
2413                          * get_block() might have updated the buffer
2414                          * synchronously
2415                          */
2416                         if (buffer_uptodate(bh))
2417                                 continue;
2418                 }
2419                 arr[nr++] = bh;
2420         } while (i++, iblock++, (bh = bh->b_this_page) != head);
2421
2422         if (fully_mapped)
2423                 folio_set_mappedtodisk(folio);
2424
2425         if (!nr) {
2426                 /*
2427                  * All buffers are uptodate or get_block() returned an
2428                  * error when trying to map them - we can finish the read.
2429                  */
2430                 folio_end_read(folio, !page_error);
2431                 return 0;
2432         }
2433
2434         /* Stage two: lock the buffers */
2435         for (i = 0; i < nr; i++) {
2436                 bh = arr[i];
2437                 lock_buffer(bh);
2438                 mark_buffer_async_read(bh);
2439         }
2440
2441         /*
2442          * Stage three: start the IO.  Check for uptodateness
2443          * inside the buffer lock in case another process reading
2444          * the underlying blockdev brought it uptodate (the sct fix).
2445          */
2446         for (i = 0; i < nr; i++) {
2447                 bh = arr[i];
2448                 if (buffer_uptodate(bh))
2449                         end_buffer_async_read(bh, 1);
2450                 else
2451                         submit_bh(REQ_OP_READ, bh);
2452         }
2453         return 0;
2454 }
2455 EXPORT_SYMBOL(block_read_full_folio);
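
/*
 * Illustrative sketch (not part of this file): the usual ->read_folio for
 * such filesystems is a one-line wrapper (myfs_get_block hypothetical):
 *
 *	static int myfs_read_folio(struct file *file, struct folio *folio)
 *	{
 *		return block_read_full_folio(folio, myfs_get_block);
 *	}
 */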
2456
2457 /*
2458  * Utility function for filesystems that need to do work on expanding
2459  * truncates.  Uses filesystem pagecache writes to allow the filesystem to
2460  * deal with the hole.
2461  */
2461 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2462 {
2463         struct address_space *mapping = inode->i_mapping;
2464         const struct address_space_operations *aops = mapping->a_ops;
2465         struct folio *folio;
2466         void *fsdata = NULL;
2467         int err;
2468
2469         err = inode_newsize_ok(inode, size);
2470         if (err)
2471                 goto out;
2472
2473         err = aops->write_begin(NULL, mapping, size, 0, &folio, &fsdata);
2474         if (err)
2475                 goto out;
2476
2477         err = aops->write_end(NULL, mapping, size, 0, 0, folio, fsdata);
2478         BUG_ON(err > 0);
2479
2480 out:
2481         return err;
2482 }
2483 EXPORT_SYMBOL(generic_cont_expand_simple);
2484
2485 static int cont_expand_zero(struct file *file, struct address_space *mapping,
2486                             loff_t pos, loff_t *bytes)
2487 {
2488         struct inode *inode = mapping->host;
2489         const struct address_space_operations *aops = mapping->a_ops;
2490         unsigned int blocksize = i_blocksize(inode);
2491         struct folio *folio;
2492         void *fsdata = NULL;
2493         pgoff_t index, curidx;
2494         loff_t curpos;
2495         unsigned zerofrom, offset, len;
2496         int err = 0;
2497
2498         index = pos >> PAGE_SHIFT;
2499         offset = pos & ~PAGE_MASK;
2500
2501         while (index > (curidx = (curpos = *bytes)>>PAGE_SHIFT)) {
2502                 zerofrom = curpos & ~PAGE_MASK;
2503                 if (zerofrom & (blocksize-1)) {
2504                         *bytes |= (blocksize-1);
2505                         (*bytes)++;
2506                 }
2507                 len = PAGE_SIZE - zerofrom;
2508
2509                 err = aops->write_begin(file, mapping, curpos, len,
2510                                             &folio, &fsdata);
2511                 if (err)
2512                         goto out;
2513                 folio_zero_range(folio, offset_in_folio(folio, curpos), len);
2514                 err = aops->write_end(file, mapping, curpos, len, len,
2515                                                 folio, fsdata);
2516                 if (err < 0)
2517                         goto out;
2518                 BUG_ON(err != len);
2519                 err = 0;
2520
2521                 balance_dirty_pages_ratelimited(mapping);
2522
2523                 if (fatal_signal_pending(current)) {
2524                         err = -EINTR;
2525                         goto out;
2526                 }
2527         }
2528
2529         /* page covers the boundary, find the boundary offset */
2530         if (index == curidx) {
2531                 zerofrom = curpos & ~PAGE_MASK;
2532                 /* No hole to zero: the write starts at or before current EOF */
2533                 if (offset <= zerofrom)
2534                         goto out;
2536                 if (zerofrom & (blocksize-1)) {
2537                         *bytes |= (blocksize-1);
2538                         (*bytes)++;
2539                 }
2540                 len = offset - zerofrom;
2541
2542                 err = aops->write_begin(file, mapping, curpos, len,
2543                                             &folio, &fsdata);
2544                 if (err)
2545                         goto out;
2546                 folio_zero_range(folio, offset_in_folio(folio, curpos), len);
2547                 err = aops->write_end(file, mapping, curpos, len, len,
2548                                                 folio, fsdata);
2549                 if (err < 0)
2550                         goto out;
2551                 BUG_ON(err != len);
2552                 err = 0;
2553         }
2554 out:
2555         return err;
2556 }
2557
2558 /*
2559  * For moronic filesystems that do not allow holes in files.
2560  * We may have to extend the file.
2561  */
2562 int cont_write_begin(struct file *file, struct address_space *mapping,
2563                         loff_t pos, unsigned len,
2564                         struct folio **foliop, void **fsdata,
2565                         get_block_t *get_block, loff_t *bytes)
2566 {
2567         struct inode *inode = mapping->host;
2568         unsigned int blocksize = i_blocksize(inode);
2569         unsigned int zerofrom;
2570         int err;
2571
2572         err = cont_expand_zero(file, mapping, pos, bytes);
2573         if (err)
2574                 return err;
2575
2576         zerofrom = *bytes & ~PAGE_MASK;
2577         if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2578                 *bytes |= (blocksize-1);
2579                 (*bytes)++;
2580         }
2581
2582         return block_write_begin(mapping, pos, len, foliop, get_block);
2583 }
2584 EXPORT_SYMBOL(cont_write_begin);
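
/*
 * Illustrative sketch (not part of this file): such a filesystem passes a
 * pointer to its "bytes initialised so far" marker so the gap up to @pos is
 * zero-filled first.  MYFS_I()->mmu_private and myfs_get_block are
 * hypothetical:
 *
 *	static int myfs_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, struct folio **foliop, void **fsdata)
 *	{
 *		return cont_write_begin(file, mapping, pos, len, foliop,
 *				fsdata, myfs_get_block,
 *				&MYFS_I(mapping->host)->mmu_private);
 *	}
 */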
2585
2586 void block_commit_write(struct page *page, unsigned from, unsigned to)
2587 {
2588         struct folio *folio = page_folio(page);
2589         __block_commit_write(folio, from, to);
2590 }
2591 EXPORT_SYMBOL(block_commit_write);
2592
2593 /*
2594  * block_page_mkwrite() is not allowed to change the file size as it gets
2595  * called from a page fault handler when a page is first dirtied. Hence we must
2596  * be careful to check for EOF conditions here. We set the page up correctly
2597  * for a written page which means we get ENOSPC checking when writing into
2598  * holes and correct delalloc and unwritten extent mapping on filesystems that
2599  * support these features.
2600  *
2601  * We are not allowed to take the i_mutex here so we have to play games to
2602  * protect against truncate races as the page could now be beyond EOF.  Because
2603  * truncate writes the inode size before removing pages, once we have the
2604  * page lock we can determine safely if the page is beyond EOF. If it is not
2605  * beyond EOF, then the page is guaranteed safe against truncation until we
2606  * unlock the page.
2607  *
2608  * Direct callers of this function should protect against filesystem freezing
2609  * using sb_start_pagefault() - sb_end_pagefault() functions.
2610  */
2611 int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2612                          get_block_t get_block)
2613 {
2614         struct folio *folio = page_folio(vmf->page);
2615         struct inode *inode = file_inode(vma->vm_file);
2616         unsigned long end;
2617         loff_t size;
2618         int ret;
2619
2620         folio_lock(folio);
2621         size = i_size_read(inode);
2622         if ((folio->mapping != inode->i_mapping) ||
2623             (folio_pos(folio) >= size)) {
2624                 /* We overload EFAULT to mean page got truncated */
2625                 ret = -EFAULT;
2626                 goto out_unlock;
2627         }
2628
2629         end = folio_size(folio);
2630         /* folio is wholly or partially inside EOF */
2631         if (folio_pos(folio) + end > size)
2632                 end = size - folio_pos(folio);
2633
2634         ret = __block_write_begin_int(folio, 0, end, get_block, NULL);
2635         if (unlikely(ret))
2636                 goto out_unlock;
2637
2638         __block_commit_write(folio, 0, end);
2639
2640         folio_mark_dirty(folio);
2641         folio_wait_stable(folio);
2642         return 0;
2643 out_unlock:
2644         folio_unlock(folio);
2645         return ret;
2646 }
2647 EXPORT_SYMBOL(block_page_mkwrite);
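
/*
 * Illustrative sketch (not part of this file): a direct caller honouring
 * the freeze-protection requirement above (myfs_get_block hypothetical);
 * block_page_mkwrite_return() maps the error to a vm_fault_t:
 *
 *	static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		struct inode *inode = file_inode(vmf->vma->vm_file);
 *		int err;
 *
 *		sb_start_pagefault(inode->i_sb);
 *		file_update_time(vmf->vma->vm_file);
 *		err = block_page_mkwrite(vmf->vma, vmf, myfs_get_block);
 *		sb_end_pagefault(inode->i_sb);
 *		return block_page_mkwrite_return(err);
 *	}
 */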
2648
2649 int block_truncate_page(struct address_space *mapping,
2650                         loff_t from, get_block_t *get_block)
2651 {
2652         pgoff_t index = from >> PAGE_SHIFT;
2653         unsigned blocksize;
2654         sector_t iblock;
2655         size_t offset, length, pos;
2656         struct inode *inode = mapping->host;
2657         struct folio *folio;
2658         struct buffer_head *bh;
2659         int err = 0;
2660
2661         blocksize = i_blocksize(inode);
2662         length = from & (blocksize - 1);
2663
2664         /* Block boundary? Nothing to do */
2665         if (!length)
2666                 return 0;
2667
2668         length = blocksize - length;
2669         iblock = ((loff_t)index * PAGE_SIZE) >> inode->i_blkbits;
2670
2671         folio = filemap_grab_folio(mapping, index);
2672         if (IS_ERR(folio))
2673                 return PTR_ERR(folio);
2674
2675         bh = folio_buffers(folio);
2676         if (!bh)
2677                 bh = create_empty_buffers(folio, blocksize, 0);
2678
2679         /* Find the buffer that contains "offset" */
2680         offset = offset_in_folio(folio, from);
2681         pos = blocksize;
2682         while (offset >= pos) {
2683                 bh = bh->b_this_page;
2684                 iblock++;
2685                 pos += blocksize;
2686         }
2687
2688         if (!buffer_mapped(bh)) {
2689                 WARN_ON(bh->b_size != blocksize);
2690                 err = get_block(inode, iblock, bh, 0);
2691                 if (err)
2692                         goto unlock;
2693                 /* unmapped? It's a hole - nothing to do */
2694                 if (!buffer_mapped(bh))
2695                         goto unlock;
2696         }
2697
2698         /* Ok, it's mapped. Make sure it's up-to-date */
2699         if (folio_test_uptodate(folio))
2700                 set_buffer_uptodate(bh);
2701
2702         if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2703                 err = bh_read(bh, 0);
2704                 /* Uhhuh. Read error. Complain and punt. */
2705                 if (err < 0)
2706                         goto unlock;
2707         }
2708
2709         folio_zero_range(folio, offset, length);
2710         mark_buffer_dirty(bh);
2711
2712 unlock:
2713         folio_unlock(folio);
2714         folio_put(folio);
2715
2716         return err;
2717 }
2718 EXPORT_SYMBOL(block_truncate_page);
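
/*
 * Illustrative sketch (not part of this file): a shrinking truncate
 * typically zeroes the partial tail block before shrinking i_size and
 * freeing blocks (the myfs_* names are hypothetical):
 *
 *	err = block_truncate_page(inode->i_mapping, newsize,
 *				  myfs_get_block);
 *	if (err)
 *		return err;
 *	truncate_setsize(inode, newsize);
 *	myfs_free_blocks_beyond(inode, newsize);
 */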
2719
2720 /*
2721  * The generic ->writepage function for buffer-backed address_spaces
2722  */
2723 int block_write_full_folio(struct folio *folio, struct writeback_control *wbc,
2724                 void *get_block)
2725 {
2726         struct inode * const inode = folio->mapping->host;
2727         loff_t i_size = i_size_read(inode);
2728
2729         /* Is the folio fully inside i_size? */
2730         if (folio_pos(folio) + folio_size(folio) <= i_size)
2731                 return __block_write_full_folio(inode, folio, get_block, wbc);
2732
2733         /* Is the folio fully outside i_size? (truncate in progress) */
2734         if (folio_pos(folio) >= i_size) {
2735                 folio_unlock(folio);
2736                 return 0; /* don't care */
2737         }
2738
2739         /*
2740          * The folio straddles i_size.  It must be zeroed out on each and every
2741          * writepage invocation because it may be mmapped.  "A file is mapped
2742          * in multiples of the page size.  For a file that is not a multiple of
2743          * the page size, the remaining memory is zeroed when mapped, and
2744          * writes to that region are not written out to the file."
2745          */
2746         folio_zero_segment(folio, offset_in_folio(folio, i_size),
2747                         folio_size(folio));
2748         return __block_write_full_folio(inode, folio, get_block, wbc);
2749 }
2750
2751 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2752                             get_block_t *get_block)
2753 {
2754         struct inode *inode = mapping->host;
2755         struct buffer_head tmp = {
2756                 .b_size = i_blocksize(inode),
2757         };
2758
2759         get_block(inode, block, &tmp, 0);
2760         return tmp.b_blocknr;
2761 }
2762 EXPORT_SYMBOL(generic_block_bmap);
2763
2764 static void end_bio_bh_io_sync(struct bio *bio)
2765 {
2766         struct buffer_head *bh = bio->bi_private;
2767
2768         if (unlikely(bio_flagged(bio, BIO_QUIET)))
2769                 set_bit(BH_Quiet, &bh->b_state);
2770
2771         bh->b_end_io(bh, !bio->bi_status);
2772         bio_put(bio);
2773 }
2774
2775 static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
2776                           enum rw_hint write_hint,
2777                           struct writeback_control *wbc)
2778 {
2779         const enum req_op op = opf & REQ_OP_MASK;
2780         struct bio *bio;
2781
2782         BUG_ON(!buffer_locked(bh));
2783         BUG_ON(!buffer_mapped(bh));
2784         BUG_ON(!bh->b_end_io);
2785         BUG_ON(buffer_delay(bh));
2786         BUG_ON(buffer_unwritten(bh));
2787
2788         /*
2789          * Only clear out a write error when rewriting
2790          */
2791         if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE))
2792                 clear_buffer_write_io_error(bh);
2793
2794         if (buffer_meta(bh))
2795                 opf |= REQ_META;
2796         if (buffer_prio(bh))
2797                 opf |= REQ_PRIO;
2798
2799         bio = bio_alloc(bh->b_bdev, 1, opf, GFP_NOIO);
2800
2801         fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
2802
2803         bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2804         bio->bi_write_hint = write_hint;
2805
2806         __bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
2807
2808         bio->bi_end_io = end_bio_bh_io_sync;
2809         bio->bi_private = bh;
2810
2811         /* Take care of bh's that straddle the end of the device */
2812         guard_bio_eod(bio);
2813
2814         if (wbc) {
2815                 wbc_init_bio(wbc, bio);
2816                 wbc_account_cgroup_owner(wbc, bh->b_page, bh->b_size);
2817         }
2818
2819         submit_bio(bio);
2820 }
2821
2822 void submit_bh(blk_opf_t opf, struct buffer_head *bh)
2823 {
2824         submit_bh_wbc(opf, bh, WRITE_LIFE_NOT_SET, NULL);
2825 }
2826 EXPORT_SYMBOL(submit_bh);
2827
2828 void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
2829 {
2830         lock_buffer(bh);
2831         if (!test_clear_buffer_dirty(bh)) {
2832                 unlock_buffer(bh);
2833                 return;
2834         }
2835         bh->b_end_io = end_buffer_write_sync;
2836         get_bh(bh);
2837         submit_bh(REQ_OP_WRITE | op_flags, bh);
2838 }
2839 EXPORT_SYMBOL(write_dirty_buffer);
2840
2841 /*
2842  * For a data-integrity writeout, we need to wait upon any in-progress I/O,
2843  * then start new I/O, and then wait upon it.  The caller must have a ref on
2844  * the buffer_head.
2845  */
2846 int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
2847 {
2848         WARN_ON(atomic_read(&bh->b_count) < 1);
2849         lock_buffer(bh);
2850         if (test_clear_buffer_dirty(bh)) {
2851                 /*
2852                  * The bh should be mapped, but it might not be if the
2853                  * device was hot-removed. Not much we can do but fail the I/O.
2854                  */
2855                 if (!buffer_mapped(bh)) {
2856                         unlock_buffer(bh);
2857                         return -EIO;
2858                 }
2859
2860                 get_bh(bh);
2861                 bh->b_end_io = end_buffer_write_sync;
2862                 submit_bh(REQ_OP_WRITE | op_flags, bh);
2863                 wait_on_buffer(bh);
2864                 if (!buffer_uptodate(bh))
2865                         return -EIO;
2866         } else {
2867                 unlock_buffer(bh);
2868         }
2869         return 0;
2870 }
2871 EXPORT_SYMBOL(__sync_dirty_buffer);
2872
2873 int sync_dirty_buffer(struct buffer_head *bh)
2874 {
2875         return __sync_dirty_buffer(bh, REQ_SYNC);
2876 }
2877 EXPORT_SYMBOL(sync_dirty_buffer);
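
/*
 * Illustrative sketch (not part of this file): the classic pattern for
 * synchronously updating on-disk metadata held in a buffer.  The bh is
 * assumed to come from sb_bread() or similar; sync_dirty_buffer() waits
 * for the write and returns -EIO on failure:
 *
 *	memcpy(bh->b_data + offset, &new_entry, sizeof(new_entry));
 *	mark_buffer_dirty(bh);
 *	err = sync_dirty_buffer(bh);
 *	brelse(bh);
 */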
2878
2879 static inline int buffer_busy(struct buffer_head *bh)
2880 {
2881         return atomic_read(&bh->b_count) |
2882                 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
2883 }
2884
2885 static bool
2886 drop_buffers(struct folio *folio, struct buffer_head **buffers_to_free)
2887 {
2888         struct buffer_head *head = folio_buffers(folio);
2889         struct buffer_head *bh;
2890
2891         bh = head;
2892         do {
2893                 if (buffer_busy(bh))
2894                         goto failed;
2895                 bh = bh->b_this_page;
2896         } while (bh != head);
2897
2898         do {
2899                 struct buffer_head *next = bh->b_this_page;
2900
2901                 if (bh->b_assoc_map)
2902                         __remove_assoc_queue(bh);
2903                 bh = next;
2904         } while (bh != head);
2905         *buffers_to_free = head;
2906         folio_detach_private(folio);
2907         return true;
2908 failed:
2909         return false;
2910 }
2911
2912 /**
2913  * try_to_free_buffers - Release buffers attached to this folio.
2914  * @folio: The folio.
2915  *
2916  * If any buffers are in use (dirty, under writeback, elevated refcount),
2917  * no buffers will be freed.
2918  *
2919  * If the folio is dirty but all the buffers are clean then we need to
2920  * be sure to mark the folio clean as well.  This is because the folio
2921  * may be against a block device, and a later reattachment of buffers
2922  * to a dirty folio will set *all* buffers dirty, which would corrupt
2923  * filesystem data on the same device.
2924  *
2925  * The same applies to regular filesystem folios: if all the buffers are
2926  * clean then we set the folio clean and proceed.  To do that, we require
2927  * total exclusion from block_dirty_folio().  That is obtained with
2928  * i_private_lock.
2929  *
2930  * Exclusion against try_to_free_buffers may be obtained by either
2931  * locking the folio or by holding its mapping's i_private_lock.
2932  *
2933  * Context: Process context.  @folio must be locked.  Will not sleep.
2934  * Return: true if all buffers attached to this folio were freed.
2935  */
2936 bool try_to_free_buffers(struct folio *folio)
2937 {
2938         struct address_space * const mapping = folio->mapping;
2939         struct buffer_head *buffers_to_free = NULL;
2940         bool ret = false;
2941
2942         BUG_ON(!folio_test_locked(folio));
2943         if (folio_test_writeback(folio))
2944                 return false;
2945
2946         if (mapping == NULL) {          /* can this still happen? */
2947                 ret = drop_buffers(folio, &buffers_to_free);
2948                 goto out;
2949         }
2950
2951         spin_lock(&mapping->i_private_lock);
2952         ret = drop_buffers(folio, &buffers_to_free);
2953
2954         /*
2955          * If the filesystem writes its buffers by hand (eg ext3)
2956          * then we can have clean buffers against a dirty folio.  We
2957          * clean the folio here; otherwise the VM will never notice
2958          * that the filesystem did any IO at all.
2959          *
2960          * Also, during truncate, discard_buffer will have marked all
2961          * the folio's buffers clean.  We discover that here and clean
2962          * the folio also.
2963          *
2964          * i_private_lock must be held over this entire operation in order
2965          * to synchronise against block_dirty_folio and prevent the
2966          * dirty bit from being lost.
2967          */
2968         if (ret)
2969                 folio_cancel_dirty(folio);
2970         spin_unlock(&mapping->i_private_lock);
2971 out:
2972         if (buffers_to_free) {
2973                 struct buffer_head *bh = buffers_to_free;
2974
2975                 do {
2976                         struct buffer_head *next = bh->b_this_page;
2977                         free_buffer_head(bh);
2978                         bh = next;
2979                 } while (bh != buffers_to_free);
2980         }
2981         return ret;
2982 }
2983 EXPORT_SYMBOL(try_to_free_buffers);
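/*
 * Illustrative sketch, not part of the original file: how a filesystem's
 * ->release_folio() typically funnels into try_to_free_buffers().  The
 * function name is an assumption; the signature matches
 * struct address_space_operations.
 */
static bool example_release_folio(struct folio *folio, gfp_t gfp)
{
        /* Nothing attached means nothing to free. */
        if (!folio_buffers(folio))
                return true;
        return try_to_free_buffers(folio);
}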
2984
2985 /*
2986  * Buffer-head allocation
2987  */
2988 static struct kmem_cache *bh_cachep __ro_after_init;
2989
2990 /*
2991  * Once the number of bh's in the machine exceeds this level, we start
2992  * stripping them in writeback.
2993  */
2994 static unsigned long max_buffer_heads __ro_after_init;
2995
2996 int buffer_heads_over_limit;
2997
2998 struct bh_accounting {
2999         int nr;                 /* Number of live bh's */
3000         int ratelimit;          /* Limit cacheline bouncing */
3001 };
3002
3003 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3004
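/*
 * Refresh the global over-limit flag from the per-CPU counters, but only
 * on every 4096th call per CPU so that summing across CPUs does not
 * bounce cachelines on every allocation and free.
 */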
3005 static void recalc_bh_state(void)
3006 {
3007         int i;
3008         int tot = 0;
3009
3010         if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096)
3011                 return;
3012         __this_cpu_write(bh_accounting.ratelimit, 0);
3013         for_each_online_cpu(i)
3014                 tot += per_cpu(bh_accounting, i).nr;
3015         buffer_heads_over_limit = (tot > max_buffer_heads);
3016 }
3017
3018 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3019 {
3020         struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
3021         if (ret) {
3022                 INIT_LIST_HEAD(&ret->b_assoc_buffers);
3023                 spin_lock_init(&ret->b_uptodate_lock);
3024                 preempt_disable();
3025                 __this_cpu_inc(bh_accounting.nr);
3026                 recalc_bh_state();
3027                 preempt_enable();
3028         }
3029         return ret;
3030 }
3031 EXPORT_SYMBOL(alloc_buffer_head);
3032
3033 void free_buffer_head(struct buffer_head *bh)
3034 {
3035         BUG_ON(!list_empty(&bh->b_assoc_buffers));
3036         kmem_cache_free(bh_cachep, bh);
3037         preempt_disable();
3038         __this_cpu_dec(bh_accounting.nr);
3039         recalc_bh_state();
3040         preempt_enable();
3041 }
3042 EXPORT_SYMBOL(free_buffer_head);
3043
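/*
 * CPU-hotplug teardown: drop the references held by the dead CPU's bh
 * LRU and fold its live-buffer count into the current CPU's accounting
 * so the global total stays correct.
 */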
3044 static int buffer_exit_cpu_dead(unsigned int cpu)
3045 {
3046         int i;
3047         struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3048
3049         for (i = 0; i < BH_LRU_SIZE; i++) {
3050                 brelse(b->bhs[i]);
3051                 b->bhs[i] = NULL;
3052         }
3053         this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
3054         per_cpu(bh_accounting, cpu).nr = 0;
3055         return 0;
3056 }
3057
3058 /**
3059  * bh_uptodate_or_lock - Test whether the buffer is uptodate
3060  * @bh: struct buffer_head
3061  *
3062  * Return: true if the buffer is up-to-date, or false with the
3063  * buffer locked if it is not.
3064  */
3065 int bh_uptodate_or_lock(struct buffer_head *bh)
3066 {
3067         if (!buffer_uptodate(bh)) {
3068                 lock_buffer(bh);
3069                 if (!buffer_uptodate(bh))
3070                         return 0;
3071                 unlock_buffer(bh);
3072         }
3073         return 1;
3074 }
3075 EXPORT_SYMBOL(bh_uptodate_or_lock);
3076
3077 /**
3078  * __bh_read - Submit read for a locked buffer
3079  * @bh: struct buffer_head
3080  * @op_flags: appending REQ_OP_* flags besides REQ_OP_READ
3081  * @wait: wait until reading finish
3082  *
3083  * Returns zero on success or don't wait, and -EIO on error.
3084  */
3085 int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait)
3086 {
3087         int ret = 0;
3088
3089         BUG_ON(!buffer_locked(bh));
3090
3091         get_bh(bh);
3092         bh->b_end_io = end_buffer_read_sync;
3093         submit_bh(REQ_OP_READ | op_flags, bh);
3094         if (wait) {
3095                 wait_on_buffer(bh);
3096                 if (!buffer_uptodate(bh))
3097                         ret = -EIO;
3098         }
3099         return ret;
3100 }
3101 EXPORT_SYMBOL(__bh_read);
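/*
 * Illustrative sketch, not part of the original file: the usual pairing
 * of bh_uptodate_or_lock() with __bh_read(), a simplified variant of the
 * bh_read() wrapper in buffer_head.h.  The helper name is an assumption.
 */
static int example_read_and_wait(struct buffer_head *bh)
{
        /* Fast path: already up to date, nothing to submit. */
        if (bh_uptodate_or_lock(bh))
                return 0;
        /* Buffer is locked and stale: submit the read and wait for it. */
        return __bh_read(bh, 0, true);
}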
3102
3103 /**
3104  * __bh_read_batch - Submit read for a batch of unlocked buffers
3105  * @nr: number of entries in the buffer batch
3106  * @bhs: the batch of struct buffer_head
3107  * @op_flags: additional REQ_* flags to be ORed with REQ_OP_READ
3108  * @force_lock: if set, wait for the lock on each buffer; otherwise
3109  *              skip any buffer that cannot be locked immediately.
3110  *
3111  * Completion is reported through each buffer's end_io handler.
3112  */
3113 void __bh_read_batch(int nr, struct buffer_head *bhs[],
3114                      blk_opf_t op_flags, bool force_lock)
3115 {
3116         int i;
3117
3118         for (i = 0; i < nr; i++) {
3119                 struct buffer_head *bh = bhs[i];
3120
3121                 if (buffer_uptodate(bh))
3122                         continue;
3123
3124                 if (force_lock)
3125                         lock_buffer(bh);
3126                 else
3127                         if (!trylock_buffer(bh))
3128                                 continue;
3129
3130                 if (buffer_uptodate(bh)) {
3131                         unlock_buffer(bh);
3132                         continue;
3133                 }
3134
3135                 bh->b_end_io = end_buffer_read_sync;
3136                 get_bh(bh);
3137                 submit_bh(REQ_OP_READ | op_flags, bh);
3138         }
3139 }
3140 EXPORT_SYMBOL(__bh_read_batch);
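/*
 * Illustrative sketch, not part of the original file: opportunistic
 * readahead over a batch, as done by the bh_readahead_batch() wrapper in
 * buffer_head.h.  With force_lock false, buffers that are up to date or
 * locked by someone else are simply skipped.
 */
static void example_readahead_batch(struct buffer_head *bhs[], int nr)
{
        __bh_read_batch(nr, bhs, REQ_RAHEAD, false);
}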
3141
3142 void __init buffer_init(void)
3143 {
3144         unsigned long nrpages;
3145         int ret;
3146
3147         bh_cachep = KMEM_CACHE(buffer_head,
3148                                 SLAB_RECLAIM_ACCOUNT|SLAB_PANIC);
3149         /*
3150          * Limit the bh occupancy to 10% of ZONE_NORMAL
3151          */
3152         nrpages = (nr_free_buffer_pages() * 10) / 100;
3153         max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3154         ret = cpuhp_setup_state_nocalls(CPUHP_FS_BUFF_DEAD, "fs/buffer:dead",
3155                                         NULL, buffer_exit_cpu_dead);
3156         WARN_ON(ret < 0);
3157 }
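/*
 * Worked example for the 10% limit above (illustrative figures): with
 * 4 KiB pages and roughly 1 GiB of ZONE_NORMAL, nr_free_buffer_pages()
 * is about 262144, so nrpages is about 26214; assuming roughly 40
 * buffer heads fit in a page, max_buffer_heads comes to about one
 * million before buffer_heads_over_limit trips.
 */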