1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/fs/buffer.c
4  *
5  *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
6  */
7
8 /*
9  * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
10  *
11  * Removed a lot of unnecessary code and simplified things now that
12  * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
13  *
14  * Speed up hash, lru, and free list operations.  Use gfp() for allocating
15  * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
16  *
17  * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
18  *
19  * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
20  */
21
22 #include <linux/kernel.h>
23 #include <linux/sched/signal.h>
24 #include <linux/syscalls.h>
25 #include <linux/fs.h>
26 #include <linux/iomap.h>
27 #include <linux/mm.h>
28 #include <linux/percpu.h>
29 #include <linux/slab.h>
30 #include <linux/capability.h>
31 #include <linux/blkdev.h>
32 #include <linux/file.h>
33 #include <linux/quotaops.h>
34 #include <linux/highmem.h>
35 #include <linux/export.h>
36 #include <linux/backing-dev.h>
37 #include <linux/writeback.h>
38 #include <linux/hash.h>
39 #include <linux/suspend.h>
40 #include <linux/buffer_head.h>
41 #include <linux/task_io_accounting_ops.h>
42 #include <linux/bio.h>
43 #include <linux/cpu.h>
44 #include <linux/bitops.h>
45 #include <linux/mpage.h>
46 #include <linux/bit_spinlock.h>
47 #include <linux/pagevec.h>
48 #include <linux/sched/mm.h>
49 #include <trace/events/block.h>
50 #include <linux/fscrypt.h>
51 #include <linux/fsverity.h>
52
53 #include "internal.h"
54
55 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
56 static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
57                           struct writeback_control *wbc);
58
59 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
60
61 inline void touch_buffer(struct buffer_head *bh)
62 {
63         trace_block_touch_buffer(bh);
64         folio_mark_accessed(bh->b_folio);
65 }
66 EXPORT_SYMBOL(touch_buffer);
67
68 void __lock_buffer(struct buffer_head *bh)
69 {
70         wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
71 }
72 EXPORT_SYMBOL(__lock_buffer);
73
74 void unlock_buffer(struct buffer_head *bh)
75 {
76         clear_bit_unlock(BH_Lock, &bh->b_state);
77         smp_mb__after_atomic();
78         wake_up_bit(&bh->b_state, BH_Lock);
79 }
80 EXPORT_SYMBOL(unlock_buffer);
81
82 /*
83  * Returns whether the folio has dirty or writeback buffers. If all the buffers
84  * are unlocked and clean then the folio_test_dirty information is stale. If
85  * any of the buffers are locked, it is assumed they are locked for IO.
86  */
87 void buffer_check_dirty_writeback(struct folio *folio,
88                                      bool *dirty, bool *writeback)
89 {
90         struct buffer_head *head, *bh;
91         *dirty = false;
92         *writeback = false;
93
94         BUG_ON(!folio_test_locked(folio));
95
96         head = folio_buffers(folio);
97         if (!head)
98                 return;
99
100         if (folio_test_writeback(folio))
101                 *writeback = true;
102
103         bh = head;
104         do {
105                 if (buffer_locked(bh))
106                         *writeback = true;
107
108                 if (buffer_dirty(bh))
109                         *dirty = true;
110
111                 bh = bh->b_this_page;
112         } while (bh != head);
113 }
114 EXPORT_SYMBOL(buffer_check_dirty_writeback);
115
116 /*
117  * Block until a buffer comes unlocked.  This doesn't stop it
118  * from becoming locked again - you have to lock it yourself
119  * if you want to preserve its state.
120  */
121 void __wait_on_buffer(struct buffer_head * bh)
122 {
123         wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
124 }
125 EXPORT_SYMBOL(__wait_on_buffer);
126
127 static void buffer_io_error(struct buffer_head *bh, char *msg)
128 {
129         if (!test_bit(BH_Quiet, &bh->b_state))
130                 printk_ratelimited(KERN_ERR
131                         "Buffer I/O error on dev %pg, logical block %llu%s\n",
132                         bh->b_bdev, (unsigned long long)bh->b_blocknr, msg);
133 }
134
135 /*
136  * End-of-IO handler helper function which does not touch the bh after
137  * unlocking it.
138  * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
139  * a race there is benign: unlock_buffer() only uses the bh's address for
140  * hashing after unlocking the buffer, so it doesn't actually touch the bh
141  * itself.
142  */
143 static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
144 {
145         if (uptodate) {
146                 set_buffer_uptodate(bh);
147         } else {
148                 /* This happens, due to failed read-ahead attempts. */
149                 clear_buffer_uptodate(bh);
150         }
151         unlock_buffer(bh);
152 }
153
154 /*
155  * Default synchronous end-of-IO handler..  Just mark it up-to-date and
156  * unlock the buffer.
157  */
158 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
159 {
160         __end_buffer_read_notouch(bh, uptodate);
161         put_bh(bh);
162 }
163 EXPORT_SYMBOL(end_buffer_read_sync);
164
165 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
166 {
167         if (uptodate) {
168                 set_buffer_uptodate(bh);
169         } else {
170                 buffer_io_error(bh, ", lost sync page write");
171                 mark_buffer_write_io_error(bh);
172                 clear_buffer_uptodate(bh);
173         }
174         unlock_buffer(bh);
175         put_bh(bh);
176 }
177 EXPORT_SYMBOL(end_buffer_write_sync);
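/*
 * Illustrative sketch (not part of the original file): the pattern these
 * synchronous completion handlers support.  A caller locks the buffer,
 * points b_end_io at end_buffer_write_sync(), submits the write and waits;
 * this is roughly what sync_dirty_buffer() does internally.
 *
 *	lock_buffer(bh);
 *	get_bh(bh);			(reference dropped by end_buffer_write_sync())
 *	bh->b_end_io = end_buffer_write_sync;
 *	submit_bh(REQ_OP_WRITE | REQ_SYNC, bh);
 *	wait_on_buffer(bh);
 *	err = buffer_uptodate(bh) ? 0 : -EIO;
 */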
178
179 /*
180  * Various filesystems appear to want __find_get_block to be non-blocking.
181  * But it's the page lock which protects the buffers.  To get around this,
182  * we get exclusion from try_to_free_buffers with the blockdev mapping's
183  * private_lock.
184  *
185  * Hack idea: for the blockdev mapping, private_lock contention
186  * may be quite high.  This code could TryLock the page, and if that
187  * succeeds, there is no need to take private_lock.
188  */
189 static struct buffer_head *
190 __find_get_block_slow(struct block_device *bdev, sector_t block)
191 {
192         struct inode *bd_inode = bdev->bd_inode;
193         struct address_space *bd_mapping = bd_inode->i_mapping;
194         struct buffer_head *ret = NULL;
195         pgoff_t index;
196         struct buffer_head *bh;
197         struct buffer_head *head;
198         struct page *page;
199         int all_mapped = 1;
200         static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1);
201
202         index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
203         page = find_get_page_flags(bd_mapping, index, FGP_ACCESSED);
204         if (!page)
205                 goto out;
206
207         spin_lock(&bd_mapping->private_lock);
208         if (!page_has_buffers(page))
209                 goto out_unlock;
210         head = page_buffers(page);
211         bh = head;
212         do {
213                 if (!buffer_mapped(bh))
214                         all_mapped = 0;
215                 else if (bh->b_blocknr == block) {
216                         ret = bh;
217                         get_bh(bh);
218                         goto out_unlock;
219                 }
220                 bh = bh->b_this_page;
221         } while (bh != head);
222
223         /* we might be here because some of the buffers on this page are
224          * not mapped.  This is due to various races between
225          * file io on the block device and getblk.  It gets dealt with
226          * elsewhere, don't buffer_error if we had some unmapped buffers
227          */
228         ratelimit_set_flags(&last_warned, RATELIMIT_MSG_ON_RELEASE);
229         if (all_mapped && __ratelimit(&last_warned)) {
230                 printk("__find_get_block_slow() failed. block=%llu, "
231                        "b_blocknr=%llu, b_state=0x%08lx, b_size=%zu, "
232                        "device %pg blocksize: %d\n",
233                        (unsigned long long)block,
234                        (unsigned long long)bh->b_blocknr,
235                        bh->b_state, bh->b_size, bdev,
236                        1 << bd_inode->i_blkbits);
237         }
238 out_unlock:
239         spin_unlock(&bd_mapping->private_lock);
240         put_page(page);
241 out:
242         return ret;
243 }
244
245 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
246 {
247         unsigned long flags;
248         struct buffer_head *first;
249         struct buffer_head *tmp;
250         struct folio *folio;
251         int folio_uptodate = 1;
252
253         BUG_ON(!buffer_async_read(bh));
254
255         folio = bh->b_folio;
256         if (uptodate) {
257                 set_buffer_uptodate(bh);
258         } else {
259                 clear_buffer_uptodate(bh);
260                 buffer_io_error(bh, ", async page read");
261                 folio_set_error(folio);
262         }
263
264         /*
265          * Be _very_ careful from here on. Bad things can happen if
266          * two buffer heads end IO at almost the same time and both
267          * decide that the page is now completely done.
268          */
269         first = folio_buffers(folio);
270         spin_lock_irqsave(&first->b_uptodate_lock, flags);
271         clear_buffer_async_read(bh);
272         unlock_buffer(bh);
273         tmp = bh;
274         do {
275                 if (!buffer_uptodate(tmp))
276                         folio_uptodate = 0;
277                 if (buffer_async_read(tmp)) {
278                         BUG_ON(!buffer_locked(tmp));
279                         goto still_busy;
280                 }
281                 tmp = tmp->b_this_page;
282         } while (tmp != bh);
283         spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
284
285         /*
286          * If all of the buffers are uptodate then we can set the page
287          * uptodate.
288          */
289         if (folio_uptodate)
290                 folio_mark_uptodate(folio);
291         folio_unlock(folio);
292         return;
293
294 still_busy:
295         spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
296         return;
297 }
298
299 struct postprocess_bh_ctx {
300         struct work_struct work;
301         struct buffer_head *bh;
302 };
303
304 static void verify_bh(struct work_struct *work)
305 {
306         struct postprocess_bh_ctx *ctx =
307                 container_of(work, struct postprocess_bh_ctx, work);
308         struct buffer_head *bh = ctx->bh;
309         bool valid;
310
311         valid = fsverity_verify_blocks(bh->b_folio, bh->b_size, bh_offset(bh));
312         end_buffer_async_read(bh, valid);
313         kfree(ctx);
314 }
315
316 static bool need_fsverity(struct buffer_head *bh)
317 {
318         struct folio *folio = bh->b_folio;
319         struct inode *inode = folio->mapping->host;
320
321         return fsverity_active(inode) &&
322                 /* needed by ext4 */
323                 folio->index < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
324 }
325
326 static void decrypt_bh(struct work_struct *work)
327 {
328         struct postprocess_bh_ctx *ctx =
329                 container_of(work, struct postprocess_bh_ctx, work);
330         struct buffer_head *bh = ctx->bh;
331         int err;
332
333         err = fscrypt_decrypt_pagecache_blocks(bh->b_folio, bh->b_size,
334                                                bh_offset(bh));
335         if (err == 0 && need_fsverity(bh)) {
336                 /*
337                  * We use different work queues for decryption and for verity
338                  * because verity may require reading metadata pages that need
339                  * decryption, and we shouldn't recurse to the same workqueue.
340                  */
341                 INIT_WORK(&ctx->work, verify_bh);
342                 fsverity_enqueue_verify_work(&ctx->work);
343                 return;
344         }
345         end_buffer_async_read(bh, err == 0);
346         kfree(ctx);
347 }
348
349 /*
350  * I/O completion handler for block_read_full_folio() - pages
351  * which come unlocked at the end of I/O.
352  */
353 static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate)
354 {
355         struct inode *inode = bh->b_folio->mapping->host;
356         bool decrypt = fscrypt_inode_uses_fs_layer_crypto(inode);
357         bool verify = need_fsverity(bh);
358
359         /* Decrypt (with fscrypt) and/or verify (with fsverity) if needed. */
360         if (uptodate && (decrypt || verify)) {
361                 struct postprocess_bh_ctx *ctx =
362                         kmalloc(sizeof(*ctx), GFP_ATOMIC);
363
364                 if (ctx) {
365                         ctx->bh = bh;
366                         if (decrypt) {
367                                 INIT_WORK(&ctx->work, decrypt_bh);
368                                 fscrypt_enqueue_decrypt_work(&ctx->work);
369                         } else {
370                                 INIT_WORK(&ctx->work, verify_bh);
371                                 fsverity_enqueue_verify_work(&ctx->work);
372                         }
373                         return;
374                 }
375                 uptodate = 0;
376         }
377         end_buffer_async_read(bh, uptodate);
378 }
379
380 /*
381  * Completion handler for block_write_full_page() - pages which are unlocked
382  * during I/O, and which have PageWriteback cleared upon I/O completion.
383  */
384 void end_buffer_async_write(struct buffer_head *bh, int uptodate)
385 {
386         unsigned long flags;
387         struct buffer_head *first;
388         struct buffer_head *tmp;
389         struct folio *folio;
390
391         BUG_ON(!buffer_async_write(bh));
392
393         folio = bh->b_folio;
394         if (uptodate) {
395                 set_buffer_uptodate(bh);
396         } else {
397                 buffer_io_error(bh, ", lost async page write");
398                 mark_buffer_write_io_error(bh);
399                 clear_buffer_uptodate(bh);
400                 folio_set_error(folio);
401         }
402
403         first = folio_buffers(folio);
404         spin_lock_irqsave(&first->b_uptodate_lock, flags);
405
406         clear_buffer_async_write(bh);
407         unlock_buffer(bh);
408         tmp = bh->b_this_page;
409         while (tmp != bh) {
410                 if (buffer_async_write(tmp)) {
411                         BUG_ON(!buffer_locked(tmp));
412                         goto still_busy;
413                 }
414                 tmp = tmp->b_this_page;
415         }
416         spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
417         folio_end_writeback(folio);
418         return;
419
420 still_busy:
421         spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
422         return;
423 }
424 EXPORT_SYMBOL(end_buffer_async_write);
425
426 /*
427  * If a page's buffers are under async read (end_buffer_async_read
428  * completion) then there is a possibility that another thread of
429  * control could lock one of the buffers after it has completed
430  * but while some of the other buffers have not completed.  This
431  * locked buffer would confuse end_buffer_async_read() into not unlocking
432  * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
433  * that this buffer is not under async I/O.
434  *
435  * The page comes unlocked when it has no locked buffer_async buffers
436  * left.
437  *
438  * PageLocked prevents anyone from starting new async I/O against any of
439  * the buffers.
440  *
441  * PageWriteback is used to prevent simultaneous writeout of the same
442  * page.
443  *
444  * PageLocked prevents anyone from starting writeback of a page which is
445  * under read I/O (PageWriteback is only ever set against a locked page).
446  */
447 static void mark_buffer_async_read(struct buffer_head *bh)
448 {
449         bh->b_end_io = end_buffer_async_read_io;
450         set_buffer_async_read(bh);
451 }
452
453 static void mark_buffer_async_write_endio(struct buffer_head *bh,
454                                           bh_end_io_t *handler)
455 {
456         bh->b_end_io = handler;
457         set_buffer_async_write(bh);
458 }
459
460 void mark_buffer_async_write(struct buffer_head *bh)
461 {
462         mark_buffer_async_write_endio(bh, end_buffer_async_write);
463 }
464 EXPORT_SYMBOL(mark_buffer_async_write);
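/*
 * Illustrative sketch (not part of the original file): a writepage-style
 * path marks the buffers it is about to submit so that
 * end_buffer_async_write() can tell when the whole page has completed:
 *
 *	do {
 *		lock_buffer(bh);
 *		if (test_clear_buffer_dirty(bh))
 *			mark_buffer_async_write(bh);
 *		else
 *			unlock_buffer(bh);
 *		bh = bh->b_this_page;
 *	} while (bh != head);
 *
 * Each marked buffer is then passed to submit_bh(REQ_OP_WRITE, bh), and the
 * last completion ends writeback on the page.
 */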
465
466
467 /*
468  * fs/buffer.c contains helper functions for buffer-backed address space's
469  * fsync functions.  A common requirement for buffer-based filesystems is
470  * that certain data from the backing blockdev needs to be written out for
471  * a successful fsync().  For example, ext2 indirect blocks need to be
472  * written back and waited upon before fsync() returns.
473  *
474  * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
475  * inode_has_buffers() and invalidate_inode_buffers() are provided for the
476  * management of a list of dependent buffers at ->i_mapping->private_list.
477  *
478  * Locking is a little subtle: try_to_free_buffers() will remove buffers
479  * from their controlling inode's queue when they are being freed.  But
480  * try_to_free_buffers() will be operating against the *blockdev* mapping
481  * at the time, not against the S_ISREG file which depends on those buffers.
482  * So the locking for private_list is via the private_lock in the address_space
483  * which backs the buffers.  Which is different from the address_space 
484  * against which the buffers are listed.  So for a particular address_space,
485  * mapping->private_lock does *not* protect mapping->private_list!  In fact,
486  * mapping->private_list will always be protected by the backing blockdev's
487  * ->private_lock.
488  *
489  * Which introduces a requirement: all buffers on an address_space's
490  * ->private_list must be from the same address_space: the blockdev's.
491  *
492  * address_spaces which do not place buffers at ->private_list via these
493  * utility functions are free to use private_lock and private_list for
494  * whatever they want.  The only requirement is that list_empty(private_list)
495  * be true at clear_inode() time.
496  *
497  * FIXME: clear_inode should not call invalidate_inode_buffers().  The
498  * filesystems should do that.  invalidate_inode_buffers() should just go
499  * BUG_ON(!list_empty).
500  *
501  * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
502  * take an address_space, not an inode.  And it should be called
503  * mark_buffer_dirty_fsync() to clearly define why those buffers are being
504  * queued up.
505  *
506  * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
507  * list if it is already on a list.  Because if the buffer is on a list,
508  * it *must* already be on the right one.  If not, the filesystem is being
509  * silly.  This will save a ton of locking.  But first we have to ensure
510  * that buffers are taken *off* the old inode's list when they are freed
511  * (presumably in truncate).  That requires careful auditing of all
512  * filesystems (do it inside bforget()).  It could also be done by bringing
513  * b_inode back.
514  */
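/*
 * Illustrative sketch (not part of the original file): a filesystem tags the
 * metadata buffers its fsync() depends on with mark_buffer_dirty_inode(), and
 * its ->fsync method then writes and waits on them via sync_mapping_buffers()
 * (the ex_fsync name below is hypothetical):
 *
 *	mark_buffer_dirty_inode(bh, inode);	(when dirtying e.g. an indirect block)
 *
 *	static int ex_fsync(struct file *file, loff_t start, loff_t end,
 *			    int datasync)
 *	{
 *		struct inode *inode = file->f_mapping->host;
 *		int err = file_write_and_wait_range(file, start, end);
 *
 *		if (err)
 *			return err;
 *		return sync_mapping_buffers(inode->i_mapping);
 *	}
 */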
515
516 /*
517  * The buffer's backing address_space's private_lock must be held
518  */
519 static void __remove_assoc_queue(struct buffer_head *bh)
520 {
521         list_del_init(&bh->b_assoc_buffers);
522         WARN_ON(!bh->b_assoc_map);
523         bh->b_assoc_map = NULL;
524 }
525
526 int inode_has_buffers(struct inode *inode)
527 {
528         return !list_empty(&inode->i_data.private_list);
529 }
530
531 /*
532  * osync is designed to support O_SYNC io.  It waits synchronously for
533  * all already-submitted IO to complete, but does not queue any new
534  * writes to the disk.
535  *
536  * To do O_SYNC writes, just queue the buffer writes with write_dirty_buffer
537  * as you dirty the buffers, and then use osync_inode_buffers to wait for
538  * completion.  Any other dirty buffers which are not yet queued for
539  * write will not be flushed to disk by the osync.
540  */
541 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
542 {
543         struct buffer_head *bh;
544         struct list_head *p;
545         int err = 0;
546
547         spin_lock(lock);
548 repeat:
549         list_for_each_prev(p, list) {
550                 bh = BH_ENTRY(p);
551                 if (buffer_locked(bh)) {
552                         get_bh(bh);
553                         spin_unlock(lock);
554                         wait_on_buffer(bh);
555                         if (!buffer_uptodate(bh))
556                                 err = -EIO;
557                         brelse(bh);
558                         spin_lock(lock);
559                         goto repeat;
560                 }
561         }
562         spin_unlock(lock);
563         return err;
564 }
565
566 void emergency_thaw_bdev(struct super_block *sb)
567 {
568         while (sb->s_bdev && !thaw_bdev(sb->s_bdev))
569                 printk(KERN_WARNING "Emergency Thaw on %pg\n", sb->s_bdev);
570 }
571
572 /**
573  * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
574  * @mapping: the mapping which wants those buffers written
575  *
576  * Starts I/O against the buffers at mapping->private_list, and waits upon
577  * that I/O.
578  *
579  * Basically, this is a convenience function for fsync().
580  * @mapping is a file or directory which needs those buffers to be written for
581  * a successful fsync().
582  */
583 int sync_mapping_buffers(struct address_space *mapping)
584 {
585         struct address_space *buffer_mapping = mapping->private_data;
586
587         if (buffer_mapping == NULL || list_empty(&mapping->private_list))
588                 return 0;
589
590         return fsync_buffers_list(&buffer_mapping->private_lock,
591                                         &mapping->private_list);
592 }
593 EXPORT_SYMBOL(sync_mapping_buffers);
594
595 /*
596  * Called when we've recently written block `bblock', and it is known that
597  * `bblock' was for a buffer_boundary() buffer.  This means that the block at
598  * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
599  * dirty, schedule it for IO.  So that indirects merge nicely with their data.
600  */
601 void write_boundary_block(struct block_device *bdev,
602                         sector_t bblock, unsigned blocksize)
603 {
604         struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
605         if (bh) {
606                 if (buffer_dirty(bh))
607                         write_dirty_buffer(bh, 0);
608                 put_bh(bh);
609         }
610 }
611
612 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
613 {
614         struct address_space *mapping = inode->i_mapping;
615         struct address_space *buffer_mapping = bh->b_folio->mapping;
616
617         mark_buffer_dirty(bh);
618         if (!mapping->private_data) {
619                 mapping->private_data = buffer_mapping;
620         } else {
621                 BUG_ON(mapping->private_data != buffer_mapping);
622         }
623         if (!bh->b_assoc_map) {
624                 spin_lock(&buffer_mapping->private_lock);
625                 list_move_tail(&bh->b_assoc_buffers,
626                                 &mapping->private_list);
627                 bh->b_assoc_map = mapping;
628                 spin_unlock(&buffer_mapping->private_lock);
629         }
630 }
631 EXPORT_SYMBOL(mark_buffer_dirty_inode);
632
633 /*
634  * Add a page to the dirty page list.
635  *
636  * It is a sad fact of life that this function is called from several places
637  * deeply under spinlocking.  It may not sleep.
638  *
639  * If the page has buffers, the uptodate buffers are set dirty, to preserve
640  * dirty-state coherency between the page and the buffers.  If the page does
641  * not have buffers then when they are later attached they will all be set
642  * dirty.
643  *
644  * The buffers are dirtied before the page is dirtied.  There's a small race
645  * window in which a writepage caller may see the page cleanness but not the
646  * buffer dirtiness.  That's fine.  If this code were to set the page dirty
647  * before the buffers, a concurrent writepage caller could clear the page dirty
648  * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
649  * page on the dirty page list.
650  *
651  * We use private_lock to lock against try_to_free_buffers while using the
652  * page's buffer list.  Also use this to protect against clean buffers being
653  * added to the page after it was set dirty.
654  *
655  * FIXME: may need to call ->reservepage here as well.  That's rather up to the
656  * address_space though.
657  */
658 bool block_dirty_folio(struct address_space *mapping, struct folio *folio)
659 {
660         struct buffer_head *head;
661         bool newly_dirty;
662
663         spin_lock(&mapping->private_lock);
664         head = folio_buffers(folio);
665         if (head) {
666                 struct buffer_head *bh = head;
667
668                 do {
669                         set_buffer_dirty(bh);
670                         bh = bh->b_this_page;
671                 } while (bh != head);
672         }
673         /*
674          * Lock out page's memcg migration to keep PageDirty
675          * synchronized with per-memcg dirty page counters.
676          */
677         folio_memcg_lock(folio);
678         newly_dirty = !folio_test_set_dirty(folio);
679         spin_unlock(&mapping->private_lock);
680
681         if (newly_dirty)
682                 __folio_mark_dirty(folio, mapping, 1);
683
684         folio_memcg_unlock(folio);
685
686         if (newly_dirty)
687                 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
688
689         return newly_dirty;
690 }
691 EXPORT_SYMBOL(block_dirty_folio);
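/*
 * Illustrative sketch (not part of the original file): block_dirty_folio()
 * is normally wired up as the ->dirty_folio method of a buffer-backed
 * address_space, alongside block_invalidate_folio() (ex_aops is a
 * hypothetical name):
 *
 *	static const struct address_space_operations ex_aops = {
 *		.dirty_folio		= block_dirty_folio,
 *		.invalidate_folio	= block_invalidate_folio,
 *		...
 *	};
 */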
692
693 /*
694  * Write out and wait upon a list of buffers.
695  *
696  * We have conflicting pressures: we want to make sure that all
697  * initially dirty buffers get waited on, but that any subsequently
698  * dirtied buffers don't.  After all, we don't want fsync to last
699  * forever if somebody is actively writing to the file.
700  *
701  * Do this in two main stages: first we copy dirty buffers to a
702  * temporary inode list, queueing the writes as we go.  Then we clean
703  * up, waiting for those writes to complete.
704  * 
705  * During this second stage, any subsequent updates to the file may end
706  * up refiling the buffer on the original inode's dirty list again, so
707  * there is a chance we will end up with a buffer queued for write but
708  * not yet completed on that list.  So, as a final cleanup we go through
709  * the osync code to catch these locked, dirty buffers without requeuing
710  * any newly dirty buffers for write.
711  */
712 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
713 {
714         struct buffer_head *bh;
715         struct list_head tmp;
716         struct address_space *mapping;
717         int err = 0, err2;
718         struct blk_plug plug;
719
720         INIT_LIST_HEAD(&tmp);
721         blk_start_plug(&plug);
722
723         spin_lock(lock);
724         while (!list_empty(list)) {
725                 bh = BH_ENTRY(list->next);
726                 mapping = bh->b_assoc_map;
727                 __remove_assoc_queue(bh);
728                 /* Avoid race with mark_buffer_dirty_inode() which does
729                  * a lockless check and we rely on seeing the dirty bit */
730                 smp_mb();
731                 if (buffer_dirty(bh) || buffer_locked(bh)) {
732                         list_add(&bh->b_assoc_buffers, &tmp);
733                         bh->b_assoc_map = mapping;
734                         if (buffer_dirty(bh)) {
735                                 get_bh(bh);
736                                 spin_unlock(lock);
737                                 /*
738                                  * Ensure any pending I/O completes so that
739                                  * write_dirty_buffer() actually writes the
740                                  * current contents - it is a noop if I/O is
741                                  * still in flight on potentially older
742                                  * contents.
743                                  */
744                                 write_dirty_buffer(bh, REQ_SYNC);
745
746                                 /*
747                                  * Kick off IO for the previous mapping. Note
748                                  * that we will not run the very last mapping,
749                                  * wait_on_buffer() will do that for us
750                                  * through sync_buffer().
751                                  */
752                                 brelse(bh);
753                                 spin_lock(lock);
754                         }
755                 }
756         }
757
758         spin_unlock(lock);
759         blk_finish_plug(&plug);
760         spin_lock(lock);
761
762         while (!list_empty(&tmp)) {
763                 bh = BH_ENTRY(tmp.prev);
764                 get_bh(bh);
765                 mapping = bh->b_assoc_map;
766                 __remove_assoc_queue(bh);
767                 /* Avoid race with mark_buffer_dirty_inode() which does
768                  * a lockless check and we rely on seeing the dirty bit */
769                 smp_mb();
770                 if (buffer_dirty(bh)) {
771                         list_add(&bh->b_assoc_buffers,
772                                  &mapping->private_list);
773                         bh->b_assoc_map = mapping;
774                 }
775                 spin_unlock(lock);
776                 wait_on_buffer(bh);
777                 if (!buffer_uptodate(bh))
778                         err = -EIO;
779                 brelse(bh);
780                 spin_lock(lock);
781         }
782         
783         spin_unlock(lock);
784         err2 = osync_buffers_list(lock, list);
785         if (err)
786                 return err;
787         else
788                 return err2;
789 }
790
791 /*
792  * Invalidate any and all dirty buffers on a given inode.  We are
793  * probably unmounting the fs, but that doesn't mean we have already
794  * done a sync().  Just drop the buffers from the inode list.
795  *
796  * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
797  * assumes that all the buffers are against the blockdev.  Not true
798  * for reiserfs.
799  */
800 void invalidate_inode_buffers(struct inode *inode)
801 {
802         if (inode_has_buffers(inode)) {
803                 struct address_space *mapping = &inode->i_data;
804                 struct list_head *list = &mapping->private_list;
805                 struct address_space *buffer_mapping = mapping->private_data;
806
807                 spin_lock(&buffer_mapping->private_lock);
808                 while (!list_empty(list))
809                         __remove_assoc_queue(BH_ENTRY(list->next));
810                 spin_unlock(&buffer_mapping->private_lock);
811         }
812 }
813 EXPORT_SYMBOL(invalidate_inode_buffers);
814
815 /*
816  * Remove any clean buffers from the inode's buffer list.  This is called
817  * when we're trying to free the inode itself.  Those buffers can pin it.
818  *
819  * Returns true if all buffers were removed.
820  */
821 int remove_inode_buffers(struct inode *inode)
822 {
823         int ret = 1;
824
825         if (inode_has_buffers(inode)) {
826                 struct address_space *mapping = &inode->i_data;
827                 struct list_head *list = &mapping->private_list;
828                 struct address_space *buffer_mapping = mapping->private_data;
829
830                 spin_lock(&buffer_mapping->private_lock);
831                 while (!list_empty(list)) {
832                         struct buffer_head *bh = BH_ENTRY(list->next);
833                         if (buffer_dirty(bh)) {
834                                 ret = 0;
835                                 break;
836                         }
837                         __remove_assoc_queue(bh);
838                 }
839                 spin_unlock(&buffer_mapping->private_lock);
840         }
841         return ret;
842 }
843
844 /*
845  * Create the appropriate buffers when given a page for data area and
846  * the size of each buffer.  Use the bh->b_this_page linked list to
847  * follow the buffers created.  Return NULL if unable to create more
848  * buffers.
849  *
850  * The retry flag is used to differentiate async IO (paging, swapping)
851  * which may not fail, from ordinary buffer allocations.
852  */
853 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
854                 bool retry)
855 {
856         struct buffer_head *bh, *head;
857         gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT;
858         long offset;
859         struct mem_cgroup *memcg, *old_memcg;
860
861         if (retry)
862                 gfp |= __GFP_NOFAIL;
863
864         /* The page lock pins the memcg */
865         memcg = page_memcg(page);
866         old_memcg = set_active_memcg(memcg);
867
868         head = NULL;
869         offset = PAGE_SIZE;
870         while ((offset -= size) >= 0) {
871                 bh = alloc_buffer_head(gfp);
872                 if (!bh)
873                         goto no_grow;
874
875                 bh->b_this_page = head;
876                 bh->b_blocknr = -1;
877                 head = bh;
878
879                 bh->b_size = size;
880
881                 /* Link the buffer to its page */
882                 set_bh_page(bh, page, offset);
883         }
884 out:
885         set_active_memcg(old_memcg);
886         return head;
887 /*
888  * In case anything failed, we just free everything we got.
889  */
890 no_grow:
891         if (head) {
892                 do {
893                         bh = head;
894                         head = head->b_this_page;
895                         free_buffer_head(bh);
896                 } while (head);
897         }
898
899         goto out;
900 }
901 EXPORT_SYMBOL_GPL(alloc_page_buffers);
902
903 static inline void
904 link_dev_buffers(struct page *page, struct buffer_head *head)
905 {
906         struct buffer_head *bh, *tail;
907
908         bh = head;
909         do {
910                 tail = bh;
911                 bh = bh->b_this_page;
912         } while (bh);
913         tail->b_this_page = head;
914         attach_page_private(page, head);
915 }
916
917 static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size)
918 {
919         sector_t retval = ~((sector_t)0);
920         loff_t sz = bdev_nr_bytes(bdev);
921
922         if (sz) {
923                 unsigned int sizebits = blksize_bits(size);
924                 retval = (sz >> sizebits);
925         }
926         return retval;
927 }
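/*
 * Worked example (not part of the original file): for a 1 GiB device and a
 * 4096-byte block size, blksize_bits(4096) == 12, so blkdev_max_block()
 * returns 1 GiB >> 12 == 262144 and blocks 0..262143 are addressable.
 */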
928
929 /*
930  * Initialise the state of a blockdev page's buffers.
931  */ 
932 static sector_t
933 init_page_buffers(struct page *page, struct block_device *bdev,
934                         sector_t block, int size)
935 {
936         struct buffer_head *head = page_buffers(page);
937         struct buffer_head *bh = head;
938         int uptodate = PageUptodate(page);
939         sector_t end_block = blkdev_max_block(bdev, size);
940
941         do {
942                 if (!buffer_mapped(bh)) {
943                         bh->b_end_io = NULL;
944                         bh->b_private = NULL;
945                         bh->b_bdev = bdev;
946                         bh->b_blocknr = block;
947                         if (uptodate)
948                                 set_buffer_uptodate(bh);
949                         if (block < end_block)
950                                 set_buffer_mapped(bh);
951                 }
952                 block++;
953                 bh = bh->b_this_page;
954         } while (bh != head);
955
956         /*
957          * Caller needs to validate requested block against end of device.
958          */
959         return end_block;
960 }
961
962 /*
963  * Create the page-cache page that contains the requested block.
964  *
965  * This is used purely for blockdev mappings.
966  */
967 static int
968 grow_dev_page(struct block_device *bdev, sector_t block,
969               pgoff_t index, int size, int sizebits, gfp_t gfp)
970 {
971         struct inode *inode = bdev->bd_inode;
972         struct page *page;
973         struct buffer_head *bh;
974         sector_t end_block;
975         int ret = 0;
976         gfp_t gfp_mask;
977
978         gfp_mask = mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS) | gfp;
979
980         /*
981          * XXX: __getblk_slow() can not really deal with failure and
982          * will endlessly loop on improvised global reclaim.  Prefer
983          * looping in the allocator rather than here, at least that
984          * code knows what it's doing.
985          */
986         gfp_mask |= __GFP_NOFAIL;
987
988         page = find_or_create_page(inode->i_mapping, index, gfp_mask);
989
990         BUG_ON(!PageLocked(page));
991
992         if (page_has_buffers(page)) {
993                 bh = page_buffers(page);
994                 if (bh->b_size == size) {
995                         end_block = init_page_buffers(page, bdev,
996                                                 (sector_t)index << sizebits,
997                                                 size);
998                         goto done;
999                 }
1000                 if (!try_to_free_buffers(page_folio(page)))
1001                         goto failed;
1002         }
1003
1004         /*
1005          * Allocate some buffers for this page
1006          */
1007         bh = alloc_page_buffers(page, size, true);
1008
1009         /*
1010          * Link the page to the buffers and initialise them.  Take the
1011          * lock to be atomic wrt __find_get_block(), which does not
1012          * run under the page lock.
1013          */
1014         spin_lock(&inode->i_mapping->private_lock);
1015         link_dev_buffers(page, bh);
1016         end_block = init_page_buffers(page, bdev, (sector_t)index << sizebits,
1017                         size);
1018         spin_unlock(&inode->i_mapping->private_lock);
1019 done:
1020         ret = (block < end_block) ? 1 : -ENXIO;
1021 failed:
1022         unlock_page(page);
1023         put_page(page);
1024         return ret;
1025 }
1026
1027 /*
1028  * Create buffers for the specified block device block's page.  If
1029  * that page was dirty, the buffers are set dirty also.
1030  */
1031 static int
1032 grow_buffers(struct block_device *bdev, sector_t block, int size, gfp_t gfp)
1033 {
1034         pgoff_t index;
1035         int sizebits;
1036
1037         sizebits = PAGE_SHIFT - __ffs(size);
1038         index = block >> sizebits;
1039
1040         /*
1041          * Check for a block which wants to lie outside our maximum possible
1042          * pagecache index.  (this comparison is done using sector_t types).
1043          */
1044         if (unlikely(index != block >> sizebits)) {
1045                 printk(KERN_ERR "%s: requested out-of-range block %llu for "
1046                         "device %pg\n",
1047                         __func__, (unsigned long long)block,
1048                         bdev);
1049                 return -EIO;
1050         }
1051
1052         /* Create a page with the proper size buffers.. */
1053         return grow_dev_page(bdev, block, index, size, sizebits, gfp);
1054 }
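/*
 * Worked example (not part of the original file): with PAGE_SHIFT == 12 and
 * a 1024-byte block size, __ffs(1024) == 10, so sizebits == 2 and each page
 * holds four blocks; block 4103 therefore lives at pagecache index
 * 4103 >> 2 == 1025.
 */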
1055
1056 static struct buffer_head *
1057 __getblk_slow(struct block_device *bdev, sector_t block,
1058              unsigned size, gfp_t gfp)
1059 {
1060         /* Size must be multiple of hard sectorsize */
1061         if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
1062                         (size < 512 || size > PAGE_SIZE))) {
1063                 printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1064                                         size);
1065                 printk(KERN_ERR "logical block size: %d\n",
1066                                         bdev_logical_block_size(bdev));
1067
1068                 dump_stack();
1069                 return NULL;
1070         }
1071
1072         for (;;) {
1073                 struct buffer_head *bh;
1074                 int ret;
1075
1076                 bh = __find_get_block(bdev, block, size);
1077                 if (bh)
1078                         return bh;
1079
1080                 ret = grow_buffers(bdev, block, size, gfp);
1081                 if (ret < 0)
1082                         return NULL;
1083         }
1084 }
1085
1086 /*
1087  * The relationship between dirty buffers and dirty pages:
1088  *
1089  * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1090  * the page is tagged dirty in the page cache.
1091  *
1092  * At all times, the dirtiness of the buffers represents the dirtiness of
1093  * subsections of the page.  If the page has buffers, the page dirty bit is
1094  * merely a hint about the true dirty state.
1095  *
1096  * When a page is set dirty in its entirety, all its buffers are marked dirty
1097  * (if the page has buffers).
1098  *
1099  * When a buffer is marked dirty, its page is dirtied, but the page's other
1100  * buffers are not.
1101  *
1102  * Also.  When blockdev buffers are explicitly read with bread(), they
1103  * individually become uptodate.  But their backing page remains not
1104  * uptodate - even if all of its buffers are uptodate.  A subsequent
1105  * block_read_full_folio() against that folio will discover all the uptodate
1106  * buffers, will set the folio uptodate and will perform no I/O.
1107  */
1108
1109 /**
1110  * mark_buffer_dirty - mark a buffer_head as needing writeout
1111  * @bh: the buffer_head to mark dirty
1112  *
1113  * mark_buffer_dirty() will set the dirty bit against the buffer, then set
1114  * its backing page dirty, then tag the page as dirty in the page cache
1115  * and then attach the address_space's inode to its superblock's dirty
1116  * inode list.
1117  *
1118  * mark_buffer_dirty() is atomic.  It takes bh->b_folio->mapping->private_lock,
1119  * i_pages lock and mapping->host->i_lock.
1120  */
1121 void mark_buffer_dirty(struct buffer_head *bh)
1122 {
1123         WARN_ON_ONCE(!buffer_uptodate(bh));
1124
1125         trace_block_dirty_buffer(bh);
1126
1127         /*
1128          * Very *carefully* optimize the it-is-already-dirty case.
1129          *
1130          * Don't let the final "is it dirty" escape to before we
1131          * perhaps modified the buffer.
1132          */
1133         if (buffer_dirty(bh)) {
1134                 smp_mb();
1135                 if (buffer_dirty(bh))
1136                         return;
1137         }
1138
1139         if (!test_set_buffer_dirty(bh)) {
1140                 struct folio *folio = bh->b_folio;
1141                 struct address_space *mapping = NULL;
1142
1143                 folio_memcg_lock(folio);
1144                 if (!folio_test_set_dirty(folio)) {
1145                         mapping = folio->mapping;
1146                         if (mapping)
1147                                 __folio_mark_dirty(folio, mapping, 0);
1148                 }
1149                 folio_memcg_unlock(folio);
1150                 if (mapping)
1151                         __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
1152         }
1153 }
1154 EXPORT_SYMBOL(mark_buffer_dirty);
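/*
 * Illustrative sketch (not part of the original file): the common pattern
 * for initialising and dirtying a metadata block that will be written back
 * later (or pushed out explicitly with sync_dirty_buffer()):
 *
 *	bh = sb_getblk(sb, block);
 *	if (unlikely(!bh))
 *		return -ENOMEM;
 *	lock_buffer(bh);
 *	memset(bh->b_data, 0, bh->b_size);
 *	set_buffer_uptodate(bh);
 *	unlock_buffer(bh);
 *	mark_buffer_dirty(bh);
 *	brelse(bh);
 */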
1155
1156 void mark_buffer_write_io_error(struct buffer_head *bh)
1157 {
1158         struct super_block *sb;
1159
1160         set_buffer_write_io_error(bh);
1161         /* FIXME: do we need to set this in both places? */
1162         if (bh->b_folio && bh->b_folio->mapping)
1163                 mapping_set_error(bh->b_folio->mapping, -EIO);
1164         if (bh->b_assoc_map)
1165                 mapping_set_error(bh->b_assoc_map, -EIO);
1166         rcu_read_lock();
1167         sb = READ_ONCE(bh->b_bdev->bd_super);
1168         if (sb)
1169                 errseq_set(&sb->s_wb_err, -EIO);
1170         rcu_read_unlock();
1171 }
1172 EXPORT_SYMBOL(mark_buffer_write_io_error);
1173
1174 /*
1175  * Decrement a buffer_head's reference count.  If all buffers against a page
1176  * have zero reference count, are clean and unlocked, and if the page is clean
1177  * and unlocked then try_to_free_buffers() may strip the buffers from the page
1178  * in preparation for freeing it (sometimes, rarely, buffers are removed from
1179  * a page but it ends up not being freed, and buffers may later be reattached).
1180  */
1181 void __brelse(struct buffer_head * buf)
1182 {
1183         if (atomic_read(&buf->b_count)) {
1184                 put_bh(buf);
1185                 return;
1186         }
1187         WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1188 }
1189 EXPORT_SYMBOL(__brelse);
1190
1191 /*
1192  * bforget() is like brelse(), except it discards any
1193  * potentially dirty data.
1194  */
1195 void __bforget(struct buffer_head *bh)
1196 {
1197         clear_buffer_dirty(bh);
1198         if (bh->b_assoc_map) {
1199                 struct address_space *buffer_mapping = bh->b_folio->mapping;
1200
1201                 spin_lock(&buffer_mapping->private_lock);
1202                 list_del_init(&bh->b_assoc_buffers);
1203                 bh->b_assoc_map = NULL;
1204                 spin_unlock(&buffer_mapping->private_lock);
1205         }
1206         __brelse(bh);
1207 }
1208 EXPORT_SYMBOL(__bforget);
1209
1210 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1211 {
1212         lock_buffer(bh);
1213         if (buffer_uptodate(bh)) {
1214                 unlock_buffer(bh);
1215                 return bh;
1216         } else {
1217                 get_bh(bh);
1218                 bh->b_end_io = end_buffer_read_sync;
1219                 submit_bh(REQ_OP_READ, bh);
1220                 wait_on_buffer(bh);
1221                 if (buffer_uptodate(bh))
1222                         return bh;
1223         }
1224         brelse(bh);
1225         return NULL;
1226 }
1227
1228 /*
1229  * Per-cpu buffer LRU implementation, used to reduce the cost of __find_get_block().
1230  * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
1231  * refcount elevated by one when they're in an LRU.  A buffer can only appear
1232  * once in a particular CPU's LRU.  A single buffer can be present in multiple
1233  * CPU's LRUs at the same time.
1234  *
1235  * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1236  * sb_find_get_block().
1237  *
1238  * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
1239  * a local interrupt disable for that.
1240  */
1241
1242 #define BH_LRU_SIZE     16
1243
1244 struct bh_lru {
1245         struct buffer_head *bhs[BH_LRU_SIZE];
1246 };
1247
1248 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1249
1250 #ifdef CONFIG_SMP
1251 #define bh_lru_lock()   local_irq_disable()
1252 #define bh_lru_unlock() local_irq_enable()
1253 #else
1254 #define bh_lru_lock()   preempt_disable()
1255 #define bh_lru_unlock() preempt_enable()
1256 #endif
1257
1258 static inline void check_irqs_on(void)
1259 {
1260 #ifdef irqs_disabled
1261         BUG_ON(irqs_disabled());
1262 #endif
1263 }
1264
1265 /*
1266  * Install a buffer_head into this cpu's LRU.  If not already in the LRU, it is
1267  * inserted at the front, and the buffer_head at the back, if any, is evicted.
1268  * Or, if already in the LRU it is moved to the front.
1269  */
1270 static void bh_lru_install(struct buffer_head *bh)
1271 {
1272         struct buffer_head *evictee = bh;
1273         struct bh_lru *b;
1274         int i;
1275
1276         check_irqs_on();
1277         bh_lru_lock();
1278
1279         /*
1280          * The refcount held on a buffer_head in the bh_lru prevents its
1281          * attached page from being dropped (i.e., by try_to_free_buffers),
1282          * which could cause page migration to fail.
1283          * Skip putting upcoming bh into bh_lru until migration is done.
1284          */
1285         if (lru_cache_disabled()) {
1286                 bh_lru_unlock();
1287                 return;
1288         }
1289
1290         b = this_cpu_ptr(&bh_lrus);
1291         for (i = 0; i < BH_LRU_SIZE; i++) {
1292                 swap(evictee, b->bhs[i]);
1293                 if (evictee == bh) {
1294                         bh_lru_unlock();
1295                         return;
1296                 }
1297         }
1298
1299         get_bh(bh);
1300         bh_lru_unlock();
1301         brelse(evictee);
1302 }
1303
1304 /*
1305  * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
1306  */
1307 static struct buffer_head *
1308 lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
1309 {
1310         struct buffer_head *ret = NULL;
1311         unsigned int i;
1312
1313         check_irqs_on();
1314         bh_lru_lock();
1315         for (i = 0; i < BH_LRU_SIZE; i++) {
1316                 struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);
1317
1318                 if (bh && bh->b_blocknr == block && bh->b_bdev == bdev &&
1319                     bh->b_size == size) {
1320                         if (i) {
1321                                 while (i) {
1322                                         __this_cpu_write(bh_lrus.bhs[i],
1323                                                 __this_cpu_read(bh_lrus.bhs[i - 1]));
1324                                         i--;
1325                                 }
1326                                 __this_cpu_write(bh_lrus.bhs[0], bh);
1327                         }
1328                         get_bh(bh);
1329                         ret = bh;
1330                         break;
1331                 }
1332         }
1333         bh_lru_unlock();
1334         return ret;
1335 }
1336
1337 /*
1338  * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
1339  * it in the LRU and mark it as accessed.  If it is not present then return
1340  * NULL.
1341  */
1342 struct buffer_head *
1343 __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
1344 {
1345         struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1346
1347         if (bh == NULL) {
1348                 /* __find_get_block_slow will mark the page accessed */
1349                 bh = __find_get_block_slow(bdev, block);
1350                 if (bh)
1351                         bh_lru_install(bh);
1352         } else
1353                 touch_buffer(bh);
1354
1355         return bh;
1356 }
1357 EXPORT_SYMBOL(__find_get_block);
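/*
 * Illustrative sketch (not part of the original file): sb_find_get_block()
 * is the usual wrapper.  It only returns a buffer that is already present
 * in the page cache, so a NULL return is perfectly normal:
 *
 *	bh = sb_find_get_block(sb, block);
 *	if (bh) {
 *		... use the already-cached buffer ...
 *		brelse(bh);
 *	}
 */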
1358
1359 /*
1360  * __getblk_gfp() will locate (and, if necessary, create) the buffer_head
1361  * which corresponds to the passed block_device, block and size. The
1362  * returned buffer has its reference count incremented.
1363  *
1364  * __getblk_gfp() will lock up the machine if grow_dev_page's
1365  * try_to_free_buffers() attempt is failing.  FIXME, perhaps?
1366  */
1367 struct buffer_head *
1368 __getblk_gfp(struct block_device *bdev, sector_t block,
1369              unsigned size, gfp_t gfp)
1370 {
1371         struct buffer_head *bh = __find_get_block(bdev, block, size);
1372
1373         might_sleep();
1374         if (bh == NULL)
1375                 bh = __getblk_slow(bdev, block, size, gfp);
1376         return bh;
1377 }
1378 EXPORT_SYMBOL(__getblk_gfp);
1379
1380 /*
1381  * Do async read-ahead on a buffer..
1382  */
1383 void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
1384 {
1385         struct buffer_head *bh = __getblk(bdev, block, size);
1386         if (likely(bh)) {
1387                 bh_readahead(bh, REQ_RAHEAD);
1388                 brelse(bh);
1389         }
1390 }
1391 EXPORT_SYMBOL(__breadahead);
1392
1393 /**
1394  *  __bread_gfp() - reads a specified block and returns the bh
1395  *  @bdev: the block_device to read from
1396  *  @block: number of block
1397  *  @size: size (in bytes) to read
1398  *  @gfp: page allocation flag
1399  *
1400  *  Reads a specified block, and returns the buffer head that contains it.
1401  *  If @gfp is zero, the page cache is allocated from the non-movable area,
1402  *  so that the pinned buffer does not get in the way of page migration.
1403  *  It returns NULL if the block was unreadable.
1404  */
1405 struct buffer_head *
1406 __bread_gfp(struct block_device *bdev, sector_t block,
1407                    unsigned size, gfp_t gfp)
1408 {
1409         struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp);
1410
1411         if (likely(bh) && !buffer_uptodate(bh))
1412                 bh = __bread_slow(bh);
1413         return bh;
1414 }
1415 EXPORT_SYMBOL(__bread_gfp);
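/*
 * Illustrative sketch (not part of the original file): filesystems normally
 * reach this through the __bread()/sb_bread() wrappers, e.g. reading block 1
 * of a device with a 1024-byte block size:
 *
 *	struct buffer_head *bh = __bread(bdev, 1, 1024);
 *
 *	if (!bh)
 *		return -EIO;
 *	... bh->b_data now holds the block contents ...
 *	brelse(bh);
 */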
1416
1417 static void __invalidate_bh_lrus(struct bh_lru *b)
1418 {
1419         int i;
1420
1421         for (i = 0; i < BH_LRU_SIZE; i++) {
1422                 brelse(b->bhs[i]);
1423                 b->bhs[i] = NULL;
1424         }
1425 }
1426 /*
1427  * invalidate_bh_lrus() is called rarely - but not only at unmount.
1428  * This doesn't race because it runs in each cpu either in irq
1429  * or with preempt disabled.
1430  */
1431 static void invalidate_bh_lru(void *arg)
1432 {
1433         struct bh_lru *b = &get_cpu_var(bh_lrus);
1434
1435         __invalidate_bh_lrus(b);
1436         put_cpu_var(bh_lrus);
1437 }
1438
1439 bool has_bh_in_lru(int cpu, void *dummy)
1440 {
1441         struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
1442         int i;
1443         
1444         for (i = 0; i < BH_LRU_SIZE; i++) {
1445                 if (b->bhs[i])
1446                         return true;
1447         }
1448
1449         return false;
1450 }
1451
1452 void invalidate_bh_lrus(void)
1453 {
1454         on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1);
1455 }
1456 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
1457
1458 /*
1459  * It's called from workqueue context so we need a bh_lru_lock to close
1460  * the race with preemption/irq.
1461  */
1462 void invalidate_bh_lrus_cpu(void)
1463 {
1464         struct bh_lru *b;
1465
1466         bh_lru_lock();
1467         b = this_cpu_ptr(&bh_lrus);
1468         __invalidate_bh_lrus(b);
1469         bh_lru_unlock();
1470 }
1471
1472 void set_bh_page(struct buffer_head *bh,
1473                 struct page *page, unsigned long offset)
1474 {
1475         bh->b_page = page;
1476         BUG_ON(offset >= PAGE_SIZE);
1477         if (PageHighMem(page))
1478                 /*
1479                  * This catches illegal uses and preserves the offset:
1480                  */
1481                 bh->b_data = (char *)(0 + offset);
1482         else
1483                 bh->b_data = page_address(page) + offset;
1484 }
1485 EXPORT_SYMBOL(set_bh_page);
1486
1487 /*
1488  * Called when truncating a buffer on a page completely.
1489  */
1490
1491 /* Bits that are cleared during an invalidate */
1492 #define BUFFER_FLAGS_DISCARD \
1493         (1 << BH_Mapped | 1 << BH_New | 1 << BH_Req | \
1494          1 << BH_Delay | 1 << BH_Unwritten)
1495
1496 static void discard_buffer(struct buffer_head * bh)
1497 {
1498         unsigned long b_state;
1499
1500         lock_buffer(bh);
1501         clear_buffer_dirty(bh);
1502         bh->b_bdev = NULL;
1503         b_state = READ_ONCE(bh->b_state);
1504         do {
1505         } while (!try_cmpxchg(&bh->b_state, &b_state,
1506                               b_state & ~BUFFER_FLAGS_DISCARD));
1507         unlock_buffer(bh);
1508 }
1509
1510 /**
1511  * block_invalidate_folio - Invalidate part or all of a buffer-backed folio.
1512  * @folio: The folio which is affected.
1513  * @offset: start of the range to invalidate
1514  * @length: length of the range to invalidate
1515  *
1516  * block_invalidate_folio() is called when all or part of the folio has been
1517  * invalidated by a truncate operation.
1518  *
1519  * block_invalidate_folio() does not have to release all buffers, but it must
1520  * ensure that no dirty buffer is left outside @offset and that no I/O
1521  * is underway against any of the blocks which are outside the truncation
1522  * point.  Because the caller is about to free (and possibly reuse) those
1523  * blocks on-disk.
1524  */
1525 void block_invalidate_folio(struct folio *folio, size_t offset, size_t length)
1526 {
1527         struct buffer_head *head, *bh, *next;
1528         size_t curr_off = 0;
1529         size_t stop = length + offset;
1530
1531         BUG_ON(!folio_test_locked(folio));
1532
1533         /*
1534          * Check for overflow
1535          */
1536         BUG_ON(stop > folio_size(folio) || stop < length);
1537
1538         head = folio_buffers(folio);
1539         if (!head)
1540                 return;
1541
1542         bh = head;
1543         do {
1544                 size_t next_off = curr_off + bh->b_size;
1545                 next = bh->b_this_page;
1546
1547                 /*
1548                  * Are we still fully in range?
1549                  */
1550                 if (next_off > stop)
1551                         goto out;
1552
1553                 /*
1554                  * is this block fully invalidated?
1555                  */
1556                 if (offset <= curr_off)
1557                         discard_buffer(bh);
1558                 curr_off = next_off;
1559                 bh = next;
1560         } while (bh != head);
1561
1562         /*
1563          * We release buffers only if the entire folio is being invalidated.
1564          * The get_block cached value has been unconditionally invalidated,
1565          * so real IO is not possible anymore.
1566          */
1567         if (length == folio_size(folio))
1568                 filemap_release_folio(folio, 0);
1569 out:
1570         return;
1571 }
1572 EXPORT_SYMBOL(block_invalidate_folio);
1573
1574
1575 /*
1576  * We attach and possibly dirty the buffers atomically wrt
1577  * block_dirty_folio() via private_lock.  try_to_free_buffers
1578  * is already excluded via the page lock.
1579  */
1580 void create_empty_buffers(struct page *page,
1581                         unsigned long blocksize, unsigned long b_state)
1582 {
1583         struct buffer_head *bh, *head, *tail;
1584
1585         head = alloc_page_buffers(page, blocksize, true);
1586         bh = head;
1587         do {
1588                 bh->b_state |= b_state;
1589                 tail = bh;
1590                 bh = bh->b_this_page;
1591         } while (bh);
1592         tail->b_this_page = head;
1593
1594         spin_lock(&page->mapping->private_lock);
1595         if (PageUptodate(page) || PageDirty(page)) {
1596                 bh = head;
1597                 do {
1598                         if (PageDirty(page))
1599                                 set_buffer_dirty(bh);
1600                         if (PageUptodate(page))
1601                                 set_buffer_uptodate(bh);
1602                         bh = bh->b_this_page;
1603                 } while (bh != head);
1604         }
1605         attach_page_private(page, head);
1606         spin_unlock(&page->mapping->private_lock);
1607 }
1608 EXPORT_SYMBOL(create_empty_buffers);
1609
1610 /**
1611  * clean_bdev_aliases: clean a range of buffers in block device
1612  * @bdev: Block device to clean buffers in
1613  * @block: Start of a range of blocks to clean
1614  * @len: Number of blocks to clean
1615  *
1616  * We are taking a range of blocks for data and we don't want writeback of any
1617  * buffer-cache aliases starting from the return of this function until the
1618  * moment when something explicitly marks the buffer dirty (hopefully that
1619  * will not happen until we free that block ;-). We don't even need to mark
1620  * it not-uptodate - nobody can expect anything from a newly allocated buffer
1621  * anyway. We used to use unmap_buffer() for such invalidation, but that was
1622  * wrong. We definitely don't want to mark the alias unmapped, for example - it
1623  * would confuse anyone who might pick it up with bread() afterwards...
1624  *
1625  * Also..  Note that bforget() doesn't lock the buffer.  So there can be
1626  * writeout I/O going on against recently-freed buffers.  We don't wait on that
1627  * I/O in bforget() - it's more efficient to wait on the I/O only if we really
1628  * need to.  That happens here.
1629  */
1630 void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
1631 {
1632         struct inode *bd_inode = bdev->bd_inode;
1633         struct address_space *bd_mapping = bd_inode->i_mapping;
1634         struct folio_batch fbatch;
1635         pgoff_t index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
1636         pgoff_t end;
1637         int i, count;
1638         struct buffer_head *bh;
1639         struct buffer_head *head;
1640
1641         end = (block + len - 1) >> (PAGE_SHIFT - bd_inode->i_blkbits);
1642         folio_batch_init(&fbatch);
1643         while (filemap_get_folios(bd_mapping, &index, end, &fbatch)) {
1644                 count = folio_batch_count(&fbatch);
1645                 for (i = 0; i < count; i++) {
1646                         struct folio *folio = fbatch.folios[i];
1647
1648                         if (!folio_buffers(folio))
1649                                 continue;
1650                         /*
1651                          * We use folio lock instead of bd_mapping->private_lock
1652                          * to pin buffers here since we can afford to sleep and
1653                          * it scales better than a global spinlock lock.
1654                          */
1655                         folio_lock(folio);
1656                         /* Recheck when the folio is locked which pins bhs */
1657                         head = folio_buffers(folio);
1658                         if (!head)
1659                                 goto unlock_page;
1660                         bh = head;
1661                         do {
1662                                 if (!buffer_mapped(bh) || (bh->b_blocknr < block))
1663                                         goto next;
1664                                 if (bh->b_blocknr >= block + len)
1665                                         break;
1666                                 clear_buffer_dirty(bh);
1667                                 wait_on_buffer(bh);
1668                                 clear_buffer_req(bh);
1669 next:
1670                                 bh = bh->b_this_page;
1671                         } while (bh != head);
1672 unlock_page:
1673                         folio_unlock(folio);
1674                 }
1675                 folio_batch_release(&fbatch);
1676                 cond_resched();
1677                 /* End of range already reached? */
1678                 if (index > end || !index)
1679                         break;
1680         }
1681 }
1682 EXPORT_SYMBOL(clean_bdev_aliases);
1683
1684 /*
1685  * Size is a power-of-two in the range 512..PAGE_SIZE,
1686  * and the case we care about most is PAGE_SIZE.
1687  *
1688  * So this *could* possibly be written with those
1689  * constraints in mind (relevant mostly if some
1690  * architecture has a slow bit-scan instruction)
1691  */
1692 static inline int block_size_bits(unsigned int blocksize)
1693 {
1694         return ilog2(blocksize);
1695 }
1696
1697 static struct buffer_head *create_page_buffers(struct page *page, struct inode *inode, unsigned int b_state)
1698 {
1699         BUG_ON(!PageLocked(page));
1700
1701         if (!page_has_buffers(page))
1702                 create_empty_buffers(page, 1 << READ_ONCE(inode->i_blkbits),
1703                                      b_state);
1704         return page_buffers(page);
1705 }
1706
1707 /*
1708  * NOTE! All mapped/uptodate combinations are valid:
1709  *
1710  *      Mapped  Uptodate        Meaning
1711  *
1712  *      No      No              "unknown" - must do get_block()
1713  *      No      Yes             "hole" - zero-filled
1714  *      Yes     No              "allocated" - allocated on disk, not read in
1715  *      Yes     Yes             "valid" - allocated and up-to-date in memory.
1716  *
1717  * "Dirty" is valid only with the last case (mapped+uptodate).
1718  */
1719
1720 /*
1721  * While block_write_full_page is writing back the dirty buffers under
1722  * the page lock, whoever dirtied the buffers may decide to clean them
1723  * again at any time.  We handle that by only looking at the buffer
1724  * state inside lock_buffer().
1725  *
1726  * If block_write_full_page() is called for regular writeback
1727  * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1728  * locked buffer.   This can only happen if someone has written the buffer
1729  * directly, with submit_bh().  At the address_space level PageWriteback
1730  * prevents this contention from occurring.
1731  *
1732  * If block_write_full_page() is called with wbc->sync_mode ==
1733  * WB_SYNC_ALL, the writes are posted using REQ_SYNC; this
1734  * causes the writes to be flagged as synchronous writes.
1735  */
1736 int __block_write_full_page(struct inode *inode, struct page *page,
1737                         get_block_t *get_block, struct writeback_control *wbc,
1738                         bh_end_io_t *handler)
1739 {
1740         int err;
1741         sector_t block;
1742         sector_t last_block;
1743         struct buffer_head *bh, *head;
1744         unsigned int blocksize, bbits;
1745         int nr_underway = 0;
1746         blk_opf_t write_flags = wbc_to_write_flags(wbc);
1747
1748         head = create_page_buffers(page, inode,
1749                                         (1 << BH_Dirty)|(1 << BH_Uptodate));
1750
1751         /*
1752          * Be very careful.  We have no exclusion from block_dirty_folio
1753          * here, and the (potentially unmapped) buffers may become dirty at
1754          * any time.  If a buffer becomes dirty here after we've inspected it
1755          * then we just miss that fact, and the page stays dirty.
1756          *
1757          * Buffers outside i_size may be dirtied by block_dirty_folio;
1758          * handle that here by just cleaning them.
1759          */
1760
1761         bh = head;
1762         blocksize = bh->b_size;
1763         bbits = block_size_bits(blocksize);
1764
1765         block = (sector_t)page->index << (PAGE_SHIFT - bbits);
1766         last_block = (i_size_read(inode) - 1) >> bbits;
1767
1768         /*
1769          * Get all the dirty buffers mapped to disk addresses and
1770          * handle any aliases from the underlying blockdev's mapping.
1771          */
1772         do {
1773                 if (block > last_block) {
1774                         /*
1775                          * mapped buffers outside i_size will occur, because
1776                          * this page can be outside i_size when there is a
1777                          * truncate in progress.
1778                          */
1779                         /*
1780                          * The buffer was zeroed by block_write_full_page()
1781                          */
1782                         clear_buffer_dirty(bh);
1783                         set_buffer_uptodate(bh);
1784                 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1785                            buffer_dirty(bh)) {
1786                         WARN_ON(bh->b_size != blocksize);
1787                         err = get_block(inode, block, bh, 1);
1788                         if (err)
1789                                 goto recover;
1790                         clear_buffer_delay(bh);
1791                         if (buffer_new(bh)) {
1792                                 /* blockdev mappings never come here */
1793                                 clear_buffer_new(bh);
1794                                 clean_bdev_bh_alias(bh);
1795                         }
1796                 }
1797                 bh = bh->b_this_page;
1798                 block++;
1799         } while (bh != head);
1800
1801         do {
1802                 if (!buffer_mapped(bh))
1803                         continue;
1804                 /*
1805                  * If it's a fully non-blocking write attempt and we cannot
1806                  * lock the buffer then redirty the page.  Note that this can
1807                  * potentially cause a busy-wait loop from writeback threads
1808                  * and kswapd activity, but those code paths have their own
1809                  * higher-level throttling.
1810                  */
1811                 if (wbc->sync_mode != WB_SYNC_NONE) {
1812                         lock_buffer(bh);
1813                 } else if (!trylock_buffer(bh)) {
1814                         redirty_page_for_writepage(wbc, page);
1815                         continue;
1816                 }
1817                 if (test_clear_buffer_dirty(bh)) {
1818                         mark_buffer_async_write_endio(bh, handler);
1819                 } else {
1820                         unlock_buffer(bh);
1821                 }
1822         } while ((bh = bh->b_this_page) != head);
1823
1824         /*
1825          * The page and its buffers are protected by PageWriteback(), so we can
1826          * drop the bh refcounts early.
1827          */
1828         BUG_ON(PageWriteback(page));
1829         set_page_writeback(page);
1830
1831         do {
1832                 struct buffer_head *next = bh->b_this_page;
1833                 if (buffer_async_write(bh)) {
1834                         submit_bh_wbc(REQ_OP_WRITE | write_flags, bh, wbc);
1835                         nr_underway++;
1836                 }
1837                 bh = next;
1838         } while (bh != head);
1839         unlock_page(page);
1840
1841         err = 0;
1842 done:
1843         if (nr_underway == 0) {
1844                 /*
1845                  * The page was marked dirty, but the buffers were
1846                  * clean.  Someone wrote them back by hand with
1847                  * write_dirty_buffer/submit_bh.  A rare case.
1848                  */
1849                 end_page_writeback(page);
1850
1851                 /*
1852                  * The page and buffer_heads can be released at any time from
1853                  * here on.
1854                  */
1855         }
1856         return err;
1857
1858 recover:
1859         /*
1860          * ENOSPC, or some other error.  We may already have added some
1861          * blocks to the file, so we need to write these out to avoid
1862          * exposing stale data.
1863          * The page is currently locked and not marked for writeback.
1864          */
1865         bh = head;
1866         /* Recovery: lock and submit the mapped buffers */
1867         do {
1868                 if (buffer_mapped(bh) && buffer_dirty(bh) &&
1869                     !buffer_delay(bh)) {
1870                         lock_buffer(bh);
1871                         mark_buffer_async_write_endio(bh, handler);
1872                 } else {
1873                         /*
1874                          * The buffer may have been set dirty during
1875                          * attachment to a dirty page.
1876                          */
1877                         clear_buffer_dirty(bh);
1878                 }
1879         } while ((bh = bh->b_this_page) != head);
1880         SetPageError(page);
1881         BUG_ON(PageWriteback(page));
1882         mapping_set_error(page->mapping, err);
1883         set_page_writeback(page);
1884         do {
1885                 struct buffer_head *next = bh->b_this_page;
1886                 if (buffer_async_write(bh)) {
1887                         clear_buffer_dirty(bh);
1888                         submit_bh_wbc(REQ_OP_WRITE | write_flags, bh, wbc);
1889                         nr_underway++;
1890                 }
1891                 bh = next;
1892         } while (bh != head);
1893         unlock_page(page);
1894         goto done;
1895 }
1896 EXPORT_SYMBOL(__block_write_full_page);
1897
1898 /*
1899  * If a page has any new buffers, zero them out here, and mark them uptodate
1900  * and dirty so they'll be written out (in order to prevent uninitialised
1901  * block data from leaking). And clear the new bit.
1902  */
1903 void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1904 {
1905         unsigned int block_start, block_end;
1906         struct buffer_head *head, *bh;
1907
1908         BUG_ON(!PageLocked(page));
1909         if (!page_has_buffers(page))
1910                 return;
1911
1912         bh = head = page_buffers(page);
1913         block_start = 0;
1914         do {
1915                 block_end = block_start + bh->b_size;
1916
1917                 if (buffer_new(bh)) {
1918                         if (block_end > from && block_start < to) {
1919                                 if (!PageUptodate(page)) {
1920                                         unsigned start, size;
1921
1922                                         start = max(from, block_start);
1923                                         size = min(to, block_end) - start;
1924
1925                                         zero_user(page, start, size);
1926                                         set_buffer_uptodate(bh);
1927                                 }
1928
1929                                 clear_buffer_new(bh);
1930                                 mark_buffer_dirty(bh);
1931                         }
1932                 }
1933
1934                 block_start = block_end;
1935                 bh = bh->b_this_page;
1936         } while (bh != head);
1937 }
1938 EXPORT_SYMBOL(page_zero_new_buffers);
1939
1940 static void
1941 iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
1942                 const struct iomap *iomap)
1943 {
1944         loff_t offset = block << inode->i_blkbits;
1945
1946         bh->b_bdev = iomap->bdev;
1947
1948         /*
1949          * Block points to offset in file we need to map, iomap contains
1950          * the offset at which the map starts. If the map ends before the
1951          * current block, then do not map the buffer and let the caller
1952          * handle it.
1953          */
1954         BUG_ON(offset >= iomap->offset + iomap->length);
1955
1956         switch (iomap->type) {
1957         case IOMAP_HOLE:
1958                 /*
1959                  * If the buffer is not up to date or beyond the current EOF,
1960                  * we need to mark it as new to ensure sub-block zeroing is
1961                  * executed if necessary.
1962                  */
1963                 if (!buffer_uptodate(bh) ||
1964                     (offset >= i_size_read(inode)))
1965                         set_buffer_new(bh);
1966                 break;
1967         case IOMAP_DELALLOC:
1968                 if (!buffer_uptodate(bh) ||
1969                     (offset >= i_size_read(inode)))
1970                         set_buffer_new(bh);
1971                 set_buffer_uptodate(bh);
1972                 set_buffer_mapped(bh);
1973                 set_buffer_delay(bh);
1974                 break;
1975         case IOMAP_UNWRITTEN:
1976                 /*
1977                  * For unwritten regions, we always need to ensure that regions
1978                  * in the block we are not writing to are zeroed. Mark the
1979                  * buffer as new to ensure this.
1980                  */
1981                 set_buffer_new(bh);
1982                 set_buffer_unwritten(bh);
1983                 fallthrough;
1984         case IOMAP_MAPPED:
1985                 if ((iomap->flags & IOMAP_F_NEW) ||
1986                     offset >= i_size_read(inode))
1987                         set_buffer_new(bh);
1988                 bh->b_blocknr = (iomap->addr + offset - iomap->offset) >>
1989                                 inode->i_blkbits;
1990                 set_buffer_mapped(bh);
1991                 break;
1992         }
1993 }
1994
1995 int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
1996                 get_block_t *get_block, const struct iomap *iomap)
1997 {
1998         unsigned from = pos & (PAGE_SIZE - 1);
1999         unsigned to = from + len;
2000         struct inode *inode = folio->mapping->host;
2001         unsigned block_start, block_end;
2002         sector_t block;
2003         int err = 0;
2004         unsigned blocksize, bbits;
2005         struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
2006
2007         BUG_ON(!folio_test_locked(folio));
2008         BUG_ON(from > PAGE_SIZE);
2009         BUG_ON(to > PAGE_SIZE);
2010         BUG_ON(from > to);
2011
2012         head = create_page_buffers(&folio->page, inode, 0);
2013         blocksize = head->b_size;
2014         bbits = block_size_bits(blocksize);
2015
2016         block = (sector_t)folio->index << (PAGE_SHIFT - bbits);
2017
2018         for(bh = head, block_start = 0; bh != head || !block_start;
2019             block++, block_start=block_end, bh = bh->b_this_page) {
2020                 block_end = block_start + blocksize;
2021                 if (block_end <= from || block_start >= to) {
2022                         if (folio_test_uptodate(folio)) {
2023                                 if (!buffer_uptodate(bh))
2024                                         set_buffer_uptodate(bh);
2025                         }
2026                         continue;
2027                 }
2028                 if (buffer_new(bh))
2029                         clear_buffer_new(bh);
2030                 if (!buffer_mapped(bh)) {
2031                         WARN_ON(bh->b_size != blocksize);
2032                         if (get_block) {
2033                                 err = get_block(inode, block, bh, 1);
2034                                 if (err)
2035                                         break;
2036                         } else {
2037                                 iomap_to_bh(inode, block, bh, iomap);
2038                         }
2039
2040                         if (buffer_new(bh)) {
2041                                 clean_bdev_bh_alias(bh);
2042                                 if (folio_test_uptodate(folio)) {
2043                                         clear_buffer_new(bh);
2044                                         set_buffer_uptodate(bh);
2045                                         mark_buffer_dirty(bh);
2046                                         continue;
2047                                 }
2048                                 if (block_end > to || block_start < from)
2049                                         folio_zero_segments(folio,
2050                                                 to, block_end,
2051                                                 block_start, from);
2052                                 continue;
2053                         }
2054                 }
2055                 if (folio_test_uptodate(folio)) {
2056                         if (!buffer_uptodate(bh))
2057                                 set_buffer_uptodate(bh);
2058                         continue; 
2059                 }
2060                 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
2061                     !buffer_unwritten(bh) &&
2062                      (block_start < from || block_end > to)) {
2063                         bh_read_nowait(bh, 0);
2064                         *wait_bh++=bh;
2065                 }
2066         }
2067         /*
2068          * If we issued read requests - let them complete.
2069          */
2070         while(wait_bh > wait) {
2071                 wait_on_buffer(*--wait_bh);
2072                 if (!buffer_uptodate(*wait_bh))
2073                         err = -EIO;
2074         }
2075         if (unlikely(err))
2076                 page_zero_new_buffers(&folio->page, from, to);
2077         return err;
2078 }
2079
2080 int __block_write_begin(struct page *page, loff_t pos, unsigned len,
2081                 get_block_t *get_block)
2082 {
2083         return __block_write_begin_int(page_folio(page), pos, len, get_block,
2084                                        NULL);
2085 }
2086 EXPORT_SYMBOL(__block_write_begin);
2087
2088 static int __block_commit_write(struct inode *inode, struct page *page,
2089                 unsigned from, unsigned to)
2090 {
2091         unsigned block_start, block_end;
2092         int partial = 0;
2093         unsigned blocksize;
2094         struct buffer_head *bh, *head;
2095
2096         bh = head = page_buffers(page);
2097         blocksize = bh->b_size;
2098
2099         block_start = 0;
2100         do {
2101                 block_end = block_start + blocksize;
2102                 if (block_end <= from || block_start >= to) {
2103                         if (!buffer_uptodate(bh))
2104                                 partial = 1;
2105                 } else {
2106                         set_buffer_uptodate(bh);
2107                         mark_buffer_dirty(bh);
2108                 }
2109                 if (buffer_new(bh))
2110                         clear_buffer_new(bh);
2111
2112                 block_start = block_end;
2113                 bh = bh->b_this_page;
2114         } while (bh != head);
2115
2116         /*
2117          * If this is a partial write which happened to make all buffers
2118          * uptodate then we can optimize away a bogus read_folio() for
2119          * the next read(). Here we 'discover' whether the page went
2120          * uptodate as a result of this (potentially partial) write.
2121          */
2122         if (!partial)
2123                 SetPageUptodate(page);
2124         return 0;
2125 }
2126
2127 /*
2128  * block_write_begin takes care of the basic task of block allocation and
2129  * bringing partial write blocks uptodate first.
2130  *
2131  * The filesystem needs to handle block truncation upon failure.
2132  */
2133 int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
2134                 struct page **pagep, get_block_t *get_block)
2135 {
2136         pgoff_t index = pos >> PAGE_SHIFT;
2137         struct page *page;
2138         int status;
2139
2140         page = grab_cache_page_write_begin(mapping, index);
2141         if (!page)
2142                 return -ENOMEM;
2143
2144         status = __block_write_begin(page, pos, len, get_block);
2145         if (unlikely(status)) {
2146                 unlock_page(page);
2147                 put_page(page);
2148                 page = NULL;
2149         }
2150
2151         *pagep = page;
2152         return status;
2153 }
2154 EXPORT_SYMBOL(block_write_begin);
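
/*
 * Example (a sketch, not from this file): a minimal ->write_begin() for a
 * hypothetical filesystem whose blocks are mapped by a made-up
 * myfs_get_block() helper.  As noted above, trimming any blocks
 * instantiated beyond i_size on failure is the filesystem's job, done
 * here by a hypothetical myfs_write_failed() helper.
 *
 *	static int myfs_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, struct page **pagep, void **fsdata)
 *	{
 *		int ret;
 *
 *		ret = block_write_begin(mapping, pos, len, pagep,
 *					myfs_get_block);
 *		if (ret)
 *			myfs_write_failed(mapping, pos + len);
 *		return ret;
 *	}
 */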
2155
2156 int block_write_end(struct file *file, struct address_space *mapping,
2157                         loff_t pos, unsigned len, unsigned copied,
2158                         struct page *page, void *fsdata)
2159 {
2160         struct inode *inode = mapping->host;
2161         unsigned start;
2162
2163         start = pos & (PAGE_SIZE - 1);
2164
2165         if (unlikely(copied < len)) {
2166                 /*
2167                  * The buffers that were written will now be uptodate, so
2168                  * we don't have to worry about a read_folio reading them
2169                  * and overwriting a partial write. However if we have
2170                  * encountered a short write and only partially written
2171                  * into a buffer, it will not be marked uptodate, so a
2172                  * read_folio might come in and destroy our partial write.
2173                  *
2174                  * Do the simplest thing, and just treat any short write to a
2175                  * non uptodate page as a zero-length write, and force the
2176                  * caller to redo the whole thing.
2177                  */
2178                 if (!PageUptodate(page))
2179                         copied = 0;
2180
2181                 page_zero_new_buffers(page, start+copied, start+len);
2182         }
2183         flush_dcache_page(page);
2184
2185         /* This could be a short (even 0-length) commit */
2186         __block_commit_write(inode, page, start, start+copied);
2187
2188         return copied;
2189 }
2190 EXPORT_SYMBOL(block_write_end);
2191
2192 int generic_write_end(struct file *file, struct address_space *mapping,
2193                         loff_t pos, unsigned len, unsigned copied,
2194                         struct page *page, void *fsdata)
2195 {
2196         struct inode *inode = mapping->host;
2197         loff_t old_size = inode->i_size;
2198         bool i_size_changed = false;
2199
2200         copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2201
2202         /*
2203          * No need to use i_size_read() here, the i_size cannot change under us
2204          * because we hold i_rwsem.
2205          *
2206          * But it's important to update i_size while still holding page lock:
2207          * page writeout could otherwise come in and zero beyond i_size.
2208          */
2209         if (pos + copied > inode->i_size) {
2210                 i_size_write(inode, pos + copied);
2211                 i_size_changed = true;
2212         }
2213
2214         unlock_page(page);
2215         put_page(page);
2216
2217         if (old_size < pos)
2218                 pagecache_isize_extended(inode, old_size, pos);
2219         /*
2220          * Don't mark the inode dirty under page lock. First, it unnecessarily
2221          * makes the holding time of page lock longer. Second, it forces lock
2222          * ordering of page lock and transaction start for journaling
2223          * filesystems.
2224          */
2225         if (i_size_changed)
2226                 mark_inode_dirty(inode);
2227         return copied;
2228 }
2229 EXPORT_SYMBOL(generic_write_end);
2230
2231 /*
2232  * block_is_partially_uptodate checks whether buffers within a folio are
2233  * uptodate or not.
2234  *
2235  * Returns true if all buffers which correspond to the specified part
2236  * of the folio are uptodate.
2237  */
2238 bool block_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
2239 {
2240         unsigned block_start, block_end, blocksize;
2241         unsigned to;
2242         struct buffer_head *bh, *head;
2243         bool ret = true;
2244
2245         head = folio_buffers(folio);
2246         if (!head)
2247                 return false;
2248         blocksize = head->b_size;
2249         to = min_t(unsigned, folio_size(folio) - from, count);
2250         to = from + to;
2251         if (from < blocksize && to > folio_size(folio) - blocksize)
2252                 return false;
2253
2254         bh = head;
2255         block_start = 0;
2256         do {
2257                 block_end = block_start + blocksize;
2258                 if (block_end > from && block_start < to) {
2259                         if (!buffer_uptodate(bh)) {
2260                                 ret = false;
2261                                 break;
2262                         }
2263                         if (block_end >= to)
2264                                 break;
2265                 }
2266                 block_start = block_end;
2267                 bh = bh->b_this_page;
2268         } while (bh != head);
2269
2270         return ret;
2271 }
2272 EXPORT_SYMBOL(block_is_partially_uptodate);
2273
2274 /*
2275  * Generic "read_folio" function for block devices that have the normal
2276  * get_block functionality. This is most of the block device filesystems.
2277  * Reads the folio asynchronously --- the unlock_buffer() and
2278  * set/clear_buffer_uptodate() functions propagate buffer state into the
2279  * folio once IO has completed.
2280  */
2281 int block_read_full_folio(struct folio *folio, get_block_t *get_block)
2282 {
2283         struct inode *inode = folio->mapping->host;
2284         sector_t iblock, lblock;
2285         struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2286         unsigned int blocksize, bbits;
2287         int nr, i;
2288         int fully_mapped = 1;
2289         bool page_error = false;
2290         loff_t limit = i_size_read(inode);
2291
2292         /* This is needed for ext4. */
2293         if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode))
2294                 limit = inode->i_sb->s_maxbytes;
2295
2296         VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
2297
2298         head = create_page_buffers(&folio->page, inode, 0);
2299         blocksize = head->b_size;
2300         bbits = block_size_bits(blocksize);
2301
2302         iblock = (sector_t)folio->index << (PAGE_SHIFT - bbits);
2303         lblock = (limit+blocksize-1) >> bbits;
2304         bh = head;
2305         nr = 0;
2306         i = 0;
2307
2308         do {
2309                 if (buffer_uptodate(bh))
2310                         continue;
2311
2312                 if (!buffer_mapped(bh)) {
2313                         int err = 0;
2314
2315                         fully_mapped = 0;
2316                         if (iblock < lblock) {
2317                                 WARN_ON(bh->b_size != blocksize);
2318                                 err = get_block(inode, iblock, bh, 0);
2319                                 if (err) {
2320                                         folio_set_error(folio);
2321                                         page_error = true;
2322                                 }
2323                         }
2324                         if (!buffer_mapped(bh)) {
2325                                 folio_zero_range(folio, i * blocksize,
2326                                                 blocksize);
2327                                 if (!err)
2328                                         set_buffer_uptodate(bh);
2329                                 continue;
2330                         }
2331                         /*
2332                          * get_block() might have updated the buffer
2333                          * synchronously
2334                          */
2335                         if (buffer_uptodate(bh))
2336                                 continue;
2337                 }
2338                 arr[nr++] = bh;
2339         } while (i++, iblock++, (bh = bh->b_this_page) != head);
2340
2341         if (fully_mapped)
2342                 folio_set_mappedtodisk(folio);
2343
2344         if (!nr) {
2345                 /*
2346                  * All buffers are uptodate - we can set the folio uptodate
2347                  * as well. But not if get_block() returned an error.
2348                  */
2349                 if (!page_error)
2350                         folio_mark_uptodate(folio);
2351                 folio_unlock(folio);
2352                 return 0;
2353         }
2354
2355         /* Stage two: lock the buffers */
2356         for (i = 0; i < nr; i++) {
2357                 bh = arr[i];
2358                 lock_buffer(bh);
2359                 mark_buffer_async_read(bh);
2360         }
2361
2362         /*
2363          * Stage 3: start the IO.  Check for uptodateness
2364          * inside the buffer lock in case another process reading
2365          * the underlying blockdev brought it uptodate (the sct fix).
2366          */
2367         for (i = 0; i < nr; i++) {
2368                 bh = arr[i];
2369                 if (buffer_uptodate(bh))
2370                         end_buffer_async_read(bh, 1);
2371                 else
2372                         submit_bh(REQ_OP_READ, bh);
2373         }
2374         return 0;
2375 }
2376 EXPORT_SYMBOL(block_read_full_folio);
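
/*
 * Example (a sketch, not from this file): a block-mapped filesystem's
 * ->read_folio() usually just hands the folio to block_read_full_folio()
 * together with its block mapper (the hypothetical myfs_get_block()
 * below):
 *
 *	static int myfs_read_folio(struct file *file, struct folio *folio)
 *	{
 *		return block_read_full_folio(folio, myfs_get_block);
 *	}
 */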
2377
2378 /* Utility function for filesystems that need to do work on expanding
2379  * truncates.  Uses filesystem pagecache writes to allow the filesystem to
2380  * deal with the hole.
2381  */
2382 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2383 {
2384         struct address_space *mapping = inode->i_mapping;
2385         const struct address_space_operations *aops = mapping->a_ops;
2386         struct page *page;
2387         void *fsdata = NULL;
2388         int err;
2389
2390         err = inode_newsize_ok(inode, size);
2391         if (err)
2392                 goto out;
2393
2394         err = aops->write_begin(NULL, mapping, size, 0, &page, &fsdata);
2395         if (err)
2396                 goto out;
2397
2398         err = aops->write_end(NULL, mapping, size, 0, 0, page, fsdata);
2399         BUG_ON(err > 0);
2400
2401 out:
2402         return err;
2403 }
2404 EXPORT_SYMBOL(generic_cont_expand_simple);
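
/*
 * Sketch of a typical caller (not from this file): a filesystem that must
 * allocate or zero blocks on an expanding truncate can call this from its
 * ->setattr() path before committing the new size:
 *
 *	if ((attr->ia_valid & ATTR_SIZE) &&
 *	    attr->ia_size > i_size_read(inode)) {
 *		err = generic_cont_expand_simple(inode, attr->ia_size);
 *		if (err)
 *			return err;
 *	}
 */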
2405
2406 static int cont_expand_zero(struct file *file, struct address_space *mapping,
2407                             loff_t pos, loff_t *bytes)
2408 {
2409         struct inode *inode = mapping->host;
2410         const struct address_space_operations *aops = mapping->a_ops;
2411         unsigned int blocksize = i_blocksize(inode);
2412         struct page *page;
2413         void *fsdata = NULL;
2414         pgoff_t index, curidx;
2415         loff_t curpos;
2416         unsigned zerofrom, offset, len;
2417         int err = 0;
2418
2419         index = pos >> PAGE_SHIFT;
2420         offset = pos & ~PAGE_MASK;
2421
2422         while (index > (curidx = (curpos = *bytes)>>PAGE_SHIFT)) {
2423                 zerofrom = curpos & ~PAGE_MASK;
2424                 if (zerofrom & (blocksize-1)) {
2425                         *bytes |= (blocksize-1);
2426                         (*bytes)++;
2427                 }
2428                 len = PAGE_SIZE - zerofrom;
2429
2430                 err = aops->write_begin(file, mapping, curpos, len,
2431                                             &page, &fsdata);
2432                 if (err)
2433                         goto out;
2434                 zero_user(page, zerofrom, len);
2435                 err = aops->write_end(file, mapping, curpos, len, len,
2436                                                 page, fsdata);
2437                 if (err < 0)
2438                         goto out;
2439                 BUG_ON(err != len);
2440                 err = 0;
2441
2442                 balance_dirty_pages_ratelimited(mapping);
2443
2444                 if (fatal_signal_pending(current)) {
2445                         err = -EINTR;
2446                         goto out;
2447                 }
2448         }
2449
2450         /* page covers the boundary, find the boundary offset */
2451         if (index == curidx) {
2452                 zerofrom = curpos & ~PAGE_MASK;
2453                 /* if we are going to expand the file, the last block will be filled */
2454                 if (offset <= zerofrom) {
2455                         goto out;
2456                 }
2457                 if (zerofrom & (blocksize-1)) {
2458                         *bytes |= (blocksize-1);
2459                         (*bytes)++;
2460                 }
2461                 len = offset - zerofrom;
2462
2463                 err = aops->write_begin(file, mapping, curpos, len,
2464                                             &page, &fsdata);
2465                 if (err)
2466                         goto out;
2467                 zero_user(page, zerofrom, len);
2468                 err = aops->write_end(file, mapping, curpos, len, len,
2469                                                 page, fsdata);
2470                 if (err < 0)
2471                         goto out;
2472                 BUG_ON(err != len);
2473                 err = 0;
2474         }
2475 out:
2476         return err;
2477 }
2478
2479 /*
2480  * For moronic filesystems that do not allow holes in files.
2481  * We may have to extend the file.
2482  */
2483 int cont_write_begin(struct file *file, struct address_space *mapping,
2484                         loff_t pos, unsigned len,
2485                         struct page **pagep, void **fsdata,
2486                         get_block_t *get_block, loff_t *bytes)
2487 {
2488         struct inode *inode = mapping->host;
2489         unsigned int blocksize = i_blocksize(inode);
2490         unsigned int zerofrom;
2491         int err;
2492
2493         err = cont_expand_zero(file, mapping, pos, bytes);
2494         if (err)
2495                 return err;
2496
2497         zerofrom = *bytes & ~PAGE_MASK;
2498         if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2499                 *bytes |= (blocksize-1);
2500                 (*bytes)++;
2501         }
2502
2503         return block_write_begin(mapping, pos, len, pagep, get_block);
2504 }
2505 EXPORT_SYMBOL(cont_write_begin);
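
/*
 * Sketch (not from this file): a hole-less filesystem wires this up as
 * its ->write_begin(), passing a pointer to its "allocated so far"
 * watermark so the gap between the old end of data and @pos gets
 * zero-filled first.  The myfs_* names and the i_allocated field are
 * hypothetical.
 *
 *	static int myfs_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, struct page **pagep, void **fsdata)
 *	{
 *		return cont_write_begin(file, mapping, pos, len, pagep,
 *					fsdata, myfs_get_block,
 *					&MYFS_I(mapping->host)->i_allocated);
 *	}
 */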
2506
2507 int block_commit_write(struct page *page, unsigned from, unsigned to)
2508 {
2509         struct inode *inode = page->mapping->host;
2510         __block_commit_write(inode, page, from, to);
2511         return 0;
2512 }
2513 EXPORT_SYMBOL(block_commit_write);
2514
2515 /*
2516  * block_page_mkwrite() is not allowed to change the file size as it gets
2517  * called from a page fault handler when a page is first dirtied. Hence we must
2518  * be careful to check for EOF conditions here. We set the page up correctly
2519  * for a written page which means we get ENOSPC checking when writing into
2520  * holes and correct delalloc and unwritten extent mapping on filesystems that
2521  * support these features.
2522  *
2523  * We are not allowed to take the i_mutex here so we have to play games to
2524  * protect against truncate races as the page could now be beyond EOF.  Because
2525  * truncate writes the inode size before removing pages, once we have the
2526  * page lock we can determine safely if the page is beyond EOF. If it is not
2527  * beyond EOF, then the page is guaranteed safe against truncation until we
2528  * unlock the page.
2529  *
2530  * Direct callers of this function should protect against filesystem freezing
2531  * using sb_start_pagefault() - sb_end_pagefault() functions.
2532  */
2533 int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2534                          get_block_t get_block)
2535 {
2536         struct page *page = vmf->page;
2537         struct inode *inode = file_inode(vma->vm_file);
2538         unsigned long end;
2539         loff_t size;
2540         int ret;
2541
2542         lock_page(page);
2543         size = i_size_read(inode);
2544         if ((page->mapping != inode->i_mapping) ||
2545             (page_offset(page) > size)) {
2546                 /* We overload EFAULT to mean page got truncated */
2547                 ret = -EFAULT;
2548                 goto out_unlock;
2549         }
2550
2551         /* page is wholly or partially inside EOF */
2552         if (((page->index + 1) << PAGE_SHIFT) > size)
2553                 end = size & ~PAGE_MASK;
2554         else
2555                 end = PAGE_SIZE;
2556
2557         ret = __block_write_begin(page, 0, end, get_block);
2558         if (!ret)
2559                 ret = block_commit_write(page, 0, end);
2560
2561         if (unlikely(ret < 0))
2562                 goto out_unlock;
2563         set_page_dirty(page);
2564         wait_for_stable_page(page);
2565         return 0;
2566 out_unlock:
2567         unlock_page(page);
2568         return ret;
2569 }
2570 EXPORT_SYMBOL(block_page_mkwrite);
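
/*
 * Sketch (not from this file) of the freeze protection the comment above
 * asks direct callers to provide.  myfs_get_block() is hypothetical;
 * block_page_mkwrite_return() converts the error into a vm_fault_t.
 *
 *	static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		struct inode *inode = file_inode(vmf->vma->vm_file);
 *		int err;
 *
 *		sb_start_pagefault(inode->i_sb);
 *		file_update_time(vmf->vma->vm_file);
 *		err = block_page_mkwrite(vmf->vma, vmf, myfs_get_block);
 *		sb_end_pagefault(inode->i_sb);
 *		return block_page_mkwrite_return(err);
 *	}
 */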
2571
2572 int block_truncate_page(struct address_space *mapping,
2573                         loff_t from, get_block_t *get_block)
2574 {
2575         pgoff_t index = from >> PAGE_SHIFT;
2576         unsigned offset = from & (PAGE_SIZE-1);
2577         unsigned blocksize;
2578         sector_t iblock;
2579         unsigned length, pos;
2580         struct inode *inode = mapping->host;
2581         struct page *page;
2582         struct buffer_head *bh;
2583         int err = 0;
2584
2585         blocksize = i_blocksize(inode);
2586         length = offset & (blocksize - 1);
2587
2588         /* Block boundary? Nothing to do */
2589         if (!length)
2590                 return 0;
2591
2592         length = blocksize - length;
2593         iblock = (sector_t)index << (PAGE_SHIFT - inode->i_blkbits);
2594         
2595         page = grab_cache_page(mapping, index);
2596         if (!page)
2597                 return -ENOMEM;
2598
2599         if (!page_has_buffers(page))
2600                 create_empty_buffers(page, blocksize, 0);
2601
2602         /* Find the buffer that contains "offset" */
2603         bh = page_buffers(page);
2604         pos = blocksize;
2605         while (offset >= pos) {
2606                 bh = bh->b_this_page;
2607                 iblock++;
2608                 pos += blocksize;
2609         }
2610
2611         if (!buffer_mapped(bh)) {
2612                 WARN_ON(bh->b_size != blocksize);
2613                 err = get_block(inode, iblock, bh, 0);
2614                 if (err)
2615                         goto unlock;
2616                 /* unmapped? It's a hole - nothing to do */
2617                 if (!buffer_mapped(bh))
2618                         goto unlock;
2619         }
2620
2621         /* Ok, it's mapped. Make sure it's up-to-date */
2622         if (PageUptodate(page))
2623                 set_buffer_uptodate(bh);
2624
2625         if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2626                 err = bh_read(bh, 0);
2627                 /* Uhhuh. Read error. Complain and punt. */
2628                 if (err < 0)
2629                         goto unlock;
2630         }
2631
2632         zero_user(page, offset, length);
2633         mark_buffer_dirty(bh);
2634
2635 unlock:
2636         unlock_page(page);
2637         put_page(page);
2638
2639         return err;
2640 }
2641 EXPORT_SYMBOL(block_truncate_page);
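
/*
 * Sketch of a typical caller (not from this file): a filesystem's
 * truncate path can use this to zero the tail of the new last block so
 * stale data does not reappear if the file is extended again later
 * (myfs_get_block() is hypothetical):
 *
 *	err = block_truncate_page(inode->i_mapping, newsize,
 *				  myfs_get_block);
 */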
2642
2643 /*
2644  * The generic ->writepage function for buffer-backed address_spaces
2645  */
2646 int block_write_full_page(struct page *page, get_block_t *get_block,
2647                         struct writeback_control *wbc)
2648 {
2649         struct inode * const inode = page->mapping->host;
2650         loff_t i_size = i_size_read(inode);
2651         const pgoff_t end_index = i_size >> PAGE_SHIFT;
2652         unsigned offset;
2653
2654         /* Is the page fully inside i_size? */
2655         if (page->index < end_index)
2656                 return __block_write_full_page(inode, page, get_block, wbc,
2657                                                end_buffer_async_write);
2658
2659         /* Is the page fully outside i_size? (truncate in progress) */
2660         offset = i_size & (PAGE_SIZE-1);
2661         if (page->index >= end_index+1 || !offset) {
2662                 unlock_page(page);
2663                 return 0; /* don't care */
2664         }
2665
2666         /*
2667          * The page straddles i_size.  It must be zeroed out on each and every
2668          * writepage invocation because it may be mmapped.  "A file is mapped
2669          * in multiples of the page size.  For a file that is not a multiple of
2670          * the  page size, the remaining memory is zeroed when mapped, and
2671          * writes to that region are not written out to the file."
2672          */
2673         zero_user_segment(page, offset, PAGE_SIZE);
2674         return __block_write_full_page(inode, page, get_block, wbc,
2675                                                         end_buffer_async_write);
2676 }
2677 EXPORT_SYMBOL(block_write_full_page);
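
/*
 * Sketch (not from this file): a simple block-mapped filesystem can build
 * most of its address_space_operations from the generic helpers in this
 * file.  All myfs_* names are hypothetical.
 *
 *	static int myfs_writepage(struct page *page,
 *				  struct writeback_control *wbc)
 *	{
 *		return block_write_full_page(page, myfs_get_block, wbc);
 *	}
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.dirty_folio		= block_dirty_folio,
 *		.invalidate_folio	= block_invalidate_folio,
 *		.read_folio		= myfs_read_folio,
 *		.writepage		= myfs_writepage,
 *		.write_begin		= myfs_write_begin,
 *		.write_end		= generic_write_end,
 *		.is_partially_uptodate	= block_is_partially_uptodate,
 *	};
 */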
2678
2679 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2680                             get_block_t *get_block)
2681 {
2682         struct inode *inode = mapping->host;
2683         struct buffer_head tmp = {
2684                 .b_size = i_blocksize(inode),
2685         };
2686
2687         get_block(inode, block, &tmp, 0);
2688         return tmp.b_blocknr;
2689 }
2690 EXPORT_SYMBOL(generic_block_bmap);
2691
2692 static void end_bio_bh_io_sync(struct bio *bio)
2693 {
2694         struct buffer_head *bh = bio->bi_private;
2695
2696         if (unlikely(bio_flagged(bio, BIO_QUIET)))
2697                 set_bit(BH_Quiet, &bh->b_state);
2698
2699         bh->b_end_io(bh, !bio->bi_status);
2700         bio_put(bio);
2701 }
2702
2703 static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
2704                           struct writeback_control *wbc)
2705 {
2706         const enum req_op op = opf & REQ_OP_MASK;
2707         struct bio *bio;
2708
2709         BUG_ON(!buffer_locked(bh));
2710         BUG_ON(!buffer_mapped(bh));
2711         BUG_ON(!bh->b_end_io);
2712         BUG_ON(buffer_delay(bh));
2713         BUG_ON(buffer_unwritten(bh));
2714
2715         /*
2716          * Only clear out a write error when rewriting
2717          */
2718         if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE))
2719                 clear_buffer_write_io_error(bh);
2720
2721         if (buffer_meta(bh))
2722                 opf |= REQ_META;
2723         if (buffer_prio(bh))
2724                 opf |= REQ_PRIO;
2725
2726         bio = bio_alloc(bh->b_bdev, 1, opf, GFP_NOIO);
2727
2728         fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
2729
2730         bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2731
2732         bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
2733         BUG_ON(bio->bi_iter.bi_size != bh->b_size);
2734
2735         bio->bi_end_io = end_bio_bh_io_sync;
2736         bio->bi_private = bh;
2737
2738         /* Take care of bh's that straddle the end of the device */
2739         guard_bio_eod(bio);
2740
2741         if (wbc) {
2742                 wbc_init_bio(wbc, bio);
2743                 wbc_account_cgroup_owner(wbc, bh->b_page, bh->b_size);
2744         }
2745
2746         submit_bio(bio);
2747 }
2748
2749 void submit_bh(blk_opf_t opf, struct buffer_head *bh)
2750 {
2751         submit_bh_wbc(opf, bh, NULL);
2752 }
2753 EXPORT_SYMBOL(submit_bh);
2754
2755 void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
2756 {
2757         lock_buffer(bh);
2758         if (!test_clear_buffer_dirty(bh)) {
2759                 unlock_buffer(bh);
2760                 return;
2761         }
2762         bh->b_end_io = end_buffer_write_sync;
2763         get_bh(bh);
2764         submit_bh(REQ_OP_WRITE | op_flags, bh);
2765 }
2766 EXPORT_SYMBOL(write_dirty_buffer);
2767
2768 /*
2769  * For a data-integrity writeout, we need to wait upon any in-progress I/O
2770  * and then start new I/O and then wait upon it.  The caller must have a ref on
2771  * the buffer_head.
2772  */
2773 int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
2774 {
2775         WARN_ON(atomic_read(&bh->b_count) < 1);
2776         lock_buffer(bh);
2777         if (test_clear_buffer_dirty(bh)) {
2778                 /*
2779                  * The bh should be mapped, but it might not be if the
2780                  * device was hot-removed. Not much we can do but fail the I/O.
2781                  */
2782                 if (!buffer_mapped(bh)) {
2783                         unlock_buffer(bh);
2784                         return -EIO;
2785                 }
2786
2787                 get_bh(bh);
2788                 bh->b_end_io = end_buffer_write_sync;
2789                 submit_bh(REQ_OP_WRITE | op_flags, bh);
2790                 wait_on_buffer(bh);
2791                 if (!buffer_uptodate(bh))
2792                         return -EIO;
2793         } else {
2794                 unlock_buffer(bh);
2795         }
2796         return 0;
2797 }
2798 EXPORT_SYMBOL(__sync_dirty_buffer);
2799
2800 int sync_dirty_buffer(struct buffer_head *bh)
2801 {
2802         return __sync_dirty_buffer(bh, REQ_SYNC);
2803 }
2804 EXPORT_SYMBOL(sync_dirty_buffer);
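
/*
 * Sketch (not from this file) of the usual metadata update pattern these
 * helpers support: dirty the buffer, and make the write synchronous only
 * when the caller needs the integrity guarantee.
 *
 *	mark_buffer_dirty(bh);
 *	if (do_sync)
 *		err = sync_dirty_buffer(bh);
 *	brelse(bh);
 */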
2805
2806 /*
2807  * try_to_free_buffers() checks if all the buffers on this particular folio
2808  * are unused, and releases them if so.
2809  *
2810  * Exclusion against try_to_free_buffers may be obtained by either
2811  * locking the folio or by holding its mapping's private_lock.
2812  *
2813  * If the folio is dirty but all the buffers are clean then we need to
2814  * be sure to mark the folio clean as well.  This is because the folio
2815  * may be against a block device, and a later reattachment of buffers
2816  * to a dirty folio will set *all* buffers dirty.  Which would corrupt
2817  * filesystem data on the same device.
2818  *
2819  * The same applies to regular filesystem folios: if all the buffers are
2820  * clean then we set the folio clean and proceed.  To do that, we require
2821  * total exclusion from block_dirty_folio().  That is obtained with
2822  * private_lock.
2823  *
2824  * try_to_free_buffers() is non-blocking.
2825  */
2826 static inline int buffer_busy(struct buffer_head *bh)
2827 {
2828         return atomic_read(&bh->b_count) |
2829                 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
2830 }
2831
2832 static bool
2833 drop_buffers(struct folio *folio, struct buffer_head **buffers_to_free)
2834 {
2835         struct buffer_head *head = folio_buffers(folio);
2836         struct buffer_head *bh;
2837
2838         bh = head;
2839         do {
2840                 if (buffer_busy(bh))
2841                         goto failed;
2842                 bh = bh->b_this_page;
2843         } while (bh != head);
2844
2845         do {
2846                 struct buffer_head *next = bh->b_this_page;
2847
2848                 if (bh->b_assoc_map)
2849                         __remove_assoc_queue(bh);
2850                 bh = next;
2851         } while (bh != head);
2852         *buffers_to_free = head;
2853         folio_detach_private(folio);
2854         return true;
2855 failed:
2856         return false;
2857 }
2858
2859 bool try_to_free_buffers(struct folio *folio)
2860 {
2861         struct address_space * const mapping = folio->mapping;
2862         struct buffer_head *buffers_to_free = NULL;
2863         bool ret = false;
2864
2865         BUG_ON(!folio_test_locked(folio));
2866         if (folio_test_writeback(folio))
2867                 return false;
2868
2869         if (mapping == NULL) {          /* can this still happen? */
2870                 ret = drop_buffers(folio, &buffers_to_free);
2871                 goto out;
2872         }
2873
2874         spin_lock(&mapping->private_lock);
2875         ret = drop_buffers(folio, &buffers_to_free);
2876
2877         /*
2878          * If the filesystem writes its buffers by hand (eg ext3)
2879          * then we can have clean buffers against a dirty folio.  We
2880          * clean the folio here; otherwise the VM will never notice
2881          * that the filesystem did any IO at all.
2882          *
2883          * Also, during truncate, discard_buffer will have marked all
2884          * the folio's buffers clean.  We discover that here and clean
2885          * the folio also.
2886          *
2887          * private_lock must be held over this entire operation in order
2888          * to synchronise against block_dirty_folio and prevent the
2889          * dirty bit from being lost.
2890          */
2891         if (ret)
2892                 folio_cancel_dirty(folio);
2893         spin_unlock(&mapping->private_lock);
2894 out:
2895         if (buffers_to_free) {
2896                 struct buffer_head *bh = buffers_to_free;
2897
2898                 do {
2899                         struct buffer_head *next = bh->b_this_page;
2900                         free_buffer_head(bh);
2901                         bh = next;
2902                 } while (bh != buffers_to_free);
2903         }
2904         return ret;
2905 }
2906 EXPORT_SYMBOL(try_to_free_buffers);
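
/*
 * Sketch (not from this file): a filesystem that attaches nothing beyond
 * buffer heads to its folios can leave ->release_folio unset and let the
 * VM fall back to try_to_free_buffers(); one that does keep extra state
 * typically wraps it, here with a hypothetical myfs_folio_has_journal()
 * check:
 *
 *	static bool myfs_release_folio(struct folio *folio, gfp_t gfp)
 *	{
 *		if (myfs_folio_has_journal(folio))
 *			return false;
 *		return try_to_free_buffers(folio);
 *	}
 */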
2907
2908 /*
2909  * Buffer-head allocation
2910  */
2911 static struct kmem_cache *bh_cachep __read_mostly;
2912
2913 /*
2914  * Once the number of bh's in the machine exceeds this level, we start
2915  * stripping them in writeback.
2916  */
2917 static unsigned long max_buffer_heads;
2918
2919 int buffer_heads_over_limit;
2920
2921 struct bh_accounting {
2922         int nr;                 /* Number of live bh's */
2923         int ratelimit;          /* Limit cacheline bouncing */
2924 };
2925
2926 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
2927
2928 static void recalc_bh_state(void)
2929 {
2930         int i;
2931         int tot = 0;
2932
2933         if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096)
2934                 return;
2935         __this_cpu_write(bh_accounting.ratelimit, 0);
2936         for_each_online_cpu(i)
2937                 tot += per_cpu(bh_accounting, i).nr;
2938         buffer_heads_over_limit = (tot > max_buffer_heads);
2939 }
2940
2941 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
2942 {
2943         struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
2944         if (ret) {
2945                 INIT_LIST_HEAD(&ret->b_assoc_buffers);
2946                 spin_lock_init(&ret->b_uptodate_lock);
2947                 preempt_disable();
2948                 __this_cpu_inc(bh_accounting.nr);
2949                 recalc_bh_state();
2950                 preempt_enable();
2951         }
2952         return ret;
2953 }
2954 EXPORT_SYMBOL(alloc_buffer_head);
2955
2956 void free_buffer_head(struct buffer_head *bh)
2957 {
2958         BUG_ON(!list_empty(&bh->b_assoc_buffers));
2959         kmem_cache_free(bh_cachep, bh);
2960         preempt_disable();
2961         __this_cpu_dec(bh_accounting.nr);
2962         recalc_bh_state();
2963         preempt_enable();
2964 }
2965 EXPORT_SYMBOL(free_buffer_head);
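
/*
 * Illustrative sketch (hypothetical helper, not part of this file):
 * allocate @nr buffer heads and link them into the circular
 * b_this_page ring used throughout this file, releasing everything
 * with free_buffer_head() if allocation fails part-way.
 * folio_alloc_buffers() earlier in this file builds its list the same
 * way before the ring is closed by its callers.
 */
static struct buffer_head *example_alloc_bh_ring(int nr, gfp_t gfp)
{
	struct buffer_head *head = NULL, *bh;
	int i;

	for (i = 0; i < nr; i++) {
		bh = alloc_buffer_head(gfp);
		if (!bh)
			goto fail;
		bh->b_this_page = head;
		head = bh;
	}
	if (!head)
		return NULL;
	/* close the ring: find the tail and point it back at the head */
	for (bh = head; bh->b_this_page; bh = bh->b_this_page)
		;
	bh->b_this_page = head;
	return head;
fail:
	while (head) {
		bh = head->b_this_page;
		free_buffer_head(head);
		head = bh;
	}
	return NULL;
}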
2966
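/*
 * CPU hotplug callback: when a CPU is taken down, drop the buffer heads
 * cached in that CPU's LRU and fold its bh_accounting count into the
 * current CPU so the global total stays accurate.
 */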
2967 static int buffer_exit_cpu_dead(unsigned int cpu)
2968 {
2969         int i;
2970         struct bh_lru *b = &per_cpu(bh_lrus, cpu);
2971
2972         for (i = 0; i < BH_LRU_SIZE; i++) {
2973                 brelse(b->bhs[i]);
2974                 b->bhs[i] = NULL;
2975         }
2976         this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
2977         per_cpu(bh_accounting, cpu).nr = 0;
2978         return 0;
2979 }
2980
2981 /**
2982  * bh_uptodate_or_lock - Test whether the buffer is uptodate
2983  * @bh: struct buffer_head
2984  *
2985  * Return 1 if the buffer is already up to date, or 0 with the
2986  * buffer locked if it is not.
2987  */
2988 int bh_uptodate_or_lock(struct buffer_head *bh)
2989 {
2990         if (!buffer_uptodate(bh)) {
2991                 lock_buffer(bh);
2992                 if (!buffer_uptodate(bh))
2993                         return 0;
2994                 unlock_buffer(bh);
2995         }
2996         return 1;
2997 }
2998 EXPORT_SYMBOL(bh_uptodate_or_lock);
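
/*
 * Illustrative caller pattern (the function name example_read_bh is
 * hypothetical): test the uptodate bit cheaply first and fall back to
 * a locked, blocking read only when needed.  The bh_read() helper in
 * buffer_head.h wraps essentially this sequence.
 */
static int example_read_bh(struct buffer_head *bh)
{
	if (bh_uptodate_or_lock(bh))
		return 0;		/* already up to date, not locked */
	/* here the buffer is locked and not up to date: read it in */
	return __bh_read(bh, 0, true);
}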
2999
3000 /**
3001  * __bh_read - Submit read for a locked buffer
3002  * @bh: struct buffer_head
3003  * @op_flags: extra REQ_* flags to OR into REQ_OP_READ
3004  * @wait: wait for the read to complete
3005  *
3006  * Returns zero on success or when @wait is false, and -EIO if the read fails.
3007  */
3008 int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait)
3009 {
3010         int ret = 0;
3011
3012         BUG_ON(!buffer_locked(bh));
3013
3014         get_bh(bh);
3015         bh->b_end_io = end_buffer_read_sync;
3016         submit_bh(REQ_OP_READ | op_flags, bh);
3017         if (wait) {
3018                 wait_on_buffer(bh);
3019                 if (!buffer_uptodate(bh))
3020                         ret = -EIO;
3021         }
3022         return ret;
3023 }
3024 EXPORT_SYMBOL(__bh_read);
3025
3026 /**
3027  * __bh_read_batch - Submit read for a batch of unlocked buffers
3028  * @nr: number of buffers in the batch
3029  * @bhs: the batch of struct buffer_head to read
3030  * @op_flags: extra REQ_* flags to OR into REQ_OP_READ
3031  * @force_lock: wait for each buffer's lock if set, otherwise skip any
3032  *              buffer whose lock cannot be taken immediately.
3033  *
3034  * Reads are submitted asynchronously; the function does not wait and returns nothing.
3035  */
3036 void __bh_read_batch(int nr, struct buffer_head *bhs[],
3037                      blk_opf_t op_flags, bool force_lock)
3038 {
3039         int i;
3040
3041         for (i = 0; i < nr; i++) {
3042                 struct buffer_head *bh = bhs[i];
3043
3044                 if (buffer_uptodate(bh))
3045                         continue;
3046
3047                 if (force_lock)
3048                         lock_buffer(bh);
3049                 else if (!trylock_buffer(bh))
3050                         continue;
3052
3053                 if (buffer_uptodate(bh)) {
3054                         unlock_buffer(bh);
3055                         continue;
3056                 }
3057
3058                 bh->b_end_io = end_buffer_read_sync;
3059                 get_bh(bh);
3060                 submit_bh(REQ_OP_READ | op_flags, bh);
3061         }
3062 }
3063 EXPORT_SYMBOL(__bh_read_batch);
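
/*
 * Illustrative caller sketch (example_read_with_readahead is
 * hypothetical): kick off non-blocking readahead for a whole batch of
 * buffers, then do a blocking read of the single buffer the caller
 * actually needs.  The bh_readahead_batch() and bh_read_batch()
 * helpers in buffer_head.h wrap this interface with fixed @force_lock
 * values.
 */
static int example_read_with_readahead(struct buffer_head *wanted,
				       struct buffer_head *bhs[], int nr)
{
	/* opportunistic: skip any buffer whose lock is contended */
	__bh_read_batch(nr, bhs, REQ_RAHEAD, false);

	if (bh_uptodate_or_lock(wanted))
		return 0;
	return __bh_read(wanted, 0, true);
}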
3064
3065 void __init buffer_init(void)
3066 {
3067         unsigned long nrpages;
3068         int ret;
3069
3070         bh_cachep = kmem_cache_create("buffer_head",
3071                         sizeof(struct buffer_head), 0,
3072                                 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3073                                 SLAB_MEM_SPREAD),
3074                                 NULL);
3075
3076         /*
3077          * Limit the bh occupancy to 10% of ZONE_NORMAL
3078          */
3079         nrpages = (nr_free_buffer_pages() * 10) / 100;
3080         max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
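        /*
         * Worked example with illustrative numbers: on a 64-bit machine
         * with 4KiB pages a struct buffer_head is roughly 100 bytes, so
         * about 40 of them fit in a page.  With ~2 million free buffer
         * pages (8GiB of ZONE_NORMAL), nrpages is ~200,000 and
         * max_buffer_heads lands around 8 million buffer heads before
         * writeback starts stripping them.
         */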
3081         ret = cpuhp_setup_state_nocalls(CPUHP_FS_BUFF_DEAD, "fs/buffer:dead",
3082                                         NULL, buffer_exit_cpu_dead);
3083         WARN_ON(ret < 0);
3084 }