overlayfs: Implement splice-read
[linux-block.git] / fs / buffer.c
CommitLineData
457c8996 1// SPDX-License-Identifier: GPL-2.0-only
1da177e4
LT
2/*
3 * linux/fs/buffer.c
4 *
5 * Copyright (C) 1991, 1992, 2002 Linus Torvalds
6 */
7
8/*
9 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
10 *
11 * Removed a lot of unnecessary code and simplified things now that
12 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
13 *
14 * Speed up hash, lru, and free list operations. Use gfp() for allocating
15 * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM
16 *
17 * Added 32k buffer block sizes - these are required older ARM systems. - RMK
18 *
19 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
20 */
21
1da177e4 22#include <linux/kernel.h>
f361bf4a 23#include <linux/sched/signal.h>
1da177e4
LT
24#include <linux/syscalls.h>
25#include <linux/fs.h>
ae259a9c 26#include <linux/iomap.h>
1da177e4
LT
27#include <linux/mm.h>
28#include <linux/percpu.h>
29#include <linux/slab.h>
16f7e0fe 30#include <linux/capability.h>
1da177e4
LT
31#include <linux/blkdev.h>
32#include <linux/file.h>
33#include <linux/quotaops.h>
34#include <linux/highmem.h>
630d9c47 35#include <linux/export.h>
bafc0dba 36#include <linux/backing-dev.h>
1da177e4
LT
37#include <linux/writeback.h>
38#include <linux/hash.h>
39#include <linux/suspend.h>
40#include <linux/buffer_head.h>
55e829af 41#include <linux/task_io_accounting_ops.h>
1da177e4 42#include <linux/bio.h>
1da177e4
LT
43#include <linux/cpu.h>
44#include <linux/bitops.h>
45#include <linux/mpage.h>
fb1c8f93 46#include <linux/bit_spinlock.h>
29f3ad7d 47#include <linux/pagevec.h>
f745c6f5 48#include <linux/sched/mm.h>
5305cb83 49#include <trace/events/block.h>
31fb992c 50#include <linux/fscrypt.h>
4fa512ce 51#include <linux/fsverity.h>
1da177e4 52
2b211dc0
BD
53#include "internal.h"
54
1da177e4 55static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
5bdf402a
RHI
56static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
57 struct writeback_control *wbc);
1da177e4
LT
58
59#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
60
f0059afd
TH
61inline void touch_buffer(struct buffer_head *bh)
62{
5305cb83 63 trace_block_touch_buffer(bh);
03c5f331 64 folio_mark_accessed(bh->b_folio);
f0059afd
TH
65}
66EXPORT_SYMBOL(touch_buffer);
67
fc9b52cd 68void __lock_buffer(struct buffer_head *bh)
1da177e4 69{
74316201 70 wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
1da177e4
LT
71}
72EXPORT_SYMBOL(__lock_buffer);
73
fc9b52cd 74void unlock_buffer(struct buffer_head *bh)
1da177e4 75{
51b07fc3 76 clear_bit_unlock(BH_Lock, &bh->b_state);
4e857c58 77 smp_mb__after_atomic();
1da177e4
LT
78 wake_up_bit(&bh->b_state, BH_Lock);
79}
1fe72eaa 80EXPORT_SYMBOL(unlock_buffer);
1da177e4 81
b4597226 82/*
520f301c
MWO
83 * Returns if the folio has dirty or writeback buffers. If all the buffers
84 * are unlocked and clean then the folio_test_dirty information is stale. If
85 * any of the buffers are locked, it is assumed they are locked for IO.
b4597226 86 */
520f301c 87void buffer_check_dirty_writeback(struct folio *folio,
b4597226
MG
88 bool *dirty, bool *writeback)
89{
90 struct buffer_head *head, *bh;
91 *dirty = false;
92 *writeback = false;
93
520f301c 94 BUG_ON(!folio_test_locked(folio));
b4597226 95
520f301c
MWO
96 head = folio_buffers(folio);
97 if (!head)
b4597226
MG
98 return;
99
520f301c 100 if (folio_test_writeback(folio))
b4597226
MG
101 *writeback = true;
102
b4597226
MG
103 bh = head;
104 do {
105 if (buffer_locked(bh))
106 *writeback = true;
107
108 if (buffer_dirty(bh))
109 *dirty = true;
110
111 bh = bh->b_this_page;
112 } while (bh != head);
113}
114EXPORT_SYMBOL(buffer_check_dirty_writeback);
115
1da177e4
LT
116/*
117 * Block until a buffer comes unlocked. This doesn't stop it
118 * from becoming locked again - you have to lock it yourself
119 * if you want to preserve its state.
120 */
121void __wait_on_buffer(struct buffer_head * bh)
122{
74316201 123 wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
1da177e4 124}
1fe72eaa 125EXPORT_SYMBOL(__wait_on_buffer);
1da177e4 126
b744c2ac 127static void buffer_io_error(struct buffer_head *bh, char *msg)
1da177e4 128{
432f16e6
RE
129 if (!test_bit(BH_Quiet, &bh->b_state))
130 printk_ratelimited(KERN_ERR
a1c6f057
DM
131 "Buffer I/O error on dev %pg, logical block %llu%s\n",
132 bh->b_bdev, (unsigned long long)bh->b_blocknr, msg);
1da177e4
LT
133}
134
135/*
68671f35
DM
136 * End-of-IO handler helper function which does not touch the bh after
137 * unlocking it.
138 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
139 * a race there is benign: unlock_buffer() only use the bh's address for
140 * hashing after unlocking the buffer, so it doesn't actually touch the bh
141 * itself.
1da177e4 142 */
68671f35 143static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
1da177e4
LT
144{
145 if (uptodate) {
146 set_buffer_uptodate(bh);
147 } else {
70246286 148 /* This happens, due to failed read-ahead attempts. */
1da177e4
LT
149 clear_buffer_uptodate(bh);
150 }
151 unlock_buffer(bh);
68671f35
DM
152}
153
154/*
155 * Default synchronous end-of-IO handler.. Just mark it up-to-date and
79f59784 156 * unlock the buffer.
68671f35
DM
157 */
158void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
159{
160 __end_buffer_read_notouch(bh, uptodate);
1da177e4
LT
161 put_bh(bh);
162}
1fe72eaa 163EXPORT_SYMBOL(end_buffer_read_sync);
1da177e4
LT
164
165void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
166{
1da177e4
LT
167 if (uptodate) {
168 set_buffer_uptodate(bh);
169 } else {
432f16e6 170 buffer_io_error(bh, ", lost sync page write");
87354e5d 171 mark_buffer_write_io_error(bh);
1da177e4
LT
172 clear_buffer_uptodate(bh);
173 }
174 unlock_buffer(bh);
175 put_bh(bh);
176}
1fe72eaa 177EXPORT_SYMBOL(end_buffer_write_sync);
1da177e4 178
1da177e4
LT
179/*
180 * Various filesystems appear to want __find_get_block to be non-blocking.
181 * But it's the page lock which protects the buffers. To get around this,
182 * we get exclusion from try_to_free_buffers with the blockdev mapping's
183 * private_lock.
184 *
b93b0163 185 * Hack idea: for the blockdev mapping, private_lock contention
1da177e4 186 * may be quite high. This code could TryLock the page, and if that
b93b0163 187 * succeeds, there is no need to take private_lock.
1da177e4
LT
188 */
189static struct buffer_head *
385fd4c5 190__find_get_block_slow(struct block_device *bdev, sector_t block)
1da177e4
LT
191{
192 struct inode *bd_inode = bdev->bd_inode;
193 struct address_space *bd_mapping = bd_inode->i_mapping;
194 struct buffer_head *ret = NULL;
195 pgoff_t index;
196 struct buffer_head *bh;
197 struct buffer_head *head;
198 struct page *page;
199 int all_mapped = 1;
43636c80 200 static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1);
1da177e4 201
09cbfeaf 202 index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
2457aec6 203 page = find_get_page_flags(bd_mapping, index, FGP_ACCESSED);
1da177e4
LT
204 if (!page)
205 goto out;
206
207 spin_lock(&bd_mapping->private_lock);
208 if (!page_has_buffers(page))
209 goto out_unlock;
210 head = page_buffers(page);
211 bh = head;
212 do {
97f76d3d
NK
213 if (!buffer_mapped(bh))
214 all_mapped = 0;
215 else if (bh->b_blocknr == block) {
1da177e4
LT
216 ret = bh;
217 get_bh(bh);
218 goto out_unlock;
219 }
1da177e4
LT
220 bh = bh->b_this_page;
221 } while (bh != head);
222
223 /* we might be here because some of the buffers on this page are
224 * not mapped. This is due to various races between
225 * file io on the block device and getblk. It gets dealt with
226 * elsewhere, don't buffer_error if we had some unmapped buffers
227 */
43636c80
TH
228 ratelimit_set_flags(&last_warned, RATELIMIT_MSG_ON_RELEASE);
229 if (all_mapped && __ratelimit(&last_warned)) {
230 printk("__find_get_block_slow() failed. block=%llu, "
231 "b_blocknr=%llu, b_state=0x%08lx, b_size=%zu, "
232 "device %pg blocksize: %d\n",
233 (unsigned long long)block,
234 (unsigned long long)bh->b_blocknr,
235 bh->b_state, bh->b_size, bdev,
236 1 << bd_inode->i_blkbits);
1da177e4
LT
237 }
238out_unlock:
239 spin_unlock(&bd_mapping->private_lock);
09cbfeaf 240 put_page(page);
1da177e4
LT
241out:
242 return ret;
243}
244
1da177e4
LT
245static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
246{
1da177e4 247 unsigned long flags;
a3972203 248 struct buffer_head *first;
1da177e4 249 struct buffer_head *tmp;
2e2dba15
MWO
250 struct folio *folio;
251 int folio_uptodate = 1;
1da177e4
LT
252
253 BUG_ON(!buffer_async_read(bh));
254
2e2dba15 255 folio = bh->b_folio;
1da177e4
LT
256 if (uptodate) {
257 set_buffer_uptodate(bh);
258 } else {
259 clear_buffer_uptodate(bh);
432f16e6 260 buffer_io_error(bh, ", async page read");
2e2dba15 261 folio_set_error(folio);
1da177e4
LT
262 }
263
264 /*
265 * Be _very_ careful from here on. Bad things can happen if
266 * two buffer heads end IO at almost the same time and both
267 * decide that the page is now completely done.
268 */
2e2dba15 269 first = folio_buffers(folio);
f1e67e35 270 spin_lock_irqsave(&first->b_uptodate_lock, flags);
1da177e4
LT
271 clear_buffer_async_read(bh);
272 unlock_buffer(bh);
273 tmp = bh;
274 do {
275 if (!buffer_uptodate(tmp))
2e2dba15 276 folio_uptodate = 0;
1da177e4
LT
277 if (buffer_async_read(tmp)) {
278 BUG_ON(!buffer_locked(tmp));
279 goto still_busy;
280 }
281 tmp = tmp->b_this_page;
282 } while (tmp != bh);
f1e67e35 283 spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
1da177e4
LT
284
285 /*
6e8e79fc
MWO
286 * If all of the buffers are uptodate then we can set the page
287 * uptodate.
1da177e4 288 */
2e2dba15
MWO
289 if (folio_uptodate)
290 folio_mark_uptodate(folio);
291 folio_unlock(folio);
1da177e4
LT
292 return;
293
294still_busy:
f1e67e35 295 spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
1da177e4
LT
296 return;
297}
298
4fa512ce 299struct postprocess_bh_ctx {
31fb992c
EB
300 struct work_struct work;
301 struct buffer_head *bh;
302};
303
4fa512ce
EB
304static void verify_bh(struct work_struct *work)
305{
306 struct postprocess_bh_ctx *ctx =
307 container_of(work, struct postprocess_bh_ctx, work);
308 struct buffer_head *bh = ctx->bh;
309 bool valid;
310
8b7d3fe9 311 valid = fsverity_verify_blocks(bh->b_folio, bh->b_size, bh_offset(bh));
4fa512ce
EB
312 end_buffer_async_read(bh, valid);
313 kfree(ctx);
314}
315
316static bool need_fsverity(struct buffer_head *bh)
317{
8b7d3fe9
EB
318 struct folio *folio = bh->b_folio;
319 struct inode *inode = folio->mapping->host;
4fa512ce
EB
320
321 return fsverity_active(inode) &&
322 /* needed by ext4 */
8b7d3fe9 323 folio->index < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
4fa512ce
EB
324}
325
31fb992c
EB
326static void decrypt_bh(struct work_struct *work)
327{
4fa512ce
EB
328 struct postprocess_bh_ctx *ctx =
329 container_of(work, struct postprocess_bh_ctx, work);
31fb992c
EB
330 struct buffer_head *bh = ctx->bh;
331 int err;
332
9c7fb7f7
EB
333 err = fscrypt_decrypt_pagecache_blocks(bh->b_folio, bh->b_size,
334 bh_offset(bh));
4fa512ce
EB
335 if (err == 0 && need_fsverity(bh)) {
336 /*
337 * We use different work queues for decryption and for verity
338 * because verity may require reading metadata pages that need
339 * decryption, and we shouldn't recurse to the same workqueue.
340 */
341 INIT_WORK(&ctx->work, verify_bh);
342 fsverity_enqueue_verify_work(&ctx->work);
343 return;
344 }
31fb992c
EB
345 end_buffer_async_read(bh, err == 0);
346 kfree(ctx);
347}
348
349/*
2c69e205 350 * I/O completion handler for block_read_full_folio() - pages
31fb992c
EB
351 * which come unlocked at the end of I/O.
352 */
353static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate)
354{
3822a7c4 355 struct inode *inode = bh->b_folio->mapping->host;
4fa512ce
EB
356 bool decrypt = fscrypt_inode_uses_fs_layer_crypto(inode);
357 bool verify = need_fsverity(bh);
358
359 /* Decrypt (with fscrypt) and/or verify (with fsverity) if needed. */
360 if (uptodate && (decrypt || verify)) {
361 struct postprocess_bh_ctx *ctx =
362 kmalloc(sizeof(*ctx), GFP_ATOMIC);
31fb992c
EB
363
364 if (ctx) {
31fb992c 365 ctx->bh = bh;
4fa512ce
EB
366 if (decrypt) {
367 INIT_WORK(&ctx->work, decrypt_bh);
368 fscrypt_enqueue_decrypt_work(&ctx->work);
369 } else {
370 INIT_WORK(&ctx->work, verify_bh);
371 fsverity_enqueue_verify_work(&ctx->work);
372 }
31fb992c
EB
373 return;
374 }
375 uptodate = 0;
376 }
377 end_buffer_async_read(bh, uptodate);
378}
379
1da177e4
LT
380/*
381 * Completion handler for block_write_full_page() - pages which are unlocked
382 * during I/O, and which have PageWriteback cleared upon I/O completion.
383 */
35c80d5f 384void end_buffer_async_write(struct buffer_head *bh, int uptodate)
1da177e4 385{
1da177e4 386 unsigned long flags;
a3972203 387 struct buffer_head *first;
1da177e4 388 struct buffer_head *tmp;
743ed81e 389 struct folio *folio;
1da177e4
LT
390
391 BUG_ON(!buffer_async_write(bh));
392
743ed81e 393 folio = bh->b_folio;
1da177e4
LT
394 if (uptodate) {
395 set_buffer_uptodate(bh);
396 } else {
432f16e6 397 buffer_io_error(bh, ", lost async page write");
87354e5d 398 mark_buffer_write_io_error(bh);
1da177e4 399 clear_buffer_uptodate(bh);
743ed81e 400 folio_set_error(folio);
1da177e4
LT
401 }
402
743ed81e 403 first = folio_buffers(folio);
f1e67e35 404 spin_lock_irqsave(&first->b_uptodate_lock, flags);
a3972203 405
1da177e4
LT
406 clear_buffer_async_write(bh);
407 unlock_buffer(bh);
408 tmp = bh->b_this_page;
409 while (tmp != bh) {
410 if (buffer_async_write(tmp)) {
411 BUG_ON(!buffer_locked(tmp));
412 goto still_busy;
413 }
414 tmp = tmp->b_this_page;
415 }
f1e67e35 416 spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
743ed81e 417 folio_end_writeback(folio);
1da177e4
LT
418 return;
419
420still_busy:
f1e67e35 421 spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
1da177e4
LT
422 return;
423}
1fe72eaa 424EXPORT_SYMBOL(end_buffer_async_write);
1da177e4
LT
425
426/*
427 * If a page's buffers are under async readin (end_buffer_async_read
428 * completion) then there is a possibility that another thread of
429 * control could lock one of the buffers after it has completed
430 * but while some of the other buffers have not completed. This
431 * locked buffer would confuse end_buffer_async_read() into not unlocking
432 * the page. So the absence of BH_Async_Read tells end_buffer_async_read()
433 * that this buffer is not under async I/O.
434 *
435 * The page comes unlocked when it has no locked buffer_async buffers
436 * left.
437 *
438 * PageLocked prevents anyone starting new async I/O reads any of
439 * the buffers.
440 *
441 * PageWriteback is used to prevent simultaneous writeout of the same
442 * page.
443 *
444 * PageLocked prevents anyone from starting writeback of a page which is
445 * under read I/O (PageWriteback is only ever set against a locked page).
446 */
447static void mark_buffer_async_read(struct buffer_head *bh)
448{
31fb992c 449 bh->b_end_io = end_buffer_async_read_io;
1da177e4
LT
450 set_buffer_async_read(bh);
451}
452
1fe72eaa
HS
453static void mark_buffer_async_write_endio(struct buffer_head *bh,
454 bh_end_io_t *handler)
1da177e4 455{
35c80d5f 456 bh->b_end_io = handler;
1da177e4
LT
457 set_buffer_async_write(bh);
458}
35c80d5f
CM
459
460void mark_buffer_async_write(struct buffer_head *bh)
461{
462 mark_buffer_async_write_endio(bh, end_buffer_async_write);
463}
1da177e4
LT
464EXPORT_SYMBOL(mark_buffer_async_write);
465
466
467/*
468 * fs/buffer.c contains helper functions for buffer-backed address space's
469 * fsync functions. A common requirement for buffer-based filesystems is
470 * that certain data from the backing blockdev needs to be written out for
471 * a successful fsync(). For example, ext2 indirect blocks need to be
472 * written back and waited upon before fsync() returns.
473 *
474 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
475 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
476 * management of a list of dependent buffers at ->i_mapping->private_list.
477 *
478 * Locking is a little subtle: try_to_free_buffers() will remove buffers
479 * from their controlling inode's queue when they are being freed. But
480 * try_to_free_buffers() will be operating against the *blockdev* mapping
481 * at the time, not against the S_ISREG file which depends on those buffers.
482 * So the locking for private_list is via the private_lock in the address_space
483 * which backs the buffers. Which is different from the address_space
484 * against which the buffers are listed. So for a particular address_space,
485 * mapping->private_lock does *not* protect mapping->private_list! In fact,
486 * mapping->private_list will always be protected by the backing blockdev's
487 * ->private_lock.
488 *
489 * Which introduces a requirement: all buffers on an address_space's
490 * ->private_list must be from the same address_space: the blockdev's.
491 *
492 * address_spaces which do not place buffers at ->private_list via these
493 * utility functions are free to use private_lock and private_list for
494 * whatever they want. The only requirement is that list_empty(private_list)
495 * be true at clear_inode() time.
496 *
497 * FIXME: clear_inode should not call invalidate_inode_buffers(). The
498 * filesystems should do that. invalidate_inode_buffers() should just go
499 * BUG_ON(!list_empty).
500 *
501 * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should
502 * take an address_space, not an inode. And it should be called
503 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
504 * queued up.
505 *
506 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
507 * list if it is already on a list. Because if the buffer is on a list,
508 * it *must* already be on the right one. If not, the filesystem is being
509 * silly. This will save a ton of locking. But first we have to ensure
510 * that buffers are taken *off* the old inode's list when they are freed
511 * (presumably in truncate). That requires careful auditing of all
512 * filesystems (do it inside bforget()). It could also be done by bringing
513 * b_inode back.
514 */
515
516/*
517 * The buffer's backing address_space's private_lock must be held
518 */
dbacefc9 519static void __remove_assoc_queue(struct buffer_head *bh)
1da177e4
LT
520{
521 list_del_init(&bh->b_assoc_buffers);
58ff407b 522 WARN_ON(!bh->b_assoc_map);
58ff407b 523 bh->b_assoc_map = NULL;
1da177e4
LT
524}
525
526int inode_has_buffers(struct inode *inode)
527{
528 return !list_empty(&inode->i_data.private_list);
529}
530
531/*
532 * osync is designed to support O_SYNC io. It waits synchronously for
533 * all already-submitted IO to complete, but does not queue any new
534 * writes to the disk.
535 *
79f59784
ZY
536 * To do O_SYNC writes, just queue the buffer writes with write_dirty_buffer
537 * as you dirty the buffers, and then use osync_inode_buffers to wait for
1da177e4
LT
538 * completion. Any other dirty buffers which are not yet queued for
539 * write will not be flushed to disk by the osync.
540 */
541static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
542{
543 struct buffer_head *bh;
544 struct list_head *p;
545 int err = 0;
546
547 spin_lock(lock);
548repeat:
549 list_for_each_prev(p, list) {
550 bh = BH_ENTRY(p);
551 if (buffer_locked(bh)) {
552 get_bh(bh);
553 spin_unlock(lock);
554 wait_on_buffer(bh);
555 if (!buffer_uptodate(bh))
556 err = -EIO;
557 brelse(bh);
558 spin_lock(lock);
559 goto repeat;
560 }
561 }
562 spin_unlock(lock);
563 return err;
564}
565
08fdc8a0 566void emergency_thaw_bdev(struct super_block *sb)
c2d75438 567{
040f04bd 568 while (sb->s_bdev && !thaw_bdev(sb->s_bdev))
a1c6f057 569 printk(KERN_WARNING "Emergency Thaw on %pg\n", sb->s_bdev);
01a05b33 570}
c2d75438 571
1da177e4 572/**
78a4a50a 573 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
67be2dd1 574 * @mapping: the mapping which wants those buffers written
1da177e4
LT
575 *
576 * Starts I/O against the buffers at mapping->private_list, and waits upon
577 * that I/O.
578 *
67be2dd1
MW
579 * Basically, this is a convenience function for fsync().
580 * @mapping is a file or directory which needs those buffers to be written for
581 * a successful fsync().
1da177e4
LT
582 */
583int sync_mapping_buffers(struct address_space *mapping)
584{
252aa6f5 585 struct address_space *buffer_mapping = mapping->private_data;
1da177e4
LT
586
587 if (buffer_mapping == NULL || list_empty(&mapping->private_list))
588 return 0;
589
590 return fsync_buffers_list(&buffer_mapping->private_lock,
591 &mapping->private_list);
592}
593EXPORT_SYMBOL(sync_mapping_buffers);
594
595/*
596 * Called when we've recently written block `bblock', and it is known that
597 * `bblock' was for a buffer_boundary() buffer. This means that the block at
598 * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
599 * dirty, schedule it for IO. So that indirects merge nicely with their data.
600 */
601void write_boundary_block(struct block_device *bdev,
602 sector_t bblock, unsigned blocksize)
603{
604 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
605 if (bh) {
606 if (buffer_dirty(bh))
e7ea1129 607 write_dirty_buffer(bh, 0);
1da177e4
LT
608 put_bh(bh);
609 }
610}
611
612void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
613{
614 struct address_space *mapping = inode->i_mapping;
abc8a8a2 615 struct address_space *buffer_mapping = bh->b_folio->mapping;
1da177e4
LT
616
617 mark_buffer_dirty(bh);
252aa6f5
RA
618 if (!mapping->private_data) {
619 mapping->private_data = buffer_mapping;
1da177e4 620 } else {
252aa6f5 621 BUG_ON(mapping->private_data != buffer_mapping);
1da177e4 622 }
535ee2fb 623 if (!bh->b_assoc_map) {
1da177e4
LT
624 spin_lock(&buffer_mapping->private_lock);
625 list_move_tail(&bh->b_assoc_buffers,
626 &mapping->private_list);
58ff407b 627 bh->b_assoc_map = mapping;
1da177e4
LT
628 spin_unlock(&buffer_mapping->private_lock);
629 }
630}
631EXPORT_SYMBOL(mark_buffer_dirty_inode);
632
633/*
634 * Add a page to the dirty page list.
635 *
636 * It is a sad fact of life that this function is called from several places
637 * deeply under spinlocking. It may not sleep.
638 *
639 * If the page has buffers, the uptodate buffers are set dirty, to preserve
640 * dirty-state coherency between the page and the buffers. It the page does
641 * not have buffers then when they are later attached they will all be set
642 * dirty.
643 *
644 * The buffers are dirtied before the page is dirtied. There's a small race
645 * window in which a writepage caller may see the page cleanness but not the
646 * buffer dirtiness. That's fine. If this code were to set the page dirty
647 * before the buffers, a concurrent writepage caller could clear the page dirty
648 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
649 * page on the dirty page list.
650 *
651 * We use private_lock to lock against try_to_free_buffers while using the
652 * page's buffer list. Also use this to protect against clean buffers being
653 * added to the page after it was set dirty.
654 *
655 * FIXME: may need to call ->reservepage here as well. That's rather up to the
656 * address_space though.
657 */
e621900a 658bool block_dirty_folio(struct address_space *mapping, struct folio *folio)
1da177e4 659{
e621900a
MWO
660 struct buffer_head *head;
661 bool newly_dirty;
1da177e4
LT
662
663 spin_lock(&mapping->private_lock);
e621900a
MWO
664 head = folio_buffers(folio);
665 if (head) {
1da177e4
LT
666 struct buffer_head *bh = head;
667
668 do {
669 set_buffer_dirty(bh);
670 bh = bh->b_this_page;
671 } while (bh != head);
672 }
c4843a75 673 /*
bcfe06bf 674 * Lock out page's memcg migration to keep PageDirty
81f8c3a4 675 * synchronized with per-memcg dirty page counters.
c4843a75 676 */
e621900a
MWO
677 folio_memcg_lock(folio);
678 newly_dirty = !folio_test_set_dirty(folio);
1da177e4
LT
679 spin_unlock(&mapping->private_lock);
680
a8e7d49a 681 if (newly_dirty)
e621900a 682 __folio_mark_dirty(folio, mapping, 1);
c4843a75 683
e621900a 684 folio_memcg_unlock(folio);
c4843a75
GT
685
686 if (newly_dirty)
687 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
688
a8e7d49a 689 return newly_dirty;
1da177e4 690}
e621900a 691EXPORT_SYMBOL(block_dirty_folio);
1da177e4
LT
692
693/*
694 * Write out and wait upon a list of buffers.
695 *
696 * We have conflicting pressures: we want to make sure that all
697 * initially dirty buffers get waited on, but that any subsequently
698 * dirtied buffers don't. After all, we don't want fsync to last
699 * forever if somebody is actively writing to the file.
700 *
701 * Do this in two main stages: first we copy dirty buffers to a
702 * temporary inode list, queueing the writes as we go. Then we clean
703 * up, waiting for those writes to complete.
704 *
705 * During this second stage, any subsequent updates to the file may end
706 * up refiling the buffer on the original inode's dirty list again, so
707 * there is a chance we will end up with a buffer queued for write but
708 * not yet completed on that list. So, as a final cleanup we go through
709 * the osync code to catch these locked, dirty buffers without requeuing
710 * any newly dirty buffers for write.
711 */
712static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
713{
714 struct buffer_head *bh;
715 struct list_head tmp;
7eaceacc 716 struct address_space *mapping;
1da177e4 717 int err = 0, err2;
4ee2491e 718 struct blk_plug plug;
1da177e4
LT
719
720 INIT_LIST_HEAD(&tmp);
4ee2491e 721 blk_start_plug(&plug);
1da177e4
LT
722
723 spin_lock(lock);
724 while (!list_empty(list)) {
725 bh = BH_ENTRY(list->next);
535ee2fb 726 mapping = bh->b_assoc_map;
58ff407b 727 __remove_assoc_queue(bh);
535ee2fb
JK
728 /* Avoid race with mark_buffer_dirty_inode() which does
729 * a lockless check and we rely on seeing the dirty bit */
730 smp_mb();
1da177e4
LT
731 if (buffer_dirty(bh) || buffer_locked(bh)) {
732 list_add(&bh->b_assoc_buffers, &tmp);
535ee2fb 733 bh->b_assoc_map = mapping;
1da177e4
LT
734 if (buffer_dirty(bh)) {
735 get_bh(bh);
736 spin_unlock(lock);
737 /*
738 * Ensure any pending I/O completes so that
9cb569d6
CH
739 * write_dirty_buffer() actually writes the
740 * current contents - it is a noop if I/O is
741 * still in flight on potentially older
742 * contents.
1da177e4 743 */
70fd7614 744 write_dirty_buffer(bh, REQ_SYNC);
9cf6b720
JA
745
746 /*
747 * Kick off IO for the previous mapping. Note
748 * that we will not run the very last mapping,
749 * wait_on_buffer() will do that for us
750 * through sync_buffer().
751 */
1da177e4
LT
752 brelse(bh);
753 spin_lock(lock);
754 }
755 }
756 }
757
4ee2491e
JA
758 spin_unlock(lock);
759 blk_finish_plug(&plug);
760 spin_lock(lock);
761
1da177e4
LT
762 while (!list_empty(&tmp)) {
763 bh = BH_ENTRY(tmp.prev);
1da177e4 764 get_bh(bh);
535ee2fb
JK
765 mapping = bh->b_assoc_map;
766 __remove_assoc_queue(bh);
767 /* Avoid race with mark_buffer_dirty_inode() which does
768 * a lockless check and we rely on seeing the dirty bit */
769 smp_mb();
770 if (buffer_dirty(bh)) {
771 list_add(&bh->b_assoc_buffers,
e3892296 772 &mapping->private_list);
535ee2fb
JK
773 bh->b_assoc_map = mapping;
774 }
1da177e4
LT
775 spin_unlock(lock);
776 wait_on_buffer(bh);
777 if (!buffer_uptodate(bh))
778 err = -EIO;
779 brelse(bh);
780 spin_lock(lock);
781 }
782
783 spin_unlock(lock);
784 err2 = osync_buffers_list(lock, list);
785 if (err)
786 return err;
787 else
788 return err2;
789}
790
791/*
792 * Invalidate any and all dirty buffers on a given inode. We are
793 * probably unmounting the fs, but that doesn't mean we have already
794 * done a sync(). Just drop the buffers from the inode list.
795 *
796 * NOTE: we take the inode's blockdev's mapping's private_lock. Which
797 * assumes that all the buffers are against the blockdev. Not true
798 * for reiserfs.
799 */
800void invalidate_inode_buffers(struct inode *inode)
801{
802 if (inode_has_buffers(inode)) {
803 struct address_space *mapping = &inode->i_data;
804 struct list_head *list = &mapping->private_list;
252aa6f5 805 struct address_space *buffer_mapping = mapping->private_data;
1da177e4
LT
806
807 spin_lock(&buffer_mapping->private_lock);
808 while (!list_empty(list))
809 __remove_assoc_queue(BH_ENTRY(list->next));
810 spin_unlock(&buffer_mapping->private_lock);
811 }
812}
52b19ac9 813EXPORT_SYMBOL(invalidate_inode_buffers);
1da177e4
LT
814
815/*
816 * Remove any clean buffers from the inode's buffer list. This is called
817 * when we're trying to free the inode itself. Those buffers can pin it.
818 *
819 * Returns true if all buffers were removed.
820 */
821int remove_inode_buffers(struct inode *inode)
822{
823 int ret = 1;
824
825 if (inode_has_buffers(inode)) {
826 struct address_space *mapping = &inode->i_data;
827 struct list_head *list = &mapping->private_list;
252aa6f5 828 struct address_space *buffer_mapping = mapping->private_data;
1da177e4
LT
829
830 spin_lock(&buffer_mapping->private_lock);
831 while (!list_empty(list)) {
832 struct buffer_head *bh = BH_ENTRY(list->next);
833 if (buffer_dirty(bh)) {
834 ret = 0;
835 break;
836 }
837 __remove_assoc_queue(bh);
838 }
839 spin_unlock(&buffer_mapping->private_lock);
840 }
841 return ret;
842}
843
844/*
c71124a8 845 * Create the appropriate buffers when given a folio for data area and
1da177e4
LT
846 * the size of each buffer.. Use the bh->b_this_page linked list to
847 * follow the buffers created. Return NULL if unable to create more
848 * buffers.
849 *
850 * The retry flag is used to differentiate async IO (paging, swapping)
851 * which may not fail from ordinary buffer allocations.
852 */
c71124a8
PR
853struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size,
854 bool retry)
1da177e4
LT
855{
856 struct buffer_head *bh, *head;
f745c6f5 857 gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT;
1da177e4 858 long offset;
b87d8cef 859 struct mem_cgroup *memcg, *old_memcg;
1da177e4 860
640ab98f
JA
861 if (retry)
862 gfp |= __GFP_NOFAIL;
863
c71124a8
PR
864 /* The folio lock pins the memcg */
865 memcg = folio_memcg(folio);
b87d8cef 866 old_memcg = set_active_memcg(memcg);
f745c6f5 867
1da177e4 868 head = NULL;
c71124a8 869 offset = folio_size(folio);
1da177e4 870 while ((offset -= size) >= 0) {
640ab98f 871 bh = alloc_buffer_head(gfp);
1da177e4
LT
872 if (!bh)
873 goto no_grow;
874
1da177e4
LT
875 bh->b_this_page = head;
876 bh->b_blocknr = -1;
877 head = bh;
878
1da177e4
LT
879 bh->b_size = size;
880
c71124a8
PR
881 /* Link the buffer to its folio */
882 folio_set_bh(bh, folio, offset);
1da177e4 883 }
f745c6f5 884out:
b87d8cef 885 set_active_memcg(old_memcg);
1da177e4
LT
886 return head;
887/*
888 * In case anything failed, we just free everything we got.
889 */
890no_grow:
891 if (head) {
892 do {
893 bh = head;
894 head = head->b_this_page;
895 free_buffer_head(bh);
896 } while (head);
897 }
898
f745c6f5 899 goto out;
1da177e4 900}
c71124a8
PR
901EXPORT_SYMBOL_GPL(folio_alloc_buffers);
902
903struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
904 bool retry)
905{
906 return folio_alloc_buffers(page_folio(page), size, retry);
907}
1da177e4
LT
908EXPORT_SYMBOL_GPL(alloc_page_buffers);
909
910static inline void
911link_dev_buffers(struct page *page, struct buffer_head *head)
912{
913 struct buffer_head *bh, *tail;
914
915 bh = head;
916 do {
917 tail = bh;
918 bh = bh->b_this_page;
919 } while (bh);
920 tail->b_this_page = head;
45dcfc27 921 attach_page_private(page, head);
1da177e4
LT
922}
923
bbec0270
LT
924static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size)
925{
926 sector_t retval = ~((sector_t)0);
b86058f9 927 loff_t sz = bdev_nr_bytes(bdev);
bbec0270
LT
928
929 if (sz) {
930 unsigned int sizebits = blksize_bits(size);
931 retval = (sz >> sizebits);
932 }
933 return retval;
934}
935
1da177e4
LT
936/*
937 * Initialise the state of a blockdev page's buffers.
938 */
676ce6d5 939static sector_t
1da177e4
LT
940init_page_buffers(struct page *page, struct block_device *bdev,
941 sector_t block, int size)
942{
943 struct buffer_head *head = page_buffers(page);
944 struct buffer_head *bh = head;
945 int uptodate = PageUptodate(page);
bcd1d063 946 sector_t end_block = blkdev_max_block(bdev, size);
1da177e4
LT
947
948 do {
949 if (!buffer_mapped(bh)) {
01950a34
EB
950 bh->b_end_io = NULL;
951 bh->b_private = NULL;
1da177e4
LT
952 bh->b_bdev = bdev;
953 bh->b_blocknr = block;
954 if (uptodate)
955 set_buffer_uptodate(bh);
080399aa
JM
956 if (block < end_block)
957 set_buffer_mapped(bh);
1da177e4
LT
958 }
959 block++;
960 bh = bh->b_this_page;
961 } while (bh != head);
676ce6d5
HD
962
963 /*
964 * Caller needs to validate requested block against end of device.
965 */
966 return end_block;
1da177e4
LT
967}
968
969/*
970 * Create the page-cache page that contains the requested block.
971 *
676ce6d5 972 * This is used purely for blockdev mappings.
1da177e4 973 */
676ce6d5 974static int
1da177e4 975grow_dev_page(struct block_device *bdev, sector_t block,
3b5e6454 976 pgoff_t index, int size, int sizebits, gfp_t gfp)
1da177e4
LT
977{
978 struct inode *inode = bdev->bd_inode;
979 struct page *page;
980 struct buffer_head *bh;
676ce6d5 981 sector_t end_block;
c4b4c2a7 982 int ret = 0;
84235de3 983 gfp_t gfp_mask;
1da177e4 984
c62d2555 985 gfp_mask = mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS) | gfp;
3b5e6454 986
84235de3
JW
987 /*
988 * XXX: __getblk_slow() can not really deal with failure and
989 * will endlessly loop on improvised global reclaim. Prefer
990 * looping in the allocator rather than here, at least that
991 * code knows what it's doing.
992 */
993 gfp_mask |= __GFP_NOFAIL;
994
995 page = find_or_create_page(inode->i_mapping, index, gfp_mask);
1da177e4 996
e827f923 997 BUG_ON(!PageLocked(page));
1da177e4
LT
998
999 if (page_has_buffers(page)) {
1000 bh = page_buffers(page);
1001 if (bh->b_size == size) {
676ce6d5 1002 end_block = init_page_buffers(page, bdev,
f2d5a944
AA
1003 (sector_t)index << sizebits,
1004 size);
676ce6d5 1005 goto done;
1da177e4 1006 }
68189fef 1007 if (!try_to_free_buffers(page_folio(page)))
1da177e4
LT
1008 goto failed;
1009 }
1010
1011 /*
1012 * Allocate some buffers for this page
1013 */
94dc24c0 1014 bh = alloc_page_buffers(page, size, true);
1da177e4
LT
1015
1016 /*
1017 * Link the page to the buffers and initialise them. Take the
1018 * lock to be atomic wrt __find_get_block(), which does not
1019 * run under the page lock.
1020 */
1021 spin_lock(&inode->i_mapping->private_lock);
1022 link_dev_buffers(page, bh);
f2d5a944
AA
1023 end_block = init_page_buffers(page, bdev, (sector_t)index << sizebits,
1024 size);
1da177e4 1025 spin_unlock(&inode->i_mapping->private_lock);
676ce6d5
HD
1026done:
1027 ret = (block < end_block) ? 1 : -ENXIO;
1da177e4 1028failed:
1da177e4 1029 unlock_page(page);
09cbfeaf 1030 put_page(page);
676ce6d5 1031 return ret;
1da177e4
LT
1032}
1033
1034/*
1035 * Create buffers for the specified block device block's page. If
1036 * that page was dirty, the buffers are set dirty also.
1da177e4 1037 */
858119e1 1038static int
3b5e6454 1039grow_buffers(struct block_device *bdev, sector_t block, int size, gfp_t gfp)
1da177e4 1040{
1da177e4
LT
1041 pgoff_t index;
1042 int sizebits;
1043
90432e60 1044 sizebits = PAGE_SHIFT - __ffs(size);
1da177e4 1045 index = block >> sizebits;
1da177e4 1046
e5657933
AM
1047 /*
1048 * Check for a block which wants to lie outside our maximum possible
1049 * pagecache index. (this comparison is done using sector_t types).
1050 */
1051 if (unlikely(index != block >> sizebits)) {
e5657933 1052 printk(KERN_ERR "%s: requested out-of-range block %llu for "
a1c6f057 1053 "device %pg\n",
8e24eea7 1054 __func__, (unsigned long long)block,
a1c6f057 1055 bdev);
e5657933
AM
1056 return -EIO;
1057 }
676ce6d5 1058
1da177e4 1059 /* Create a page with the proper size buffers.. */
3b5e6454 1060 return grow_dev_page(bdev, block, index, size, sizebits, gfp);
1da177e4
LT
1061}
1062
0026ba40 1063static struct buffer_head *
3b5e6454
GK
1064__getblk_slow(struct block_device *bdev, sector_t block,
1065 unsigned size, gfp_t gfp)
1da177e4
LT
1066{
1067 /* Size must be multiple of hard sectorsize */
e1defc4f 1068 if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
1da177e4
LT
1069 (size < 512 || size > PAGE_SIZE))) {
1070 printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1071 size);
e1defc4f
MP
1072 printk(KERN_ERR "logical block size: %d\n",
1073 bdev_logical_block_size(bdev));
1da177e4
LT
1074
1075 dump_stack();
1076 return NULL;
1077 }
1078
676ce6d5
HD
1079 for (;;) {
1080 struct buffer_head *bh;
1081 int ret;
1da177e4
LT
1082
1083 bh = __find_get_block(bdev, block, size);
1084 if (bh)
1085 return bh;
676ce6d5 1086
3b5e6454 1087 ret = grow_buffers(bdev, block, size, gfp);
676ce6d5
HD
1088 if (ret < 0)
1089 return NULL;
1da177e4
LT
1090 }
1091}
1092
1093/*
1094 * The relationship between dirty buffers and dirty pages:
1095 *
1096 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
ec82e1c1 1097 * the page is tagged dirty in the page cache.
1da177e4
LT
1098 *
1099 * At all times, the dirtiness of the buffers represents the dirtiness of
1100 * subsections of the page. If the page has buffers, the page dirty bit is
1101 * merely a hint about the true dirty state.
1102 *
1103 * When a page is set dirty in its entirety, all its buffers are marked dirty
1104 * (if the page has buffers).
1105 *
1106 * When a buffer is marked dirty, its page is dirtied, but the page's other
1107 * buffers are not.
1108 *
1109 * Also. When blockdev buffers are explicitly read with bread(), they
1110 * individually become uptodate. But their backing page remains not
1111 * uptodate - even if all of its buffers are uptodate. A subsequent
2c69e205
MWO
1112 * block_read_full_folio() against that folio will discover all the uptodate
1113 * buffers, will set the folio uptodate and will perform no I/O.
1da177e4
LT
1114 */
1115
1116/**
1117 * mark_buffer_dirty - mark a buffer_head as needing writeout
67be2dd1 1118 * @bh: the buffer_head to mark dirty
1da177e4 1119 *
ec82e1c1
MW
1120 * mark_buffer_dirty() will set the dirty bit against the buffer, then set
1121 * its backing page dirty, then tag the page as dirty in the page cache
1122 * and then attach the address_space's inode to its superblock's dirty
1da177e4
LT
1123 * inode list.
1124 *
abc8a8a2 1125 * mark_buffer_dirty() is atomic. It takes bh->b_folio->mapping->private_lock,
b93b0163 1126 * i_pages lock and mapping->host->i_lock.
1da177e4 1127 */
fc9b52cd 1128void mark_buffer_dirty(struct buffer_head *bh)
1da177e4 1129{
787d2214 1130 WARN_ON_ONCE(!buffer_uptodate(bh));
1be62dc1 1131
5305cb83
TH
1132 trace_block_dirty_buffer(bh);
1133
1be62dc1
LT
1134 /*
1135 * Very *carefully* optimize the it-is-already-dirty case.
1136 *
1137 * Don't let the final "is it dirty" escape to before we
1138 * perhaps modified the buffer.
1139 */
1140 if (buffer_dirty(bh)) {
1141 smp_mb();
1142 if (buffer_dirty(bh))
1143 return;
1144 }
1145
a8e7d49a 1146 if (!test_set_buffer_dirty(bh)) {
cf1d3417 1147 struct folio *folio = bh->b_folio;
c4843a75 1148 struct address_space *mapping = NULL;
c4843a75 1149
cf1d3417
MWO
1150 folio_memcg_lock(folio);
1151 if (!folio_test_set_dirty(folio)) {
1152 mapping = folio->mapping;
8e9d78ed 1153 if (mapping)
cf1d3417 1154 __folio_mark_dirty(folio, mapping, 0);
8e9d78ed 1155 }
cf1d3417 1156 folio_memcg_unlock(folio);
c4843a75
GT
1157 if (mapping)
1158 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
a8e7d49a 1159 }
1da177e4 1160}
1fe72eaa 1161EXPORT_SYMBOL(mark_buffer_dirty);
1da177e4 1162
87354e5d
JL
1163void mark_buffer_write_io_error(struct buffer_head *bh)
1164{
485e9605
JL
1165 struct super_block *sb;
1166
87354e5d
JL
1167 set_buffer_write_io_error(bh);
1168 /* FIXME: do we need to set this in both places? */
abc8a8a2
MWO
1169 if (bh->b_folio && bh->b_folio->mapping)
1170 mapping_set_error(bh->b_folio->mapping, -EIO);
87354e5d
JL
1171 if (bh->b_assoc_map)
1172 mapping_set_error(bh->b_assoc_map, -EIO);
485e9605
JL
1173 rcu_read_lock();
1174 sb = READ_ONCE(bh->b_bdev->bd_super);
1175 if (sb)
1176 errseq_set(&sb->s_wb_err, -EIO);
1177 rcu_read_unlock();
87354e5d
JL
1178}
1179EXPORT_SYMBOL(mark_buffer_write_io_error);
1180
1da177e4
LT
1181/*
1182 * Decrement a buffer_head's reference count. If all buffers against a page
1183 * have zero reference count, are clean and unlocked, and if the page is clean
1184 * and unlocked then try_to_free_buffers() may strip the buffers from the page
1185 * in preparation for freeing it (sometimes, rarely, buffers are removed from
1186 * a page but it ends up not being freed, and buffers may later be reattached).
1187 */
1188void __brelse(struct buffer_head * buf)
1189{
1190 if (atomic_read(&buf->b_count)) {
1191 put_bh(buf);
1192 return;
1193 }
5c752ad9 1194 WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1da177e4 1195}
1fe72eaa 1196EXPORT_SYMBOL(__brelse);
1da177e4
LT
1197
1198/*
1199 * bforget() is like brelse(), except it discards any
1200 * potentially dirty data.
1201 */
1202void __bforget(struct buffer_head *bh)
1203{
1204 clear_buffer_dirty(bh);
535ee2fb 1205 if (bh->b_assoc_map) {
abc8a8a2 1206 struct address_space *buffer_mapping = bh->b_folio->mapping;
1da177e4
LT
1207
1208 spin_lock(&buffer_mapping->private_lock);
1209 list_del_init(&bh->b_assoc_buffers);
58ff407b 1210 bh->b_assoc_map = NULL;
1da177e4
LT
1211 spin_unlock(&buffer_mapping->private_lock);
1212 }
1213 __brelse(bh);
1214}
1fe72eaa 1215EXPORT_SYMBOL(__bforget);
1da177e4
LT
1216
1217static struct buffer_head *__bread_slow(struct buffer_head *bh)
1218{
1219 lock_buffer(bh);
1220 if (buffer_uptodate(bh)) {
1221 unlock_buffer(bh);
1222 return bh;
1223 } else {
1224 get_bh(bh);
1225 bh->b_end_io = end_buffer_read_sync;
1420c4a5 1226 submit_bh(REQ_OP_READ, bh);
1da177e4
LT
1227 wait_on_buffer(bh);
1228 if (buffer_uptodate(bh))
1229 return bh;
1230 }
1231 brelse(bh);
1232 return NULL;
1233}
1234
1235/*
1236 * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block().
1237 * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
1238 * refcount elevated by one when they're in an LRU. A buffer can only appear
1239 * once in a particular CPU's LRU. A single buffer can be present in multiple
1240 * CPU's LRUs at the same time.
1241 *
1242 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1243 * sb_find_get_block().
1244 *
1245 * The LRUs themselves only need locking against invalidate_bh_lrus. We use
1246 * a local interrupt disable for that.
1247 */
1248
86cf78d7 1249#define BH_LRU_SIZE 16
1da177e4
LT
1250
1251struct bh_lru {
1252 struct buffer_head *bhs[BH_LRU_SIZE];
1253};
1254
1255static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1256
1257#ifdef CONFIG_SMP
1258#define bh_lru_lock() local_irq_disable()
1259#define bh_lru_unlock() local_irq_enable()
1260#else
1261#define bh_lru_lock() preempt_disable()
1262#define bh_lru_unlock() preempt_enable()
1263#endif
1264
1265static inline void check_irqs_on(void)
1266{
1267#ifdef irqs_disabled
1268 BUG_ON(irqs_disabled());
1269#endif
1270}
1271
1272/*
241f01fb
EB
1273 * Install a buffer_head into this cpu's LRU. If not already in the LRU, it is
1274 * inserted at the front, and the buffer_head at the back if any is evicted.
1275 * Or, if already in the LRU it is moved to the front.
1da177e4
LT
1276 */
1277static void bh_lru_install(struct buffer_head *bh)
1278{
241f01fb
EB
1279 struct buffer_head *evictee = bh;
1280 struct bh_lru *b;
1281 int i;
1da177e4
LT
1282
1283 check_irqs_on();
c0226eb8
MK
1284 bh_lru_lock();
1285
8cc621d2
MK
1286 /*
1287 * the refcount of buffer_head in bh_lru prevents dropping the
1288 * attached page(i.e., try_to_free_buffers) so it could cause
1289 * failing page migration.
1290 * Skip putting upcoming bh into bh_lru until migration is done.
1291 */
c0226eb8
MK
1292 if (lru_cache_disabled()) {
1293 bh_lru_unlock();
8cc621d2 1294 return;
c0226eb8 1295 }
1da177e4 1296
241f01fb
EB
1297 b = this_cpu_ptr(&bh_lrus);
1298 for (i = 0; i < BH_LRU_SIZE; i++) {
1299 swap(evictee, b->bhs[i]);
1300 if (evictee == bh) {
1301 bh_lru_unlock();
1302 return;
1da177e4 1303 }
1da177e4 1304 }
1da177e4 1305
241f01fb
EB
1306 get_bh(bh);
1307 bh_lru_unlock();
1308 brelse(evictee);
1da177e4
LT
1309}
1310
1311/*
1312 * Look up the bh in this cpu's LRU. If it's there, move it to the head.
1313 */
858119e1 1314static struct buffer_head *
3991d3bd 1315lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
1da177e4
LT
1316{
1317 struct buffer_head *ret = NULL;
3991d3bd 1318 unsigned int i;
1da177e4
LT
1319
1320 check_irqs_on();
1321 bh_lru_lock();
1da177e4 1322 for (i = 0; i < BH_LRU_SIZE; i++) {
c7b92516 1323 struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);
1da177e4 1324
9470dd5d
ZB
1325 if (bh && bh->b_blocknr == block && bh->b_bdev == bdev &&
1326 bh->b_size == size) {
1da177e4
LT
1327 if (i) {
1328 while (i) {
c7b92516
CL
1329 __this_cpu_write(bh_lrus.bhs[i],
1330 __this_cpu_read(bh_lrus.bhs[i - 1]));
1da177e4
LT
1331 i--;
1332 }
c7b92516 1333 __this_cpu_write(bh_lrus.bhs[0], bh);
1da177e4
LT
1334 }
1335 get_bh(bh);
1336 ret = bh;
1337 break;
1338 }
1339 }
1340 bh_lru_unlock();
1341 return ret;
1342}
1343
1344/*
1345 * Perform a pagecache lookup for the matching buffer. If it's there, refresh
1346 * it in the LRU and mark it as accessed. If it is not present then return
1347 * NULL
1348 */
1349struct buffer_head *
3991d3bd 1350__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
1da177e4
LT
1351{
1352 struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1353
1354 if (bh == NULL) {
2457aec6 1355 /* __find_get_block_slow will mark the page accessed */
385fd4c5 1356 bh = __find_get_block_slow(bdev, block);
1da177e4
LT
1357 if (bh)
1358 bh_lru_install(bh);
2457aec6 1359 } else
1da177e4 1360 touch_buffer(bh);
2457aec6 1361
1da177e4
LT
1362 return bh;
1363}
1364EXPORT_SYMBOL(__find_get_block);
1365
1366/*
3b5e6454 1367 * __getblk_gfp() will locate (and, if necessary, create) the buffer_head
1da177e4
LT
1368 * which corresponds to the passed block_device, block and size. The
1369 * returned buffer has its reference count incremented.
1370 *
3b5e6454
GK
1371 * __getblk_gfp() will lock up the machine if grow_dev_page's
1372 * try_to_free_buffers() attempt is failing. FIXME, perhaps?
1da177e4
LT
1373 */
1374struct buffer_head *
3b5e6454
GK
1375__getblk_gfp(struct block_device *bdev, sector_t block,
1376 unsigned size, gfp_t gfp)
1da177e4
LT
1377{
1378 struct buffer_head *bh = __find_get_block(bdev, block, size);
1379
1380 might_sleep();
1381 if (bh == NULL)
3b5e6454 1382 bh = __getblk_slow(bdev, block, size, gfp);
1da177e4
LT
1383 return bh;
1384}
3b5e6454 1385EXPORT_SYMBOL(__getblk_gfp);
1da177e4
LT
1386
1387/*
1388 * Do async read-ahead on a buffer..
1389 */
3991d3bd 1390void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
1da177e4
LT
1391{
1392 struct buffer_head *bh = __getblk(bdev, block, size);
a3e713b5 1393 if (likely(bh)) {
e7ea1129 1394 bh_readahead(bh, REQ_RAHEAD);
a3e713b5
AM
1395 brelse(bh);
1396 }
1da177e4
LT
1397}
1398EXPORT_SYMBOL(__breadahead);
1399
1400/**
3b5e6454 1401 * __bread_gfp() - reads a specified block and returns the bh
67be2dd1 1402 * @bdev: the block_device to read from
1da177e4
LT
1403 * @block: number of block
1404 * @size: size (in bytes) to read
3b5e6454
GK
1405 * @gfp: page allocation flag
1406 *
1da177e4 1407 * Reads a specified block, and returns buffer head that contains it.
3b5e6454
GK
1408 * The page cache can be allocated from non-movable area
1409 * not to prevent page migration if you set gfp to zero.
1da177e4
LT
1410 * It returns NULL if the block was unreadable.
1411 */
1412struct buffer_head *
3b5e6454
GK
1413__bread_gfp(struct block_device *bdev, sector_t block,
1414 unsigned size, gfp_t gfp)
1da177e4 1415{
3b5e6454 1416 struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp);
1da177e4 1417
a3e713b5 1418 if (likely(bh) && !buffer_uptodate(bh))
1da177e4
LT
1419 bh = __bread_slow(bh);
1420 return bh;
1421}
3b5e6454 1422EXPORT_SYMBOL(__bread_gfp);
1da177e4 1423
8cc621d2
MK
1424static void __invalidate_bh_lrus(struct bh_lru *b)
1425{
1426 int i;
1427
1428 for (i = 0; i < BH_LRU_SIZE; i++) {
1429 brelse(b->bhs[i]);
1430 b->bhs[i] = NULL;
1431 }
1432}
1da177e4
LT
1433/*
1434 * invalidate_bh_lrus() is called rarely - but not only at unmount.
1435 * This doesn't race because it runs in each cpu either in irq
1436 * or with preempt disabled.
1437 */
1438static void invalidate_bh_lru(void *arg)
1439{
1440 struct bh_lru *b = &get_cpu_var(bh_lrus);
1da177e4 1441
8cc621d2 1442 __invalidate_bh_lrus(b);
1da177e4
LT
1443 put_cpu_var(bh_lrus);
1444}
42be35d0 1445
8cc621d2 1446bool has_bh_in_lru(int cpu, void *dummy)
42be35d0
GBY
1447{
1448 struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
1449 int i;
1da177e4 1450
42be35d0
GBY
1451 for (i = 0; i < BH_LRU_SIZE; i++) {
1452 if (b->bhs[i])
1d706679 1453 return true;
42be35d0
GBY
1454 }
1455
1d706679 1456 return false;
42be35d0
GBY
1457}
1458
f9a14399 1459void invalidate_bh_lrus(void)
1da177e4 1460{
cb923159 1461 on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1);
1da177e4 1462}
9db5579b 1463EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
1da177e4 1464
243418e3
MK
1465/*
1466 * It's called from workqueue context so we need a bh_lru_lock to close
1467 * the race with preemption/irq.
1468 */
1469void invalidate_bh_lrus_cpu(void)
8cc621d2
MK
1470{
1471 struct bh_lru *b;
1472
1473 bh_lru_lock();
243418e3 1474 b = this_cpu_ptr(&bh_lrus);
8cc621d2
MK
1475 __invalidate_bh_lrus(b);
1476 bh_lru_unlock();
1477}
1478
1da177e4
LT
1479void set_bh_page(struct buffer_head *bh,
1480 struct page *page, unsigned long offset)
1481{
1482 bh->b_page = page;
e827f923 1483 BUG_ON(offset >= PAGE_SIZE);
1da177e4
LT
1484 if (PageHighMem(page))
1485 /*
1486 * This catches illegal uses and preserves the offset:
1487 */
1488 bh->b_data = (char *)(0 + offset);
1489 else
1490 bh->b_data = page_address(page) + offset;
1491}
1492EXPORT_SYMBOL(set_bh_page);
1493
465e5e6a
PR
1494void folio_set_bh(struct buffer_head *bh, struct folio *folio,
1495 unsigned long offset)
1496{
1497 bh->b_folio = folio;
1498 BUG_ON(offset >= folio_size(folio));
1499 if (folio_test_highmem(folio))
1500 /*
1501 * This catches illegal uses and preserves the offset:
1502 */
1503 bh->b_data = (char *)(0 + offset);
1504 else
1505 bh->b_data = folio_address(folio) + offset;
1506}
1507EXPORT_SYMBOL(folio_set_bh);
1508
1da177e4
LT
1509/*
1510 * Called when truncating a buffer on a page completely.
1511 */
e7470ee8
MG
1512
1513/* Bits that are cleared during an invalidate */
1514#define BUFFER_FLAGS_DISCARD \
1515 (1 << BH_Mapped | 1 << BH_New | 1 << BH_Req | \
1516 1 << BH_Delay | 1 << BH_Unwritten)
1517
858119e1 1518static void discard_buffer(struct buffer_head * bh)
1da177e4 1519{
b0192296 1520 unsigned long b_state;
e7470ee8 1521
1da177e4
LT
1522 lock_buffer(bh);
1523 clear_buffer_dirty(bh);
1524 bh->b_bdev = NULL;
b0192296
UB
1525 b_state = READ_ONCE(bh->b_state);
1526 do {
1527 } while (!try_cmpxchg(&bh->b_state, &b_state,
1528 b_state & ~BUFFER_FLAGS_DISCARD));
1da177e4
LT
1529 unlock_buffer(bh);
1530}
1531
1da177e4 1532/**
7ba13abb
MWO
1533 * block_invalidate_folio - Invalidate part or all of a buffer-backed folio.
1534 * @folio: The folio which is affected.
d47992f8
LC
1535 * @offset: start of the range to invalidate
1536 * @length: length of the range to invalidate
1da177e4 1537 *
7ba13abb 1538 * block_invalidate_folio() is called when all or part of the folio has been
814e1d25 1539 * invalidated by a truncate operation.
1da177e4 1540 *
7ba13abb 1541 * block_invalidate_folio() does not have to release all buffers, but it must
1da177e4
LT
1542 * ensure that no dirty buffer is left outside @offset and that no I/O
1543 * is underway against any of the blocks which are outside the truncation
1544 * point. Because the caller is about to free (and possibly reuse) those
1545 * blocks on-disk.
1546 */
7ba13abb 1547void block_invalidate_folio(struct folio *folio, size_t offset, size_t length)
1da177e4
LT
1548{
1549 struct buffer_head *head, *bh, *next;
7ba13abb
MWO
1550 size_t curr_off = 0;
1551 size_t stop = length + offset;
1da177e4 1552
7ba13abb 1553 BUG_ON(!folio_test_locked(folio));
1da177e4 1554
d47992f8
LC
1555 /*
1556 * Check for overflow
1557 */
7ba13abb
MWO
1558 BUG_ON(stop > folio_size(folio) || stop < length);
1559
1560 head = folio_buffers(folio);
1561 if (!head)
1562 return;
d47992f8 1563
1da177e4
LT
1564 bh = head;
1565 do {
7ba13abb 1566 size_t next_off = curr_off + bh->b_size;
1da177e4
LT
1567 next = bh->b_this_page;
1568
d47992f8
LC
1569 /*
1570 * Are we still fully in range ?
1571 */
1572 if (next_off > stop)
1573 goto out;
1574
1da177e4
LT
1575 /*
1576 * is this block fully invalidated?
1577 */
1578 if (offset <= curr_off)
1579 discard_buffer(bh);
1580 curr_off = next_off;
1581 bh = next;
1582 } while (bh != head);
1583
1584 /*
7ba13abb 1585 * We release buffers only if the entire folio is being invalidated.
1da177e4
LT
1586 * The get_block cached value has been unconditionally invalidated,
1587 * so real IO is not possible anymore.
1588 */
7ba13abb
MWO
1589 if (length == folio_size(folio))
1590 filemap_release_folio(folio, 0);
1da177e4 1591out:
2ff28e22 1592 return;
1da177e4 1593}
7ba13abb 1594EXPORT_SYMBOL(block_invalidate_folio);
1da177e4
LT
1595
1596/*
1597 * We attach and possibly dirty the buffers atomically wrt
e621900a 1598 * block_dirty_folio() via private_lock. try_to_free_buffers
8e2e1756 1599 * is already excluded via the folio lock.
1da177e4 1600 */
8e2e1756
PR
1601void folio_create_empty_buffers(struct folio *folio, unsigned long blocksize,
1602 unsigned long b_state)
1da177e4
LT
1603{
1604 struct buffer_head *bh, *head, *tail;
1605
8e2e1756 1606 head = folio_alloc_buffers(folio, blocksize, true);
1da177e4
LT
1607 bh = head;
1608 do {
1609 bh->b_state |= b_state;
1610 tail = bh;
1611 bh = bh->b_this_page;
1612 } while (bh);
1613 tail->b_this_page = head;
1614
8e2e1756
PR
1615 spin_lock(&folio->mapping->private_lock);
1616 if (folio_test_uptodate(folio) || folio_test_dirty(folio)) {
1da177e4
LT
1617 bh = head;
1618 do {
8e2e1756 1619 if (folio_test_dirty(folio))
1da177e4 1620 set_buffer_dirty(bh);
8e2e1756 1621 if (folio_test_uptodate(folio))
1da177e4
LT
1622 set_buffer_uptodate(bh);
1623 bh = bh->b_this_page;
1624 } while (bh != head);
1625 }
8e2e1756
PR
1626 folio_attach_private(folio, head);
1627 spin_unlock(&folio->mapping->private_lock);
1628}
1629EXPORT_SYMBOL(folio_create_empty_buffers);
1630
1631void create_empty_buffers(struct page *page,
1632 unsigned long blocksize, unsigned long b_state)
1633{
1634 folio_create_empty_buffers(page_folio(page), blocksize, b_state);
1da177e4
LT
1635}
1636EXPORT_SYMBOL(create_empty_buffers);
1637
29f3ad7d
JK
1638/**
1639 * clean_bdev_aliases: clean a range of buffers in block device
1640 * @bdev: Block device to clean buffers in
1641 * @block: Start of a range of blocks to clean
1642 * @len: Number of blocks to clean
1da177e4 1643 *
29f3ad7d
JK
1644 * We are taking a range of blocks for data and we don't want writeback of any
1645 * buffer-cache aliases starting from return from this function and until the
1646 * moment when something will explicitly mark the buffer dirty (hopefully that
1647 * will not happen until we will free that block ;-) We don't even need to mark
1648 * it not-uptodate - nobody can expect anything from a newly allocated buffer
1649 * anyway. We used to use unmap_buffer() for such invalidation, but that was
1650 * wrong. We definitely don't want to mark the alias unmapped, for example - it
1651 * would confuse anyone who might pick it with bread() afterwards...
1652 *
1653 * Also.. Note that bforget() doesn't lock the buffer. So there can be
1654 * writeout I/O going on against recently-freed buffers. We don't wait on that
1655 * I/O in bforget() - it's more efficient to wait on the I/O only if we really
1656 * need to. That happens here.
1da177e4 1657 */
29f3ad7d 1658void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
1da177e4 1659{
29f3ad7d
JK
1660 struct inode *bd_inode = bdev->bd_inode;
1661 struct address_space *bd_mapping = bd_inode->i_mapping;
9e0b6f31 1662 struct folio_batch fbatch;
29f3ad7d
JK
1663 pgoff_t index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
1664 pgoff_t end;
c10f778d 1665 int i, count;
29f3ad7d
JK
1666 struct buffer_head *bh;
1667 struct buffer_head *head;
1da177e4 1668
29f3ad7d 1669 end = (block + len - 1) >> (PAGE_SHIFT - bd_inode->i_blkbits);
9e0b6f31
MWO
1670 folio_batch_init(&fbatch);
1671 while (filemap_get_folios(bd_mapping, &index, end, &fbatch)) {
1672 count = folio_batch_count(&fbatch);
c10f778d 1673 for (i = 0; i < count; i++) {
9e0b6f31 1674 struct folio *folio = fbatch.folios[i];
1da177e4 1675
9e0b6f31 1676 if (!folio_buffers(folio))
29f3ad7d
JK
1677 continue;
1678 /*
9e0b6f31 1679 * We use folio lock instead of bd_mapping->private_lock
29f3ad7d
JK
1680 * to pin buffers here since we can afford to sleep and
1681 * it scales better than a global spinlock lock.
1682 */
9e0b6f31
MWO
1683 folio_lock(folio);
1684 /* Recheck when the folio is locked which pins bhs */
1685 head = folio_buffers(folio);
1686 if (!head)
29f3ad7d 1687 goto unlock_page;
29f3ad7d
JK
1688 bh = head;
1689 do {
6c006a9d 1690 if (!buffer_mapped(bh) || (bh->b_blocknr < block))
29f3ad7d
JK
1691 goto next;
1692 if (bh->b_blocknr >= block + len)
1693 break;
1694 clear_buffer_dirty(bh);
1695 wait_on_buffer(bh);
1696 clear_buffer_req(bh);
1697next:
1698 bh = bh->b_this_page;
1699 } while (bh != head);
1700unlock_page:
9e0b6f31 1701 folio_unlock(folio);
29f3ad7d 1702 }
9e0b6f31 1703 folio_batch_release(&fbatch);
29f3ad7d 1704 cond_resched();
c10f778d
JK
1705 /* End of range already reached? */
1706 if (index > end || !index)
1707 break;
1da177e4
LT
1708 }
1709}
29f3ad7d 1710EXPORT_SYMBOL(clean_bdev_aliases);
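
The usual caller is a filesystem that has just allocated on-disk blocks for file data and must make sure no stale buffer-cache alias of those blocks gets written back behind its back. A minimal sketch, assuming a hypothetical myfs_alloc_blocks() allocator (the single-buffer variant clean_bdev_bh_alias() is used the same way, as in __block_write_full_page() below):

#include <linux/buffer_head.h>

/* Sketch only: allocate an extent for file data, then drop stale aliases. */
static int myfs_map_new_extent(struct inode *inode, sector_t *first_block,
			       sector_t nr_blocks)
{
	int err = myfs_alloc_blocks(inode, first_block, nr_blocks); /* hypothetical */

	if (err)
		return err;
	/* No one may write back old buffer-cache aliases of these blocks now. */
	clean_bdev_aliases(inode->i_sb->s_bdev, *first_block, nr_blocks);
	return 0;
}
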
1da177e4 1711
45bce8f3
LT
1712/*
1713 * Size is a power-of-two in the range 512..PAGE_SIZE,
1714 * and the case we care about most is PAGE_SIZE.
1715 *
1716 * So this *could* possibly be written with those
1717 * constraints in mind (relevant mostly if some
1718 * architecture has a slow bit-scan instruction)
1719 */
1720static inline int block_size_bits(unsigned int blocksize)
1721{
1722 return ilog2(blocksize);
1723}
1724
c6c8c3e7
PR
1725static struct buffer_head *folio_create_buffers(struct folio *folio,
1726 struct inode *inode,
1727 unsigned int b_state)
45bce8f3 1728{
c6c8c3e7 1729 BUG_ON(!folio_test_locked(folio));
45bce8f3 1730
c6c8c3e7
PR
1731 if (!folio_buffers(folio))
1732 folio_create_empty_buffers(folio,
1733 1 << READ_ONCE(inode->i_blkbits),
1734 b_state);
1735 return folio_buffers(folio);
45bce8f3
LT
1736}
1737
1da177e4
LT
1738/*
1739 * NOTE! All mapped/uptodate combinations are valid:
1740 *
1741 * Mapped Uptodate Meaning
1742 *
1743 * No No "unknown" - must do get_block()
1744 * No Yes "hole" - zero-filled
1745 * Yes No "allocated" - allocated on disk, not read in
1746 * Yes Yes "valid" - allocated and up-to-date in memory.
1747 *
1748 * "Dirty" is valid only with the last case (mapped+uptodate).
1749 */
1750
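
The table above is the contract that the helpers below rely on: "hole" and "valid" buffers need no IO, an "unknown" buffer needs get_block(), and an "allocated" buffer must be read before its contents can be used. An illustrative sketch (not code from this file; myfs_get_block() is hypothetical):

/* Prepare one buffer covering block @iblock of @inode for reading its contents. */
static int myfs_prepare_bh_for_read(struct inode *inode, sector_t iblock,
				    struct buffer_head *bh)
{
	if (buffer_uptodate(bh))
		return 0;			/* "hole" or "valid": no IO needed */
	if (!buffer_mapped(bh)) {
		int err = myfs_get_block(inode, iblock, bh, 0);	/* "unknown" */

		if (err)
			return err;
	}
	if (!buffer_mapped(bh))
		return 0;			/* still a hole: caller zeroes the data */
	if (bh_read(bh, 0) < 0)			/* "allocated": read it in */
		return -EIO;
	return 0;
}
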
1751/*
1752 * While block_write_full_page is writing back the dirty buffers under
1753 * the page lock, whoever dirtied the buffers may decide to clean them
1754 * again at any time. We handle that by only looking at the buffer
1755 * state inside lock_buffer().
1756 *
1757 * If block_write_full_page() is called for regular writeback
1758 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1759 * locked buffer. This only can happen if someone has written the buffer
1760 * directly, with submit_bh(). At the address_space level PageWriteback
1761 * prevents this contention from occurring.
6e34eedd
TT
1762 *
1763 * If block_write_full_page() is called with wbc->sync_mode ==
70fd7614 1764 * WB_SYNC_ALL, the writes are posted using REQ_SYNC; this
721a9602 1765 * causes the writes to be flagged as synchronous writes.
1da177e4 1766 */
b4bba389 1767int __block_write_full_page(struct inode *inode, struct page *page,
35c80d5f
CM
1768 get_block_t *get_block, struct writeback_control *wbc,
1769 bh_end_io_t *handler)
1da177e4
LT
1770{
1771 int err;
1772 sector_t block;
1773 sector_t last_block;
f0fbd5fc 1774 struct buffer_head *bh, *head;
45bce8f3 1775 unsigned int blocksize, bbits;
1da177e4 1776 int nr_underway = 0;
3ae72869 1777 blk_opf_t write_flags = wbc_to_write_flags(wbc);
1da177e4 1778
c6c8c3e7
PR
1779 head = folio_create_buffers(page_folio(page), inode,
1780 (1 << BH_Dirty) | (1 << BH_Uptodate));
1da177e4
LT
1781
1782 /*
e621900a 1783 * Be very careful. We have no exclusion from block_dirty_folio
1da177e4
LT
1784 * here, and the (potentially unmapped) buffers may become dirty at
1785 * any time. If a buffer becomes dirty here after we've inspected it
1786 * then we just miss that fact, and the page stays dirty.
1787 *
e621900a 1788 * Buffers outside i_size may be dirtied by block_dirty_folio;
1da177e4
LT
1789 * handle that here by just cleaning them.
1790 */
1791
1da177e4 1792 bh = head;
45bce8f3
LT
1793 blocksize = bh->b_size;
1794 bbits = block_size_bits(blocksize);
1795
09cbfeaf 1796 block = (sector_t)page->index << (PAGE_SHIFT - bbits);
45bce8f3 1797 last_block = (i_size_read(inode) - 1) >> bbits;
1da177e4
LT
1798
1799 /*
1800 * Get all the dirty buffers mapped to disk addresses and
1801 * handle any aliases from the underlying blockdev's mapping.
1802 */
1803 do {
1804 if (block > last_block) {
1805 /*
1806 * mapped buffers outside i_size will occur, because
1807 * this page can be outside i_size when there is a
1808 * truncate in progress.
1809 */
1810 /*
1811 * The buffer was zeroed by block_write_full_page()
1812 */
1813 clear_buffer_dirty(bh);
1814 set_buffer_uptodate(bh);
29a814d2
AT
1815 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1816 buffer_dirty(bh)) {
b0cf2321 1817 WARN_ON(bh->b_size != blocksize);
1da177e4
LT
1818 err = get_block(inode, block, bh, 1);
1819 if (err)
1820 goto recover;
29a814d2 1821 clear_buffer_delay(bh);
1da177e4
LT
1822 if (buffer_new(bh)) {
1823 /* blockdev mappings never come here */
1824 clear_buffer_new(bh);
e64855c6 1825 clean_bdev_bh_alias(bh);
1da177e4
LT
1826 }
1827 }
1828 bh = bh->b_this_page;
1829 block++;
1830 } while (bh != head);
1831
1832 do {
1da177e4
LT
1833 if (!buffer_mapped(bh))
1834 continue;
1835 /*
1836 * If it's a fully non-blocking write attempt and we cannot
1837 * lock the buffer then redirty the page. Note that this can
5b0830cb
JA
1838 * potentially cause a busy-wait loop from writeback threads
1839 * and kswapd activity, but those code paths have their own
1840 * higher-level throttling.
1da177e4 1841 */
1b430bee 1842 if (wbc->sync_mode != WB_SYNC_NONE) {
1da177e4 1843 lock_buffer(bh);
ca5de404 1844 } else if (!trylock_buffer(bh)) {
1da177e4
LT
1845 redirty_page_for_writepage(wbc, page);
1846 continue;
1847 }
1848 if (test_clear_buffer_dirty(bh)) {
35c80d5f 1849 mark_buffer_async_write_endio(bh, handler);
1da177e4
LT
1850 } else {
1851 unlock_buffer(bh);
1852 }
1853 } while ((bh = bh->b_this_page) != head);
1854
1855 /*
1856 * The page and its buffers are protected by PageWriteback(), so we can
1857 * drop the bh refcounts early.
1858 */
1859 BUG_ON(PageWriteback(page));
1860 set_page_writeback(page);
1da177e4
LT
1861
1862 do {
1863 struct buffer_head *next = bh->b_this_page;
1864 if (buffer_async_write(bh)) {
1420c4a5 1865 submit_bh_wbc(REQ_OP_WRITE | write_flags, bh, wbc);
1da177e4
LT
1866 nr_underway++;
1867 }
1da177e4
LT
1868 bh = next;
1869 } while (bh != head);
05937baa 1870 unlock_page(page);
1da177e4
LT
1871
1872 err = 0;
1873done:
1874 if (nr_underway == 0) {
1875 /*
1876 * The page was marked dirty, but the buffers were
1877 * clean. Someone wrote them back by hand with
79f59784 1878 * write_dirty_buffer/submit_bh. A rare case.
1da177e4 1879 */
1da177e4 1880 end_page_writeback(page);
3d67f2d7 1881
1da177e4
LT
1882 /*
1883 * The page and buffer_heads can be released at any time from
1884 * here on.
1885 */
1da177e4
LT
1886 }
1887 return err;
1888
1889recover:
1890 /*
1891 * ENOSPC, or some other error. We may already have added some
1892 * blocks to the file, so we need to write these out to avoid
1893 * exposing stale data.
1894 * The page is currently locked and not marked for writeback
1895 */
1896 bh = head;
1897 /* Recovery: lock and submit the mapped buffers */
1898 do {
29a814d2
AT
1899 if (buffer_mapped(bh) && buffer_dirty(bh) &&
1900 !buffer_delay(bh)) {
1da177e4 1901 lock_buffer(bh);
35c80d5f 1902 mark_buffer_async_write_endio(bh, handler);
1da177e4
LT
1903 } else {
1904 /*
1905 * The buffer may have been set dirty during
1906 * attachment to a dirty page.
1907 */
1908 clear_buffer_dirty(bh);
1909 }
1910 } while ((bh = bh->b_this_page) != head);
1911 SetPageError(page);
1912 BUG_ON(PageWriteback(page));
7e4c3690 1913 mapping_set_error(page->mapping, err);
1da177e4 1914 set_page_writeback(page);
1da177e4
LT
1915 do {
1916 struct buffer_head *next = bh->b_this_page;
1917 if (buffer_async_write(bh)) {
1918 clear_buffer_dirty(bh);
1420c4a5 1919 submit_bh_wbc(REQ_OP_WRITE | write_flags, bh, wbc);
1da177e4
LT
1920 nr_underway++;
1921 }
1da177e4
LT
1922 bh = next;
1923 } while (bh != head);
ffda9d30 1924 unlock_page(page);
1da177e4
LT
1925 goto done;
1926}
b4bba389 1927EXPORT_SYMBOL(__block_write_full_page);
1da177e4 1928
afddba49
NP
1929/*
1930 * If a page has any new buffers, zero them out here, and mark them uptodate
1931 * and dirty so they'll be written out (in order to prevent uninitialised
1932 * block data from leaking). And clear the new bit.
1933 */
1934void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1935{
1936 unsigned int block_start, block_end;
1937 struct buffer_head *head, *bh;
1938
1939 BUG_ON(!PageLocked(page));
1940 if (!page_has_buffers(page))
1941 return;
1942
1943 bh = head = page_buffers(page);
1944 block_start = 0;
1945 do {
1946 block_end = block_start + bh->b_size;
1947
1948 if (buffer_new(bh)) {
1949 if (block_end > from && block_start < to) {
1950 if (!PageUptodate(page)) {
1951 unsigned start, size;
1952
1953 start = max(from, block_start);
1954 size = min(to, block_end) - start;
1955
eebd2aa3 1956 zero_user(page, start, size);
afddba49
NP
1957 set_buffer_uptodate(bh);
1958 }
1959
1960 clear_buffer_new(bh);
1961 mark_buffer_dirty(bh);
1962 }
1963 }
1964
1965 block_start = block_end;
1966 bh = bh->b_this_page;
1967 } while (bh != head);
1968}
1969EXPORT_SYMBOL(page_zero_new_buffers);
1970
ae259a9c
CH
1971static void
1972iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
6d49cc85 1973 const struct iomap *iomap)
ae259a9c
CH
1974{
1975 loff_t offset = block << inode->i_blkbits;
1976
1977 bh->b_bdev = iomap->bdev;
1978
1979 /*
1980 * Block points to offset in file we need to map, iomap contains
1981 * the offset at which the map starts. If the map ends before the
1982 * current block, then do not map the buffer and let the caller
1983 * handle it.
1984 */
1985 BUG_ON(offset >= iomap->offset + iomap->length);
1986
1987 switch (iomap->type) {
1988 case IOMAP_HOLE:
1989 /*
1990 * If the buffer is not up to date or beyond the current EOF,
1991 * we need to mark it as new to ensure sub-block zeroing is
1992 * executed if necessary.
1993 */
1994 if (!buffer_uptodate(bh) ||
1995 (offset >= i_size_read(inode)))
1996 set_buffer_new(bh);
1997 break;
1998 case IOMAP_DELALLOC:
1999 if (!buffer_uptodate(bh) ||
2000 (offset >= i_size_read(inode)))
2001 set_buffer_new(bh);
2002 set_buffer_uptodate(bh);
2003 set_buffer_mapped(bh);
2004 set_buffer_delay(bh);
2005 break;
2006 case IOMAP_UNWRITTEN:
2007 /*
3d7b6b21
AG
2008 * For unwritten regions, we always need to ensure that regions
2009 * in the block we are not writing to are zeroed. Mark the
2010 * buffer as new to ensure this.
ae259a9c
CH
2011 */
2012 set_buffer_new(bh);
2013 set_buffer_unwritten(bh);
df561f66 2014 fallthrough;
ae259a9c 2015 case IOMAP_MAPPED:
3d7b6b21
AG
2016 if ((iomap->flags & IOMAP_F_NEW) ||
2017 offset >= i_size_read(inode))
ae259a9c 2018 set_buffer_new(bh);
19fe5f64
AG
2019 bh->b_blocknr = (iomap->addr + offset - iomap->offset) >>
2020 inode->i_blkbits;
ae259a9c
CH
2021 set_buffer_mapped(bh);
2022 break;
2023 }
2024}
2025
d1bd0b4e 2026int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
6d49cc85 2027 get_block_t *get_block, const struct iomap *iomap)
1da177e4 2028{
09cbfeaf 2029 unsigned from = pos & (PAGE_SIZE - 1);
ebdec241 2030 unsigned to = from + len;
d1bd0b4e 2031 struct inode *inode = folio->mapping->host;
1da177e4
LT
2032 unsigned block_start, block_end;
2033 sector_t block;
2034 int err = 0;
2035 unsigned blocksize, bbits;
2036 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
2037
d1bd0b4e 2038 BUG_ON(!folio_test_locked(folio));
09cbfeaf
KS
2039 BUG_ON(from > PAGE_SIZE);
2040 BUG_ON(to > PAGE_SIZE);
1da177e4
LT
2041 BUG_ON(from > to);
2042
c6c8c3e7 2043 head = folio_create_buffers(folio, inode, 0);
45bce8f3
LT
2044 blocksize = head->b_size;
2045 bbits = block_size_bits(blocksize);
1da177e4 2046
d1bd0b4e 2047 block = (sector_t)folio->index << (PAGE_SHIFT - bbits);
1da177e4
LT
2048
2049 for(bh = head, block_start = 0; bh != head || !block_start;
2050 block++, block_start=block_end, bh = bh->b_this_page) {
2051 block_end = block_start + blocksize;
2052 if (block_end <= from || block_start >= to) {
d1bd0b4e 2053 if (folio_test_uptodate(folio)) {
1da177e4
LT
2054 if (!buffer_uptodate(bh))
2055 set_buffer_uptodate(bh);
2056 }
2057 continue;
2058 }
2059 if (buffer_new(bh))
2060 clear_buffer_new(bh);
2061 if (!buffer_mapped(bh)) {
b0cf2321 2062 WARN_ON(bh->b_size != blocksize);
ae259a9c
CH
2063 if (get_block) {
2064 err = get_block(inode, block, bh, 1);
2065 if (err)
2066 break;
2067 } else {
2068 iomap_to_bh(inode, block, bh, iomap);
2069 }
2070
1da177e4 2071 if (buffer_new(bh)) {
e64855c6 2072 clean_bdev_bh_alias(bh);
d1bd0b4e 2073 if (folio_test_uptodate(folio)) {
637aff46 2074 clear_buffer_new(bh);
1da177e4 2075 set_buffer_uptodate(bh);
637aff46 2076 mark_buffer_dirty(bh);
1da177e4
LT
2077 continue;
2078 }
eebd2aa3 2079 if (block_end > to || block_start < from)
d1bd0b4e 2080 folio_zero_segments(folio,
eebd2aa3
CL
2081 to, block_end,
2082 block_start, from);
1da177e4
LT
2083 continue;
2084 }
2085 }
d1bd0b4e 2086 if (folio_test_uptodate(folio)) {
1da177e4
LT
2087 if (!buffer_uptodate(bh))
2088 set_buffer_uptodate(bh);
2089 continue;
2090 }
2091 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
33a266dd 2092 !buffer_unwritten(bh) &&
1da177e4 2093 (block_start < from || block_end > to)) {
e7ea1129 2094 bh_read_nowait(bh, 0);
1da177e4
LT
2095 *wait_bh++=bh;
2096 }
2097 }
2098 /*
2099 * If we issued read requests - let them complete.
2100 */
2101 while(wait_bh > wait) {
2102 wait_on_buffer(*--wait_bh);
2103 if (!buffer_uptodate(*wait_bh))
f3ddbdc6 2104 err = -EIO;
1da177e4 2105 }
f9f07b6c 2106 if (unlikely(err))
d1bd0b4e 2107 page_zero_new_buffers(&folio->page, from, to);
1da177e4
LT
2108 return err;
2109}
ae259a9c
CH
2110
2111int __block_write_begin(struct page *page, loff_t pos, unsigned len,
2112 get_block_t *get_block)
2113{
d1bd0b4e
MWO
2114 return __block_write_begin_int(page_folio(page), pos, len, get_block,
2115 NULL);
ae259a9c 2116}
ebdec241 2117EXPORT_SYMBOL(__block_write_begin);
1da177e4
LT
2118
2119static int __block_commit_write(struct inode *inode, struct page *page,
2120 unsigned from, unsigned to)
2121{
2122 unsigned block_start, block_end;
2123 int partial = 0;
2124 unsigned blocksize;
2125 struct buffer_head *bh, *head;
2126
45bce8f3
LT
2127 bh = head = page_buffers(page);
2128 blocksize = bh->b_size;
1da177e4 2129
45bce8f3
LT
2130 block_start = 0;
2131 do {
1da177e4
LT
2132 block_end = block_start + blocksize;
2133 if (block_end <= from || block_start >= to) {
2134 if (!buffer_uptodate(bh))
2135 partial = 1;
2136 } else {
2137 set_buffer_uptodate(bh);
2138 mark_buffer_dirty(bh);
2139 }
4ebd3aec
YG
2140 if (buffer_new(bh))
2141 clear_buffer_new(bh);
45bce8f3
LT
2142
2143 block_start = block_end;
2144 bh = bh->b_this_page;
2145 } while (bh != head);
1da177e4
LT
2146
2147 /*
2148 * If this is a partial write which happened to make all buffers
2c69e205 2149 * uptodate then we can optimize away a bogus read_folio() for
1da177e4
LT
2150 * the next read(). Here we 'discover' whether the page went
2151 * uptodate as a result of this (potentially partial) write.
2152 */
2153 if (!partial)
2154 SetPageUptodate(page);
2155 return 0;
2156}
2157
afddba49 2158/*
155130a4
CH
2159 * block_write_begin takes care of the basic task of block allocation and
2160 * bringing partial write blocks uptodate first.
2161 *
7bb46a67 2162 * The filesystem needs to handle block truncation upon failure.
afddba49 2163 */
155130a4 2164int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
b3992d1e 2165 struct page **pagep, get_block_t *get_block)
afddba49 2166{
09cbfeaf 2167 pgoff_t index = pos >> PAGE_SHIFT;
afddba49 2168 struct page *page;
6e1db88d 2169 int status;
afddba49 2170
b7446e7c 2171 page = grab_cache_page_write_begin(mapping, index);
6e1db88d
CH
2172 if (!page)
2173 return -ENOMEM;
afddba49 2174
6e1db88d 2175 status = __block_write_begin(page, pos, len, get_block);
afddba49 2176 if (unlikely(status)) {
6e1db88d 2177 unlock_page(page);
09cbfeaf 2178 put_page(page);
6e1db88d 2179 page = NULL;
afddba49
NP
2180 }
2181
6e1db88d 2182 *pagep = page;
afddba49
NP
2183 return status;
2184}
2185EXPORT_SYMBOL(block_write_begin);
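
In practice a filesystem's ->write_begin is a thin wrapper that passes its own get_block routine and, per the comment above, truncates away blocks instantiated beyond EOF when the helper fails. A hedged sketch with hypothetical myfs_get_block() and myfs_write_failed():

static int myfs_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len,
			    struct page **pagep, void **fsdata)
{
	int ret = block_write_begin(mapping, pos, len, pagep, myfs_get_block);

	if (ret < 0)
		myfs_write_failed(mapping, pos + len);	/* hypothetical: drop blocks past EOF */
	return ret;
}
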
2186
2187int block_write_end(struct file *file, struct address_space *mapping,
2188 loff_t pos, unsigned len, unsigned copied,
2189 struct page *page, void *fsdata)
2190{
2191 struct inode *inode = mapping->host;
2192 unsigned start;
2193
09cbfeaf 2194 start = pos & (PAGE_SIZE - 1);
afddba49
NP
2195
2196 if (unlikely(copied < len)) {
2197 /*
2c69e205
MWO
2198 * The buffers that were written will now be uptodate, so
2199 * we don't have to worry about a read_folio reading them
2200 * and overwriting a partial write. However if we have
2201 * encountered a short write and only partially written
2202 * into a buffer, it will not be marked uptodate, so a
2203 * read_folio might come in and destroy our partial write.
afddba49
NP
2204 *
2205 * Do the simplest thing, and just treat any short write to a
2206 * non uptodate page as a zero-length write, and force the
2207 * caller to redo the whole thing.
2208 */
2209 if (!PageUptodate(page))
2210 copied = 0;
2211
2212 page_zero_new_buffers(page, start+copied, start+len);
2213 }
2214 flush_dcache_page(page);
2215
2216 /* This could be a short (even 0-length) commit */
2217 __block_commit_write(inode, page, start, start+copied);
2218
2219 return copied;
2220}
2221EXPORT_SYMBOL(block_write_end);
2222
2223int generic_write_end(struct file *file, struct address_space *mapping,
2224 loff_t pos, unsigned len, unsigned copied,
2225 struct page *page, void *fsdata)
2226{
8af54f29
CH
2227 struct inode *inode = mapping->host;
2228 loff_t old_size = inode->i_size;
2229 bool i_size_changed = false;
2230
afddba49 2231 copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
8af54f29
CH
2232
2233 /*
2234 * No need to use i_size_read() here, the i_size cannot change under us
2235 * because we hold i_rwsem.
2236 *
2237 * But it's important to update i_size while still holding page lock:
2238 * page writeout could otherwise come in and zero beyond i_size.
2239 */
2240 if (pos + copied > inode->i_size) {
2241 i_size_write(inode, pos + copied);
2242 i_size_changed = true;
2243 }
2244
2245 unlock_page(page);
7a77dad7 2246 put_page(page);
8af54f29
CH
2247
2248 if (old_size < pos)
2249 pagecache_isize_extended(inode, old_size, pos);
2250 /*
2251 * Don't mark the inode dirty under page lock. First, it unnecessarily
2252 * makes the holding time of page lock longer. Second, it forces lock
2253 * ordering of page lock and transaction start for journaling
2254 * filesystems.
2255 */
2256 if (i_size_changed)
2257 mark_inode_dirty(inode);
26ddb1f4 2258 return copied;
afddba49
NP
2259}
2260EXPORT_SYMBOL(generic_write_end);
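
For a classic buffer-head filesystem, the write path then reduces to wiring these helpers into its address_space_operations. A hedged sketch; the myfs_* callbacks are the hypothetical wrappers sketched in this section (myfs_write_begin above, myfs_read_folio, myfs_writepage and myfs_bmap below):

static const struct address_space_operations myfs_aops = {
	.dirty_folio		= block_dirty_folio,
	.invalidate_folio	= block_invalidate_folio,
	.read_folio		= myfs_read_folio,
	.writepage		= myfs_writepage,
	.write_begin		= myfs_write_begin,
	.write_end		= generic_write_end,
	.bmap			= myfs_bmap,
	.is_partially_uptodate	= block_is_partially_uptodate,
};
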
2261
8ab22b9a 2262/*
2e7e80f7 2263 * block_is_partially_uptodate checks whether buffers within a folio are
8ab22b9a
HH
2264 * uptodate or not.
2265 *
2e7e80f7
MWO
2266 * Returns true if all buffers which correspond to the specified part
2267 * of the folio are uptodate.
8ab22b9a 2268 */
2e7e80f7 2269bool block_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
8ab22b9a 2270{
8ab22b9a
HH
2271 unsigned block_start, block_end, blocksize;
2272 unsigned to;
2273 struct buffer_head *bh, *head;
2e7e80f7 2274 bool ret = true;
8ab22b9a 2275
2e7e80f7
MWO
2276 head = folio_buffers(folio);
2277 if (!head)
2278 return false;
45bce8f3 2279 blocksize = head->b_size;
2e7e80f7 2280 to = min_t(unsigned, folio_size(folio) - from, count);
8ab22b9a 2281 to = from + to;
2e7e80f7
MWO
2282 if (from < blocksize && to > folio_size(folio) - blocksize)
2283 return false;
8ab22b9a 2284
8ab22b9a
HH
2285 bh = head;
2286 block_start = 0;
2287 do {
2288 block_end = block_start + blocksize;
2289 if (block_end > from && block_start < to) {
2290 if (!buffer_uptodate(bh)) {
2e7e80f7 2291 ret = false;
8ab22b9a
HH
2292 break;
2293 }
2294 if (block_end >= to)
2295 break;
2296 }
2297 block_start = block_end;
2298 bh = bh->b_this_page;
2299 } while (bh != head);
2300
2301 return ret;
2302}
2303EXPORT_SYMBOL(block_is_partially_uptodate);
2304
1da177e4 2305/*
2c69e205 2306 * Generic "read_folio" function for block devices that have the normal
1da177e4 2307 * get_block functionality. This is most of the block device filesystems.
2c69e205 2308 * Reads the folio asynchronously --- the unlock_buffer() and
1da177e4 2309 * set/clear_buffer_uptodate() functions propagate buffer state into the
2c69e205 2310 * folio once IO has completed.
1da177e4 2311 */
2c69e205 2312int block_read_full_folio(struct folio *folio, get_block_t *get_block)
1da177e4 2313{
2c69e205 2314 struct inode *inode = folio->mapping->host;
1da177e4
LT
2315 sector_t iblock, lblock;
2316 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
45bce8f3 2317 unsigned int blocksize, bbits;
1da177e4
LT
2318 int nr, i;
2319 int fully_mapped = 1;
b7a6eb22 2320 bool page_error = false;
4fa512ce
EB
2321 loff_t limit = i_size_read(inode);
2322
2323 /* This is needed for ext4. */
2324 if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode))
2325 limit = inode->i_sb->s_maxbytes;
1da177e4 2326
2c69e205
MWO
2327 VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
2328
c6c8c3e7 2329 head = folio_create_buffers(folio, inode, 0);
45bce8f3
LT
2330 blocksize = head->b_size;
2331 bbits = block_size_bits(blocksize);
1da177e4 2332
2c69e205 2333 iblock = (sector_t)folio->index << (PAGE_SHIFT - bbits);
4fa512ce 2334 lblock = (limit+blocksize-1) >> bbits;
1da177e4
LT
2335 bh = head;
2336 nr = 0;
2337 i = 0;
2338
2339 do {
2340 if (buffer_uptodate(bh))
2341 continue;
2342
2343 if (!buffer_mapped(bh)) {
c64610ba
AM
2344 int err = 0;
2345
1da177e4
LT
2346 fully_mapped = 0;
2347 if (iblock < lblock) {
b0cf2321 2348 WARN_ON(bh->b_size != blocksize);
c64610ba 2349 err = get_block(inode, iblock, bh, 0);
b7a6eb22 2350 if (err) {
2c69e205 2351 folio_set_error(folio);
b7a6eb22
MWO
2352 page_error = true;
2353 }
1da177e4
LT
2354 }
2355 if (!buffer_mapped(bh)) {
2c69e205
MWO
2356 folio_zero_range(folio, i * blocksize,
2357 blocksize);
c64610ba
AM
2358 if (!err)
2359 set_buffer_uptodate(bh);
1da177e4
LT
2360 continue;
2361 }
2362 /*
2363 * get_block() might have updated the buffer
2364 * synchronously
2365 */
2366 if (buffer_uptodate(bh))
2367 continue;
2368 }
2369 arr[nr++] = bh;
2370 } while (i++, iblock++, (bh = bh->b_this_page) != head);
2371
2372 if (fully_mapped)
2c69e205 2373 folio_set_mappedtodisk(folio);
1da177e4
LT
2374
2375 if (!nr) {
2376 /*
2c69e205 2377 * All buffers are uptodate - we can set the folio uptodate
1da177e4
LT
2378 * as well. But not if get_block() returned an error.
2379 */
b7a6eb22 2380 if (!page_error)
2c69e205
MWO
2381 folio_mark_uptodate(folio);
2382 folio_unlock(folio);
1da177e4
LT
2383 return 0;
2384 }
2385
2386 /* Stage two: lock the buffers */
2387 for (i = 0; i < nr; i++) {
2388 bh = arr[i];
2389 lock_buffer(bh);
2390 mark_buffer_async_read(bh);
2391 }
2392
2393 /*
2394 * Stage 3: start the IO. Check for uptodateness
2395 * inside the buffer lock in case another process reading
2396 * the underlying blockdev brought it uptodate (the sct fix).
2397 */
2398 for (i = 0; i < nr; i++) {
2399 bh = arr[i];
2400 if (buffer_uptodate(bh))
2401 end_buffer_async_read(bh, 1);
2402 else
1420c4a5 2403 submit_bh(REQ_OP_READ, bh);
1da177e4
LT
2404 }
2405 return 0;
2406}
2c69e205 2407EXPORT_SYMBOL(block_read_full_folio);
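
A filesystem's ->read_folio is then typically a one-line wrapper around this helper. Sketch, with the hypothetical myfs_get_block() again:

static int myfs_read_folio(struct file *file, struct folio *folio)
{
	return block_read_full_folio(folio, myfs_get_block);
}
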
1da177e4
LT
2408
2409/* utility function for filesystems that need to do work on expanding
89e10787 2410 * truncates. Uses filesystem pagecache writes to allow the filesystem to
1da177e4
LT
2411 * deal with the hole.
2412 */
89e10787 2413int generic_cont_expand_simple(struct inode *inode, loff_t size)
1da177e4
LT
2414{
2415 struct address_space *mapping = inode->i_mapping;
53b524b8 2416 const struct address_space_operations *aops = mapping->a_ops;
1da177e4 2417 struct page *page;
1468c6f4 2418 void *fsdata = NULL;
1da177e4
LT
2419 int err;
2420
c08d3b0e 2421 err = inode_newsize_ok(inode, size);
2422 if (err)
1da177e4
LT
2423 goto out;
2424
53b524b8 2425 err = aops->write_begin(NULL, mapping, size, 0, &page, &fsdata);
89e10787 2426 if (err)
05eb0b51 2427 goto out;
05eb0b51 2428
53b524b8 2429 err = aops->write_end(NULL, mapping, size, 0, 0, page, fsdata);
89e10787 2430 BUG_ON(err > 0);
05eb0b51 2431
1da177e4
LT
2432out:
2433 return err;
2434}
1fe72eaa 2435EXPORT_SYMBOL(generic_cont_expand_simple);
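
The usual caller is the ATTR_SIZE branch of a filesystem's ->setattr: when the file grows, the gap up to the new size is instantiated through ordinary pagecache writes before the new i_size is committed. A hedged sketch:

/* Sketch: the expanding half of a ->setattr(ATTR_SIZE) handler. */
static int myfs_expand_file(struct inode *inode, loff_t newsize)
{
	if (newsize <= i_size_read(inode))
		return 0;		/* shrinking is handled elsewhere */
	return generic_cont_expand_simple(inode, newsize);
}
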
1da177e4 2436
f1e3af72
AB
2437static int cont_expand_zero(struct file *file, struct address_space *mapping,
2438 loff_t pos, loff_t *bytes)
1da177e4 2439{
1da177e4 2440 struct inode *inode = mapping->host;
53b524b8 2441 const struct address_space_operations *aops = mapping->a_ops;
93407472 2442 unsigned int blocksize = i_blocksize(inode);
89e10787 2443 struct page *page;
1468c6f4 2444 void *fsdata = NULL;
89e10787
NP
2445 pgoff_t index, curidx;
2446 loff_t curpos;
2447 unsigned zerofrom, offset, len;
2448 int err = 0;
1da177e4 2449
09cbfeaf
KS
2450 index = pos >> PAGE_SHIFT;
2451 offset = pos & ~PAGE_MASK;
89e10787 2452
09cbfeaf
KS
2453 while (index > (curidx = (curpos = *bytes)>>PAGE_SHIFT)) {
2454 zerofrom = curpos & ~PAGE_MASK;
1da177e4
LT
2455 if (zerofrom & (blocksize-1)) {
2456 *bytes |= (blocksize-1);
2457 (*bytes)++;
2458 }
09cbfeaf 2459 len = PAGE_SIZE - zerofrom;
1da177e4 2460
53b524b8 2461 err = aops->write_begin(file, mapping, curpos, len,
c718a975 2462 &page, &fsdata);
89e10787
NP
2463 if (err)
2464 goto out;
eebd2aa3 2465 zero_user(page, zerofrom, len);
53b524b8 2466 err = aops->write_end(file, mapping, curpos, len, len,
89e10787
NP
2467 page, fsdata);
2468 if (err < 0)
2469 goto out;
2470 BUG_ON(err != len);
2471 err = 0;
061e9746
OH
2472
2473 balance_dirty_pages_ratelimited(mapping);
c2ca0fcd 2474
08d405c8 2475 if (fatal_signal_pending(current)) {
c2ca0fcd
MP
2476 err = -EINTR;
2477 goto out;
2478 }
89e10787 2479 }
1da177e4 2480
89e10787
NP
2481 /* page covers the boundary, find the boundary offset */
2482 if (index == curidx) {
09cbfeaf 2483 zerofrom = curpos & ~PAGE_MASK;
1da177e4 2484 /* if we will expand the thing last block will be filled */
89e10787
NP
2485 if (offset <= zerofrom) {
2486 goto out;
2487 }
2488 if (zerofrom & (blocksize-1)) {
1da177e4
LT
2489 *bytes |= (blocksize-1);
2490 (*bytes)++;
2491 }
89e10787 2492 len = offset - zerofrom;
1da177e4 2493
53b524b8 2494 err = aops->write_begin(file, mapping, curpos, len,
c718a975 2495 &page, &fsdata);
89e10787
NP
2496 if (err)
2497 goto out;
eebd2aa3 2498 zero_user(page, zerofrom, len);
53b524b8 2499 err = aops->write_end(file, mapping, curpos, len, len,
89e10787
NP
2500 page, fsdata);
2501 if (err < 0)
2502 goto out;
2503 BUG_ON(err != len);
2504 err = 0;
1da177e4 2505 }
89e10787
NP
2506out:
2507 return err;
2508}
2509
2510/*
 2511 * For moronic filesystems that do not allow holes in files.
2512 * We may have to extend the file.
2513 */
282dc178 2514int cont_write_begin(struct file *file, struct address_space *mapping,
be3bbbc5 2515 loff_t pos, unsigned len,
89e10787
NP
2516 struct page **pagep, void **fsdata,
2517 get_block_t *get_block, loff_t *bytes)
2518{
2519 struct inode *inode = mapping->host;
93407472
FF
2520 unsigned int blocksize = i_blocksize(inode);
2521 unsigned int zerofrom;
89e10787
NP
2522 int err;
2523
2524 err = cont_expand_zero(file, mapping, pos, bytes);
2525 if (err)
155130a4 2526 return err;
89e10787 2527
09cbfeaf 2528 zerofrom = *bytes & ~PAGE_MASK;
89e10787
NP
2529 if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2530 *bytes |= (blocksize-1);
2531 (*bytes)++;
1da177e4 2532 }
1da177e4 2533
b3992d1e 2534 return block_write_begin(mapping, pos, len, pagep, get_block);
1da177e4 2535}
1fe72eaa 2536EXPORT_SYMBOL(cont_write_begin);
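
Such a filesystem keeps a per-inode watermark recording how far the file has been zero-filled and hands it in as @bytes. A hedged sketch, assuming a hypothetical mmu_private field in the in-memory inode (myfs_i() and myfs_get_block() are likewise hypothetical):

static int myfs_cont_write_begin(struct file *file, struct address_space *mapping,
				 loff_t pos, unsigned len,
				 struct page **pagep, void **fsdata)
{
	/* mmu_private: how far the file has been zero-filled so far. */
	return cont_write_begin(file, mapping, pos, len, pagep, fsdata,
				myfs_get_block, &myfs_i(mapping->host)->mmu_private);
}
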
1da177e4 2537
1da177e4
LT
2538int block_commit_write(struct page *page, unsigned from, unsigned to)
2539{
2540 struct inode *inode = page->mapping->host;
2541 __block_commit_write(inode,page,from,to);
2542 return 0;
2543}
1fe72eaa 2544EXPORT_SYMBOL(block_commit_write);
1da177e4 2545
54171690
DC
2546/*
2547 * block_page_mkwrite() is not allowed to change the file size as it gets
2548 * called from a page fault handler when a page is first dirtied. Hence we must
2549 * be careful to check for EOF conditions here. We set the page up correctly
2550 * for a written page which means we get ENOSPC checking when writing into
2551 * holes and correct delalloc and unwritten extent mapping on filesystems that
2552 * support these features.
2553 *
 2554 * We are not allowed to take the i_rwsem here so we have to play games to
2555 * protect against truncate races as the page could now be beyond EOF. Because
7bb46a67 2556 * truncate writes the inode size before removing pages, once we have the
54171690
DC
2557 * page lock we can determine safely if the page is beyond EOF. If it is not
2558 * beyond EOF, then the page is guaranteed safe against truncation until we
2559 * unlock the page.
ea13a864 2560 *
14da9200 2561 * Direct callers of this function should protect against filesystem freezing
5c500029 2562 * using sb_start_pagefault() - sb_end_pagefault() functions.
54171690 2563 */
5c500029 2564int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
24da4fab 2565 get_block_t get_block)
54171690 2566{
c2ec175c 2567 struct page *page = vmf->page;
496ad9aa 2568 struct inode *inode = file_inode(vma->vm_file);
54171690
DC
2569 unsigned long end;
2570 loff_t size;
24da4fab 2571 int ret;
54171690
DC
2572
2573 lock_page(page);
2574 size = i_size_read(inode);
2575 if ((page->mapping != inode->i_mapping) ||
18336338 2576 (page_offset(page) > size)) {
24da4fab
JK
2577 /* We overload EFAULT to mean page got truncated */
2578 ret = -EFAULT;
2579 goto out_unlock;
54171690
DC
2580 }
2581
2582 /* page is wholly or partially inside EOF */
09cbfeaf
KS
2583 if (((page->index + 1) << PAGE_SHIFT) > size)
2584 end = size & ~PAGE_MASK;
54171690 2585 else
09cbfeaf 2586 end = PAGE_SIZE;
54171690 2587
ebdec241 2588 ret = __block_write_begin(page, 0, end, get_block);
54171690
DC
2589 if (!ret)
2590 ret = block_commit_write(page, 0, end);
2591
24da4fab
JK
2592 if (unlikely(ret < 0))
2593 goto out_unlock;
ea13a864 2594 set_page_dirty(page);
1d1d1a76 2595 wait_for_stable_page(page);
24da4fab
JK
2596 return 0;
2597out_unlock:
2598 unlock_page(page);
54171690 2599 return ret;
24da4fab 2600}
1fe72eaa 2601EXPORT_SYMBOL(block_page_mkwrite);
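
As the comment says, a ->page_mkwrite handler calling this directly has to bracket it with sb_start_pagefault()/sb_end_pagefault() so the dirtying cannot race with a filesystem freeze. A hedged sketch (myfs_get_block() is hypothetical):

static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	int err;

	sb_start_pagefault(inode->i_sb);
	file_update_time(vmf->vma->vm_file);
	err = block_page_mkwrite(vmf->vma, vmf, myfs_get_block);
	sb_end_pagefault(inode->i_sb);
	return block_page_mkwrite_return(err);
}
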
1da177e4 2602
1da177e4
LT
2603int block_truncate_page(struct address_space *mapping,
2604 loff_t from, get_block_t *get_block)
2605{
09cbfeaf
KS
2606 pgoff_t index = from >> PAGE_SHIFT;
2607 unsigned offset = from & (PAGE_SIZE-1);
1da177e4 2608 unsigned blocksize;
54b21a79 2609 sector_t iblock;
1da177e4
LT
2610 unsigned length, pos;
2611 struct inode *inode = mapping->host;
2612 struct page *page;
2613 struct buffer_head *bh;
dc7cb2d2 2614 int err = 0;
1da177e4 2615
93407472 2616 blocksize = i_blocksize(inode);
1da177e4
LT
2617 length = offset & (blocksize - 1);
2618
2619 /* Block boundary? Nothing to do */
2620 if (!length)
2621 return 0;
2622
2623 length = blocksize - length;
09cbfeaf 2624 iblock = (sector_t)index << (PAGE_SHIFT - inode->i_blkbits);
1da177e4
LT
2625
2626 page = grab_cache_page(mapping, index);
1da177e4 2627 if (!page)
dc7cb2d2 2628 return -ENOMEM;
1da177e4
LT
2629
2630 if (!page_has_buffers(page))
2631 create_empty_buffers(page, blocksize, 0);
2632
2633 /* Find the buffer that contains "offset" */
2634 bh = page_buffers(page);
2635 pos = blocksize;
2636 while (offset >= pos) {
2637 bh = bh->b_this_page;
2638 iblock++;
2639 pos += blocksize;
2640 }
2641
1da177e4 2642 if (!buffer_mapped(bh)) {
b0cf2321 2643 WARN_ON(bh->b_size != blocksize);
1da177e4
LT
2644 err = get_block(inode, iblock, bh, 0);
2645 if (err)
2646 goto unlock;
2647 /* unmapped? It's a hole - nothing to do */
2648 if (!buffer_mapped(bh))
2649 goto unlock;
2650 }
2651
2652 /* Ok, it's mapped. Make sure it's up-to-date */
2653 if (PageUptodate(page))
2654 set_buffer_uptodate(bh);
2655
33a266dd 2656 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
e7ea1129 2657 err = bh_read(bh, 0);
1da177e4 2658 /* Uhhuh. Read error. Complain and punt. */
e7ea1129 2659 if (err < 0)
1da177e4
LT
2660 goto unlock;
2661 }
2662
eebd2aa3 2663 zero_user(page, offset, length);
1da177e4 2664 mark_buffer_dirty(bh);
1da177e4
LT
2665
2666unlock:
2667 unlock_page(page);
09cbfeaf 2668 put_page(page);
dc7cb2d2 2669
1da177e4
LT
2670 return err;
2671}
1fe72eaa 2672EXPORT_SYMBOL(block_truncate_page);
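
The typical caller is a truncate/setsize path that must zero the tail of the now-last block so stale data cannot reappear if the file is later extended. A hedged sketch with hypothetical myfs_get_block() and myfs_free_blocks_beyond():

static int myfs_truncate(struct inode *inode, loff_t newsize)
{
	int err;

	/* Zero the part of the final block that lies beyond the new EOF. */
	err = block_truncate_page(inode->i_mapping, newsize, myfs_get_block);
	if (err)
		return err;
	truncate_setsize(inode, newsize);
	return myfs_free_blocks_beyond(inode, newsize);	/* hypothetical */
}
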
1da177e4
LT
2673
2674/*
2675 * The generic ->writepage function for buffer-backed address_spaces
2676 */
1b938c08
MW
2677int block_write_full_page(struct page *page, get_block_t *get_block,
2678 struct writeback_control *wbc)
1da177e4
LT
2679{
2680 struct inode * const inode = page->mapping->host;
2681 loff_t i_size = i_size_read(inode);
09cbfeaf 2682 const pgoff_t end_index = i_size >> PAGE_SHIFT;
1da177e4 2683 unsigned offset;
1da177e4
LT
2684
2685 /* Is the page fully inside i_size? */
2686 if (page->index < end_index)
35c80d5f 2687 return __block_write_full_page(inode, page, get_block, wbc,
1b938c08 2688 end_buffer_async_write);
1da177e4
LT
2689
2690 /* Is the page fully outside i_size? (truncate in progress) */
09cbfeaf 2691 offset = i_size & (PAGE_SIZE-1);
1da177e4 2692 if (page->index >= end_index+1 || !offset) {
1da177e4
LT
2693 unlock_page(page);
2694 return 0; /* don't care */
2695 }
2696
2697 /*
2698 * The page straddles i_size. It must be zeroed out on each and every
2a61aa40 2699 * writepage invocation because it may be mmapped. "A file is mapped
1da177e4
LT
2700 * in multiples of the page size. For a file that is not a multiple of
2701 * the page size, the remaining memory is zeroed when mapped, and
2702 * writes to that region are not written out to the file."
2703 */
09cbfeaf 2704 zero_user_segment(page, offset, PAGE_SIZE);
1b938c08
MW
2705 return __block_write_full_page(inode, page, get_block, wbc,
2706 end_buffer_async_write);
35c80d5f 2707}
1fe72eaa 2708EXPORT_SYMBOL(block_write_full_page);
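
A filesystem's ->writepage wrapper is correspondingly small; sketch with the hypothetical myfs_get_block():

static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, myfs_get_block, wbc);
}
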
35c80d5f 2709
1da177e4
LT
2710sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2711 get_block_t *get_block)
2712{
1da177e4 2713 struct inode *inode = mapping->host;
2a527d68
AP
2714 struct buffer_head tmp = {
2715 .b_size = i_blocksize(inode),
2716 };
2717
1da177e4
LT
2718 get_block(inode, block, &tmp, 0);
2719 return tmp.b_blocknr;
2720}
1fe72eaa 2721EXPORT_SYMBOL(generic_block_bmap);
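
And the matching ->bmap wrapper, again with the hypothetical myfs_get_block():

static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, myfs_get_block);
}
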
1da177e4 2722
4246a0b6 2723static void end_bio_bh_io_sync(struct bio *bio)
1da177e4
LT
2724{
2725 struct buffer_head *bh = bio->bi_private;
2726
b7c44ed9 2727 if (unlikely(bio_flagged(bio, BIO_QUIET)))
08bafc03
KM
2728 set_bit(BH_Quiet, &bh->b_state);
2729
4e4cbee9 2730 bh->b_end_io(bh, !bio->bi_status);
1da177e4 2731 bio_put(bio);
1da177e4
LT
2732}
2733
5bdf402a
RHI
2734static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
2735 struct writeback_control *wbc)
1da177e4 2736{
1420c4a5 2737 const enum req_op op = opf & REQ_OP_MASK;
1da177e4 2738 struct bio *bio;
1da177e4
LT
2739
2740 BUG_ON(!buffer_locked(bh));
2741 BUG_ON(!buffer_mapped(bh));
2742 BUG_ON(!bh->b_end_io);
8fb0e342
AK
2743 BUG_ON(buffer_delay(bh));
2744 BUG_ON(buffer_unwritten(bh));
1da177e4 2745
1da177e4 2746 /*
48fd4f93 2747 * Only clear out a write error when rewriting
1da177e4 2748 */
2a222ca9 2749 if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE))
1da177e4
LT
2750 clear_buffer_write_io_error(bh);
2751
07888c66 2752 if (buffer_meta(bh))
1420c4a5 2753 opf |= REQ_META;
07888c66 2754 if (buffer_prio(bh))
1420c4a5 2755 opf |= REQ_PRIO;
07888c66 2756
1420c4a5 2757 bio = bio_alloc(bh->b_bdev, 1, opf, GFP_NOIO);
1da177e4 2758
4f74d15f
EB
2759 fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
2760
4f024f37 2761 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
1da177e4 2762
6cf66b4c
KO
2763 bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
2764 BUG_ON(bio->bi_iter.bi_size != bh->b_size);
1da177e4
LT
2765
2766 bio->bi_end_io = end_bio_bh_io_sync;
2767 bio->bi_private = bh;
2768
83c9c547
ML
2769 /* Take care of bh's that straddle the end of the device */
2770 guard_bio_eod(bio);
2771
fd42df30
DZ
2772 if (wbc) {
2773 wbc_init_bio(wbc, bio);
34e51a5e 2774 wbc_account_cgroup_owner(wbc, bh->b_page, bh->b_size);
fd42df30
DZ
2775 }
2776
4e49ea4a 2777 submit_bio(bio);
1da177e4 2778}
bafc0dba 2779
5bdf402a 2780void submit_bh(blk_opf_t opf, struct buffer_head *bh)
bafc0dba 2781{
5bdf402a 2782 submit_bh_wbc(opf, bh, NULL);
71368511 2783}
1fe72eaa 2784EXPORT_SYMBOL(submit_bh);
1da177e4 2785
3ae72869 2786void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
9cb569d6
CH
2787{
2788 lock_buffer(bh);
2789 if (!test_clear_buffer_dirty(bh)) {
2790 unlock_buffer(bh);
2791 return;
2792 }
2793 bh->b_end_io = end_buffer_write_sync;
2794 get_bh(bh);
1420c4a5 2795 submit_bh(REQ_OP_WRITE | op_flags, bh);
9cb569d6
CH
2796}
2797EXPORT_SYMBOL(write_dirty_buffer);
2798
1da177e4
LT
2799/*
2800 * For a data-integrity writeout, we need to wait upon any in-progress I/O
2801 * and then start new I/O and then wait upon it. The caller must have a ref on
2802 * the buffer_head.
2803 */
3ae72869 2804int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
1da177e4 2805{
1da177e4
LT
2806 WARN_ON(atomic_read(&bh->b_count) < 1);
2807 lock_buffer(bh);
2808 if (test_clear_buffer_dirty(bh)) {
377254b2
XT
2809 /*
2810 * The bh should be mapped, but it might not be if the
2811 * device was hot-removed. Not much we can do but fail the I/O.
2812 */
2813 if (!buffer_mapped(bh)) {
2814 unlock_buffer(bh);
2815 return -EIO;
2816 }
2817
1da177e4
LT
2818 get_bh(bh);
2819 bh->b_end_io = end_buffer_write_sync;
ab620620 2820 submit_bh(REQ_OP_WRITE | op_flags, bh);
1da177e4 2821 wait_on_buffer(bh);
ab620620
RHI
2822 if (!buffer_uptodate(bh))
2823 return -EIO;
1da177e4
LT
2824 } else {
2825 unlock_buffer(bh);
2826 }
ab620620 2827 return 0;
1da177e4 2828}
87e99511
CH
2829EXPORT_SYMBOL(__sync_dirty_buffer);
2830
2831int sync_dirty_buffer(struct buffer_head *bh)
2832{
70fd7614 2833 return __sync_dirty_buffer(bh, REQ_SYNC);
87e99511 2834}
1fe72eaa 2835EXPORT_SYMBOL(sync_dirty_buffer);
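
Metadata updates use this for the classic "modify, dirty, force out, check" sequence, e.g. when a superblock change must be on stable storage before the caller can proceed. A hedged sketch; myfs_update_super() is hypothetical:

static int myfs_sync_super(struct super_block *sb, sector_t sb_block)
{
	struct buffer_head *bh = sb_bread(sb, sb_block);
	int err;

	if (!bh)
		return -EIO;
	myfs_update_super(bh->b_data);		/* edit the on-disk image */
	mark_buffer_dirty(bh);
	err = sync_dirty_buffer(bh);		/* waits; reports write errors */
	brelse(bh);
	return err;
}
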
1da177e4
LT
2836
2837/*
68189fef 2838 * try_to_free_buffers() checks if all the buffers on this particular folio
1da177e4
LT
2839 * are unused, and releases them if so.
2840 *
2841 * Exclusion against try_to_free_buffers may be obtained by either
68189fef 2842 * locking the folio or by holding its mapping's private_lock.
1da177e4 2843 *
68189fef
MWO
2844 * If the folio is dirty but all the buffers are clean then we need to
2845 * be sure to mark the folio clean as well. This is because the folio
1da177e4 2846 * may be against a block device, and a later reattachment of buffers
68189fef 2847 * to a dirty folio will set *all* buffers dirty. Which would corrupt
1da177e4
LT
2848 * filesystem data on the same device.
2849 *
68189fef
MWO
2850 * The same applies to regular filesystem folios: if all the buffers are
2851 * clean then we set the folio clean and proceed. To do that, we require
e621900a 2852 * total exclusion from block_dirty_folio(). That is obtained with
1da177e4
LT
2853 * private_lock.
2854 *
2855 * try_to_free_buffers() is non-blocking.
2856 */
2857static inline int buffer_busy(struct buffer_head *bh)
2858{
2859 return atomic_read(&bh->b_count) |
2860 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
2861}
2862
64394763
MWO
2863static bool
2864drop_buffers(struct folio *folio, struct buffer_head **buffers_to_free)
1da177e4 2865{
64394763 2866 struct buffer_head *head = folio_buffers(folio);
1da177e4
LT
2867 struct buffer_head *bh;
2868
2869 bh = head;
2870 do {
1da177e4
LT
2871 if (buffer_busy(bh))
2872 goto failed;
2873 bh = bh->b_this_page;
2874 } while (bh != head);
2875
2876 do {
2877 struct buffer_head *next = bh->b_this_page;
2878
535ee2fb 2879 if (bh->b_assoc_map)
1da177e4
LT
2880 __remove_assoc_queue(bh);
2881 bh = next;
2882 } while (bh != head);
2883 *buffers_to_free = head;
64394763
MWO
2884 folio_detach_private(folio);
2885 return true;
1da177e4 2886failed:
64394763 2887 return false;
1da177e4
LT
2888}
2889
68189fef 2890bool try_to_free_buffers(struct folio *folio)
1da177e4 2891{
68189fef 2892 struct address_space * const mapping = folio->mapping;
1da177e4 2893 struct buffer_head *buffers_to_free = NULL;
68189fef 2894 bool ret = false;
1da177e4 2895
68189fef
MWO
2896 BUG_ON(!folio_test_locked(folio));
2897 if (folio_test_writeback(folio))
2898 return false;
1da177e4
LT
2899
2900 if (mapping == NULL) { /* can this still happen? */
64394763 2901 ret = drop_buffers(folio, &buffers_to_free);
1da177e4
LT
2902 goto out;
2903 }
2904
2905 spin_lock(&mapping->private_lock);
64394763 2906 ret = drop_buffers(folio, &buffers_to_free);
ecdfc978
LT
2907
2908 /*
2909 * If the filesystem writes its buffers by hand (eg ext3)
68189fef
MWO
2910 * then we can have clean buffers against a dirty folio. We
2911 * clean the folio here; otherwise the VM will never notice
ecdfc978
LT
2912 * that the filesystem did any IO at all.
2913 *
2914 * Also, during truncate, discard_buffer will have marked all
68189fef
MWO
2915 * the folio's buffers clean. We discover that here and clean
2916 * the folio also.
87df7241
NP
2917 *
2918 * private_lock must be held over this entire operation in order
e621900a 2919 * to synchronise against block_dirty_folio and prevent the
87df7241 2920 * dirty bit from being lost.
ecdfc978 2921 */
11f81bec 2922 if (ret)
68189fef 2923 folio_cancel_dirty(folio);
87df7241 2924 spin_unlock(&mapping->private_lock);
1da177e4
LT
2925out:
2926 if (buffers_to_free) {
2927 struct buffer_head *bh = buffers_to_free;
2928
2929 do {
2930 struct buffer_head *next = bh->b_this_page;
2931 free_buffer_head(bh);
2932 bh = next;
2933 } while (bh != buffers_to_free);
2934 }
2935 return ret;
2936}
2937EXPORT_SYMBOL(try_to_free_buffers);
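
filemap_release_folio() falls back to try_to_free_buffers() when an address_space supplies no ->release_folio, so a filesystem only needs its own callback when it has extra conditions to check before the buffers may go. A hedged sketch with a hypothetical myfs_folio_has_pinned_buffers():

static bool myfs_release_folio(struct folio *folio, gfp_t gfp)
{
	/* e.g. buffers still referenced by a journal must not be freed */
	if (myfs_folio_has_pinned_buffers(folio))
		return false;
	return try_to_free_buffers(folio);
}
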
2938
1da177e4
LT
2939/*
2940 * Buffer-head allocation
2941 */
a0a9b043 2942static struct kmem_cache *bh_cachep __read_mostly;
1da177e4
LT
2943
2944/*
2945 * Once the number of bh's in the machine exceeds this level, we start
2946 * stripping them in writeback.
2947 */
43be594a 2948static unsigned long max_buffer_heads;
1da177e4
LT
2949
2950int buffer_heads_over_limit;
2951
2952struct bh_accounting {
2953 int nr; /* Number of live bh's */
2954 int ratelimit; /* Limit cacheline bouncing */
2955};
2956
2957static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
2958
2959static void recalc_bh_state(void)
2960{
2961 int i;
2962 int tot = 0;
2963
ee1be862 2964 if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096)
1da177e4 2965 return;
c7b92516 2966 __this_cpu_write(bh_accounting.ratelimit, 0);
8a143426 2967 for_each_online_cpu(i)
1da177e4
LT
2968 tot += per_cpu(bh_accounting, i).nr;
2969 buffer_heads_over_limit = (tot > max_buffer_heads);
2970}
c7b92516 2971
dd0fc66f 2972struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
1da177e4 2973{
019b4d12 2974 struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
1da177e4 2975 if (ret) {
a35afb83 2976 INIT_LIST_HEAD(&ret->b_assoc_buffers);
f1e67e35 2977 spin_lock_init(&ret->b_uptodate_lock);
c7b92516
CL
2978 preempt_disable();
2979 __this_cpu_inc(bh_accounting.nr);
1da177e4 2980 recalc_bh_state();
c7b92516 2981 preempt_enable();
1da177e4
LT
2982 }
2983 return ret;
2984}
2985EXPORT_SYMBOL(alloc_buffer_head);
2986
2987void free_buffer_head(struct buffer_head *bh)
2988{
2989 BUG_ON(!list_empty(&bh->b_assoc_buffers));
2990 kmem_cache_free(bh_cachep, bh);
c7b92516
CL
2991 preempt_disable();
2992 __this_cpu_dec(bh_accounting.nr);
1da177e4 2993 recalc_bh_state();
c7b92516 2994 preempt_enable();
1da177e4
LT
2995}
2996EXPORT_SYMBOL(free_buffer_head);
2997
fc4d24c9 2998static int buffer_exit_cpu_dead(unsigned int cpu)
1da177e4
LT
2999{
3000 int i;
3001 struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3002
3003 for (i = 0; i < BH_LRU_SIZE; i++) {
3004 brelse(b->bhs[i]);
3005 b->bhs[i] = NULL;
3006 }
c7b92516 3007 this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
8a143426 3008 per_cpu(bh_accounting, cpu).nr = 0;
fc4d24c9 3009 return 0;
1da177e4 3010}
1da177e4 3011
389d1b08 3012/**
a6b91919 3013 * bh_uptodate_or_lock - Test whether the buffer is uptodate
389d1b08
AK
3014 * @bh: struct buffer_head
3015 *
3016 * Return true if the buffer is up-to-date and false,
3017 * with the buffer locked, if not.
3018 */
3019int bh_uptodate_or_lock(struct buffer_head *bh)
3020{
3021 if (!buffer_uptodate(bh)) {
3022 lock_buffer(bh);
3023 if (!buffer_uptodate(bh))
3024 return 0;
3025 unlock_buffer(bh);
3026 }
3027 return 1;
3028}
3029EXPORT_SYMBOL(bh_uptodate_or_lock);
3030
3031/**
fdee117e 3032 * __bh_read - Submit read for a locked buffer
389d1b08 3033 * @bh: struct buffer_head
fdee117e
ZY
3034 * @op_flags: appending REQ_OP_* flags besides REQ_OP_READ
 3035 * @wait: wait until the read finishes
 389d1b08 3036 *
 fdee117e 3037 * Returns zero on success (or immediately when not waiting), and -EIO on error.
389d1b08 3038 */
fdee117e 3039int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait)
389d1b08 3040{
fdee117e 3041 int ret = 0;
389d1b08 3042
fdee117e 3043 BUG_ON(!buffer_locked(bh));
389d1b08
AK
3044
3045 get_bh(bh);
3046 bh->b_end_io = end_buffer_read_sync;
fdee117e
ZY
3047 submit_bh(REQ_OP_READ | op_flags, bh);
3048 if (wait) {
3049 wait_on_buffer(bh);
3050 if (!buffer_uptodate(bh))
3051 ret = -EIO;
3052 }
3053 return ret;
3054}
3055EXPORT_SYMBOL(__bh_read);
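
Callers normally pair this with bh_uptodate_or_lock() above; the bh_read() and bh_read_nowait() wrappers used earlier in this file do exactly that. A sketch of the synchronous pattern:

static int myfs_read_bh_sync(struct buffer_head *bh)
{
	if (bh_uptodate_or_lock(bh))
		return 0;		/* already uptodate, buffer left unlocked */
	/* bh is now locked and stale: submit the read and wait for completion */
	return __bh_read(bh, 0, true);	/* 0 on success, -EIO on error */
}
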
3056
3057/**
3058 * __bh_read_batch - Submit read for a batch of unlocked buffers
3059 * @nr: entry number of the buffer batch
3060 * @bhs: a batch of struct buffer_head
3061 * @op_flags: appending REQ_OP_* flags besides REQ_OP_READ
3062 * @force_lock: force to get a lock on the buffer if set, otherwise drops any
3063 * buffer that cannot lock.
3064 *
3065 * Returns zero on success or don't wait, and -EIO on error.
3066 */
3067void __bh_read_batch(int nr, struct buffer_head *bhs[],
3068 blk_opf_t op_flags, bool force_lock)
3069{
3070 int i;
3071
3072 for (i = 0; i < nr; i++) {
3073 struct buffer_head *bh = bhs[i];
3074
3075 if (buffer_uptodate(bh))
3076 continue;
3077
3078 if (force_lock)
3079 lock_buffer(bh);
3080 else
3081 if (!trylock_buffer(bh))
3082 continue;
3083
3084 if (buffer_uptodate(bh)) {
3085 unlock_buffer(bh);
3086 continue;
3087 }
3088
3089 bh->b_end_io = end_buffer_read_sync;
3090 get_bh(bh);
3091 submit_bh(REQ_OP_READ | op_flags, bh);
3092 }
389d1b08 3093}
fdee117e 3094EXPORT_SYMBOL(__bh_read_batch);
389d1b08 3095
1da177e4
LT
3096void __init buffer_init(void)
3097{
43be594a 3098 unsigned long nrpages;
fc4d24c9 3099 int ret;
1da177e4 3100
b98938c3
CL
3101 bh_cachep = kmem_cache_create("buffer_head",
3102 sizeof(struct buffer_head), 0,
3103 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3104 SLAB_MEM_SPREAD),
019b4d12 3105 NULL);
1da177e4
LT
3106
3107 /*
3108 * Limit the bh occupancy to 10% of ZONE_NORMAL
3109 */
3110 nrpages = (nr_free_buffer_pages() * 10) / 100;
3111 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
fc4d24c9
SAS
3112 ret = cpuhp_setup_state_nocalls(CPUHP_FS_BUFF_DEAD, "fs/buffer:dead",
3113 NULL, buffer_exit_cpu_dead);
3114 WARN_ON(ret < 0);
1da177e4 3115}