/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/buffer_head.h
 *
 * Everything to do with buffer_heads.
 */

#ifndef _LINUX_BUFFER_HEAD_H
#define _LINUX_BUFFER_HEAD_H

#include <linux/types.h>
#include <linux/blk_types.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/pagemap.h>
#include <linux/wait.h>
#include <linux/atomic.h>

enum bh_state_bits {
	BH_Uptodate,	/* Contains valid data */
	BH_Dirty,	/* Is dirty */
	BH_Lock,	/* Is locked */
	BH_Req,		/* Has been submitted for I/O */

	BH_Mapped,	/* Has a disk mapping */
	BH_New,		/* Disk mapping was newly created by get_block */
	BH_Async_Read,	/* Is under end_buffer_async_read I/O */
	BH_Async_Write,	/* Is under end_buffer_async_write I/O */
	BH_Delay,	/* Buffer is not yet allocated on disk */
	BH_Boundary,	/* Block is followed by a discontiguity */
	BH_Write_EIO,	/* I/O error on write */
	BH_Unwritten,	/* Buffer is allocated on disk but not written */
	BH_Quiet,	/* Buffer error printks to be quiet */
	BH_Meta,	/* Buffer contains metadata */
	BH_Prio,	/* Buffer should be submitted with REQ_PRIO */
	BH_Defer_Completion, /* Defer AIO completion to workqueue */

	BH_PrivateStart,/* not a state bit, but the first bit available
			 * for private allocation by other entities
			 */
};

#define MAX_BUF_PER_PAGE (PAGE_SIZE / 512)

struct page;
struct buffer_head;
struct address_space;
typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);

/*
 * Historically, a buffer_head was used to map a single block
 * within a page, and of course as the unit of I/O through the
 * filesystem and block layers.  Nowadays the basic I/O unit
 * is the bio, and buffer_heads are used for extracting block
 * mappings (via a get_block_t call), for tracking state within
 * a page (via a page_mapping) and for wrapping bio submission
 * for backward compatibility reasons (e.g. submit_bh).
 */
struct buffer_head {
	unsigned long b_state;		/* buffer state bitmap (see above) */
	struct buffer_head *b_this_page;/* circular list of page's buffers */
	union {
		struct page *b_page;	/* the page this bh is mapped to */
		struct folio *b_folio;	/* the folio this bh is mapped to */
	};

	sector_t b_blocknr;		/* start block number */
	size_t b_size;			/* size of mapping */
	char *b_data;			/* pointer to data within the page */

	struct block_device *b_bdev;
	bh_end_io_t *b_end_io;		/* I/O completion */
	void *b_private;		/* reserved for b_end_io */
	struct list_head b_assoc_buffers; /* associated with another mapping */
	struct address_space *b_assoc_map;	/* mapping this buffer is
						   associated with */
	atomic_t b_count;		/* users using this buffer_head */
	spinlock_t b_uptodate_lock;	/* Used by the first bh in a page, to
					 * serialise IO completion of other
					 * buffers in the page */
};
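
/*
 * Illustrative sketch (a common usage pattern, not part of this header's
 * API): because b_this_page is circular, every buffer attached to a folio
 * can be visited starting from the head buffer returned by folio_buffers():
 *
 *	struct buffer_head *bh, *head;
 *
 *	bh = head = folio_buffers(folio);
 *	do {
 *		(inspect or update bh here)
 *		bh = bh->b_this_page;
 *	} while (bh != head);
 */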

/*
 * macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
 * and buffer_foo() functions.
 * To avoid resetting buffer flags that are already set (which would cause
 * a costly cache line transition), check the flag first.
 */
#define BUFFER_FNS(bit, name)						\
static __always_inline void set_buffer_##name(struct buffer_head *bh)	\
{									\
	if (!test_bit(BH_##bit, &(bh)->b_state))			\
		set_bit(BH_##bit, &(bh)->b_state);			\
}									\
static __always_inline void clear_buffer_##name(struct buffer_head *bh) \
{									\
	clear_bit(BH_##bit, &(bh)->b_state);				\
}									\
static __always_inline int buffer_##name(const struct buffer_head *bh)	\
{									\
	return test_bit(BH_##bit, &(bh)->b_state);			\
}
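
/*
 * For illustration: BUFFER_FNS(Dirty, dirty) below expands to
 *
 *	static __always_inline void set_buffer_dirty(struct buffer_head *bh);
 *	static __always_inline void clear_buffer_dirty(struct buffer_head *bh);
 *	static __always_inline int buffer_dirty(const struct buffer_head *bh);
 *
 * all operating on the BH_Dirty bit of bh->b_state.
 */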

/*
 * test_set_buffer_foo() and test_clear_buffer_foo()
 */
#define TAS_BUFFER_FNS(bit, name)					\
static __always_inline int test_set_buffer_##name(struct buffer_head *bh) \
{									\
	return test_and_set_bit(BH_##bit, &(bh)->b_state);		\
}									\
static __always_inline int test_clear_buffer_##name(struct buffer_head *bh) \
{									\
	return test_and_clear_bit(BH_##bit, &(bh)->b_state);		\
}

/*
 * Emit the buffer bitops functions.  Note that there are also functions
 * of the form "mark_buffer_foo()".  These are higher-level functions which
 * do something in addition to setting a b_state bit.
 */
BUFFER_FNS(Dirty, dirty)
TAS_BUFFER_FNS(Dirty, dirty)
BUFFER_FNS(Lock, locked)
BUFFER_FNS(Req, req)
TAS_BUFFER_FNS(Req, req)
BUFFER_FNS(Mapped, mapped)
BUFFER_FNS(New, new)
BUFFER_FNS(Async_Read, async_read)
BUFFER_FNS(Async_Write, async_write)
BUFFER_FNS(Delay, delay)
BUFFER_FNS(Boundary, boundary)
BUFFER_FNS(Write_EIO, write_io_error)
BUFFER_FNS(Unwritten, unwritten)
BUFFER_FNS(Meta, meta)
BUFFER_FNS(Prio, prio)
BUFFER_FNS(Defer_Completion, defer_completion)

static __always_inline void set_buffer_uptodate(struct buffer_head *bh)
{
	/*
	 * If somebody else already set this uptodate, they will
	 * have done the memory barrier, and a reader will thus
	 * see *some* valid buffer state.
	 *
	 * Any other serialization (with IO errors or whatever that
	 * might clear the bit) has to come from other state (eg BH_Lock).
	 */
	if (test_bit(BH_Uptodate, &bh->b_state))
		return;

	/*
	 * Make it consistent with folio_mark_uptodate();
	 * pairs with smp_load_acquire() in buffer_uptodate().
	 */
	smp_mb__before_atomic();
	set_bit(BH_Uptodate, &bh->b_state);
}

static __always_inline void clear_buffer_uptodate(struct buffer_head *bh)
{
	clear_bit(BH_Uptodate, &bh->b_state);
}

static __always_inline int buffer_uptodate(const struct buffer_head *bh)
{
	/*
	 * Make it consistent with folio_test_uptodate();
	 * pairs with smp_mb__before_atomic() in set_buffer_uptodate().
	 */
	return test_bit_acquire(BH_Uptodate, &bh->b_state);
}

static inline unsigned long bh_offset(const struct buffer_head *bh)
{
	return (unsigned long)(bh)->b_data & (page_size(bh->b_page) - 1);
}
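
/*
 * Worked example: with a 4096-byte page carved into 1024-byte blocks, the
 * third buffer's b_data points 2048 bytes into the page, so bh_offset()
 * masks the pointer with (4096 - 1) and returns 2048.
 */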

/* If we *know* page->private refers to buffer_heads */
#define page_buffers(page)					\
	({							\
		BUG_ON(!PagePrivate(page));			\
		((struct buffer_head *)page_private(page));	\
	})
#define page_has_buffers(page)	PagePrivate(page)
#define folio_buffers(folio)	folio_get_private(folio)

void buffer_check_dirty_writeback(struct folio *folio,
				  bool *dirty, bool *writeback);

/*
 * Declarations
 */

void mark_buffer_dirty(struct buffer_head *bh);
void mark_buffer_write_io_error(struct buffer_head *bh);
void touch_buffer(struct buffer_head *bh);
void folio_set_bh(struct buffer_head *bh, struct folio *folio,
		  unsigned long offset);
struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size,
					gfp_t gfp);
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
				       bool retry);
struct buffer_head *create_empty_buffers(struct folio *folio,
					 unsigned long blocksize,
					 unsigned long b_state);
void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
void end_buffer_write_sync(struct buffer_head *bh, int uptodate);

/* Things to do with buffers at mapping->private_list */
void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
int generic_buffers_fsync_noflush(struct file *file, loff_t start, loff_t end,
				  bool datasync);
int generic_buffers_fsync(struct file *file, loff_t start, loff_t end,
			  bool datasync);
void clean_bdev_aliases(struct block_device *bdev, sector_t block,
			sector_t len);
static inline void clean_bdev_bh_alias(struct buffer_head *bh)
{
	clean_bdev_aliases(bh->b_bdev, bh->b_blocknr, 1);
}

void mark_buffer_async_write(struct buffer_head *bh);
void __wait_on_buffer(struct buffer_head *);
wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
				     unsigned size);
struct buffer_head *bdev_getblk(struct block_device *bdev, sector_t block,
				unsigned size, gfp_t gfp);
void __brelse(struct buffer_head *);
void __bforget(struct buffer_head *);
void __breadahead(struct block_device *, sector_t block, unsigned int size);
struct buffer_head *__bread_gfp(struct block_device *,
				sector_t block, unsigned size, gfp_t gfp);
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
void free_buffer_head(struct buffer_head *bh);
void unlock_buffer(struct buffer_head *bh);
void __lock_buffer(struct buffer_head *bh);
int sync_dirty_buffer(struct buffer_head *bh);
int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
void submit_bh(blk_opf_t, struct buffer_head *);
void write_boundary_block(struct block_device *bdev,
			  sector_t bblock, unsigned blocksize);
int bh_uptodate_or_lock(struct buffer_head *bh);
int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait);
void __bh_read_batch(int nr, struct buffer_head *bhs[],
		     blk_opf_t op_flags, bool force_lock);

/*
 * Generic address_space_operations implementations for buffer_head-backed
 * address_spaces.
 */
void block_invalidate_folio(struct folio *folio, size_t offset, size_t length);
int block_write_full_folio(struct folio *folio, struct writeback_control *wbc,
			   void *get_block);
int __block_write_full_folio(struct inode *inode, struct folio *folio,
			     get_block_t *get_block,
			     struct writeback_control *wbc);
int block_read_full_folio(struct folio *, get_block_t *);
bool block_is_partially_uptodate(struct folio *, size_t from, size_t count);
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
		      struct page **pagep, get_block_t *get_block);
int __block_write_begin(struct page *page, loff_t pos, unsigned len,
			get_block_t *get_block);
int block_write_end(struct file *, struct address_space *,
		    loff_t, unsigned, unsigned,
		    struct page *, void *);
int generic_write_end(struct file *, struct address_space *,
		      loff_t, unsigned, unsigned,
		      struct page *, void *);
void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to);
int cont_write_begin(struct file *, struct address_space *, loff_t,
		     unsigned, struct page **, void **,
		     get_block_t *, loff_t *);
int generic_cont_expand_simple(struct inode *inode, loff_t size);
void block_commit_write(struct page *page, unsigned int from, unsigned int to);
int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
		       get_block_t get_block);
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
int block_truncate_page(struct address_space *, loff_t, get_block_t *);

#ifdef CONFIG_MIGRATION
extern int buffer_migrate_folio(struct address_space *,
		struct folio *dst, struct folio *src, enum migrate_mode);
extern int buffer_migrate_folio_norefs(struct address_space *,
		struct folio *dst, struct folio *src, enum migrate_mode);
#else
#define buffer_migrate_folio NULL
#define buffer_migrate_folio_norefs NULL
#endif

/*
 * inline definitions
 */

static inline void get_bh(struct buffer_head *bh)
{
	atomic_inc(&bh->b_count);
}

static inline void put_bh(struct buffer_head *bh)
{
	smp_mb__before_atomic();
	atomic_dec(&bh->b_count);
}

static inline void brelse(struct buffer_head *bh)
{
	if (bh)
		__brelse(bh);
}

static inline void bforget(struct buffer_head *bh)
{
	if (bh)
		__bforget(bh);
}

static inline struct buffer_head *
sb_bread(struct super_block *sb, sector_t block)
{
	return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
}

static inline struct buffer_head *
sb_bread_unmovable(struct super_block *sb, sector_t block)
{
	return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, 0);
}

static inline void
sb_breadahead(struct super_block *sb, sector_t block)
{
	__breadahead(sb->s_bdev, block, sb->s_blocksize);
}

static inline struct buffer_head *getblk_unmovable(struct block_device *bdev,
		sector_t block, unsigned size)
{
	gfp_t gfp;

	gfp = mapping_gfp_constraint(bdev->bd_inode->i_mapping, ~__GFP_FS);
	gfp |= __GFP_NOFAIL;

	return bdev_getblk(bdev, block, size, gfp);
}

static inline struct buffer_head *__getblk(struct block_device *bdev,
		sector_t block, unsigned size)
{
	gfp_t gfp;

	gfp = mapping_gfp_constraint(bdev->bd_inode->i_mapping, ~__GFP_FS);
	gfp |= __GFP_MOVABLE | __GFP_NOFAIL;

	return bdev_getblk(bdev, block, size, gfp);
}

static inline struct buffer_head *sb_getblk(struct super_block *sb,
		sector_t block)
{
	return __getblk(sb->s_bdev, block, sb->s_blocksize);
}

static inline struct buffer_head *sb_getblk_gfp(struct super_block *sb,
		sector_t block, gfp_t gfp)
{
	return bdev_getblk(sb->s_bdev, block, sb->s_blocksize, gfp);
}

static inline struct buffer_head *
sb_find_get_block(struct super_block *sb, sector_t block)
{
	return __find_get_block(sb->s_bdev, block, sb->s_blocksize);
}

static inline void
map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block)
{
	set_buffer_mapped(bh);
	bh->b_bdev = sb->s_bdev;
	bh->b_blocknr = block;
	bh->b_size = sb->s_blocksize;
}
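
/*
 * Illustrative sketch (hypothetical filesystem code; myfs_lookup_block()
 * and myfs_alloc_block() are stand-ins, not real helpers): a get_block_t
 * implementation typically resolves the logical block and fills in the
 * result bh via map_bh(), marking freshly allocated blocks with BH_New:
 *
 *	static int myfs_get_block(struct inode *inode, sector_t iblock,
 *				  struct buffer_head *bh_result, int create)
 *	{
 *		sector_t phys = myfs_lookup_block(inode, iblock);
 *
 *		if (!phys) {
 *			if (!create)
 *				return 0;	(a hole: leave bh unmapped)
 *			phys = myfs_alloc_block(inode, iblock);
 *			set_buffer_new(bh_result);
 *		}
 *		map_bh(bh_result, inode->i_sb, phys);
 *		return 0;
 *	}
 */
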
static inline void wait_on_buffer(struct buffer_head *bh)
{
	might_sleep();
	if (buffer_locked(bh))
		__wait_on_buffer(bh);
}

static inline int trylock_buffer(struct buffer_head *bh)
{
	return likely(!test_and_set_bit_lock(BH_Lock, &bh->b_state));
}

static inline void lock_buffer(struct buffer_head *bh)
{
	might_sleep();
	if (!trylock_buffer(bh))
		__lock_buffer(bh);
}

static inline void bh_readahead(struct buffer_head *bh, blk_opf_t op_flags)
{
	if (!buffer_uptodate(bh) && trylock_buffer(bh)) {
		if (!buffer_uptodate(bh))
			__bh_read(bh, op_flags, false);
		else
			unlock_buffer(bh);
	}
}

static inline void bh_read_nowait(struct buffer_head *bh, blk_opf_t op_flags)
{
	if (!bh_uptodate_or_lock(bh))
		__bh_read(bh, op_flags, false);
}

/* Returns 1 if the buffer is already uptodate, 0 on success, and -EIO on error. */
static inline int bh_read(struct buffer_head *bh, blk_opf_t op_flags)
{
	if (bh_uptodate_or_lock(bh))
		return 1;
	return __bh_read(bh, op_flags, true);
}
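
/*
 * Illustrative sketch: a synchronous caller can use bh_read() to make
 * sure a buffer's contents are valid before touching b_data:
 *
 *	int err = bh_read(bh, 0);
 *
 *	if (err < 0)
 *		return -EIO;	(the read failed)
 *	(bh is now uptodate: err is 1 if it already was, 0 after a read)
 */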

static inline void bh_read_batch(int nr, struct buffer_head *bhs[])
{
	__bh_read_batch(nr, bhs, 0, true);
}

static inline void bh_readahead_batch(int nr, struct buffer_head *bhs[],
				      blk_opf_t op_flags)
{
	__bh_read_batch(nr, bhs, op_flags, false);
}

/**
 * __bread() - read a specified block and return the bh
 * @bdev: the block_device to read from
 * @block: number of block
 * @size: size (in bytes) to read
 *
 * Reads the specified block and returns the buffer head that contains it.
 * The page cache is allocated from the movable area so that it can be
 * migrated.  Returns NULL if the block was unreadable.
 */
static inline struct buffer_head *
__bread(struct block_device *bdev, sector_t block, unsigned size)
{
	return __bread_gfp(bdev, block, size, __GFP_MOVABLE);
}
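
/*
 * Illustrative sketch: the classic read-use-release pattern built on the
 * helpers above (sb_bread() is __bread_gfp() at the superblock's block
 * size; see the inline definitions earlier in this header):
 *
 *	struct buffer_head *bh = sb_bread(sb, block);
 *
 *	if (!bh)
 *		return -EIO;	(the block was unreadable)
 *	(... use bh->b_data, bh->b_size ...)
 *	brelse(bh);
 */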

/**
 * get_nth_bh - Get a reference on the n'th buffer after this one.
 * @bh: The buffer to start counting from.
 * @count: How many buffers to skip.
 *
 * This is primarily useful for finding the nth buffer in a folio; in
 * that case you pass the head buffer and the byte offset in the folio
 * divided by the block size.  It can be used for other purposes, but
 * it will wrap at the end of the folio rather than returning NULL or
 * proceeding to the next folio for you.
 *
 * Return: The requested buffer with an elevated refcount.
 */
static inline __must_check
struct buffer_head *get_nth_bh(struct buffer_head *bh, unsigned int count)
{
	while (count--)
		bh = bh->b_this_page;
	get_bh(bh);
	return bh;
}
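
/*
 * Illustrative sketch: finding the buffer that covers a byte offset
 * within a folio, as the kernel-doc above describes (assumes buffers of
 * uniform blocksize):
 *
 *	struct buffer_head *head = folio_buffers(folio);
 *	struct buffer_head *bh = get_nth_bh(head, offset / blocksize);
 *
 *	(... use bh ...)
 *	put_bh(bh);
 */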

bool block_dirty_folio(struct address_space *mapping, struct folio *folio);

#ifdef CONFIG_BUFFER_HEAD

void buffer_init(void);
bool try_to_free_buffers(struct folio *folio);
int inode_has_buffers(struct inode *inode);
void invalidate_inode_buffers(struct inode *inode);
int remove_inode_buffers(struct inode *inode);
int sync_mapping_buffers(struct address_space *mapping);
void invalidate_bh_lrus(void);
void invalidate_bh_lrus_cpu(void);
bool has_bh_in_lru(int cpu, void *dummy);
extern int buffer_heads_over_limit;

#else /* CONFIG_BUFFER_HEAD */

static inline void buffer_init(void) {}
static inline bool try_to_free_buffers(struct folio *folio) { return true; }
static inline int inode_has_buffers(struct inode *inode) { return 0; }
static inline void invalidate_inode_buffers(struct inode *inode) {}
static inline int remove_inode_buffers(struct inode *inode) { return 1; }
static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }
static inline void invalidate_bh_lrus(void) {}
static inline void invalidate_bh_lrus_cpu(void) {}
static inline bool has_bh_in_lru(int cpu, void *dummy) { return false; }
#define buffer_heads_over_limit 0

#endif /* CONFIG_BUFFER_HEAD */
#endif /* _LINUX_BUFFER_HEAD_H */