1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* -*- mode: c; c-basic-offset: 8; -*-
3 * vim: noexpandtab sw=8 ts=8 sts=0:
7 * Buffer cache handling
9 * Copyright (C) 2002, 2004 Oracle. All rights reserved.
13 #include <linux/types.h>
14 #include <linux/highmem.h>
15 #include <linux/bio.h>
17 #include <cluster/masklog.h>
25 #include "buffer_head_io.h"
26 #include "ocfs2_trace.h"
29 * Bits on bh->b_state used by ocfs2.
31 * These MUST be after the JBD2 bits. Hence, we use BH_JBDPrivateStart.
/*
 * BH_NeedsValidate: set on a bh just before read submission (see
 * ocfs2_read_blocks()) and checked/cleared after I/O completes so the
 * caller-supplied validate() callback runs exactly once per fresh read.
 * NOTE(review): the enum's closing brace is elided from this excerpt.
 */
33 enum ocfs2_state_bits {
34 BH_NeedsValidate = BH_JBDPrivateStart,
37 /* Expand the magic b_state functions */
/* Generates buffer_needs_validate()/set_buffer_needs_validate()/
 * clear_buffer_needs_validate() accessors used below. */
38 BUFFER_FNS(NeedsValidate, needs_validate);
/*
 * ocfs2_write_block() - synchronously write a single buffer head to disk,
 * bypassing the journal.
 * @osb: ocfs2 superblock info.
 * @bh:  buffer to write; must not be journal-attached (BUG_ON below) and
 *       must not sit below the superblock block number.
 * @ci:  metadata cache the buffer belongs to; its io lock is held around
 *       the write so the clustered uptodate state stays consistent.
 *
 * NOTE(review): this excerpt elides source lines (see the numbering gaps);
 * the function's error paths, closing braces and return value are not
 * visible here — do not infer them from this fragment.
 */
40 int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh,
41 struct ocfs2_caching_info *ci)
45 trace_ocfs2_write_block((unsigned long long)bh->b_blocknr, ci);
/* Sanity: never write at/below the superblock via this path, and never
 * touch a buffer the journal owns. */
47 BUG_ON(bh->b_blocknr < OCFS2_SUPER_BLOCK_BLKNO);
48 BUG_ON(buffer_jbd(bh));
50 /* No need to check for a soft readonly file system here. non
51 * journalled writes are only ever done on system files which
52 * can get modified during recovery even if read-only. */
53 if (ocfs2_is_hard_readonly(osb)) {
59 ocfs2_metadata_cache_io_lock(ci);
/* Mark the buffer uptodate before queueing the write. */
62 set_buffer_uptodate(bh);
64 /* remove from dirty list before I/O. */
65 clear_buffer_dirty(bh);
67 get_bh(bh); /* for end_buffer_write_sync() */
68 bh->b_end_io = end_buffer_write_sync;
69 submit_bh(REQ_OP_WRITE, 0, bh);
/* NOTE(review): the elided lines between submit and this check presumably
 * wait for I/O completion (wait_on_buffer) — confirm against full source. */
73 if (buffer_uptodate(bh)) {
74 ocfs2_set_buffer_uptodate(ci, bh);
76 /* We don't need to remove the clustered uptodate
77 * information for this bh as it's not marked locally
83 ocfs2_metadata_cache_io_unlock(ci);
88 /* Caller must provide a bhs[] with all NULL or non-NULL entries, so it
89 * will be easier to handle read failure.
/*
 * ocfs2_read_blocks_sync() - read @nr contiguous blocks starting at @block
 * straight from disk, with no cluster-cache involvement.
 * @osb:   ocfs2 superblock info.
 * @block: starting physical block number.
 * @nr:    number of blocks to read.
 * @bhs:   array of @nr buffer head pointers; entries must be all-NULL
 *         (allocated here via sb_getblk) or all non-NULL (caller-owned).
 *
 * NOTE(review): numbering gaps show elided lines — submission-loop tail,
 * unlock/continue paths and the return are not visible in this excerpt.
 */
91 int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
92 unsigned int nr, struct buffer_head *bhs[])
96 struct buffer_head *bh;
99 trace_ocfs2_read_blocks_sync((unsigned long long)block, nr);
104 /* Don't put buffer head and re-assign it to NULL if it is allocated
105 * outside since the caller can't be aware of this alteration!
107 new_bh = (bhs[0] == NULL);
/* Allocation + submission pass over all @nr blocks. */
109 for (i = 0 ; i < nr ; i++) {
110 if (bhs[i] == NULL) {
111 bhs[i] = sb_getblk(osb->sb, block++);
112 if (bhs[i] == NULL) {
/* A journal-owned buffer must not be re-read from disk here. */
120 if (buffer_jbd(bh)) {
121 trace_ocfs2_read_blocks_sync_jbd(
122 (unsigned long long)bh->b_blocknr);
126 if (buffer_dirty(bh)) {
127 /* This should probably be a BUG, or
128 * at least return an error. */
130 "trying to sync read a dirty "
131 "buffer! (blocknr = %llu), skipping\n",
132 (unsigned long long)bh->b_blocknr);
/* Re-check under the buffer lock: the JBD bit may have been set while
 * we were waiting in lock_buffer() (elided above). */
137 if (buffer_jbd(bh)) {
138 #ifdef CATCH_BH_JBD_RACES
140 "block %llu had the JBD bit set "
141 "while I was in lock_buffer!",
142 (unsigned long long)bh->b_blocknr);
150 get_bh(bh); /* for end_buffer_read_sync() */
151 bh->b_end_io = end_buffer_read_sync;
152 submit_bh(REQ_OP_READ, 0, bh);
/* Completion pass, walked in reverse so later buffers are waited on /
 * cleaned up before earlier ones. */
156 for (i = nr; i > 0; i--) {
159 if (unlikely(status)) {
161 /* If middle bh fails, let previous bh
162 * finish its read and then put it to
/* On error, drop the uptodate bit so callers can't consume stale data. */
169 } else if (bh && buffer_uptodate(bh)) {
170 clear_buffer_uptodate(bh);
175 /* No need to wait on the buffer if it's managed by JBD. */
179 if (!buffer_uptodate(bh)) {
180 /* Status won't be cleared from here on out,
181 * so we can safely record this and loop back
182 * to cleanup the other buffers. */
192 /* Caller must provide a bhs[] with all NULL or non-NULL entries, so it
193 * will be easier to handle read failure.
/*
 * ocfs2_read_blocks() - cache-aware read of @nr blocks starting at @block.
 * @ci:       metadata cache (also supplies the super_block).
 * @block:    starting physical block number.
 * @nr:       number of blocks.
 * @bhs:      array of @nr buffer head pointers; all-NULL (allocated here)
 *            or all non-NULL (caller-owned).
 * @flags:    OCFS2_BH_IGNORE_CACHE and/or OCFS2_BH_READAHEAD; the two are
 *            mutually exclusive (BUG_ON below).
 * @validate: optional per-bh validation callback, run once after a fresh
 *            read via the BH_NeedsValidate bit.
 *
 * Unlike ocfs2_read_blocks_sync(), this consults ocfs2_buffer_uptodate()
 * to skip disk I/O for cached buffers, and records completed reads back
 * into the cache with ocfs2_set_buffer_uptodate().
 *
 * NOTE(review): numbering gaps show elided lines throughout; error paths,
 * lock_buffer() calls, and the return are not visible in this excerpt.
 */
195 int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
196 struct buffer_head *bhs[], int flags,
197 int (*validate)(struct super_block *sb,
198 struct buffer_head *bh))
201 int i, ignore_cache = 0;
202 struct buffer_head *bh;
203 struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
206 trace_ocfs2_read_blocks_begin(ci, (unsigned long long)block, nr, flags);
/* Readahead is by definition a cached read; asking for readahead while
 * also bypassing the cache is a caller bug. */
209 BUG_ON((flags & OCFS2_BH_READAHEAD) &&
210 (flags & OCFS2_BH_IGNORE_CACHE));
219 mlog(ML_ERROR, "asked to read %d blocks!\n", nr);
230 /* Don't put buffer head and re-assign it to NULL if it is allocated
231 * outside since the caller can't be aware of this alteration!
233 new_bh = (bhs[0] == NULL);
/* The cache io lock is held across both the submission and completion
 * passes below. */
235 ocfs2_metadata_cache_io_lock(ci);
236 for (i = 0 ; i < nr ; i++) {
237 if (bhs[i] == NULL) {
238 bhs[i] = sb_getblk(sb, block++);
239 if (bhs[i] == NULL) {
240 ocfs2_metadata_cache_io_unlock(ci);
243 /* Don't forget to put previous bh! */
248 ignore_cache = (flags & OCFS2_BH_IGNORE_CACHE);
250 /* There are three read-ahead cases here which we need to
251 * be concerned with. All three assume a buffer has
252 * previously been submitted with OCFS2_BH_READAHEAD
253 * and it hasn't yet completed I/O.
255 * 1) The current request is sync to disk. This rarely
256 * happens these days, and never when performance
257 * matters - the code can just wait on the buffer
258 * lock and re-submit.
260 * 2) The current request is cached, but not
261 * readahead. ocfs2_buffer_uptodate() will return
262 * false anyway, so we'll wind up waiting on the
263 * buffer lock to do I/O. We re-check the request
264 * after getting the lock to avoid a re-submit.
266 * 3) The current request is readahead (and so must
267 * also be a caching one). We short circuit if the
268 * buffer is locked (under I/O) and if it's in the
269 * uptodate cache. The re-check from #2 catches the
270 * case that the previous read-ahead completes just
271 * before our is-it-in-flight check.
274 if (!ignore_cache && !ocfs2_buffer_uptodate(ci, bh)) {
275 trace_ocfs2_read_blocks_from_disk(
276 (unsigned long long)bh->b_blocknr,
277 (unsigned long long)ocfs2_metadata_cache_owner(ci));
278 /* We're using ignore_cache here to say
283 trace_ocfs2_read_blocks_bh((unsigned long long)bh->b_blocknr,
284 ignore_cache, buffer_jbd(bh), buffer_dirty(bh));
/* Journal-owned buffers are uptodate by definition; no disk read. */
286 if (buffer_jbd(bh)) {
291 if (buffer_dirty(bh)) {
292 /* This should probably be a BUG, or
293 * at least return an error. */
297 /* A read-ahead request was made - if the
298 * buffer is already under read-ahead from a
299 * previously submitted request then we are
301 if ((flags & OCFS2_BH_READAHEAD)
302 && ocfs2_buffer_read_ahead(ci, bh))
/* Re-check under the buffer lock (taken in the elided lines above):
 * the JBD bit may have been set while we waited. */
306 if (buffer_jbd(bh)) {
307 #ifdef CATCH_BH_JBD_RACES
308 mlog(ML_ERROR, "block %llu had the JBD bit set "
309 "while I was in lock_buffer!",
310 (unsigned long long)bh->b_blocknr);
318 /* Re-check ocfs2_buffer_uptodate() as a
319 * previously read-ahead buffer may have
320 * completed I/O while we were waiting for the
322 if (!(flags & OCFS2_BH_IGNORE_CACHE)
323 && !(flags & OCFS2_BH_READAHEAD)
324 && ocfs2_buffer_uptodate(ci, bh)) {
329 get_bh(bh); /* for end_buffer_read_sync() */
/* Mark for post-I/O validation; cleared on failure or after validate(). */
331 set_buffer_needs_validate(bh);
332 bh->b_end_io = end_buffer_read_sync;
333 submit_bh(REQ_OP_READ, 0, bh);
/* Completion pass, in reverse submission order. Readahead requests do
 * not wait - they only get the cache bookkeeping below. */
339 for (i = (nr - 1); i >= 0; i--) {
342 if (!(flags & OCFS2_BH_READAHEAD)) {
343 if (unlikely(status)) {
344 /* Clear the buffers on error including those
345 * ever succeeded in reading
348 /* If middle bh fails, let previous bh
349 * finish its read and then put it to
356 } else if (bh && buffer_uptodate(bh)) {
357 clear_buffer_uptodate(bh);
361 /* We know this can't have changed as we hold the
362 * owner sem. Avoid doing any work on the bh if the
367 if (!buffer_uptodate(bh)) {
368 /* Status won't be cleared from here on out,
369 * so we can safely record this and loop back
370 * to cleanup the other buffers. Don't need to
371 * remove the clustered uptodate information
372 * for this bh as it's not marked locally
375 clear_buffer_needs_validate(bh);
379 if (buffer_needs_validate(bh)) {
380 /* We never set NeedsValidate if the
381 * buffer was held by the journal, so
382 * that better not have changed */
383 BUG_ON(buffer_jbd(bh));
384 clear_buffer_needs_validate(bh);
385 status = validate(sb, bh);
391 /* Always set the buffer in the cache, even if it was
392 * a forced read, or read-ahead which hasn't yet
394 ocfs2_set_buffer_uptodate(ci, bh);
396 ocfs2_metadata_cache_io_unlock(ci);
398 trace_ocfs2_read_blocks_end((unsigned long long)block, nr,
399 flags, ignore_cache);
406 /* Check whether the blkno is the super block or one of the backups. */
/*
 * Sanity helper used by ocfs2_write_super_or_backup(): walks the fixed
 * set of backup superblock locations comparing against @blkno.
 * NOTE(review): the second parameter line (blkno) and the body's action
 * on match/mismatch are elided from this excerpt - presumably a BUG/return
 * on the non-match path; confirm against the full source.
 */
407 static void ocfs2_check_super_or_backup(struct super_block *sb,
413 if (blkno == OCFS2_SUPER_BLOCK_BLKNO)
416 for (i = 0; i < OCFS2_MAX_BACKUP_SUPERBLOCKS; i++) {
417 backup_blkno = ocfs2_backup_super_blkno(sb, i);
418 if (backup_blkno == blkno)
426 * Writing the super block and backups doesn't need to collaborate with the
427 * journal, so we don't need to lock ip_io_mutex and ci doesn't need to be
428 * passed into this function.
430 int ocfs2_write_super_or_backup(struct ocfs2_super *osb,
431 struct buffer_head *bh)
434 struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data;
436 BUG_ON(buffer_jbd(bh));
437 ocfs2_check_super_or_backup(osb->sb, bh->b_blocknr);
439 if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) {
446 set_buffer_uptodate(bh);
448 /* remove from dirty list before I/O. */
449 clear_buffer_dirty(bh);
451 get_bh(bh); /* for end_buffer_write_sync() */
452 bh->b_end_io = end_buffer_write_sync;
453 ocfs2_compute_meta_ecc(osb->sb, bh->b_data, &di->i_check);
454 submit_bh(REQ_OP_WRITE, 0, bh);
458 if (!buffer_uptodate(bh)) {