Commit | Line | Data |
---|---|---|
328970de | 1 | // SPDX-License-Identifier: GPL-2.0-or-later |
fa60ce2c | 2 | /* |
ccd979bd MF |
3 | * io.c |
4 | * | |
5 | * Buffer cache handling | |
6 | * | |
7 | * Copyright (C) 2002, 2004 Oracle. All rights reserved. | |
ccd979bd MF |
8 | */ |
9 | ||
10 | #include <linux/fs.h> | |
11 | #include <linux/types.h> | |
ccd979bd | 12 | #include <linux/highmem.h> |
2f8b5444 | 13 | #include <linux/bio.h> |
ccd979bd MF |
14 | |
15 | #include <cluster/masklog.h> | |
16 | ||
17 | #include "ocfs2.h" | |
18 | ||
19 | #include "alloc.h" | |
20 | #include "inode.h" | |
21 | #include "journal.h" | |
22 | #include "uptodate.h" | |
ccd979bd | 23 | #include "buffer_head_io.h" |
15057e98 | 24 | #include "ocfs2_trace.h" |
ccd979bd | 25 | |
/*
 * Bits on bh->b_state used by ocfs2.
 *
 * These MUST be after the JBD2 bits.  Hence, we use BH_JBDPrivateStart.
 */
enum ocfs2_state_bits {
	/* Set on a bh when a caller-supplied validate() callback must be
	 * run after its read completes (see ocfs2_read_blocks()). */
	BH_NeedsValidate = BH_JBDPrivateStart,
};

/* Expand the magic b_state functions: generates
 * set_buffer_needs_validate(), clear_buffer_needs_validate() and
 * buffer_needs_validate(), used by ocfs2_read_blocks() below. */
BUFFER_FNS(NeedsValidate, needs_validate);
37 | ||
ccd979bd | 38 | int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh, |
8cb471e8 | 39 | struct ocfs2_caching_info *ci) |
ccd979bd MF |
40 | { |
41 | int ret = 0; | |
42 | ||
15057e98 | 43 | trace_ocfs2_write_block((unsigned long long)bh->b_blocknr, ci); |
ccd979bd MF |
44 | |
45 | BUG_ON(bh->b_blocknr < OCFS2_SUPER_BLOCK_BLKNO); | |
46 | BUG_ON(buffer_jbd(bh)); | |
47 | ||
48 | /* No need to check for a soft readonly file system here. non | |
49 | * journalled writes are only ever done on system files which | |
50 | * can get modified during recovery even if read-only. */ | |
51 | if (ocfs2_is_hard_readonly(osb)) { | |
52 | ret = -EROFS; | |
c1e8d35e | 53 | mlog_errno(ret); |
ccd979bd MF |
54 | goto out; |
55 | } | |
56 | ||
8cb471e8 | 57 | ocfs2_metadata_cache_io_lock(ci); |
ccd979bd MF |
58 | |
59 | lock_buffer(bh); | |
60 | set_buffer_uptodate(bh); | |
61 | ||
62 | /* remove from dirty list before I/O. */ | |
63 | clear_buffer_dirty(bh); | |
64 | ||
da1e9098 | 65 | get_bh(bh); /* for end_buffer_write_sync() */ |
ccd979bd | 66 | bh->b_end_io = end_buffer_write_sync; |
2a222ca9 | 67 | submit_bh(REQ_OP_WRITE, 0, bh); |
ccd979bd MF |
68 | |
69 | wait_on_buffer(bh); | |
70 | ||
71 | if (buffer_uptodate(bh)) { | |
8cb471e8 | 72 | ocfs2_set_buffer_uptodate(ci, bh); |
ccd979bd MF |
73 | } else { |
74 | /* We don't need to remove the clustered uptodate | |
75 | * information for this bh as it's not marked locally | |
76 | * uptodate. */ | |
77 | ret = -EIO; | |
c1e8d35e | 78 | mlog_errno(ret); |
ccd979bd MF |
79 | } |
80 | ||
8cb471e8 | 81 | ocfs2_metadata_cache_io_unlock(ci); |
ccd979bd | 82 | out: |
ccd979bd MF |
83 | return ret; |
84 | } | |
85 | ||
/* Caller must provide a bhs[] with all NULL or non-NULL entries, so it
 * will be easier to handle read failure.
 *
 * Synchronously read 'nr' blocks starting at 'block'.  NULL slots in
 * bhs[] are filled with freshly allocated buffer_heads; because the
 * entries must be uniformly NULL or non-NULL, new_bh below records
 * whether this function owns the heads (and so must put them on error).
 *
 * Returns 0 on success, -ENOMEM if a buffer_head could not be
 * allocated, or -EIO if any submitted read failed.
 */
int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
			   unsigned int nr, struct buffer_head *bhs[])
{
	int status = 0;
	unsigned int i;
	struct buffer_head *bh;
	int new_bh = 0;	/* nonzero when we allocated the heads ourselves */

	trace_ocfs2_read_blocks_sync((unsigned long long)block, nr);

	if (!nr)
		goto bail;

	/* Don't put buffer head and re-assign it to NULL if it is allocated
	 * outside since the caller can't be aware of this alteration!
	 */
	new_bh = (bhs[0] == NULL);

	/* Submission pass: allocate any missing heads and queue the reads. */
	for (i = 0 ; i < nr ; i++) {
		if (bhs[i] == NULL) {
			bhs[i] = sb_getblk(osb->sb, block++);
			if (bhs[i] == NULL) {
				status = -ENOMEM;
				mlog_errno(status);
				/* Fall through to the completion loop, which
				 * puts the heads allocated so far. */
				break;
			}
		}
		bh = bhs[i];

		/* The journal holds this buffer; its copy is
		 * authoritative, so don't submit a read. */
		if (buffer_jbd(bh)) {
			trace_ocfs2_read_blocks_sync_jbd(
					(unsigned long long)bh->b_blocknr);
			continue;
		}

		if (buffer_dirty(bh)) {
			/* This should probably be a BUG, or
			 * at least return an error. */
			mlog(ML_ERROR,
			     "trying to sync read a dirty "
			     "buffer! (blocknr = %llu), skipping\n",
			     (unsigned long long)bh->b_blocknr);
			continue;
		}

		lock_buffer(bh);
		/* The JBD bit may have been set while we waited for the
		 * buffer lock; re-check under the lock. */
		if (buffer_jbd(bh)) {
#ifdef CATCH_BH_JBD_RACES
			mlog(ML_ERROR,
			     "block %llu had the JBD bit set "
			     "while I was in lock_buffer!",
			     (unsigned long long)bh->b_blocknr);
			BUG();
#else
			unlock_buffer(bh);
			continue;
#endif
		}

		get_bh(bh); /* for end_buffer_read_sync() */
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(REQ_OP_READ, 0, bh);
	}

read_failure:
	/* Completion pass, walked in reverse submission order.  On the
	 * first I/O error we jump back here with status set so the whole
	 * array is torn down/cleared. */
	for (i = nr; i > 0; i--) {
		bh = bhs[i - 1];

		if (unlikely(status)) {
			if (new_bh && bh) {
				/* If middle bh fails, let previous bh
				 * finish its read and then put it to
				 * avoid bh leak
				 */
				if (!buffer_jbd(bh))
					wait_on_buffer(bh);
				put_bh(bh);
				bhs[i - 1] = NULL;
			} else if (bh && buffer_uptodate(bh)) {
				clear_buffer_uptodate(bh);
			}
			continue;
		}

		/* No need to wait on the buffer if it's managed by JBD. */
		if (!buffer_jbd(bh))
			wait_on_buffer(bh);

		if (!buffer_uptodate(bh)) {
			/* Status won't be cleared from here on out,
			 * so we can safely record this and loop back
			 * to cleanup the other buffers. */
			status = -EIO;
			goto read_failure;
		}
	}

bail:
	return status;
}
189 | ||
/* Caller must provide a bhs[] with all NULL or non-NULL entries, so it
 * will be easier to handle read failure.
 *
 * Read 'nr' blocks starting at 'block' through ci's metadata cache.
 * 'flags' may request pure read-ahead (OCFS2_BH_READAHEAD) or force a
 * disk read even for cached buffers (OCFS2_BH_IGNORE_CACHE) - the two
 * are mutually exclusive.  When 'validate' is non-NULL it is run once
 * on every buffer freshly read from disk, and a nonzero return from it
 * becomes this function's status.
 *
 * Returns 0 on success, -EINVAL for bad arguments, -ENOMEM when a
 * buffer_head cannot be allocated, -EIO on read failure, or the error
 * returned by validate().
 */
int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
		      struct buffer_head *bhs[], int flags,
		      int (*validate)(struct super_block *sb,
				      struct buffer_head *bh))
{
	int status = 0;
	int i, ignore_cache = 0;
	struct buffer_head *bh;
	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
	int new_bh = 0;	/* nonzero when we allocated the heads ourselves */

	trace_ocfs2_read_blocks_begin(ci, (unsigned long long)block, nr, flags);

	BUG_ON(!ci);
	/* Read-ahead is a caching read, so the two flags are mutually
	 * exclusive by construction. */
	BUG_ON((flags & OCFS2_BH_READAHEAD) &&
	       (flags & OCFS2_BH_IGNORE_CACHE));

	if (bhs == NULL) {
		status = -EINVAL;
		mlog_errno(status);
		goto bail;
	}

	if (nr < 0) {
		mlog(ML_ERROR, "asked to read %d blocks!\n", nr);
		status = -EINVAL;
		mlog_errno(status);
		goto bail;
	}

	if (nr == 0) {
		status = 0;
		goto bail;
	}

	/* Don't put buffer head and re-assign it to NULL if it is allocated
	 * outside since the caller can't be aware of this alteration!
	 */
	new_bh = (bhs[0] == NULL);

	ocfs2_metadata_cache_io_lock(ci);
	/* Submission pass: allocate missing heads and queue reads for the
	 * buffers that actually need disk I/O. */
	for (i = 0 ; i < nr ; i++) {
		if (bhs[i] == NULL) {
			bhs[i] = sb_getblk(sb, block++);
			if (bhs[i] == NULL) {
				ocfs2_metadata_cache_io_unlock(ci);
				status = -ENOMEM;
				mlog_errno(status);
				/* Don't forget to put previous bh! */
				break;
			}
		}
		bh = bhs[i];
		ignore_cache = (flags & OCFS2_BH_IGNORE_CACHE);

		/* There are three read-ahead cases here which we need to
		 * be concerned with. All three assume a buffer has
		 * previously been submitted with OCFS2_BH_READAHEAD
		 * and it hasn't yet completed I/O.
		 *
		 * 1) The current request is sync to disk. This rarely
		 *    happens these days, and never when performance
		 *    matters - the code can just wait on the buffer
		 *    lock and re-submit.
		 *
		 * 2) The current request is cached, but not
		 *    readahead. ocfs2_buffer_uptodate() will return
		 *    false anyway, so we'll wind up waiting on the
		 *    buffer lock to do I/O. We re-check the request
		 *    after getting the lock to avoid a re-submit.
		 *
		 * 3) The current request is readahead (and so must
		 *    also be a caching one). We short circuit if the
		 *    buffer is locked (under I/O) and if it's in the
		 *    uptodate cache. The re-check from #2 catches the
		 *    case that the previous read-ahead completes just
		 *    before our is-it-in-flight check.
		 */

		if (!ignore_cache && !ocfs2_buffer_uptodate(ci, bh)) {
			trace_ocfs2_read_blocks_from_disk(
			     (unsigned long long)bh->b_blocknr,
			     (unsigned long long)ocfs2_metadata_cache_owner(ci));
			/* We're using ignore_cache here to say
			 * "go to disk" */
			ignore_cache = 1;
		}

		trace_ocfs2_read_blocks_bh((unsigned long long)bh->b_blocknr,
			ignore_cache, buffer_jbd(bh), buffer_dirty(bh));

		/* The journal holds this buffer; don't submit I/O for it. */
		if (buffer_jbd(bh)) {
			continue;
		}

		if (ignore_cache) {
			if (buffer_dirty(bh)) {
				/* This should probably be a BUG, or
				 * at least return an error. */
				continue;
			}

			/* A read-ahead request was made - if the
			 * buffer is already under read-ahead from a
			 * previously submitted request then we are
			 * done here. */
			if ((flags & OCFS2_BH_READAHEAD)
			    && ocfs2_buffer_read_ahead(ci, bh))
				continue;

			lock_buffer(bh);
			/* The JBD bit may have been set while we waited
			 * for the buffer lock; re-check under the lock. */
			if (buffer_jbd(bh)) {
#ifdef CATCH_BH_JBD_RACES
				mlog(ML_ERROR, "block %llu had the JBD bit set "
					       "while I was in lock_buffer!",
				     (unsigned long long)bh->b_blocknr);
				BUG();
#else
				unlock_buffer(bh);
				continue;
#endif
			}

			/* Re-check ocfs2_buffer_uptodate() as a
			 * previously read-ahead buffer may have
			 * completed I/O while we were waiting for the
			 * buffer lock. */
			if (!(flags & OCFS2_BH_IGNORE_CACHE)
			    && !(flags & OCFS2_BH_READAHEAD)
			    && ocfs2_buffer_uptodate(ci, bh)) {
				unlock_buffer(bh);
				continue;
			}

			get_bh(bh); /* for end_buffer_read_sync() */
			if (validate)
				set_buffer_needs_validate(bh);
			bh->b_end_io = end_buffer_read_sync;
			submit_bh(REQ_OP_READ, 0, bh);
			continue;
		}
	}

read_failure:
	/* Completion pass, walked in reverse submission order.  On error
	 * (I/O failure or validate() rejecting a buffer) we jump back here
	 * with status set so the whole array is torn down/cleared. */
	for (i = (nr - 1); i >= 0; i--) {
		bh = bhs[i];

		if (!(flags & OCFS2_BH_READAHEAD)) {
			if (unlikely(status)) {
				/* Clear the buffers on error including those
				 * ever succeeded in reading
				 */
				if (new_bh && bh) {
					/* If middle bh fails, let previous bh
					 * finish its read and then put it to
					 * avoid bh leak
					 */
					if (!buffer_jbd(bh))
						wait_on_buffer(bh);
					put_bh(bh);
					bhs[i] = NULL;
				} else if (bh && buffer_uptodate(bh)) {
					clear_buffer_uptodate(bh);
				}
				continue;
			}
			/* We know this can't have changed as we hold the
			 * owner sem. Avoid doing any work on the bh if the
			 * journal has it. */
			if (!buffer_jbd(bh))
				wait_on_buffer(bh);

			if (!buffer_uptodate(bh)) {
				/* Status won't be cleared from here on out,
				 * so we can safely record this and loop back
				 * to cleanup the other buffers. Don't need to
				 * remove the clustered uptodate information
				 * for this bh as it's not marked locally
				 * uptodate. */
				status = -EIO;
				clear_buffer_needs_validate(bh);
				goto read_failure;
			}

			if (buffer_needs_validate(bh)) {
				/* We never set NeedsValidate if the
				 * buffer was held by the journal, so
				 * that better not have changed */
				BUG_ON(buffer_jbd(bh));
				clear_buffer_needs_validate(bh);
				status = validate(sb, bh);
				if (status)
					goto read_failure;
			}
		}

		/* Always set the buffer in the cache, even if it was
		 * a forced read, or read-ahead which hasn't yet
		 * completed. */
		ocfs2_set_buffer_uptodate(ci, bh);
	}
	ocfs2_metadata_cache_io_unlock(ci);

	trace_ocfs2_read_blocks_end((unsigned long long)block, nr,
				    flags, ignore_cache);

bail:

	return status;
}
d659072f TM |
403 | |
404 | /* Check whether the blkno is the super block or one of the backups. */ | |
405 | static void ocfs2_check_super_or_backup(struct super_block *sb, | |
406 | sector_t blkno) | |
407 | { | |
408 | int i; | |
409 | u64 backup_blkno; | |
410 | ||
411 | if (blkno == OCFS2_SUPER_BLOCK_BLKNO) | |
412 | return; | |
413 | ||
414 | for (i = 0; i < OCFS2_MAX_BACKUP_SUPERBLOCKS; i++) { | |
415 | backup_blkno = ocfs2_backup_super_blkno(sb, i); | |
416 | if (backup_blkno == blkno) | |
417 | return; | |
418 | } | |
419 | ||
420 | BUG(); | |
421 | } | |
422 | ||
/*
 * Write super block and backups doesn't need to collaborate with journal,
 * so we don't need to lock ip_io_mutex and ci doesn't need to be passed
 * into this function.
 *
 * Recomputes the block's metadata ECC before submission.  Returns 0 on
 * success, -EROFS on a (hard or soft) readonly filesystem, or -EIO when
 * the write fails.
 */
int ocfs2_write_super_or_backup(struct ocfs2_super *osb,
				struct buffer_head *bh)
{
	int ret = 0;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data;

	BUG_ON(buffer_jbd(bh));
	/* BUG()s unless bh is the super block or one of its backups. */
	ocfs2_check_super_or_backup(osb->sb, bh->b_blocknr);

	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) {
		ret = -EROFS;
		mlog_errno(ret);
		goto out;
	}

	lock_buffer(bh);
	set_buffer_uptodate(bh);

	/* remove from dirty list before I/O. */
	clear_buffer_dirty(bh);

	get_bh(bh); /* for end_buffer_write_sync() */
	bh->b_end_io = end_buffer_write_sync;
	/* Refresh the checksum/ECC over the whole block before it hits disk. */
	ocfs2_compute_meta_ecc(osb->sb, bh->b_data, &di->i_check);
	submit_bh(REQ_OP_WRITE, 0, bh);

	wait_on_buffer(bh);

	if (!buffer_uptodate(bh)) {
		ret = -EIO;
		mlog_errno(ret);
	}

out:
	return ret;
}