// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"
#include "xfs_error.h"
#include "xfs_inode.h"
#include "xfs_dir2.h"
#include "xfs_quota.h"

/*
 * This structure is used during recovery to record the buf log items which
 * have been canceled and should not be replayed.
 */
struct xfs_buf_cancel {
	xfs_daddr_t		bc_blkno;
	uint			bc_len;
	int			bc_refcount;
	struct list_head	bc_list;
};

static struct xfs_buf_cancel *
xlog_find_buffer_cancelled(
	struct xlog		*log,
	xfs_daddr_t		blkno,
	uint			len)
{
	struct list_head	*bucket;
	struct xfs_buf_cancel	*bcp;

	if (!log->l_buf_cancel_table)
		return NULL;

	bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
	list_for_each_entry(bcp, bucket, bc_list) {
		if (bcp->bc_blkno == blkno && bcp->bc_len == len)
			return bcp;
	}

	return NULL;
}

static bool
xlog_add_buffer_cancelled(
	struct xlog		*log,
	xfs_daddr_t		blkno,
	uint			len)
{
	struct xfs_buf_cancel	*bcp;

	/*
	 * If we find an existing cancel record, this indicates that the buffer
	 * was cancelled multiple times. To ensure that during pass 2 we keep
	 * the record in the table until we reach its last occurrence in the
	 * log, a reference count is kept to tell how many times we expect to
	 * see this record during the second pass.
	 */
	bcp = xlog_find_buffer_cancelled(log, blkno, len);
	if (bcp) {
		bcp->bc_refcount++;
		return false;
	}

	bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), 0);
	bcp->bc_blkno = blkno;
	bcp->bc_len = len;
	bcp->bc_refcount = 1;
	list_add_tail(&bcp->bc_list, XLOG_BUF_CANCEL_BUCKET(log, blkno));
	return true;
}

/*
 * Check if there is an entry for blkno, len in the buffer cancel record table.
 */
bool
xlog_is_buffer_cancelled(
	struct xlog		*log,
	xfs_daddr_t		blkno,
	uint			len)
{
	return xlog_find_buffer_cancelled(log, blkno, len) != NULL;
}
98 | ||
99 | /* | |
100 | * Check if there is and entry for blkno, len in the buffer cancel record table, | |
101 | * and decremented the reference count on it if there is one. | |
102 | * | |
103 | * Remove the cancel record once the refcount hits zero, so that if the same | |
104 | * buffer is re-used again after its last cancellation we actually replay the | |
105 | * changes made at that point. | |
106 | */ | |
107 | static bool | |
108 | xlog_put_buffer_cancelled( | |
109 | struct xlog *log, | |
110 | xfs_daddr_t blkno, | |
111 | uint len) | |
112 | { | |
113 | struct xfs_buf_cancel *bcp; | |
114 | ||
115 | bcp = xlog_find_buffer_cancelled(log, blkno, len); | |
116 | if (!bcp) { | |
117 | ASSERT(0); | |
118 | return false; | |
119 | } | |
120 | ||
121 | if (--bcp->bc_refcount == 0) { | |
122 | list_del(&bcp->bc_list); | |
123 | kmem_free(bcp); | |
124 | } | |
125 | return true; | |
126 | } | |
127 | ||
/* log buffer item recovery */

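/*
 * Lifecycle of a cancel record in this file: pass 1 adds one (or bumps its
 * refcount) via xlog_add_buffer_cancelled() for every XFS_BLF_CANCEL item it
 * sees; pass 2 checks xlog_is_buffer_cancelled() before replaying a normal
 * buffer, and drops a reference via xlog_put_buffer_cancelled() when it
 * processes the cancel item itself.
 */
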
/*
 * Sort buffer items for log recovery. Most buffer items should end up on the
 * buffer list and are recovered first, with the following exceptions:
 *
 * 1. XFS_BLF_CANCEL buffers must be processed last because some log items
 *    might depend on the incore cancellation record, and replaying a cancelled
 *    buffer item can remove the incore record.
 *
 * 2. XFS_BLF_INODE_BUF buffers are handled after most regular items so that
 *    we replay di_next_unlinked only after flushing the inode 'free' state
 *    to the inode buffer.
 *
 * See xlog_recover_reorder_trans for more details.
 */
STATIC enum xlog_recover_reorder
xlog_recover_buf_reorder(
	struct xlog_recover_item	*item)
{
	struct xfs_buf_log_format	*buf_f = item->ri_buf[0].i_addr;

	if (buf_f->blf_flags & XFS_BLF_CANCEL)
		return XLOG_REORDER_CANCEL_LIST;
	if (buf_f->blf_flags & XFS_BLF_INODE_BUF)
		return XLOG_REORDER_INODE_BUFFER_LIST;
	return XLOG_REORDER_BUFFER_LIST;
}

STATIC void
xlog_recover_buf_ra_pass2(
	struct xlog			*log,
	struct xlog_recover_item	*item)
{
	struct xfs_buf_log_format	*buf_f = item->ri_buf[0].i_addr;

	xlog_buf_readahead(log, buf_f->blf_blkno, buf_f->blf_len, NULL);
}

/*
 * Build up the table of buf cancel records so that we don't replay cancelled
 * data in the second pass.
 */
static int
xlog_recover_buf_commit_pass1(
	struct xlog			*log,
	struct xlog_recover_item	*item)
{
	struct xfs_buf_log_format	*bf = item->ri_buf[0].i_addr;

	if (!xfs_buf_log_check_iovec(&item->ri_buf[0])) {
		xfs_err(log->l_mp, "bad buffer log item size (%d)",
				item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	if (!(bf->blf_flags & XFS_BLF_CANCEL))
		trace_xfs_log_recover_buf_not_cancel(log, bf);
	else if (xlog_add_buffer_cancelled(log, bf->blf_blkno, bf->blf_len))
		trace_xfs_log_recover_buf_cancel_add(log, bf);
	else
		trace_xfs_log_recover_buf_cancel_ref_inc(log, bf);
	return 0;
}

/*
 * Validate the recovered buffer is of the correct type and attach the
 * appropriate buffer operations to it for writeback. Magic numbers are in a
 * few places:
 *	the first 16 bits of the buffer (inode buffer, dquot buffer),
 *	the first 32 bits of the buffer (most blocks),
 *	inside a struct xfs_da_blkinfo at the start of the buffer.
 */
static void
xlog_recover_validate_buf_type(
	struct xfs_mount		*mp,
	struct xfs_buf			*bp,
	struct xfs_buf_log_format	*buf_f,
	xfs_lsn_t			current_lsn)
{
	struct xfs_da_blkinfo	*info = bp->b_addr;
	uint32_t		magic32;
	uint16_t		magic16;
	uint16_t		magicda;
	char			*warnmsg = NULL;

	/*
	 * We can only do post recovery validation on items on CRC enabled
	 * filesystems as we need to know when the buffer was written to be
	 * able to determine if we should have replayed the item. If we replay
	 * old metadata over a newer buffer, then it will enter a temporarily
	 * inconsistent state resulting in verification failures. Hence for
	 * now just avoid the verification stage for non-crc filesystems.
	 */
	if (!xfs_has_crc(mp))
		return;

	magic32 = be32_to_cpu(*(__be32 *)bp->b_addr);
	magic16 = be16_to_cpu(*(__be16 *)bp->b_addr);
	magicda = be16_to_cpu(info->magic);
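	/*
	 * All three candidate magics are read up front; which one is
	 * meaningful depends on the buffer type that was logged in the buf
	 * log format flags (xfs_blft_from_flags() below).
	 */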
	switch (xfs_blft_from_flags(buf_f)) {
	case XFS_BLFT_BTREE_BUF:
		switch (magic32) {
		case XFS_ABTB_CRC_MAGIC:
		case XFS_ABTB_MAGIC:
			bp->b_ops = &xfs_bnobt_buf_ops;
			break;
		case XFS_ABTC_CRC_MAGIC:
		case XFS_ABTC_MAGIC:
			bp->b_ops = &xfs_cntbt_buf_ops;
			break;
		case XFS_IBT_CRC_MAGIC:
		case XFS_IBT_MAGIC:
			bp->b_ops = &xfs_inobt_buf_ops;
			break;
		case XFS_FIBT_CRC_MAGIC:
		case XFS_FIBT_MAGIC:
			bp->b_ops = &xfs_finobt_buf_ops;
			break;
		case XFS_BMAP_CRC_MAGIC:
		case XFS_BMAP_MAGIC:
			bp->b_ops = &xfs_bmbt_buf_ops;
			break;
		case XFS_RMAP_CRC_MAGIC:
			bp->b_ops = &xfs_rmapbt_buf_ops;
			break;
		case XFS_REFC_CRC_MAGIC:
			bp->b_ops = &xfs_refcountbt_buf_ops;
			break;
		default:
			warnmsg = "Bad btree block magic!";
			break;
		}
		break;
	case XFS_BLFT_AGF_BUF:
		if (magic32 != XFS_AGF_MAGIC) {
			warnmsg = "Bad AGF block magic!";
			break;
		}
		bp->b_ops = &xfs_agf_buf_ops;
		break;
	case XFS_BLFT_AGFL_BUF:
		if (magic32 != XFS_AGFL_MAGIC) {
			warnmsg = "Bad AGFL block magic!";
			break;
		}
		bp->b_ops = &xfs_agfl_buf_ops;
		break;
	case XFS_BLFT_AGI_BUF:
		if (magic32 != XFS_AGI_MAGIC) {
			warnmsg = "Bad AGI block magic!";
			break;
		}
		bp->b_ops = &xfs_agi_buf_ops;
		break;
	case XFS_BLFT_UDQUOT_BUF:
	case XFS_BLFT_PDQUOT_BUF:
	case XFS_BLFT_GDQUOT_BUF:
#ifdef CONFIG_XFS_QUOTA
		if (magic16 != XFS_DQUOT_MAGIC) {
			warnmsg = "Bad DQUOT block magic!";
			break;
		}
		bp->b_ops = &xfs_dquot_buf_ops;
#else
		xfs_alert(mp,
	"Trying to recover dquots without QUOTA support built in!");
		ASSERT(0);
#endif
		break;
	case XFS_BLFT_DINO_BUF:
		if (magic16 != XFS_DINODE_MAGIC) {
			warnmsg = "Bad INODE block magic!";
			break;
		}
		bp->b_ops = &xfs_inode_buf_ops;
		break;
	case XFS_BLFT_SYMLINK_BUF:
		if (magic32 != XFS_SYMLINK_MAGIC) {
			warnmsg = "Bad symlink block magic!";
			break;
		}
		bp->b_ops = &xfs_symlink_buf_ops;
		break;
	case XFS_BLFT_DIR_BLOCK_BUF:
		if (magic32 != XFS_DIR2_BLOCK_MAGIC &&
		    magic32 != XFS_DIR3_BLOCK_MAGIC) {
			warnmsg = "Bad dir block magic!";
			break;
		}
		bp->b_ops = &xfs_dir3_block_buf_ops;
		break;
	case XFS_BLFT_DIR_DATA_BUF:
		if (magic32 != XFS_DIR2_DATA_MAGIC &&
		    magic32 != XFS_DIR3_DATA_MAGIC) {
			warnmsg = "Bad dir data magic!";
			break;
		}
		bp->b_ops = &xfs_dir3_data_buf_ops;
		break;
	case XFS_BLFT_DIR_FREE_BUF:
		if (magic32 != XFS_DIR2_FREE_MAGIC &&
		    magic32 != XFS_DIR3_FREE_MAGIC) {
			warnmsg = "Bad dir3 free magic!";
			break;
		}
		bp->b_ops = &xfs_dir3_free_buf_ops;
		break;
	case XFS_BLFT_DIR_LEAF1_BUF:
		if (magicda != XFS_DIR2_LEAF1_MAGIC &&
		    magicda != XFS_DIR3_LEAF1_MAGIC) {
			warnmsg = "Bad dir leaf1 magic!";
			break;
		}
		bp->b_ops = &xfs_dir3_leaf1_buf_ops;
		break;
	case XFS_BLFT_DIR_LEAFN_BUF:
		if (magicda != XFS_DIR2_LEAFN_MAGIC &&
		    magicda != XFS_DIR3_LEAFN_MAGIC) {
			warnmsg = "Bad dir leafn magic!";
			break;
		}
		bp->b_ops = &xfs_dir3_leafn_buf_ops;
		break;
	case XFS_BLFT_DA_NODE_BUF:
		if (magicda != XFS_DA_NODE_MAGIC &&
		    magicda != XFS_DA3_NODE_MAGIC) {
			warnmsg = "Bad da node magic!";
			break;
		}
		bp->b_ops = &xfs_da3_node_buf_ops;
		break;
	case XFS_BLFT_ATTR_LEAF_BUF:
		if (magicda != XFS_ATTR_LEAF_MAGIC &&
		    magicda != XFS_ATTR3_LEAF_MAGIC) {
			warnmsg = "Bad attr leaf magic!";
			break;
		}
		bp->b_ops = &xfs_attr3_leaf_buf_ops;
		break;
	case XFS_BLFT_ATTR_RMT_BUF:
		if (magic32 != XFS_ATTR3_RMT_MAGIC) {
			warnmsg = "Bad attr remote magic!";
			break;
		}
		bp->b_ops = &xfs_attr3_rmt_buf_ops;
		break;
	case XFS_BLFT_SB_BUF:
		if (magic32 != XFS_SB_MAGIC) {
			warnmsg = "Bad SB block magic!";
			break;
		}
		bp->b_ops = &xfs_sb_buf_ops;
		break;
#ifdef CONFIG_XFS_RT
	case XFS_BLFT_RTBITMAP_BUF:
	case XFS_BLFT_RTSUMMARY_BUF:
		/* no magic numbers for verification of RT buffers */
		bp->b_ops = &xfs_rtbuf_ops;
		break;
#endif /* CONFIG_XFS_RT */
	default:
		xfs_warn(mp, "Unknown buffer type %d!",
			 xfs_blft_from_flags(buf_f));
		break;
	}

	/*
	 * Nothing else to do in the case of a NULL current LSN as this means
	 * the buffer is more recent than the change in the log and will be
	 * skipped.
	 */
	if (current_lsn == NULLCOMMITLSN)
		return;

	if (warnmsg) {
		xfs_warn(mp, warnmsg);
		ASSERT(0);
	}

	/*
	 * We must update the metadata LSN of the buffer as it is written out
	 * to ensure that older transactions never replay over this one and
	 * corrupt the buffer. This can occur if log recovery is interrupted
	 * at some point after the current transaction completes, at which
	 * point a subsequent mount starts recovery from the beginning.
	 *
	 * Write verifiers update the metadata LSN from log items attached to
	 * the buffer. Therefore, initialize a bli purely to carry the LSN to
	 * the verifier.
	 */
	if (bp->b_ops) {
		struct xfs_buf_log_item	*bip;

		bp->b_flags |= _XBF_LOGRECOVERY;
		xfs_buf_item_init(bp, mp);
		bip = bp->b_log_item;
		bip->bli_item.li_lsn = current_lsn;
	}
}

/*
 * Perform a 'normal' buffer recovery. Each logged region of the
 * buffer should be copied over the corresponding region in the
 * given buffer. The bitmap in the buf log format structure indicates
 * where to place the logged data.
 */
STATIC void
xlog_recover_do_reg_buffer(
	struct xfs_mount		*mp,
	struct xlog_recover_item	*item,
	struct xfs_buf			*bp,
	struct xfs_buf_log_format	*buf_f,
	xfs_lsn_t			current_lsn)
{
	int			i;
	int			bit;
	int			nbits;
	xfs_failaddr_t		fa;
	const size_t		size_disk_dquot = sizeof(struct xfs_disk_dquot);

	trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);

	bit = 0;
	i = 1;	/* 0 is the buf format structure */
	while (1) {
		bit = xfs_next_bit(buf_f->blf_data_map,
				   buf_f->blf_map_size, bit);
		if (bit == -1)
			break;
		nbits = xfs_contig_bits(buf_f->blf_data_map,
					buf_f->blf_map_size, bit);
		ASSERT(nbits > 0);
		ASSERT(item->ri_buf[i].i_addr != NULL);
		ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0);
		ASSERT(BBTOB(bp->b_length) >=
		       ((uint)bit << XFS_BLF_SHIFT) + (nbits << XFS_BLF_SHIFT));
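
		/*
		 * Each bit in blf_data_map covers one XFS_BLF_CHUNK sized
		 * region of the buffer (128 bytes, per xfs_buf_item.h), so
		 * "bit << XFS_BLF_SHIFT" converts a chunk index into a byte
		 * offset: bit 4 with nbits 2, for example, describes bytes
		 * 512-767.
		 */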

		/*
		 * The dirty regions logged in the buffer, even though
		 * contiguous, may span multiple chunks. This is because the
		 * dirty region may span a physical page boundary in a buffer
		 * and hence be split into two separate vectors for writing
		 * into the log. Hence we need to trim nbits back to the
		 * length of the current region being copied out of the log.
		 */
		if (item->ri_buf[i].i_len < (nbits << XFS_BLF_SHIFT))
			nbits = item->ri_buf[i].i_len >> XFS_BLF_SHIFT;

		/*
		 * Do a sanity check if this is a dquot buffer. Just checking
		 * the first dquot in the buffer should do. XXX: this is
		 * probably a good thing to do for other buf types also.
		 */
		fa = NULL;
		if (buf_f->blf_flags &
		    (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
			if (item->ri_buf[i].i_addr == NULL) {
				xfs_alert(mp,
					"XFS: NULL dquot in %s.", __func__);
				goto next;
			}
			if (item->ri_buf[i].i_len < size_disk_dquot) {
				xfs_alert(mp,
					"XFS: dquot too small (%d) in %s.",
					item->ri_buf[i].i_len, __func__);
				goto next;
			}
			fa = xfs_dquot_verify(mp, item->ri_buf[i].i_addr, -1);
			if (fa) {
				xfs_alert(mp,
	"dquot corrupt at %pS trying to replay into block 0x%llx",
					fa, xfs_buf_daddr(bp));
				goto next;
			}
		}

		memcpy(xfs_buf_offset(bp,
			(uint)bit << XFS_BLF_SHIFT),	/* dest */
			item->ri_buf[i].i_addr,		/* source */
			nbits << XFS_BLF_SHIFT);	/* length */
next:
		i++;
		bit += nbits;
	}

	/* Shouldn't be any more regions */
	ASSERT(i == item->ri_total);

	xlog_recover_validate_buf_type(mp, bp, buf_f, current_lsn);
}

/*
 * Perform a dquot buffer recovery.
 * Simple algorithm: if we have found a QUOTAOFF log item of the same type
 * (i.e. USR or GRP), then just toss this buffer away; don't recover it.
 * Else, treat it as a regular buffer and do recovery.
 *
 * Return false if the buffer was tossed and true if we recovered the buffer to
 * indicate to the caller if the buffer needs writing.
 */
STATIC bool
xlog_recover_do_dquot_buffer(
	struct xfs_mount		*mp,
	struct xlog			*log,
	struct xlog_recover_item	*item,
	struct xfs_buf			*bp,
	struct xfs_buf_log_format	*buf_f)
{
	uint			type;

	trace_xfs_log_recover_buf_dquot_buf(log, buf_f);

	/*
	 * Filesystems are required to send in quota flags at mount time.
	 */
	if (!mp->m_qflags)
		return false;

	type = 0;
	if (buf_f->blf_flags & XFS_BLF_UDQUOT_BUF)
		type |= XFS_DQTYPE_USER;
	if (buf_f->blf_flags & XFS_BLF_PDQUOT_BUF)
		type |= XFS_DQTYPE_PROJ;
	if (buf_f->blf_flags & XFS_BLF_GDQUOT_BUF)
		type |= XFS_DQTYPE_GROUP;

	/*
	 * This type of quota was turned off, so ignore this buffer.
	 */
	if (log->l_quotaoffs_flag & type)
		return false;

	xlog_recover_do_reg_buffer(mp, item, bp, buf_f, NULLCOMMITLSN);
	return true;
}

/*
 * Perform recovery for a buffer full of inodes. In these buffers, the only
 * data which should be recovered is that which corresponds to the
 * di_next_unlinked pointers in the on disk inode structures. The rest of the
 * data for the inodes is always logged through the inodes themselves rather
 * than the inode buffer and is recovered in xlog_recover_inode_pass2().
 *
 * The only time when buffers full of inodes are fully recovered is when the
 * buffer is full of newly allocated inodes. In this case the buffer will
 * not be marked as an inode buffer and so will be sent to
 * xlog_recover_do_reg_buffer() below during recovery.
 */
STATIC int
xlog_recover_do_inode_buffer(
	struct xfs_mount		*mp,
	struct xlog_recover_item	*item,
	struct xfs_buf			*bp,
	struct xfs_buf_log_format	*buf_f)
{
	int			i;
	int			item_index = 0;
	int			bit = 0;
	int			nbits = 0;
	int			reg_buf_offset = 0;
	int			reg_buf_bytes = 0;
	int			next_unlinked_offset;
	int			inodes_per_buf;
	xfs_agino_t		*logged_nextp;
	xfs_agino_t		*buffer_nextp;

	trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);

	/*
	 * Post recovery validation only works properly on CRC enabled
	 * filesystems.
	 */
	if (xfs_has_crc(mp))
		bp->b_ops = &xfs_inode_buf_ops;

	inodes_per_buf = BBTOB(bp->b_length) >> mp->m_sb.sb_inodelog;
	for (i = 0; i < inodes_per_buf; i++) {
		next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
			offsetof(xfs_dinode_t, di_next_unlinked);
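
		/*
		 * For example, with 512 byte inodes the di_next_unlinked
		 * word of the fourth inode in the buffer (i == 3) lives at
		 * byte offset 3 * 512 + offsetof(xfs_dinode_t,
		 * di_next_unlinked).
		 */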

		while (next_unlinked_offset >=
		       (reg_buf_offset + reg_buf_bytes)) {
			/*
			 * The next di_next_unlinked field is beyond
			 * the current logged region. Find the next
			 * logged region that contains or is beyond
			 * the current di_next_unlinked field.
			 */
			bit += nbits;
			bit = xfs_next_bit(buf_f->blf_data_map,
					   buf_f->blf_map_size, bit);

			/*
			 * If there are no more logged regions in the
			 * buffer, then we're done.
			 */
			if (bit == -1)
				return 0;

			nbits = xfs_contig_bits(buf_f->blf_data_map,
						buf_f->blf_map_size, bit);
			ASSERT(nbits > 0);
			reg_buf_offset = bit << XFS_BLF_SHIFT;
			reg_buf_bytes = nbits << XFS_BLF_SHIFT;
			item_index++;
		}

		/*
		 * If the current logged region starts after the current
		 * di_next_unlinked field, then move on to the next
		 * di_next_unlinked field.
		 */
		if (next_unlinked_offset < reg_buf_offset)
			continue;

		ASSERT(item->ri_buf[item_index].i_addr != NULL);
		ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0);
		ASSERT((reg_buf_offset + reg_buf_bytes) <= BBTOB(bp->b_length));

		/*
		 * The current logged region contains a copy of the
		 * current di_next_unlinked field. Extract its value
		 * and copy it to the buffer copy.
		 */
		logged_nextp = item->ri_buf[item_index].i_addr +
				next_unlinked_offset - reg_buf_offset;
		if (XFS_IS_CORRUPT(mp, *logged_nextp == 0)) {
			xfs_alert(mp,
		"Bad inode buffer log record (ptr = "PTR_FMT", bp = "PTR_FMT"). "
		"Trying to replay bad (0) inode di_next_unlinked field.",
				item, bp);
			return -EFSCORRUPTED;
		}

		buffer_nextp = xfs_buf_offset(bp, next_unlinked_offset);
		*buffer_nextp = *logged_nextp;

		/*
		 * If necessary, recalculate the CRC in the on-disk inode. We
		 * have to leave the inode in a consistent state for whoever
		 * reads it next....
		 */
		xfs_dinode_calc_crc(mp,
				xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize));
	}

	return 0;
}

/*
 * V5 filesystems know the age of the buffer on disk being recovered. We can
 * have newer objects on disk than we are replaying, and so for these cases we
 * don't want to replay the current change as that will make the buffer
 * contents temporarily invalid on disk.
 *
 * The magic number might not match the buffer type we are going to recover
 * (e.g. reallocated blocks), so we ignore the xfs_buf_log_format flags. Hence
 * extract the LSN of the existing object in the buffer based on its current
 * magic number. If we don't recognise the magic number in the buffer, then
 * return an LSN of -1 so that the caller knows it was an unrecognised block
 * and so can recover the buffer.
 *
 * Note: we cannot rely solely on magic number matches to determine that the
 * buffer has a valid LSN - we also need to verify that it belongs to this
 * filesystem, so we need to extract the object's LSN and compare it to that
 * which we read from the superblock. If the UUIDs don't match, then we've got
 * a stale metadata block from an old filesystem instance that we need to
 * recover over the top of.
 */
static xfs_lsn_t
xlog_recover_get_buf_lsn(
	struct xfs_mount		*mp,
	struct xfs_buf			*bp,
	struct xfs_buf_log_format	*buf_f)
{
	uint32_t		magic32;
	uint16_t		magic16;
	uint16_t		magicda;
	void			*blk = bp->b_addr;
	uuid_t			*uuid;
	xfs_lsn_t		lsn = -1;
	uint16_t		blft;

	/* v4 filesystems always recover immediately */
	if (!xfs_has_crc(mp))
		goto recover_immediately;

	/*
	 * realtime bitmap and summary file blocks do not have magic numbers or
	 * UUIDs, so we must recover them immediately.
	 */
	blft = xfs_blft_from_flags(buf_f);
	if (blft == XFS_BLFT_RTBITMAP_BUF || blft == XFS_BLFT_RTSUMMARY_BUF)
		goto recover_immediately;

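	/*
	 * The block is probed in three stages: 32-bit magics first (btree
	 * blocks, AG headers, symlinks, dir3 blocks), then the 16-bit
	 * da-block magics, and finally the raw 16-bit magics (dquot, inode),
	 * which carry per-object LSNs and so are always recovered
	 * immediately.
	 */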
	magic32 = be32_to_cpu(*(__be32 *)blk);
	switch (magic32) {
	case XFS_ABTB_CRC_MAGIC:
	case XFS_ABTC_CRC_MAGIC:
	case XFS_ABTB_MAGIC:
	case XFS_ABTC_MAGIC:
	case XFS_RMAP_CRC_MAGIC:
	case XFS_REFC_CRC_MAGIC:
	case XFS_FIBT_CRC_MAGIC:
	case XFS_FIBT_MAGIC:
	case XFS_IBT_CRC_MAGIC:
	case XFS_IBT_MAGIC: {
		struct xfs_btree_block *btb = blk;

		lsn = be64_to_cpu(btb->bb_u.s.bb_lsn);
		uuid = &btb->bb_u.s.bb_uuid;
		break;
	}
	case XFS_BMAP_CRC_MAGIC:
	case XFS_BMAP_MAGIC: {
		struct xfs_btree_block *btb = blk;

		lsn = be64_to_cpu(btb->bb_u.l.bb_lsn);
		uuid = &btb->bb_u.l.bb_uuid;
		break;
	}
	case XFS_AGF_MAGIC:
		lsn = be64_to_cpu(((struct xfs_agf *)blk)->agf_lsn);
		uuid = &((struct xfs_agf *)blk)->agf_uuid;
		break;
	case XFS_AGFL_MAGIC:
		lsn = be64_to_cpu(((struct xfs_agfl *)blk)->agfl_lsn);
		uuid = &((struct xfs_agfl *)blk)->agfl_uuid;
		break;
	case XFS_AGI_MAGIC:
		lsn = be64_to_cpu(((struct xfs_agi *)blk)->agi_lsn);
		uuid = &((struct xfs_agi *)blk)->agi_uuid;
		break;
	case XFS_SYMLINK_MAGIC:
		lsn = be64_to_cpu(((struct xfs_dsymlink_hdr *)blk)->sl_lsn);
		uuid = &((struct xfs_dsymlink_hdr *)blk)->sl_uuid;
		break;
	case XFS_DIR3_BLOCK_MAGIC:
	case XFS_DIR3_DATA_MAGIC:
	case XFS_DIR3_FREE_MAGIC:
		lsn = be64_to_cpu(((struct xfs_dir3_blk_hdr *)blk)->lsn);
		uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid;
		break;
	case XFS_ATTR3_RMT_MAGIC:
		/*
		 * Remote attr blocks are written synchronously, rather than
		 * being logged. That means they do not contain a valid LSN
		 * (i.e. transactionally ordered) in them, and hence any time
		 * we see a buffer to replay over the top of a remote attribute
		 * block we should simply do so.
		 */
		goto recover_immediately;
	case XFS_SB_MAGIC:
		/*
		 * superblock uuids are magic. We may or may not have a
		 * sb_meta_uuid on disk, but it will be set in the in-core
		 * superblock. We set the uuid pointer for verification
		 * according to the superblock feature mask to ensure we check
		 * the relevant UUID in the superblock.
		 */
		lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn);
		if (xfs_has_metauuid(mp))
			uuid = &((struct xfs_dsb *)blk)->sb_meta_uuid;
		else
			uuid = &((struct xfs_dsb *)blk)->sb_uuid;
		break;
	default:
		break;
	}

	if (lsn != (xfs_lsn_t)-1) {
		if (!uuid_equal(&mp->m_sb.sb_meta_uuid, uuid))
			goto recover_immediately;
		return lsn;
	}

	magicda = be16_to_cpu(((struct xfs_da_blkinfo *)blk)->magic);
	switch (magicda) {
	case XFS_DIR3_LEAF1_MAGIC:
	case XFS_DIR3_LEAFN_MAGIC:
	case XFS_ATTR3_LEAF_MAGIC:
	case XFS_DA3_NODE_MAGIC:
		lsn = be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn);
		uuid = &((struct xfs_da3_blkinfo *)blk)->uuid;
		break;
	default:
		break;
	}

	if (lsn != (xfs_lsn_t)-1) {
		if (!uuid_equal(&mp->m_sb.sb_uuid, uuid))
			goto recover_immediately;
		return lsn;
	}

	/*
	 * We do individual object checks on dquot and inode buffers as they
	 * have their own individual LSN records. Also, we could have a stale
	 * buffer here, so we have to at least recognise these buffer types.
	 *
	 * A noted complexity here is inode unlinked list processing - it logs
	 * the inode directly in the buffer, but we don't know which inodes
	 * have been modified, and there is no global buffer LSN. Hence we
	 * need to recover all inode buffer types immediately. This problem
	 * will be fixed by logical logging of the unlinked list modifications.
	 */
	magic16 = be16_to_cpu(*(__be16 *)blk);
	switch (magic16) {
	case XFS_DQUOT_MAGIC:
	case XFS_DINODE_MAGIC:
		goto recover_immediately;
	default:
		break;
	}

	/* unknown buffer contents, recover immediately */

recover_immediately:
	return (xfs_lsn_t)-1;
}

/*
 * This routine replays a modification made to a buffer at runtime.
 * There are actually two types of buffer, regular and inode, which
 * are handled differently. Inode buffers are handled differently
 * in that we only recover a specific set of data from them, namely
 * the inode di_next_unlinked fields. This is because all other inode
 * data is actually logged via inode records and any data we replay
 * here which overlaps that may be stale.
 *
 * When meta-data buffers are freed at run time we log a buffer item
 * with the XFS_BLF_CANCEL bit set to indicate that previous copies
 * of the buffer in the log should not be replayed at recovery time.
 * This is so that if the blocks covered by the buffer are reused for
 * file data before we crash we don't end up replaying old, freed
 * meta-data into a user's file.
 *
 * To handle the cancellation of buffer log items, we make two passes
 * over the log during recovery. During the first we build a table of
 * those buffers which have been cancelled, and during the second we
 * only replay those buffers which do not have corresponding cancel
 * records in the table. See xlog_recover_buf_commit_pass[1,2] above
 * for more details on the implementation of the table of cancel records.
 */
STATIC int
xlog_recover_buf_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			current_lsn)
{
	struct xfs_buf_log_format	*buf_f = item->ri_buf[0].i_addr;
	struct xfs_mount		*mp = log->l_mp;
	struct xfs_buf			*bp;
	int				error;
	uint				buf_flags;
	xfs_lsn_t			lsn;

	/*
	 * In this pass we only want to recover all the buffers which have
	 * not been cancelled and are not cancellation buffers themselves.
	 */
	if (buf_f->blf_flags & XFS_BLF_CANCEL) {
		if (xlog_put_buffer_cancelled(log, buf_f->blf_blkno,
				buf_f->blf_len))
			goto cancelled;
	} else {
		if (xlog_is_buffer_cancelled(log, buf_f->blf_blkno,
				buf_f->blf_len))
			goto cancelled;
	}

	trace_xfs_log_recover_buf_recover(log, buf_f);

	buf_flags = 0;
	if (buf_f->blf_flags & XFS_BLF_INODE_BUF)
		buf_flags |= XBF_UNMAPPED;

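	/*
	 * Inode buffers can be read unmapped because recovery only patches
	 * the di_next_unlinked words through xfs_buf_offset(), so no
	 * contiguous virtual mapping of the buffer is required.
	 */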
	error = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len,
			  buf_flags, &bp, NULL);
	if (error)
		return error;

	/*
	 * Recover the buffer only if we get an LSN from it and it's less than
	 * the lsn of the transaction we are replaying.
	 *
	 * Note that we have to be extremely careful of readahead here.
	 * Readahead does not attach verifiers to the buffers, so if we don't
	 * actually do any replay after readahead because the LSN found in the
	 * buffer is more recent than the current transaction, we need to
	 * attach the verifier directly. Failure to do so can lead to future
	 * recovery actions (e.g. EFI and unlinked list recovery) operating on
	 * the buffers without the verifier attached, which can leave blocks
	 * on disk with the correct content but a stale CRC.
	 *
	 * It is safe to assume these clean buffers are currently up to date.
	 * If the buffer is dirtied by a later transaction being replayed, then
	 * the verifier will be reset to match whatever recover turns that
	 * buffer into.
	 */
	lsn = xlog_recover_get_buf_lsn(mp, bp, buf_f);
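	/*
	 * An LSN of -1 means the block contents were not recognised; an LSN
	 * of 0 can come from metadata that has never been written out with an
	 * LSN. Both cases fall through to a full replay below.
	 */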
	if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
		trace_xfs_log_recover_buf_skip(log, buf_f);
		xlog_recover_validate_buf_type(mp, bp, buf_f, NULLCOMMITLSN);
		goto out_release;
	}

	if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
		error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
		if (error)
			goto out_release;
	} else if (buf_f->blf_flags &
		  (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
		bool	dirty;

		dirty = xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
		if (!dirty)
			goto out_release;
	} else {
		xlog_recover_do_reg_buffer(mp, item, bp, buf_f, current_lsn);
	}

	/*
	 * Perform delayed write on the buffer. Asynchronous writes will be
	 * slower when taking into account all the buffers to be flushed.
	 *
	 * Also make sure that only inode buffers with good sizes stay in
	 * the buffer cache. The kernel moves inodes in buffers of 1 block
	 * or inode_cluster_size bytes, whichever is bigger. The inode
	 * buffers in the log can be a different size if the log was generated
	 * by an older kernel using unclustered inode buffers or a newer kernel
	 * running with a different inode cluster size. Regardless, if
	 * the inode buffer size isn't max(blocksize, inode_cluster_size)
	 * for *our* value of inode_cluster_size, then we need to keep
	 * the buffer out of the buffer cache so that the buffer won't
	 * overlap with future reads of those inodes.
	 */
	if (XFS_DINODE_MAGIC ==
	    be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) &&
	    (BBTOB(bp->b_length) != M_IGEO(log->l_mp)->inode_cluster_size)) {
		xfs_buf_stale(bp);
		error = xfs_bwrite(bp);
	} else {
		ASSERT(bp->b_mount == mp);
		bp->b_flags |= _XBF_LOGRECOVERY;
		xfs_buf_delwri_queue(bp, buffer_list);
	}

out_release:
	xfs_buf_relse(bp);
	return error;
cancelled:
	trace_xfs_log_recover_buf_cancel(log, buf_f);
	return 0;
}

const struct xlog_recover_item_ops xlog_buf_item_ops = {
	.item_type		= XFS_LI_BUF,
	.reorder		= xlog_recover_buf_reorder,
	.ra_pass2		= xlog_recover_buf_ra_pass2,
	.commit_pass1		= xlog_recover_buf_commit_pass1,
	.commit_pass2		= xlog_recover_buf_commit_pass2,
};