/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"

#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_trans.h"

#include "xfs_log_priv.h"
#include "xfs_log_recover.h"
#include "xfs_inode_item.h"
#include "xfs_extfree_item.h"
#include "xfs_trans_priv.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_quota.h"
#include "xfs_cksum.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_bmap_btree.h"
#include "xfs_error.h"
#define BLK_AVG(blk1, blk2)	(((blk1) + (blk2)) >> 1)
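/*
 * A minimal userspace sketch (illustrative only, not part of this file)
 * of what BLK_AVG computes: the midpoint of two block numbers, used by
 * the binary search in xlog_find_cycle_start() below.  Note the shift
 * form can overflow if blk1 + blk2 exceeds the type's range; the log is
 * small enough (l_logBBsize blocks) that this is not a concern here,
 * but the overflow-safe form is shown for comparison:
 *
 *	#include <assert.h>
 *	typedef long long daddr;		// stand-in for xfs_daddr_t
 *
 *	static daddr blk_avg(daddr blk1, daddr blk2)
 *	{
 *		return blk1 + ((blk2 - blk1) >> 1);	// no overflow
 *	}
 *
 *	int main(void)
 *	{
 *		assert(blk_avg(0, 8) == 4);
 *		assert(blk_avg(6, 7) == 6);	// rounds down, like BLK_AVG
 *		return 0;
 *	}
 */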
STATIC int
xlog_clear_stale_blocks(
	struct xlog *,
	xfs_lsn_t);

#if defined(DEBUG)
STATIC void
xlog_recover_check_summary(
	struct xlog *);
#else
#define	xlog_recover_check_summary(log)
#endif

STATIC int
xlog_do_recovery_pass(
	struct xlog *, xfs_daddr_t, xfs_daddr_t, int, xfs_daddr_t *);

/*
 * This structure is used during recovery to record the buf log items which
 * have been canceled and should not be replayed.
 */
struct xfs_buf_cancel {
	xfs_daddr_t		bc_blkno;
	uint			bc_len;
	int			bc_refcount;
	struct list_head	bc_list;
};
/*
 * Sector aligned buffer routines for buffer create/read/write/access
 */

/*
 * Verify the given count of basic blocks is a valid number of blocks
 * to specify for an operation involving the given XFS log buffer.
 * Returns nonzero if the count is valid, 0 otherwise.
 */
static inline int
xlog_buf_bbcount_valid(
	struct xlog	*log,
	int		bbcount)
{
	return bbcount > 0 && bbcount <= log->l_logBBsize;
}
/*
 * Allocate a buffer to hold log data.  The buffer needs to be able
 * to map to a range of nbblks basic blocks at any valid (basic
 * block) offset within the log.
 */

	if (!xlog_buf_bbcount_valid(log, nbblks)) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
			nbblks);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
		return NULL;
	}

	/*
	 * We do log I/O in units of log sectors (a power-of-2
	 * multiple of the basic block size), so we round up the
	 * requested size to accommodate the basic blocks required
	 * for complete log sectors.
	 *
	 * In addition, the buffer may be used for a non-sector-
	 * aligned block offset, in which case an I/O of the
	 * requested size could extend beyond the end of the
	 * buffer.  If the requested size is only 1 basic block it
	 * will never straddle a sector boundary, so this won't be
	 * an issue.  Nor will this be a problem if the log I/O is
	 * done in basic blocks (sector size 1).  But otherwise we
	 * extend the buffer by one extra log sector to ensure
	 * there's space to accommodate this possibility.
	 */
	if (nbblks > 1 && log->l_sectBBsize > 1)
		nbblks += log->l_sectBBsize;
	nbblks = round_up(nbblks, log->l_sectBBsize);

	bp = xfs_buf_get_uncached(log->l_mp->m_logdev_targp, nbblks, 0);
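/*
 * A minimal userspace sketch of the sizing rule above (assumption:
 * sector_bb is the power-of-2 log sector size in basic blocks,
 * mirroring l_sectBBsize): pad multi-block requests by one sector for
 * unaligned starts, then round up to whole sectors.
 *
 *	static int bbcount_for_buf(int nbblks, int sector_bb)
 *	{
 *		if (nbblks > 1 && sector_bb > 1)
 *			nbblks += sector_bb;	// room for misalignment
 *		return (nbblks + sector_bb - 1) & ~(sector_bb - 1);
 *	}
 *
 * e.g. a 3 block read with 4 block sectors allocates
 * bbcount_for_buf(3, 4) == 8 basic blocks: 3 + 4 = 7, rounded up to 8,
 * which covers a read that starts anywhere inside a sector.
 */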
/*
 * Return the address of the start of the given block number's data
 * in a log buffer.  The buffer covers a log sector-aligned region.
 */

	xfs_daddr_t	offset = blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1);

	ASSERT(offset + nbblks <= bp->b_length);
	return bp->b_addr + BBTOB(offset);
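/*
 * Worked example of the alignment math (a sketch; assume sector_bb == 4
 * and BBSHIFT == 9, so BBTOB(x) == x << 9): reading block 7 rounds the
 * I/O down to block 4, so the caller's data starts 3 basic blocks into
 * the buffer:
 *
 *	offset_bb = blk_no & (sector_bb - 1);	// 7 & 3 == 3
 *	data = bp->b_addr + (offset_bb << 9);	// 3 * 512 bytes in
 */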
/*
 * nbblks should be uint, but oh well.  Just want to catch that 32-bit length.
 */

	if (!xlog_buf_bbcount_valid(log, nbblks)) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
			nbblks);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
		return -EFSCORRUPTED;
	}

	blk_no = round_down(blk_no, log->l_sectBBsize);
	nbblks = round_up(nbblks, log->l_sectBBsize);

	ASSERT(nbblks <= bp->b_length);

	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
	bp->b_io_length = nbblks;

	error = xfs_buf_submit_wait(bp);
	if (error && !XFS_FORCED_SHUTDOWN(log->l_mp))
		xfs_buf_ioerror_alert(bp, __func__);

	error = xlog_bread_noalign(log, blk_no, nbblks, bp);
	if (error)
		return error;

	*offset = xlog_align(log, blk_no, nbblks, bp);

/*
 * Read at an offset into the buffer.  Returns with the buffer in its original
 * state regardless of the result of the read.
 */
	xfs_daddr_t	blk_no,		/* block to read from */
	int		nbblks,		/* blocks to read */

	char		*orig_offset = bp->b_addr;
	int		orig_len = BBTOB(bp->b_length);

	error = xfs_buf_associate_memory(bp, offset, BBTOB(nbblks));
	if (error)
		return error;

	error = xlog_bread_noalign(log, blk_no, nbblks, bp);

	/* must reset buffer pointer even on error */
	error2 = xfs_buf_associate_memory(bp, orig_offset, orig_len);
	if (error)
		return error;
	return error2;
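/*
 * The pattern above is worth noting (a sketch, hypothetical names): the
 * buffer must be restored even when the read fails, and the first error
 * wins so a successful restore cannot mask a failed read:
 *
 *	error = do_read(...);
 *	error2 = restore_buffer(...);	// always attempted
 *	if (error)
 *		return error;		// read failure takes precedence
 *	return error2;
 */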
/*
 * Write out the buffer at the given block for the given number of blocks.
 * The buffer is kept locked across the write and is returned locked.
 * This can only be used for synchronous log writes.
 */

	if (!xlog_buf_bbcount_valid(log, nbblks)) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
			nbblks);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
		return -EFSCORRUPTED;
	}

	blk_no = round_down(blk_no, log->l_sectBBsize);
	nbblks = round_up(nbblks, log->l_sectBBsize);

	ASSERT(nbblks <= bp->b_length);

	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
	XFS_BUF_ZEROFLAGS(bp);
	bp->b_io_length = nbblks;

	error = xfs_bwrite(bp);
	if (error)
		xfs_buf_ioerror_alert(bp, __func__);

#ifdef DEBUG
/*
 * dump debug superblock and log record information
 */
STATIC void
xlog_header_check_dump(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	xfs_debug(mp, "%s:  SB : uuid = %pU, fmt = %d",
		__func__, &mp->m_sb.sb_uuid, XLOG_FMT);
	xfs_debug(mp, "    log : uuid = %pU, fmt = %d",
		&head->h_fs_uuid, be32_to_cpu(head->h_fmt));
}
#else
#define xlog_header_check_dump(mp, head)
#endif

/*
 * check log record header for recovery
 */
STATIC int
xlog_header_check_recover(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

	/*
	 * IRIX doesn't write the h_fmt field and leaves it zeroed
	 * (XLOG_FMT_UNKNOWN). This stops us from trying to recover
	 * a dirty log created in IRIX.
	 */
	if (unlikely(head->h_fmt != cpu_to_be32(XLOG_FMT))) {
		xfs_warn(mp,
	"dirty log written in incompatible format - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_recover(1)",
				XFS_ERRLEVEL_HIGH, mp);
		return -EFSCORRUPTED;
	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
		xfs_warn(mp,
	"dirty log entry has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_recover(2)",
				XFS_ERRLEVEL_HIGH, mp);
		return -EFSCORRUPTED;
	}
	return 0;
}

/*
 * read the head block of the log and check the header
 */
STATIC int
xlog_header_check_mount(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

	if (uuid_is_nil(&head->h_fs_uuid)) {
		/*
		 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
		 * h_fs_uuid is nil, we assume this log was last mounted
		 * by IRIX and continue.
		 */
		xfs_warn(mp, "nil uuid in log - IRIX style log");
	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
		xfs_warn(mp, "log has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_mount",
				XFS_ERRLEVEL_HIGH, mp);
		return -EFSCORRUPTED;
	}
	return 0;
}

		/*
		 * We're not going to bother about retrying
		 * this during recovery. One strike!
		 */
		if (!XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
			xfs_buf_ioerror_alert(bp, __func__);
			xfs_force_shutdown(bp->b_target->bt_mount,
						SHUTDOWN_META_IO_ERROR);
		}

/*
 * This routine finds (to an approximation) the first block in the physical
 * log which contains the given cycle.  It uses a binary search algorithm.
 * Note that the algorithm cannot be perfect because the disk will not
 * necessarily be perfect.
 */
STATIC int
xlog_find_cycle_start(
	struct xlog	*log,
	struct xfs_buf	*bp,
	xfs_daddr_t	first_blk,
	xfs_daddr_t	*last_blk,
	uint		cycle)
{

	mid_blk = BLK_AVG(first_blk, end_blk);
	while (mid_blk != first_blk && mid_blk != end_blk) {
		error = xlog_bread(log, mid_blk, 1, bp, &offset);
		if (error)
			return error;
		mid_cycle = xlog_get_cycle(offset);
		if (mid_cycle == cycle)
			end_blk = mid_blk;	/* last_half_cycle == mid_cycle */
		else
			first_blk = mid_blk;	/* first_half_cycle == mid_cycle */
		mid_blk = BLK_AVG(first_blk, end_blk);
	}
	ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
	       (mid_blk == end_blk && mid_blk-1 == first_blk));

	*last_blk = end_blk;

	return 0;
}
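/*
 * A minimal userspace sketch of the search above, over an array of
 * per-block cycle numbers instead of on-disk blocks (illustrative
 * only).  Invariant: cycles[first] != cycle and cycles[end] == cycle,
 * so the loop converges on the boundary and returns the first index
 * holding the target cycle, to within the accuracy the comment above
 * allows for.
 *
 *	#include <assert.h>
 *
 *	static int find_cycle_start(const int *cycles, int first, int end,
 *				    int cycle)
 *	{
 *		int mid = (first + end) / 2;
 *
 *		while (mid != first && mid != end) {
 *			if (cycles[mid] == cycle)
 *				end = mid;	// boundary at or before mid
 *			else
 *				first = mid;	// boundary after mid
 *			mid = (first + end) / 2;
 *		}
 *		return end;
 *	}
 *
 *	int main(void)
 *	{
 *		int cycles[] = { 2, 2, 2, 1, 1, 1, 1, 1 };
 *		// cycle 1 first appears at block 3
 *		assert(find_cycle_start(cycles, 0, 7, 1) == 3);
 *		return 0;
 *	}
 */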
/*
 * Check that a range of blocks does not contain stop_on_cycle_no.
 * Fill in *new_blk with the block offset where such a block is
 * found, or with -1 (an invalid block number) if there is no such
 * block in the range.  The scan needs to occur from front to back
 * and the pointer into the region must be updated since a later
 * routine will need to perform another test.
 */
STATIC int
xlog_find_verify_cycle(
	struct xlog	*log,
	xfs_daddr_t	start_blk,
	int		nbblks,
	uint		stop_on_cycle_no,
	xfs_daddr_t	*new_blk)
{

	/*
	 * Greedily allocate a buffer big enough to handle the full
	 * range of basic blocks we'll be examining.  If that fails,
	 * try a smaller size.  We need to be able to read at least
	 * a log sector, or we're out of luck.
	 */
	bufblks = 1 << ffs(nbblks);
	while (bufblks > log->l_logBBsize)
		bufblks >>= 1;
	while (!(bp = xlog_get_bp(log, bufblks))) {
		bufblks >>= 1;
		if (bufblks < log->l_sectBBsize)
			return -ENOMEM;
	}
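/*
 * The allocation loop above degrades gracefully: a userspace sketch of
 * the same strategy (hypothetical helper, malloc standing in for
 * xlog_get_bp).  Start from a power-of-two size at least as large as
 * the request (one reasonable way to pick it; the code above uses
 * 1 << ffs(nbblks), which doubles a power-of-two request), halve on
 * failure, and give up only below one sector:
 *
 *	#include <stdlib.h>
 *
 *	static void *get_buf(int nbblks, int sector_bb, int *out_bb)
 *	{
 *		int bufblks = 1;
 *		void *p;
 *
 *		while (bufblks < nbblks)
 *			bufblks <<= 1;		// power-of-two upper bound
 *		while (!(p = malloc((size_t)bufblks << 9))) {
 *			bufblks >>= 1;		// retry with half the size
 *			if (bufblks < sector_bb)
 *				return NULL;	// can't even read a sector
 *		}
 *		*out_bb = bufblks;
 *		return p;
 *	}
 */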
	for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
		int	bcount;

		bcount = min(bufblks, (start_blk + nbblks - i));

		error = xlog_bread(log, i, bcount, bp, &buf);
		if (error)
			goto out;

		for (j = 0; j < bcount; j++) {
			cycle = xlog_get_cycle(buf);
			if (cycle == stop_on_cycle_no) {
				*new_blk = i+j;
				goto out;
			}

			buf += BBSIZE;
		}
	}

	*new_blk = -1;

out:
	xlog_put_bp(bp);
	return error;
}

/*
 * Potentially backup over partial log record write.
 *
 * In the typical case, last_blk is the number of the block directly after
 * a good log record.  Therefore, we subtract one to get the block number
 * of the last block in the given buffer.  extra_bblks contains the number
 * of blocks we would have read on a previous read.  This happens when the
 * last log record is split over the end of the physical log.
 *
 * extra_bblks is the number of blocks potentially verified on a previous
 * call to this routine.
 */
STATIC int
xlog_find_verify_log_record(
	struct xlog		*log,
	xfs_daddr_t		start_blk,
	xfs_daddr_t		*last_blk,
	int			extra_bblks)
{
	xfs_daddr_t		i;
	xlog_rec_header_t	*head = NULL;
	int			smallmem = 0;
	int			num_blks = *last_blk - start_blk;
	int			xhdrs;

	ASSERT(start_blk != 0 || *last_blk != start_blk);

	if (!(bp = xlog_get_bp(log, num_blks))) {
		if (!(bp = xlog_get_bp(log, 1)))
			return -ENOMEM;
		smallmem = 1;
	} else {
		error = xlog_bread(log, start_blk, num_blks, bp, &offset);
		if (error)
			goto out;
		offset += ((num_blks - 1) << BBSHIFT);
	}

	for (i = (*last_blk) - 1; i >= 0; i--) {
		if (i < start_blk) {
			/* valid log record not found */
			xfs_warn(log->l_mp,
		"Log inconsistent (didn't find previous header)");
			ASSERT(0);
			error = -EIO;
			goto out;
		}

		if (smallmem) {
			error = xlog_bread(log, i, 1, bp, &offset);
			if (error)
				goto out;
		}

		head = (xlog_rec_header_t *)offset;

		if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
			break;

		if (!smallmem)
			offset -= BBSIZE;
	}

	/*
	 * We hit the beginning of the physical log & still no header.  Return
	 * to caller.  If caller can handle a return of -1, then this routine
	 * will be called again for the end of the physical log.
	 */
	if (i == -1) {
		error = 1;
		goto out;
	}

	/*
	 * We have the final block of the good log (the first block
	 * of the log record _before_ the head).  So we check the uuid.
	 */
	if ((error = xlog_header_check_mount(log->l_mp, head)))
		goto out;

	/*
	 * We may have found a log record header before we expected one.
	 * last_blk will be the 1st block # with a given cycle #.  We may end
	 * up reading an entire log record.  In this case, we don't want to
	 * reset last_blk.  Only when last_blk points in the middle of a log
	 * record do we update last_blk.
	 */
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		uint	h_size = be32_to_cpu(head->h_size);

		xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE;
		if (h_size % XLOG_HEADER_CYCLE_SIZE)
			xhdrs++;
	} else {
		xhdrs = 1;
	}

	if (*last_blk - i + extra_bblks !=
	    BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
		*last_blk = i;

out:
	xlog_put_bp(bp);
	return error;
}
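/*
 * The record length check above accounts for v2 logs, where a record
 * carrying more than XLOG_HEADER_CYCLE_SIZE (32k) of data needs extra
 * header blocks.  A sketch of the arithmetic (assuming 512 byte basic
 * blocks):
 *
 *	xhdrs = DIV_ROUND_UP(h_size, XLOG_HEADER_CYCLE_SIZE);
 *	record_bblks = BTOBB(h_len) + xhdrs;	// data blocks + headers
 *
 * so a record with h_len == 40960 and h_size == 65536 spans
 * BTOBB(40960) == 80 data blocks plus 2 header blocks, and last_blk is
 * only reset when the candidate header does not sit exactly that far
 * back from it.
 */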
/*
 * Head is defined to be the point of the log where the next log write
 * could go.  This means that incomplete LR writes at the end are
 * eliminated when calculating the head.  We aren't guaranteed that previous
 * LRs have complete transactions.  We only know that a cycle number of
 * current cycle number -1 won't be present in the log if we start writing
 * from our current block number.
 *
 * last_blk contains the block number of the first block with a given
 * cycle number.
 *
 * Return: zero if normal, non-zero if error.
 */
STATIC int
xlog_find_head(
	struct xlog	*log,
	xfs_daddr_t	*return_head_blk)
{
	xfs_buf_t	*bp;
	char		*offset;
	xfs_daddr_t	new_blk, first_blk, start_blk, last_blk, head_blk;
	int		num_scan_bblks;
	uint		first_half_cycle, last_half_cycle;
	uint		stop_on_cycle;
	int		error, log_bbnum = log->l_logBBsize;

	/* Is the end of the log device zeroed? */
	error = xlog_find_zeroed(log, &first_blk);
	if (error < 0) {
		xfs_warn(log->l_mp, "empty log check failed");
		return error;
	}
	if (error == 1) {
		*return_head_blk = first_blk;

		/* Is the whole lot zeroed? */
		if (!first_blk) {
			/* Linux XFS shouldn't generate totally zeroed logs -
			 * mkfs etc write a dummy unmount record to a fresh
			 * log so we can store the uuid in there
			 */
			xfs_warn(log->l_mp, "totally zeroed log");
		}

		return 0;
	}

	first_blk = 0;			/* get cycle # of 1st block */
	bp = xlog_get_bp(log, 1);
	if (!bp)
		return -ENOMEM;

	error = xlog_bread(log, 0, 1, bp, &offset);
	if (error)
		goto bp_err;

	first_half_cycle = xlog_get_cycle(offset);

	last_blk = head_blk = log_bbnum - 1;	/* get cycle # of last block */
	error = xlog_bread(log, last_blk, 1, bp, &offset);
	if (error)
		goto bp_err;

	last_half_cycle = xlog_get_cycle(offset);
	ASSERT(last_half_cycle != 0);

	/*
	 * If the 1st half cycle number is equal to the last half cycle number,
	 * then the entire log is stamped with the same cycle number.  In this
	 * case, head_blk can't be set to zero (which makes sense).  The below
	 * math doesn't work out properly with head_blk equal to zero.  Instead,
	 * we set it to log_bbnum which is an invalid block number, but this
	 * value makes the math correct.  If head_blk doesn't change through
	 * all the tests below, *head_blk is set to zero at the very end rather
	 * than log_bbnum.  In a sense, log_bbnum and zero are the same block
	 * in a circular file.
	 */
	if (first_half_cycle == last_half_cycle) {
		/*
		 * In this case we believe that the entire log should have
		 * cycle number last_half_cycle.  We need to scan backwards
		 * from the end verifying that there are no holes still
		 * containing last_half_cycle - 1.  If we find such a hole,
		 * then the start of that hole will be the new head.  The
		 * simple case looks like
		 *        x | x ... | x - 1 | x
		 * Another case that fits this picture would be
		 *        x | x + 1 | x ... | x
		 * In this case the head really is somewhere at the end of the
		 * log, as one of the latest writes at the beginning was
		 * incomplete.
		 * One more case is
		 *        x | x + 1 | x ... | x - 1 | x
		 * This is really the combination of the above two cases, and
		 * the head has to end up at the start of the x-1 hole at the
		 * end of the log.
		 *
		 * In the 256k log case, we will read from the beginning to the
		 * end of the log and search for cycle numbers equal to x-1.
		 * We don't worry about the x+1 blocks that we encounter,
		 * because we know that they cannot be the head since the log
		 * starts with x.
		 */
		head_blk = log_bbnum;
		stop_on_cycle = last_half_cycle - 1;
	} else {
		/*
		 * In this case we want to find the first block with cycle
		 * number matching last_half_cycle.  We expect the log to be
		 * some variation on
		 *        x + 1 ... | x ... | x
		 * The first block with cycle number x (last_half_cycle) will
		 * be where the new head belongs.  First we do a binary search
		 * for the first occurrence of last_half_cycle.  The binary
		 * search may not be totally accurate, so then we scan back
		 * from there looking for occurrences of last_half_cycle before
		 * us.  If that backwards scan wraps around the beginning of
		 * the log, then we look for occurrences of last_half_cycle - 1
		 * at the end of the log.  The cases we're looking for look
		 * like
		 *                               v binary search stopped here
		 *        x + 1 ... | x | x + 1 | x ... | x
		 *                   ^ but we want to locate this spot
		 * or
		 *        <---------> less than scan distance
		 *        x + 1 ... | x ... | x - 1 | x
		 *        ^ we want to locate this spot
		 */
		stop_on_cycle = last_half_cycle;
		if ((error = xlog_find_cycle_start(log, bp, first_blk,
						&head_blk, last_half_cycle)))
			goto bp_err;
	}

	/*
	 * Now validate the answer.  Scan back some number of maximum possible
	 * blocks and make sure each one has the expected cycle number.  The
	 * maximum is determined by the total possible amount of buffering
	 * in the in-core log.  The following number can be made tighter if
	 * we actually look at the block size of the filesystem.
	 */
	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
	if (head_blk >= num_scan_bblks) {
		/*
		 * We are guaranteed that the entire check can be performed
		 * in one buffer.
		 */
		start_blk = head_blk - num_scan_bblks;
		if ((error = xlog_find_verify_cycle(log,
						start_blk, num_scan_bblks,
						stop_on_cycle, &new_blk)))
			goto bp_err;
		if (new_blk != -1)
			head_blk = new_blk;
	} else {		/* need to read 2 parts of log */
		/*
		 * We are going to scan backwards in the log in two parts.
		 * First we scan the physical end of the log.  In this part
		 * of the log, we are looking for blocks with cycle number
		 * last_half_cycle - 1.
		 * If we find one, then we know that the log starts there, as
		 * we've found a hole that didn't get written in going around
		 * the end of the physical log.  The simple case for this is
		 *        x + 1 ... | x ... | x - 1 | x
		 *        <---------> less than scan distance
		 * If all of the blocks at the end of the log have cycle number
		 * last_half_cycle, then we check the blocks at the start of
		 * the log looking for occurrences of last_half_cycle.  If we
		 * find one, then our current estimate for the location of the
		 * first occurrence of last_half_cycle is wrong and we move
		 * back to the hole we've found.  This case looks like
		 *        x + 1 ... | x | x + 1 | x ...
		 *                       ^ binary search stopped here
		 * Another case we need to handle that only occurs in 256k
		 * logs is
		 *        x + 1 ... | x ... | x+1 | x ...
		 *                   ^ binary search stops here
		 * In a 256k log, the scan at the end of the log will see the
		 * x + 1 blocks.  We need to skip past those since that is
		 * certainly not the head of the log.  By searching for
		 * last_half_cycle-1 we accomplish that.
		 */
		ASSERT(head_blk <= INT_MAX &&
			(xfs_daddr_t) num_scan_bblks >= head_blk);
		start_blk = log_bbnum - (num_scan_bblks - head_blk);
		if ((error = xlog_find_verify_cycle(log, start_blk,
					num_scan_bblks - (int)head_blk,
					(stop_on_cycle - 1), &new_blk)))
			goto bp_err;
		if (new_blk != -1) {
			head_blk = new_blk;
			goto validate_head;
		}

		/*
		 * Scan beginning of log now.  The last part of the physical
		 * log is good.  This scan needs to verify that it doesn't find
		 * the last_half_cycle.
		 */
		start_blk = 0;
		ASSERT(head_blk <= INT_MAX);
		if ((error = xlog_find_verify_cycle(log,
					start_blk, (int)head_blk,
					stop_on_cycle, &new_blk)))
			goto bp_err;
		if (new_blk != -1)
			head_blk = new_blk;
	}

validate_head:
	/*
	 * Now we need to make sure head_blk is not pointing to a block in
	 * the middle of a log record.
	 */
	num_scan_bblks = XLOG_REC_SHIFT(log);
	if (head_blk >= num_scan_bblks) {
		start_blk = head_blk - num_scan_bblks; /* don't read head_blk */

		/* start ptr at last block ptr before head_blk */
		error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
		if (error == 1)
			error = -EIO;
		if (error)
			goto bp_err;
	} else {
		start_blk = 0;
		ASSERT(head_blk <= INT_MAX);
		error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
		if (error < 0)
			goto bp_err;
		if (error == 1) {
			/* We hit the beginning of the log during our search */
			start_blk = log_bbnum - (num_scan_bblks - head_blk);
			new_blk = log_bbnum;
			ASSERT(start_blk <= INT_MAX &&
				(xfs_daddr_t) log_bbnum-start_blk >= 0);
			ASSERT(head_blk <= INT_MAX);
			error = xlog_find_verify_log_record(log, start_blk,
							&new_blk, (int)head_blk);
			if (error == 1)
				error = -EIO;
			if (error)
				goto bp_err;
			if (new_blk != log_bbnum)
				head_blk = new_blk;
		} else if (error)
			goto bp_err;
	}

	xlog_put_bp(bp);
	if (head_blk == log_bbnum)
		*return_head_blk = 0;
	else
		*return_head_blk = head_blk;
	/*
	 * When returning here, we have a good block number.  Bad block
	 * means that during a previous crash, we didn't have a clean break
	 * from cycle number N to cycle number N-1.  In this case, we need
	 * to find the first block with cycle number N-1.
	 */
	return 0;

 bp_err:
	xlog_put_bp(bp);

	if (error)
		xfs_warn(log->l_mp, "failed to find log head");
	return error;
}

/*
 * Seek backwards in the log for log record headers.
 *
 * Given a starting log block, walk backwards until we find the provided number
 * of records or hit the provided tail block. The return value is the number of
 * records encountered or a negative error code. The log block and buffer
 * pointer of the last record seen are returned in rblk and rhead respectively.
 */
STATIC int
xlog_rseek_logrec_hdr(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		tail_blk,
	int			count,
	struct xfs_buf		*bp,
	xfs_daddr_t		*rblk,
	struct xlog_rec_header	**rhead,
	bool			*wrapped)
{
	int			i;
	int			error;
	int			found = 0;
	char			*offset = NULL;
	xfs_daddr_t		end_blk;

	*wrapped = false;

	/*
	 * Walk backwards from the head block until we hit the tail or the first
	 * block in the log.
	 */
	end_blk = head_blk > tail_blk ? tail_blk : 0;
	for (i = (int) head_blk - 1; i >= end_blk; i--) {
		error = xlog_bread(log, i, 1, bp, &offset);
		if (error)
			goto out_error;

		if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
			*rblk = i;
			*rhead = (struct xlog_rec_header *) offset;
			if (++found == count)
				break;
		}
	}

	/*
	 * If we haven't hit the tail block or the log record header count,
	 * start looking again from the end of the physical log. Note that
	 * callers can pass head == tail if the tail is not yet known.
	 */
	if (tail_blk >= head_blk && found != count) {
		for (i = log->l_logBBsize - 1; i >= (int) tail_blk; i--) {
			error = xlog_bread(log, i, 1, bp, &offset);
			if (error)
				goto out_error;

			if (*(__be32 *)offset ==
			    cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
				*wrapped = true;
				*rblk = i;
				*rhead = (struct xlog_rec_header *) offset;
				if (++found == count)
					break;
			}
		}
	}

	return found;

out_error:
	return error;
}
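/*
 * A compact userspace sketch of the two-leg scan above (illustrative
 * only): walk backwards from the head, and if the head sits at or below
 * the tail in the circular log, continue from the physical end down to
 * the tail.  is_hdr() stands in for the xlog_bread() + magic number
 * test.
 *
 *	static int rseek_hdrs(int head, int tail, int logbbs, int count,
 *			      int (*is_hdr)(int blk), int *rblk)
 *	{
 *		int end = head > tail ? tail : 0;
 *		int found = 0;
 *		int i;
 *
 *		for (i = head - 1; i >= end; i--) {
 *			if (is_hdr(i)) {
 *				*rblk = i;
 *				if (++found == count)
 *					return found;
 *			}
 *		}
 *		if (tail >= head && found != count) {	// wrapped log
 *			for (i = logbbs - 1; i >= tail; i--) {
 *				if (is_hdr(i)) {
 *					*rblk = i;
 *					if (++found == count)
 *						return found;
 *				}
 *			}
 *		}
 *		return found;
 *	}
 */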
/*
 * Seek forward in the log for log record headers.
 *
 * Given head and tail blocks, walk forward from the tail block until we find
 * the provided number of records or hit the head block. The return value is the
 * number of records encountered or a negative error code. The log block and
 * buffer pointer of the last record seen are returned in rblk and rhead
 * respectively.
 */
STATIC int
xlog_seek_logrec_hdr(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		tail_blk,
	int			count,
	struct xfs_buf		*bp,
	xfs_daddr_t		*rblk,
	struct xlog_rec_header	**rhead,
	bool			*wrapped)
{
	int			i;
	int			error;
	int			found = 0;
	char			*offset = NULL;
	xfs_daddr_t		end_blk;

	*wrapped = false;

	/*
	 * Walk forward from the tail block until we hit the head or the last
	 * block in the log.
	 */
	end_blk = head_blk > tail_blk ? head_blk : log->l_logBBsize - 1;
	for (i = (int) tail_blk; i <= end_blk; i++) {
		error = xlog_bread(log, i, 1, bp, &offset);
		if (error)
			goto out_error;

		if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
			*rblk = i;
			*rhead = (struct xlog_rec_header *) offset;
			if (++found == count)
				break;
		}
	}

	/*
	 * If we haven't hit the head block or the log record header count,
	 * start looking again from the start of the physical log.
	 */
	if (tail_blk > head_blk && found != count) {
		for (i = 0; i < (int) head_blk; i++) {
			error = xlog_bread(log, i, 1, bp, &offset);
			if (error)
				goto out_error;

			if (*(__be32 *)offset ==
			    cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
				*wrapped = true;
				*rblk = i;
				*rhead = (struct xlog_rec_header *) offset;
				if (++found == count)
					break;
			}
		}
	}

	return found;

out_error:
	return error;
}

/*
 * Check the log tail for torn writes. This is required when torn writes are
 * detected at the head and the head had to be walked back to a previous record.
 * The tail of the previous record must now be verified to ensure the torn
 * writes didn't corrupt the previous tail.
 *
 * Return an error if CRC verification fails as recovery cannot proceed.
 */
STATIC int
xlog_verify_tail(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		tail_blk)
{
	struct xlog_rec_header	*thead;
	struct xfs_buf		*bp;
	xfs_daddr_t		first_bad;
	int			count;
	int			error = 0;
	bool			wrapped;
	xfs_daddr_t		tmp_head;

	bp = xlog_get_bp(log, 1);
	if (!bp)
		return -ENOMEM;

	/*
	 * Seek XLOG_MAX_ICLOGS + 1 records past the current tail record to get
	 * a temporary head block that points after the last possible
	 * concurrently written record of the tail.
	 */
	count = xlog_seek_logrec_hdr(log, head_blk, tail_blk,
				     XLOG_MAX_ICLOGS + 1, bp, &tmp_head, &thead,
				     &wrapped);
	if (count < 0) {
		error = count;
		goto out;
	}

	/*
	 * If the call above didn't find XLOG_MAX_ICLOGS + 1 records, we ran
	 * into the actual log head. tmp_head points to the start of the record
	 * so update it to the actual head block.
	 */
	if (count < XLOG_MAX_ICLOGS + 1)
		tmp_head = head_blk;

	/*
	 * We now have a tail and temporary head block that covers at least
	 * XLOG_MAX_ICLOGS records from the tail. We need to verify that these
	 * records were completely written. Run a CRC verification pass from
	 * tail to head and return the result.
	 */
	error = xlog_do_recovery_pass(log, tmp_head, tail_blk,
				      XLOG_RECOVER_CRCPASS, &first_bad);

out:
	xlog_put_bp(bp);
	return error;
}

/*
 * Detect and trim torn writes from the head of the log.
 *
 * Storage without sector atomicity guarantees can result in torn writes in the
 * log in the event of a crash. Our only means to detect this scenario is via
 * CRC verification. While we can't always be certain that CRC verification
 * failure is due to a torn write vs. an unrelated corruption, we do know that
 * only a certain number (XLOG_MAX_ICLOGS) of log records can be written out at
 * one time. Therefore, CRC verify up to XLOG_MAX_ICLOGS records at the head of
 * the log and treat failures in this range as torn writes as a matter of
 * policy. In the event of CRC failure, the head is walked back to the last good
 * record in the log and the tail is updated from that record and verified.
 */
STATIC int
xlog_verify_head(
	struct xlog		*log,
	xfs_daddr_t		*head_blk,	/* in/out: unverified head */
	xfs_daddr_t		*tail_blk,	/* out: tail block */
	struct xfs_buf		*bp,
	xfs_daddr_t		*rhead_blk,	/* start blk of last record */
	struct xlog_rec_header	**rhead,	/* ptr to last record */
	bool			*wrapped)	/* last rec. wraps phys. log */
{
	struct xlog_rec_header	*tmp_rhead;
	struct xfs_buf		*tmp_bp;
	xfs_daddr_t		first_bad;
	xfs_daddr_t		tmp_rhead_blk;
	int			found;
	int			error = 0;
	bool			tmp_wrapped;

	/*
	 * Check the head of the log for torn writes. Search backwards from the
	 * head until we hit the tail or the maximum number of log record I/Os
	 * that could have been in flight at one time. Use a temporary buffer so
	 * we don't trash the rhead/bp pointers from the caller.
	 */
	tmp_bp = xlog_get_bp(log, 1);
	if (!tmp_bp)
		return -ENOMEM;
	error = xlog_rseek_logrec_hdr(log, *head_blk, *tail_blk,
				      XLOG_MAX_ICLOGS, tmp_bp, &tmp_rhead_blk,
				      &tmp_rhead, &tmp_wrapped);
	xlog_put_bp(tmp_bp);
	if (error < 0)
		return error;

	/*
	 * Now run a CRC verification pass over the records starting at the
	 * block found above to the current head. If a CRC failure occurs, the
	 * log block of the first bad record is saved in first_bad.
	 */
	error = xlog_do_recovery_pass(log, *head_blk, tmp_rhead_blk,
				      XLOG_RECOVER_CRCPASS, &first_bad);
	if (error == -EFSBADCRC) {
		/*
		 * We've hit a potential torn write. Reset the error and warn
		 * about it.
		 */
		error = 0;
		xfs_warn(log->l_mp,
"Torn write (CRC failure) detected at log block 0x%llx. Truncating head block from 0x%llx.",
			 first_bad, *head_blk);

		/*
		 * Get the header block and buffer pointer for the last good
		 * record before the bad record.
		 *
		 * Note that xlog_find_tail() clears the blocks at the new head
		 * (i.e., the records with invalid CRC) if the cycle number
		 * matches the current cycle.
		 */
		found = xlog_rseek_logrec_hdr(log, first_bad, *tail_blk, 1, bp,
					      rhead_blk, rhead, wrapped);
		if (found < 0)
			return found;
		if (found == 0)		/* XXX: right thing to do here? */
			return -EIO;

		/*
		 * Reset the head block to the starting block of the first bad
		 * log record and set the tail block based on the last good
		 * record.
		 *
		 * Bail out if the updated head/tail match as this indicates
		 * possible corruption outside of the acceptable
		 * (XLOG_MAX_ICLOGS) range. This is a job for xfs_repair...
		 */
		*head_blk = first_bad;
		*tail_blk = BLOCK_LSN(be64_to_cpu((*rhead)->h_tail_lsn));
		if (*head_blk == *tail_blk) {
			ASSERT(0);
			return 0;
		}

		/*
		 * Now verify the tail based on the updated head. This is
		 * required because the torn writes trimmed from the head could
		 * have been written over the tail of a previous record. Return
		 * any errors since recovery cannot proceed if the tail is
		 * corrupt.
		 *
		 * XXX: This leaves a gap in truly robust protection from torn
		 * writes in the log. If the head is behind the tail, the tail
		 * pushes forward to create some space and then a crash occurs
		 * causing the writes into the previous record's tail region to
		 * tear, and log recovery isn't able to recover.
		 *
		 * How likely is this to occur? If possible, can we do something
		 * more intelligent here? Is it safe to push the tail forward if
		 * we can determine that the tail is within the range of the
		 * torn write (e.g., the kernel can only overwrite the tail if
		 * it has actually been pushed forward)? Alternatively, could we
		 * somehow prevent this condition at runtime?
		 */
		error = xlog_verify_tail(log, *head_blk, *tail_blk);
	}

	return error;
}
/*
 * Check whether the head of the log points to an unmount record. In other
 * words, determine whether the log is clean. If so, update the in-core state
 * appropriately.
 */
static int
xlog_check_unmount_rec(
	struct xlog		*log,
	xfs_daddr_t		*head_blk,
	xfs_daddr_t		*tail_blk,
	struct xlog_rec_header	*rhead,
	xfs_daddr_t		rhead_blk,
	struct xfs_buf		*bp,
	bool			*clean)
{
	struct xlog_op_header	*op_head;
	xfs_daddr_t		umount_data_blk;
	xfs_daddr_t		after_umount_blk;
	int			hblks;
	int			error;
	char			*offset;

	*clean = false;

	/*
	 * Look for unmount record. If we find it, then we know there was a
	 * clean unmount. Since 'i' could be the last block in the physical
	 * log, we convert to a log block before comparing to the head_blk.
	 *
	 * Save the current tail lsn to use to pass to xlog_clear_stale_blocks()
	 * below. We won't want to clear the unmount record if there is one, so
	 * we pass the lsn of the unmount record rather than the block after it.
	 */
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		int	h_size = be32_to_cpu(rhead->h_size);
		int	h_version = be32_to_cpu(rhead->h_version);

		if ((h_version & XLOG_VERSION_2) &&
		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
			if (h_size % XLOG_HEADER_CYCLE_SIZE)
				hblks++;
		} else {
			hblks = 1;
		}
	} else {
		hblks = 1;
	}
	after_umount_blk = rhead_blk + hblks + BTOBB(be32_to_cpu(rhead->h_len));
	after_umount_blk = do_mod(after_umount_blk, log->l_logBBsize);
	if (*head_blk == after_umount_blk &&
	    be32_to_cpu(rhead->h_num_logops) == 1) {
		umount_data_blk = rhead_blk + hblks;
		umount_data_blk = do_mod(umount_data_blk, log->l_logBBsize);
		error = xlog_bread(log, umount_data_blk, 1, bp, &offset);
		if (error)
			return error;

		op_head = (struct xlog_op_header *)offset;
		if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
			/*
			 * Set tail and last sync so that newly written log
			 * records will point recovery to after the current
			 * unmount record.
			 */
			xlog_assign_atomic_lsn(&log->l_tail_lsn,
					log->l_curr_cycle, after_umount_blk);
			xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
					log->l_curr_cycle, after_umount_blk);
			*tail_blk = after_umount_blk;

			*clean = true;
		}
	}

	return 0;
}

static void
xlog_set_state(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	struct xlog_rec_header	*rhead,
	xfs_daddr_t		rhead_blk,
	bool			bump_cycle)
{
	/*
	 * Reset log values according to the state of the log when we
	 * crashed.  In the case where head_blk == 0, we bump curr_cycle
	 * one because the next write starts a new cycle rather than
	 * continuing the cycle of the last good log record.  At this
	 * point we have guaranteed that all partial log records have been
	 * accounted for.  Therefore, we know that the last good log record
	 * written was complete and ended exactly on the end boundary
	 * of the physical log.
	 */
	log->l_prev_block = rhead_blk;
	log->l_curr_block = (int)head_blk;
	log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
	if (bump_cycle)
		log->l_curr_cycle++;
	atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
	atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
	xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
					BBTOB(log->l_curr_block));
	xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
					BBTOB(log->l_curr_block));
}

/*
 * Find the sync block number or the tail of the log.
 *
 * This will be the block number of the last record to have its
 * associated buffers synced to disk.  Every log record header has
 * a sync lsn embedded in it.  LSNs hold block numbers, so it is easy
 * to get a sync block number.  The only concern is to figure out which
 * log record header to believe.
 *
 * The following algorithm uses the log record header with the largest
 * lsn.  The entire log record does not need to be valid.  We only care
 * that the header is valid.
 *
 * We could speed up search by using current head_blk buffer, but it is not
 * available.
 */
STATIC int
xlog_find_tail(
	struct xlog		*log,
	xfs_daddr_t		*head_blk,
	xfs_daddr_t		*tail_blk)
{
	xlog_rec_header_t	*rhead;
	char			*offset = NULL;
	xfs_buf_t		*bp;
	int			error;
	xfs_daddr_t		rhead_blk;
	xfs_lsn_t		tail_lsn;
	bool			wrapped = false;
	bool			clean = false;

	/*
	 * Find previous log record
	 */
	if ((error = xlog_find_head(log, head_blk)))
		return error;
	ASSERT(*head_blk < INT_MAX);

	bp = xlog_get_bp(log, 1);
	if (!bp)
		return -ENOMEM;
	if (*head_blk == 0) {				/* special case */
		error = xlog_bread(log, 0, 1, bp, &offset);
		if (error)
			goto done;

		if (xlog_get_cycle(offset) == 0) {
			*tail_blk = 0;
			/* leave all other log inited values alone */
			goto done;
		}
	}

	/*
	 * Search backwards through the log looking for the log record header
	 * block. This wraps all the way back around to the head so something is
	 * seriously wrong if we can't find it.
	 */
	error = xlog_rseek_logrec_hdr(log, *head_blk, *head_blk, 1, bp,
				      &rhead_blk, &rhead, &wrapped);
	if (error < 0)
		return error;
	if (!error) {
		xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
		return -EIO;
	}
	*tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));

	/*
	 * Trim the head block back to skip over torn records. We can have
	 * multiple log I/Os in flight at any time, so we assume CRC failures
	 * back through the previous several records are torn writes and skip
	 * them.
	 */
	error = xlog_verify_head(log, head_blk, tail_blk, bp, &rhead_blk,
				 &rhead, &wrapped);
	if (error)
		goto done;

	/*
	 * Set the log state based on the current head record.
	 */
	xlog_set_state(log, *head_blk, rhead, rhead_blk, wrapped);
	tail_lsn = atomic64_read(&log->l_tail_lsn);

	/*
	 * Look for an unmount record at the head of the log. This sets the log
	 * state to determine whether recovery is necessary.
	 */
	error = xlog_check_unmount_rec(log, head_blk, tail_blk, rhead,
				       rhead_blk, bp, &clean);
	if (error)
		goto done;

	/*
	 * Note that the unmount was clean. If the unmount was not clean, we
	 * need to know this to rebuild the superblock counters from the perag
	 * headers if we have a filesystem using non-persistent counters.
	 */
	if (clean)
		log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;

	/*
	 * Make sure that there are no blocks in front of the head
	 * with the same cycle number as the head.  This can happen
	 * because we allow multiple outstanding log writes concurrently,
	 * and the later writes might make it out before earlier ones.
	 *
	 * We use the lsn from before modifying it so that we'll never
	 * overwrite the unmount record after a clean unmount.
	 *
	 * Do this only if we are going to recover the filesystem
	 *
	 * NOTE: This used to say "if (!readonly)"
	 * However on Linux, we can & do recover a read-only filesystem.
	 * We only skip recovery if NORECOVERY is specified on mount,
	 * in which case we would not be here.
	 *
	 * But... if the -device- itself is readonly, just skip this.
	 * We can't recover this device anyway, so it won't matter.
	 */
	if (!xfs_readonly_buftarg(log->l_mp->m_logdev_targp))
		error = xlog_clear_stale_blocks(log, tail_lsn);

done:
	xlog_put_bp(bp);

	if (error)
		xfs_warn(log->l_mp, "failed to locate log tail");
	return error;
}

/*
 * Is the log zeroed at all?
 *
 * The last binary search should be changed to perform an X block read
 * once X becomes small enough.  You can then search linearly through
 * the X blocks.  This will cut down on the number of reads we need to do.
 *
 * If the log is partially zeroed, this routine will pass back the blkno
 * of the first block with cycle number 0.  It won't have a complete LR
 * header.
 *
 * Return:
 *	0  => the log is completely written to
 *	1  => use *blk_no as the first block of the log
 *	<0 => error has occurred
 */
STATIC int
xlog_find_zeroed(
	struct xlog	*log,
	xfs_daddr_t	*blk_no)
{
	xfs_buf_t	*bp;
	char		*offset;
	uint		first_cycle, last_cycle;
	xfs_daddr_t	new_blk, last_blk, start_blk;
	xfs_daddr_t	num_scan_bblks;
	int		error, log_bbnum = log->l_logBBsize;

	*blk_no = 0;

	/* check totally zeroed log */
	bp = xlog_get_bp(log, 1);
	if (!bp)
		return -ENOMEM;
	error = xlog_bread(log, 0, 1, bp, &offset);
	if (error)
		goto bp_err;

	first_cycle = xlog_get_cycle(offset);
	if (first_cycle == 0) {		/* completely zeroed log */
		*blk_no = 0;
		xlog_put_bp(bp);
		return 1;
	}

	/* check partially zeroed log */
	error = xlog_bread(log, log_bbnum-1, 1, bp, &offset);
	if (error)
		goto bp_err;

	last_cycle = xlog_get_cycle(offset);
	if (last_cycle != 0) {		/* log completely written to */
		xlog_put_bp(bp);
		return 0;
	} else if (first_cycle != 1) {
		/*
		 * If the cycle of the last block is zero, the cycle of
		 * the first block must be 1. If it's not, maybe we're
		 * not looking at a log... Bail out.
		 */
		xfs_warn(log->l_mp,
			"Log inconsistent or not a log (last==0, first!=1)");
		error = -EINVAL;
		goto bp_err;
	}

	/* we have a partially zeroed log */
	last_blk = log_bbnum-1;
	if ((error = xlog_find_cycle_start(log, bp, 0, &last_blk, 0)))
		goto bp_err;

	/*
	 * Validate the answer.  Because there is no way to guarantee that
	 * the entire log is made up of log records which are the same size,
	 * we scan over the defined maximum blocks.  At this point, the maximum
	 * is not chosen to mean anything special.   XXXmiken
	 */
	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
	ASSERT(num_scan_bblks <= INT_MAX);

	if (last_blk < num_scan_bblks)
		num_scan_bblks = last_blk;
	start_blk = last_blk - num_scan_bblks;

	/*
	 * We search for any instances of cycle number 0 that occur before
	 * our current estimate of the head.  What we're trying to detect is
	 *        1 ... | 0 | 1 | 0...
	 *                       ^ binary search ends here
	 */
	if ((error = xlog_find_verify_cycle(log, start_blk,
					 (int)num_scan_bblks, 0, &new_blk)))
		goto bp_err;
	if (new_blk != -1)
		last_blk = new_blk;

	/*
	 * Potentially backup over partial log record write.  We don't need
	 * to search the end of the log because we know it is zero.
	 */
	error = xlog_find_verify_log_record(log, start_blk, &last_blk, 0);
	if (error == 1)
		error = -EIO;
	if (error)
		goto bp_err;

	*blk_no = last_blk;
bp_err:
	xlog_put_bp(bp);
	if (error)
		return error;
	return 1;
}

/*
 * These are simple subroutines used by xlog_clear_stale_blocks() below
 * to initialize a buffer full of empty log record headers and write
 * them into the log.
 */
STATIC void
xlog_add_record(
	struct xlog		*log,
	char			*buf,
	int			cycle,
	int			block,
	int			tail_cycle,
	int			tail_block)
{
	xlog_rec_header_t	*recp = (xlog_rec_header_t *)buf;

	memset(buf, 0, BBSIZE);
	recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
	recp->h_cycle = cpu_to_be32(cycle);
	recp->h_version = cpu_to_be32(
			xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
	recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
	recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
	recp->h_fmt = cpu_to_be32(XLOG_FMT);
	memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
}

STATIC int
xlog_write_log_records(
	struct xlog	*log,
	int		cycle,
	int		start_block,
	int		blocks,
	int		tail_cycle,
	int		tail_block)
{
	char		*offset;
	xfs_buf_t	*bp;
	int		balign, ealign;
	int		sectbb = log->l_sectBBsize;
	int		end_block = start_block + blocks;
	int		bufblks;
	int		error = 0;
	int		i, j = 0;

	/*
	 * Greedily allocate a buffer big enough to handle the full
	 * range of basic blocks to be written.  If that fails, try
	 * a smaller size.  We need to be able to write at least a
	 * log sector, or we're out of luck.
	 */
	bufblks = 1 << ffs(blocks);
	while (bufblks > log->l_logBBsize)
		bufblks >>= 1;
	while (!(bp = xlog_get_bp(log, bufblks))) {
		bufblks >>= 1;
		if (bufblks < sectbb)
			return -ENOMEM;
	}

	/* We may need to do a read at the start to fill in part of
	 * the buffer in the starting sector not covered by the first
	 * write below.
	 */
	balign = round_down(start_block, sectbb);
	if (balign != start_block) {
		error = xlog_bread_noalign(log, start_block, 1, bp);
		if (error)
			goto out_put_bp;

		j = start_block - balign;
	}

	for (i = start_block; i < end_block; i += bufblks) {
		int		bcount, endcount;

		bcount = min(bufblks, end_block - start_block);
		endcount = bcount - j;

		/* We may need to do a read at the end to fill in part of
		 * the buffer in the final sector not covered by the write.
		 * If this is the same sector as the above read, skip it.
		 */
		ealign = round_down(end_block, sectbb);
		if (j == 0 && (start_block + endcount > ealign)) {
			offset = bp->b_addr + BBTOB(ealign - start_block);
			error = xlog_bread_offset(log, ealign, sectbb,
						  bp, offset);
			if (error)
				break;

		}

		offset = xlog_align(log, start_block, endcount, bp);
		for (; j < endcount; j++) {
			xlog_add_record(log, offset, cycle, i+j,
					tail_cycle, tail_block);
			offset += BBSIZE;
		}
		error = xlog_bwrite(log, start_block, endcount, bp);
		if (error)
			break;
		start_block += endcount;
		j = 0;
	}

 out_put_bp:
	xlog_put_bp(bp);
	return error;
}

/*
 * This routine is called to blow away any incomplete log writes out
 * in front of the log head.  We do this so that we won't become confused
 * if we come up, write only a little bit more, and then crash again.
 * If we leave the partial log records out there, this situation could
 * cause us to think those partial writes are valid blocks since they
 * have the current cycle number.  We get rid of them by overwriting them
 * with empty log records with the old cycle number rather than the
 * current one.
 *
 * The tail lsn is passed in rather than taken from
 * the log so that we will not write over the unmount record after a
 * clean unmount in a 512 block log.  Doing so would leave the log without
 * any valid log records in it until a new one was written.  If we crashed
 * during that time we would not be able to recover.
 */
STATIC int
xlog_clear_stale_blocks(
	struct xlog	*log,
	xfs_lsn_t	tail_lsn)
{
	int		tail_cycle, head_cycle;
	int		tail_block, head_block;
	int		tail_distance, max_distance;
	int		distance;
	int		error;

	tail_cycle = CYCLE_LSN(tail_lsn);
	tail_block = BLOCK_LSN(tail_lsn);
	head_cycle = log->l_curr_cycle;
	head_block = log->l_curr_block;

	/*
	 * Figure out the distance between the new head of the log
	 * and the tail.  We want to write over any blocks beyond the
	 * head that we may have written just before the crash, but
	 * we don't want to overwrite the tail of the log.
	 */
	if (head_cycle == tail_cycle) {
		/*
		 * The tail is behind the head in the physical log,
		 * so the distance from the head to the tail is the
		 * distance from the head to the end of the log plus
		 * the distance from the beginning of the log to the
		 * tail.
		 */
		if (unlikely(head_block < tail_block || head_block >= log->l_logBBsize)) {
			XFS_ERROR_REPORT("xlog_clear_stale_blocks(1)",
					 XFS_ERRLEVEL_LOW, log->l_mp);
			return -EFSCORRUPTED;
		}
		tail_distance = tail_block + (log->l_logBBsize - head_block);
	} else {
		/*
		 * The head is behind the tail in the physical log,
		 * so the distance from the head to the tail is just
		 * the tail block minus the head block.
		 */
		if (unlikely(head_block >= tail_block || head_cycle != (tail_cycle + 1))){
			XFS_ERROR_REPORT("xlog_clear_stale_blocks(2)",
					 XFS_ERRLEVEL_LOW, log->l_mp);
			return -EFSCORRUPTED;
		}
		tail_distance = tail_block - head_block;
	}

	/*
	 * If the head is right up against the tail, we can't clear
	 * any blocks.
	 */
	if (tail_distance <= 0) {
		ASSERT(tail_distance == 0);
		return 0;
	}

	max_distance = XLOG_TOTAL_REC_SHIFT(log);
	/*
	 * Take the smaller of the maximum amount of outstanding I/O
	 * we could have and the distance to the tail to clear out.
	 * We take the smaller so that we don't overwrite the tail and
	 * we don't waste all day writing from the head to the tail
	 * for no reason.
	 */
	max_distance = MIN(max_distance, tail_distance);

	if ((head_block + max_distance) <= log->l_logBBsize) {
		/*
		 * We can stomp all the blocks we need to without
		 * wrapping around the end of the log.  Just do it
		 * in a single write.  Use the cycle number of the
		 * current cycle minus one so that the log will look like:
		 *     n ... | n - 1 ...
		 */
		error = xlog_write_log_records(log, (head_cycle - 1),
				head_block, max_distance, tail_cycle,
				tail_block);
		if (error)
			return error;
	} else {
		/*
		 * We need to wrap around the end of the physical log in
		 * order to clear all the blocks.  Do it in two separate
		 * I/Os.  The first write should be from the head to the
		 * end of the physical log, and it should use the current
		 * cycle number minus one just like above.
		 */
		distance = log->l_logBBsize - head_block;
		error = xlog_write_log_records(log, (head_cycle - 1),
				head_block, distance, tail_cycle,
				tail_block);

		if (error)
			return error;

		/*
		 * Now write the blocks at the start of the physical log.
		 * This writes the remainder of the blocks we want to clear.
		 * It uses the current cycle number since we're now on the
		 * same cycle as the head so that we get:
		 *      n ... n ... | n - 1 ...
		 *      ^^^^^ blocks we're writing
		 */
		distance = max_distance - (log->l_logBBsize - head_block);
		error = xlog_write_log_records(log, head_cycle, 0, distance,
				tail_cycle, tail_block);
		if (error)
			return error;
	}

	return 0;
}
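/*
 * The wrap handling above splits one logical range into two physical
 * writes.  A sketch of the split arithmetic (hypothetical names):
 *
 *	distance1 = logbbs - head_block;	// head to physical end
 *	distance2 = max_distance - distance1;	// start of log onwards
 *
 * With a 1000 block log, head_block == 900 and max_distance == 300, the
 * first write covers blocks 900..999 with cycle n - 1 and the second
 * covers blocks 0..199 with cycle n, matching the diagrams above.
 */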
/******************************************************************************
 *
 *		Log recover routines
 *
 ******************************************************************************
 */

/*
 * Sort the log items in the transaction.
 *
 * The ordering constraints are defined by the inode allocation and unlink
 * behaviour. The rules are:
 *
 *	1. Every item is only logged once in a given transaction. Hence it
 *	   represents the last logged state of the item. Hence ordering is
 *	   dependent on the order in which operations need to be performed so
 *	   required initial conditions are always met.
 *
 *	2. Cancelled buffers are recorded in pass 1 in a separate table and
 *	   there's nothing to replay from them so we can simply cull them
 *	   from the transaction. However, we can't do that until after we've
 *	   replayed all the other items because they may be dependent on the
 *	   cancelled buffer and replaying the cancelled buffer can remove it
 *	   from the cancelled buffer table. Hence they have to be done last.
 *
 *	3. Inode allocation buffers must be replayed before inode items that
 *	   read the buffer and replay changes into it. For filesystems using the
 *	   ICREATE transactions, this means XFS_LI_ICREATE objects need to get
 *	   treated the same as inode allocation buffers as they create and
 *	   initialise the buffers directly.
 *
 *	4. Inode unlink buffers must be replayed after inode items are replayed.
 *	   This ensures that inodes are completely flushed to the inode buffer
 *	   in a "free" state before we remove the unlinked inode list pointer.
 *
 * Hence the ordering needs to be inode allocation buffers first, inode items
 * second, inode unlink buffers third and cancelled buffers last.
 *
 * But there's a problem with that - we can't tell an inode allocation buffer
 * apart from a regular buffer, so we can't separate them. We can, however,
 * tell an inode unlink buffer from the others, and so we can separate them out
 * from all the other buffers and move them to last.
 *
 * Hence, 4 lists, in order from head to tail:
 *	- buffer_list for all buffers except cancelled/inode unlink buffers
 *	- item_list for all non-buffer items
 *	- inode_buffer_list for inode unlink buffers
 *	- cancel_list for the cancelled buffers
 *
 * Note that we add objects to the tail of the lists so that first-to-last
 * ordering is preserved within the lists. Adding objects to the head of the
 * list means when we traverse from the head we walk them in last-to-first
 * order. For cancelled buffers and inode unlink buffers this doesn't matter,
 * but for all other items there may be specific ordering that we need to
 * preserve.
 */
STATIC int
xlog_recover_reorder_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	int			pass)
{
	xlog_recover_item_t	*item, *n;
	int			error = 0;
	LIST_HEAD(sort_list);
	LIST_HEAD(cancel_list);
	LIST_HEAD(buffer_list);
	LIST_HEAD(inode_buffer_list);
	LIST_HEAD(inode_list);

	list_splice_init(&trans->r_itemq, &sort_list);
	list_for_each_entry_safe(item, n, &sort_list, ri_list) {
		xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;

		switch (ITEM_TYPE(item)) {
		case XFS_LI_ICREATE:
			list_move_tail(&item->ri_list, &buffer_list);
			break;
		case XFS_LI_BUF:
			if (buf_f->blf_flags & XFS_BLF_CANCEL) {
				trace_xfs_log_recover_item_reorder_head(log,
							trans, item, pass);
				list_move(&item->ri_list, &cancel_list);
				break;
			}
			if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
				list_move(&item->ri_list, &inode_buffer_list);
				break;
			}
			list_move_tail(&item->ri_list, &buffer_list);
			break;
		case XFS_LI_INODE:
		case XFS_LI_DQUOT:
		case XFS_LI_QUOTAOFF:
		case XFS_LI_EFD:
		case XFS_LI_EFI:
			trace_xfs_log_recover_item_reorder_tail(log,
							trans, item, pass);
			list_move_tail(&item->ri_list, &inode_list);
			break;
		default:
			xfs_warn(log->l_mp,
				"%s: unrecognized type of log operation",
				__func__);
			ASSERT(0);
			/*
			 * return the remaining items back to the transaction
			 * item list so they can be freed in caller.
			 */
			if (!list_empty(&sort_list))
				list_splice_init(&sort_list, &trans->r_itemq);
			error = -EIO;
			goto out;
		}
	}
out:
	ASSERT(list_empty(&sort_list));
	if (!list_empty(&buffer_list))
		list_splice(&buffer_list, &trans->r_itemq);
	if (!list_empty(&inode_list))
		list_splice_tail(&inode_list, &trans->r_itemq);
	if (!list_empty(&inode_buffer_list))
		list_splice_tail(&inode_buffer_list, &trans->r_itemq);
	if (!list_empty(&cancel_list))
		list_splice_tail(&cancel_list, &trans->r_itemq);
	return error;
}

/*
 * Build up the table of buf cancel records so that we don't replay
 * cancelled data in the second pass.  For buffer records that are
 * not cancel records, there is nothing to do here so we just return.
 *
 * If we get a cancel record which is already in the table, this indicates
 * that the buffer was cancelled multiple times.  In order to ensure
 * that during pass 2 we keep the record in the table until we reach its
 * last occurrence in the log, we keep a reference count in the cancel
 * record in the table to tell us how many times we expect to see this
 * record during the second pass.
 */
STATIC int
xlog_recover_buffer_pass1(
	struct xlog			*log,
	struct xlog_recover_item	*item)
{
	xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
	struct list_head	*bucket;
	struct xfs_buf_cancel	*bcp;

	/*
	 * If this isn't a cancel buffer item, then just return.
	 */
	if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) {
		trace_xfs_log_recover_buf_not_cancel(log, buf_f);
		return 0;
	}

	/*
	 * Insert an xfs_buf_cancel record into the hash table of them.
	 * If there is already an identical record, bump its reference count.
	 */
	bucket = XLOG_BUF_CANCEL_BUCKET(log, buf_f->blf_blkno);
	list_for_each_entry(bcp, bucket, bc_list) {
		if (bcp->bc_blkno == buf_f->blf_blkno &&
		    bcp->bc_len == buf_f->blf_len) {
			bcp->bc_refcount++;
			trace_xfs_log_recover_buf_cancel_ref_inc(log, buf_f);
			return 0;
		}
	}

	bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), KM_SLEEP);
	bcp->bc_blkno = buf_f->blf_blkno;
	bcp->bc_len = buf_f->blf_len;
	bcp->bc_refcount = 1;
	list_add_tail(&bcp->bc_list, bucket);

	trace_xfs_log_recover_buf_cancel_add(log, buf_f);
	return 0;
}
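/*
 * A minimal userspace sketch of the lookup-or-bump logic above
 * (illustrative only; a flat array stands in for the hashed list_head
 * table, and the table is assumed to have spare capacity):
 *
 *	struct buf_cancel {
 *		long long	blkno;
 *		unsigned	len;
 *		int		refcount;
 *	};
 *
 *	static void add_cancel(struct buf_cancel *tbl, int *n,
 *			       long long blkno, unsigned len)
 *	{
 *		int i;
 *
 *		for (i = 0; i < *n; i++) {
 *			if (tbl[i].blkno == blkno && tbl[i].len == len) {
 *				tbl[i].refcount++;	// seen again
 *				return;
 *			}
 *		}
 *		tbl[*n].blkno = blkno;			// first occurrence
 *		tbl[*n].len = len;
 *		tbl[*n].refcount = 1;
 *		(*n)++;
 *	}
 *
 * Pass 2 then decrements the refcount at each matching buffer and drops
 * the entry at zero, so replay resumes after the last cancellation.
 */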
1981 * Check to see whether the buffer being recovered has a corresponding
1982 * entry in the buffer cancel record table. If it is, return the cancel
1983 * buffer structure to the caller.
1985 STATIC struct xfs_buf_cancel *
1986 xlog_peek_buffer_cancelled(
1992 struct list_head *bucket;
1993 struct xfs_buf_cancel *bcp;
1995 if (!log->l_buf_cancel_table) {
1996 /* empty table means no cancelled buffers in the log */
1997 ASSERT(!(flags & XFS_BLF_CANCEL));
2001 bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
2002 list_for_each_entry(bcp, bucket, bc_list) {
2003 if (bcp->bc_blkno == blkno && bcp->bc_len == len)
2008 * We didn't find a corresponding entry in the table, so return 0 so
2009 * that the buffer is NOT cancelled.
2011 ASSERT(!(flags & XFS_BLF_CANCEL));
2016 * If the buffer is being cancelled then return 1 so that it will be cancelled,
2017 * otherwise return 0. If the buffer is actually a buffer cancel item
2018 * (XFS_BLF_CANCEL is set), then decrement the refcount on the entry in the
2019 * table and remove it from the table if this is the last reference.
2021 * We remove the cancel record from the table when we encounter its last
2022 * occurrence in the log so that if the same buffer is re-used again after its
2023 * last cancellation we actually replay the changes made at that point.
2026 xlog_check_buffer_cancelled(
2032 struct xfs_buf_cancel *bcp;
2034 bcp = xlog_peek_buffer_cancelled(log, blkno, len, flags);
2039 * We've go a match, so return 1 so that the recovery of this buffer
2040 * is cancelled. If this buffer is actually a buffer cancel log
2041 * item, then decrement the refcount on the one in the table and
2042 * remove it if this is the last reference.
2044 if (flags & XFS_BLF_CANCEL) {
2045 if (--bcp->bc_refcount == 0) {
2046 list_del(&bcp->bc_list);
2054 * Perform recovery for a buffer full of inodes. In these buffers, the only
2055 * data which should be recovered is that which corresponds to the
2056 * di_next_unlinked pointers in the on disk inode structures. The rest of the
2057 * data for the inodes is always logged through the inodes themselves rather
2058 * than the inode buffer and is recovered in xlog_recover_inode_pass2().
2060 * The only time when buffers full of inodes are fully recovered is when the
2061 * buffer is full of newly allocated inodes. In this case the buffer will
2062 * not be marked as an inode buffer and so will be sent to
2063 * xlog_recover_do_reg_buffer() below during recovery.
2066 xlog_recover_do_inode_buffer(
2067 struct xfs_mount *mp,
2068 xlog_recover_item_t *item,
2070 xfs_buf_log_format_t *buf_f)
2076 int reg_buf_offset = 0;
2077 int reg_buf_bytes = 0;
2078 int next_unlinked_offset;
2080 xfs_agino_t *logged_nextp;
2081 xfs_agino_t *buffer_nextp;
2083 trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);
2086 * Post recovery validation only works properly on CRC enabled
2089 if (xfs_sb_version_hascrc(&mp->m_sb))
2090 bp->b_ops = &xfs_inode_buf_ops;
2092 inodes_per_buf = BBTOB(bp->b_io_length) >> mp->m_sb.sb_inodelog;
2093 for (i = 0; i < inodes_per_buf; i++) {
2094 next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
2095 offsetof(xfs_dinode_t, di_next_unlinked);
2097 while (next_unlinked_offset >=
2098 (reg_buf_offset + reg_buf_bytes)) {
2100 * The next di_next_unlinked field is beyond
2101 * the current logged region. Find the next
2102 * logged region that contains or is beyond
2103 * the current di_next_unlinked field.
2106 bit = xfs_next_bit(buf_f->blf_data_map,
2107 buf_f->blf_map_size, bit);
2110 * If there are no more logged regions in the
2111 * buffer, then we're done.
2116 nbits = xfs_contig_bits(buf_f->blf_data_map,
2117 buf_f->blf_map_size, bit);
2119 reg_buf_offset = bit << XFS_BLF_SHIFT;
2120 reg_buf_bytes = nbits << XFS_BLF_SHIFT;
2125 * If the current logged region starts after the current
2126 * di_next_unlinked field, then move on to the next
2127 * di_next_unlinked field.
2129 if (next_unlinked_offset < reg_buf_offset)
2132 ASSERT(item->ri_buf[item_index].i_addr != NULL);
2133 ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0);
2134 ASSERT((reg_buf_offset + reg_buf_bytes) <=
2135 BBTOB(bp->b_io_length));
2138 * The current logged region contains a copy of the
2139 * current di_next_unlinked field. Extract its value
2140 * and copy it to the buffer copy.
2142 logged_nextp = item->ri_buf[item_index].i_addr +
2143 next_unlinked_offset - reg_buf_offset;
2144 if (unlikely(*logged_nextp == 0)) {
2146 "Bad inode buffer log record (ptr = 0x%p, bp = 0x%p). "
2147 "Trying to replay bad (0) inode di_next_unlinked field.",
2149 XFS_ERROR_REPORT("xlog_recover_do_inode_buf",
2150 XFS_ERRLEVEL_LOW, mp);
2151 return -EFSCORRUPTED;
2154 buffer_nextp = xfs_buf_offset(bp, next_unlinked_offset);
2155 *buffer_nextp = *logged_nextp;
2158 * If necessary, recalculate the CRC in the on-disk inode. We
2159 * have to leave the inode in a consistent state for whoever
2162 xfs_dinode_calc_crc(mp,
2163 xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize));
2171 * V5 filesystems know the age of the buffer on disk being recovered. We can
2172 * have newer objects on disk than we are replaying, and so for these cases we
2173 * don't want to replay the current change as that will make the buffer contents
2174 * temporarily invalid on disk.
2176 * The magic number might not match the buffer type we are going to recover
2177 * (e.g. reallocated blocks), so we ignore the xfs_buf_log_format flags. Hence
2178 * extract the LSN of the existing object in the buffer based on it's current
2179 * magic number. If we don't recognise the magic number in the buffer, then
2180 * return a LSN of -1 so that the caller knows it was an unrecognised block and
2181 * so can recover the buffer.
2183 * Note: we cannot rely solely on magic number matches to determine that the
2184 * buffer has a valid LSN - we also need to verify that it belongs to this
2185 * filesystem, so we need to extract the object's LSN and compare it to that
2186 * which we read from the superblock. If the UUIDs don't match, then we've got a
2187 * stale metadata block from an old filesystem instance that we need to recover
2191 xlog_recover_get_buf_lsn(
2192 struct xfs_mount *mp,
2198 void *blk = bp->b_addr;
2202 /* v4 filesystems always recover immediately */
2203 if (!xfs_sb_version_hascrc(&mp->m_sb))
2204 goto recover_immediately;
2206 magic32 = be32_to_cpu(*(__be32 *)blk);
2208 case XFS_ABTB_CRC_MAGIC:
2209 case XFS_ABTC_CRC_MAGIC:
2210 case XFS_ABTB_MAGIC:
2211 case XFS_ABTC_MAGIC:
2212 case XFS_IBT_CRC_MAGIC:
2213 case XFS_IBT_MAGIC: {
2214 struct xfs_btree_block *btb = blk;
2216 lsn = be64_to_cpu(btb->bb_u.s.bb_lsn);
2217 uuid = &btb->bb_u.s.bb_uuid;
2220 case XFS_BMAP_CRC_MAGIC:
2221 case XFS_BMAP_MAGIC: {
2222 struct xfs_btree_block *btb = blk;
2224 lsn = be64_to_cpu(btb->bb_u.l.bb_lsn);
2225 uuid = &btb->bb_u.l.bb_uuid;
2229 lsn = be64_to_cpu(((struct xfs_agf *)blk)->agf_lsn);
2230 uuid = &((struct xfs_agf *)blk)->agf_uuid;
2232 case XFS_AGFL_MAGIC:
2233 lsn = be64_to_cpu(((struct xfs_agfl *)blk)->agfl_lsn);
2234 uuid = &((struct xfs_agfl *)blk)->agfl_uuid;
2237 lsn = be64_to_cpu(((struct xfs_agi *)blk)->agi_lsn);
2238 uuid = &((struct xfs_agi *)blk)->agi_uuid;
2240 case XFS_SYMLINK_MAGIC:
2241 lsn = be64_to_cpu(((struct xfs_dsymlink_hdr *)blk)->sl_lsn);
2242 uuid = &((struct xfs_dsymlink_hdr *)blk)->sl_uuid;
2244 case XFS_DIR3_BLOCK_MAGIC:
2245 case XFS_DIR3_DATA_MAGIC:
2246 case XFS_DIR3_FREE_MAGIC:
2247 lsn = be64_to_cpu(((struct xfs_dir3_blk_hdr *)blk)->lsn);
2248 uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid;
2250 case XFS_ATTR3_RMT_MAGIC:
2252 * Remote attr blocks are written synchronously, rather than
2253 * being logged. That means they do not contain a valid LSN
2254 * (i.e. transactionally ordered) in them, and hence any time we
2255 * see a buffer to replay over the top of a remote attribute
2256 * block we should simply do so.
2258 goto recover_immediately;
2261 * Superblock uuids are magic. We may or may not have a
2262 * sb_meta_uuid on disk, but it will be set in the in-core
2263 * superblock. We set the uuid pointer for verification
2264 * according to the superblock feature mask to ensure we check
2265 * the relevant UUID in the superblock.
2267 lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn);
2268 if (xfs_sb_version_hasmetauuid(&mp->m_sb))
2269 uuid = &((struct xfs_dsb *)blk)->sb_meta_uuid;
2271 uuid = &((struct xfs_dsb *)blk)->sb_uuid;
2277 if (lsn != (xfs_lsn_t)-1) {
2278 if (!uuid_equal(&mp->m_sb.sb_meta_uuid, uuid))
2279 goto recover_immediately;
2283 magicda = be16_to_cpu(((struct xfs_da_blkinfo *)blk)->magic);
2285 case XFS_DIR3_LEAF1_MAGIC:
2286 case XFS_DIR3_LEAFN_MAGIC:
2287 case XFS_DA3_NODE_MAGIC:
2288 lsn = be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn);
2289 uuid = &((struct xfs_da3_blkinfo *)blk)->uuid;
2295 if (lsn != (xfs_lsn_t)-1) {
2296 if (!uuid_equal(&mp->m_sb.sb_uuid, uuid))
2297 goto recover_immediately;
2302 * We do individual object checks on dquot and inode buffers as they
2303 * have their own individual LSN records. Also, we could have a stale
2304 * buffer here, so we have to at least recognise these buffer types.
2306 * A noted complexity here is inode unlinked list processing - it logs
2307 * the inode directly in the buffer, but we don't know which inodes have
2308 * been modified, and there is no global buffer LSN. Hence we need to
2309 * recover all inode buffer types immediately. This problem will be
2310 * fixed by logical logging of the unlinked list modifications.
2312 magic16 = be16_to_cpu(*(__be16 *)blk);
2314 case XFS_DQUOT_MAGIC:
2315 case XFS_DINODE_MAGIC:
2316 goto recover_immediately;
2321 /* unknown buffer contents, recover immediately */
2323 recover_immediately:
2324 return (xfs_lsn_t)-1;
2329 * Validate the recovered buffer is of the correct type and attach the
2330 * appropriate buffer operations to it for writeback. Magic numbers are in a few places:
2332 * the first 16 bits of the buffer (inode buffer, dquot buffer),
2333 * the first 32 bits of the buffer (most blocks),
2334 * inside a struct xfs_da_blkinfo at the start of the buffer.
2337 xlog_recover_validate_buf_type(
2338 struct xfs_mount *mp,
2340 xfs_buf_log_format_t *buf_f)
2342 struct xfs_da_blkinfo *info = bp->b_addr;
2348 * We can only do post recovery validation on items on CRC enabled
2349 * filesystems as we need to know when the buffer was written to be able
2350 * to determine if we should have replayed the item. If we replay old
2351 * metadata over a newer buffer, then it will enter a temporarily
2352 * inconsistent state resulting in verification failures. Hence for now
2353 * just avoid the verification stage for non-crc filesystems.
2355 if (!xfs_sb_version_hascrc(&mp->m_sb))
2358 magic32 = be32_to_cpu(*(__be32 *)bp->b_addr);
2359 magic16 = be16_to_cpu(*(__be16*)bp->b_addr);
2360 magicda = be16_to_cpu(info->magic);
2361 switch (xfs_blft_from_flags(buf_f)) {
2362 case XFS_BLFT_BTREE_BUF:
2364 case XFS_ABTB_CRC_MAGIC:
2365 case XFS_ABTC_CRC_MAGIC:
2366 case XFS_ABTB_MAGIC:
2367 case XFS_ABTC_MAGIC:
2368 bp->b_ops = &xfs_allocbt_buf_ops;
2370 case XFS_IBT_CRC_MAGIC:
2371 case XFS_FIBT_CRC_MAGIC:
2373 case XFS_FIBT_MAGIC:
2374 bp->b_ops = &xfs_inobt_buf_ops;
2376 case XFS_BMAP_CRC_MAGIC:
2377 case XFS_BMAP_MAGIC:
2378 bp->b_ops = &xfs_bmbt_buf_ops;
2381 xfs_warn(mp, "Bad btree block magic!");
2386 case XFS_BLFT_AGF_BUF:
2387 if (magic32 != XFS_AGF_MAGIC) {
2388 xfs_warn(mp, "Bad AGF block magic!");
2392 bp->b_ops = &xfs_agf_buf_ops;
2394 case XFS_BLFT_AGFL_BUF:
2395 if (magic32 != XFS_AGFL_MAGIC) {
2396 xfs_warn(mp, "Bad AGFL block magic!");
2400 bp->b_ops = &xfs_agfl_buf_ops;
2402 case XFS_BLFT_AGI_BUF:
2403 if (magic32 != XFS_AGI_MAGIC) {
2404 xfs_warn(mp, "Bad AGI block magic!");
2408 bp->b_ops = &xfs_agi_buf_ops;
2410 case XFS_BLFT_UDQUOT_BUF:
2411 case XFS_BLFT_PDQUOT_BUF:
2412 case XFS_BLFT_GDQUOT_BUF:
2413 #ifdef CONFIG_XFS_QUOTA
2414 if (magic16 != XFS_DQUOT_MAGIC) {
2415 xfs_warn(mp, "Bad DQUOT block magic!");
2419 bp->b_ops = &xfs_dquot_buf_ops;
2422 "Trying to recover dquots without QUOTA support built in!");
2426 case XFS_BLFT_DINO_BUF:
2427 if (magic16 != XFS_DINODE_MAGIC) {
2428 xfs_warn(mp, "Bad INODE block magic!");
2432 bp->b_ops = &xfs_inode_buf_ops;
2434 case XFS_BLFT_SYMLINK_BUF:
2435 if (magic32 != XFS_SYMLINK_MAGIC) {
2436 xfs_warn(mp, "Bad symlink block magic!");
2440 bp->b_ops = &xfs_symlink_buf_ops;
2442 case XFS_BLFT_DIR_BLOCK_BUF:
2443 if (magic32 != XFS_DIR2_BLOCK_MAGIC &&
2444 magic32 != XFS_DIR3_BLOCK_MAGIC) {
2445 xfs_warn(mp, "Bad dir block magic!");
2449 bp->b_ops = &xfs_dir3_block_buf_ops;
2451 case XFS_BLFT_DIR_DATA_BUF:
2452 if (magic32 != XFS_DIR2_DATA_MAGIC &&
2453 magic32 != XFS_DIR3_DATA_MAGIC) {
2454 xfs_warn(mp, "Bad dir data magic!");
2458 bp->b_ops = &xfs_dir3_data_buf_ops;
2460 case XFS_BLFT_DIR_FREE_BUF:
2461 if (magic32 != XFS_DIR2_FREE_MAGIC &&
2462 magic32 != XFS_DIR3_FREE_MAGIC) {
2463 xfs_warn(mp, "Bad dir3 free magic!");
2467 bp->b_ops = &xfs_dir3_free_buf_ops;
2469 case XFS_BLFT_DIR_LEAF1_BUF:
2470 if (magicda != XFS_DIR2_LEAF1_MAGIC &&
2471 magicda != XFS_DIR3_LEAF1_MAGIC) {
2472 xfs_warn(mp, "Bad dir leaf1 magic!");
2476 bp->b_ops = &xfs_dir3_leaf1_buf_ops;
2478 case XFS_BLFT_DIR_LEAFN_BUF:
2479 if (magicda != XFS_DIR2_LEAFN_MAGIC &&
2480 magicda != XFS_DIR3_LEAFN_MAGIC) {
2481 xfs_warn(mp, "Bad dir leafn magic!");
2485 bp->b_ops = &xfs_dir3_leafn_buf_ops;
2487 case XFS_BLFT_DA_NODE_BUF:
2488 if (magicda != XFS_DA_NODE_MAGIC &&
2489 magicda != XFS_DA3_NODE_MAGIC) {
2490 xfs_warn(mp, "Bad da node magic!");
2494 bp->b_ops = &xfs_da3_node_buf_ops;
2496 case XFS_BLFT_ATTR_LEAF_BUF:
2497 if (magicda != XFS_ATTR_LEAF_MAGIC &&
2498 magicda != XFS_ATTR3_LEAF_MAGIC) {
2499 xfs_warn(mp, "Bad attr leaf magic!");
2503 bp->b_ops = &xfs_attr3_leaf_buf_ops;
2505 case XFS_BLFT_ATTR_RMT_BUF:
2506 if (magic32 != XFS_ATTR3_RMT_MAGIC) {
2507 xfs_warn(mp, "Bad attr remote magic!");
2511 bp->b_ops = &xfs_attr3_rmt_buf_ops;
2513 case XFS_BLFT_SB_BUF:
2514 if (magic32 != XFS_SB_MAGIC) {
2515 xfs_warn(mp, "Bad SB block magic!");
2519 bp->b_ops = &xfs_sb_buf_ops;
2522 xfs_warn(mp, "Unknown buffer type %d!",
2523 xfs_blft_from_flags(buf_f));
2529 * Perform a 'normal' buffer recovery. Each logged region of the
2530 * buffer should be copied over the corresponding region in the
2531 * given buffer. The bitmap in the buf log format structure indicates
2532 * where to place the logged data.
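/*
 * Shape of the replay loop below (illustrative sketch): walk the dirty
 * bitmap and copy each contiguous run of XFS_BLF_CHUNK sized regions from
 * the log item into the buffer:
 *
 *	while ((bit = xfs_next_bit(map, map_size, bit)) != -1) {
 *		nbits = xfs_contig_bits(map, map_size, bit);
 *		memcpy(xfs_buf_offset(bp, bit << XFS_BLF_SHIFT),
 *		       item->ri_buf[i].i_addr, nbits << XFS_BLF_SHIFT);
 *		bit += nbits;
 *		i++;
 *	}
 */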
2535 xlog_recover_do_reg_buffer(
2536 struct xfs_mount *mp,
2537 xlog_recover_item_t *item,
2539 xfs_buf_log_format_t *buf_f)
2546 trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);
2549 i = 1; /* 0 is the buf format structure */
2551 bit = xfs_next_bit(buf_f->blf_data_map,
2552 buf_f->blf_map_size, bit);
2555 nbits = xfs_contig_bits(buf_f->blf_data_map,
2556 buf_f->blf_map_size, bit);
2558 ASSERT(item->ri_buf[i].i_addr != NULL);
2559 ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0);
2560 ASSERT(BBTOB(bp->b_io_length) >=
2561 ((uint)bit << XFS_BLF_SHIFT) + (nbits << XFS_BLF_SHIFT));
2564 * The dirty regions logged in the buffer, even though
2565 * contiguous, may span multiple chunks. This is because the
2566 * dirty region may span a physical page boundary in a buffer
2567 * and hence be split into two separate vectors for writing into
2568 * the log. Hence we need to trim nbits back to the length of
2569 * the current region being copied out of the log.
2571 if (item->ri_buf[i].i_len < (nbits << XFS_BLF_SHIFT))
2572 nbits = item->ri_buf[i].i_len >> XFS_BLF_SHIFT;
2575 * Do a sanity check if this is a dquot buffer. Just checking
2576 * the first dquot in the buffer should do. XXX: This is
2577 * probably a good thing to do for other buf types also.
2580 if (buf_f->blf_flags &
2581 (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
2582 if (item->ri_buf[i].i_addr == NULL) {
2584 "XFS: NULL dquot in %s.", __func__);
2587 if (item->ri_buf[i].i_len < sizeof(xfs_disk_dquot_t)) {
2589 "XFS: dquot too small (%d) in %s.",
2590 item->ri_buf[i].i_len, __func__);
2593 error = xfs_dqcheck(mp, item->ri_buf[i].i_addr,
2594 -1, 0, XFS_QMOPT_DOWARN,
2595 "dquot_buf_recover");
2600 memcpy(xfs_buf_offset(bp,
2601 (uint)bit << XFS_BLF_SHIFT), /* dest */
2602 item->ri_buf[i].i_addr, /* source */
2603 nbits<<XFS_BLF_SHIFT); /* length */
2609 /* Shouldn't be any more regions */
2610 ASSERT(i == item->ri_total);
2612 xlog_recover_validate_buf_type(mp, bp, buf_f);
2616 * Perform a dquot buffer recovery.
2617 * Simple algorithm: if we have found a QUOTAOFF log item of the same type
2618 * (i.e. USR or GRP), then just toss this buffer away; don't recover it.
2619 * Else, treat it as a regular buffer and do recovery.
2621 * Return false if the buffer was tossed and true if we recovered the buffer to
2622 * indicate to the caller if the buffer needs writing.
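/*
 * Caller-side sketch (illustrative, see xlog_recover_buffer_pass2 below):
 *
 *	dirty = xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
 *	if (!dirty)
 *		goto out_release;	-- tossed, nothing to write back
 */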
2625 xlog_recover_do_dquot_buffer(
2626 struct xfs_mount *mp,
2628 struct xlog_recover_item *item,
2630 struct xfs_buf_log_format *buf_f)
2634 trace_xfs_log_recover_buf_dquot_buf(log, buf_f);
2637 * Filesystems are required to send in quota flags at mount time.
2643 if (buf_f->blf_flags & XFS_BLF_UDQUOT_BUF)
2644 type |= XFS_DQ_USER;
2645 if (buf_f->blf_flags & XFS_BLF_PDQUOT_BUF)
2646 type |= XFS_DQ_PROJ;
2647 if (buf_f->blf_flags & XFS_BLF_GDQUOT_BUF)
2648 type |= XFS_DQ_GROUP;
2650 * This type of quota was turned off, so ignore this buffer
2652 if (log->l_quotaoffs_flag & type)
2655 xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
2660 * This routine replays a modification made to a buffer at runtime.
2661 * There are actually two types of buffer, regular and inode, which
2662 * are handled differently. Inode buffers differ
2663 * in that we only recover a specific set of data from them, namely
2664 * the inode di_next_unlinked fields. This is because all other inode
2665 * data is actually logged via inode records and any data we replay
2666 * here which overlaps that may be stale.
2668 * When meta-data buffers are freed at run time we log a buffer item
2669 * with the XFS_BLF_CANCEL bit set to indicate that previous copies
2670 * of the buffer in the log should not be replayed at recovery time.
2671 * This is so that if the blocks covered by the buffer are reused for
2672 * file data before we crash we don't end up replaying old, freed
2673 * meta-data into a user's file.
2675 * To handle the cancellation of buffer log items, we make two passes
2676 * over the log during recovery. During the first we build a table of
2677 * those buffers which have been cancelled, and during the second we
2678 * only replay those buffers which do not have corresponding cancel
2679 * records in the table. See xlog_recover_buffer_pass[1,2] above
2680 * for more details on the implementation of the table of cancel records.
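/*
 * The two-pass scheme in brief (illustrative summary of the above):
 *
 *	pass 1: an XFS_BLF_CANCEL buf item adds (blf_blkno, blf_len) to the
 *		table of struct xfs_buf_cancel entries
 *	pass 2: xlog_check_buffer_cancelled() returning true for a buffer
 *		means its replay is skipped here
 */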
2683 xlog_recover_buffer_pass2(
2685 struct list_head *buffer_list,
2686 struct xlog_recover_item *item,
2687 xfs_lsn_t current_lsn)
2689 xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr;
2690 xfs_mount_t *mp = log->l_mp;
2697 * In this pass we only want to recover all the buffers which have
2698 * not been cancelled and are not cancellation buffers themselves.
2700 if (xlog_check_buffer_cancelled(log, buf_f->blf_blkno,
2701 buf_f->blf_len, buf_f->blf_flags)) {
2702 trace_xfs_log_recover_buf_cancel(log, buf_f);
2706 trace_xfs_log_recover_buf_recover(log, buf_f);
2709 if (buf_f->blf_flags & XFS_BLF_INODE_BUF)
2710 buf_flags |= XBF_UNMAPPED;
2712 bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len,
2716 error = bp->b_error;
2718 xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#1)");
2723 * Recover the buffer only if we get an LSN from it and it's less than
2724 * the lsn of the transaction we are replaying.
2726 * Note that we have to be extremely careful of readahead here.
2727 * Readahead does not attach verifiers to the buffers, so if we don't
2728 * actually do any replay after readahead because the LSN we found
2729 * in the buffer is more recent than the current transaction, then we
2730 * need to attach the verifier directly. Failing to do so means
2731 * future recovery actions (e.g. EFI and unlinked list recovery) can
2732 * operate on the buffers without the verifier attached, which
2733 * can leave blocks on disk with the correct content but a stale CRC.
2736 * It is safe to assume these clean buffers are currently up to date.
2737 * If the buffer is dirtied by a later transaction being replayed, then
2738 * the verifier will be reset to match whatever recovery turns that buffer into.
2741 lsn = xlog_recover_get_buf_lsn(mp, bp);
2742 if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
2743 xlog_recover_validate_buf_type(mp, bp, buf_f);
2747 if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
2748 error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
2751 } else if (buf_f->blf_flags &
2752 (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
2755 dirty = xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
2759 xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
2763 * Perform delayed write on the buffer. Asynchronous writes will be
2764 * slower when taking into account all the buffers to be flushed.
2766 * Also make sure that only inode buffers with good sizes stay in
2767 * the buffer cache. The kernel moves inodes in buffers of 1 block
2768 * or mp->m_inode_cluster_size bytes, whichever is bigger. The inode
2769 * buffers in the log can be a different size if the log was generated
2770 * by an older kernel using unclustered inode buffers or a newer kernel
2771 * running with a different inode cluster size. Regardless, if
2772 * the inode buffer size isn't MAX(blocksize, mp->m_inode_cluster_size)
2773 * for *our* value of mp->m_inode_cluster_size, then we need to keep
2774 * the buffer out of the buffer cache so that the buffer won't
2775 * overlap with future reads of those inodes.
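/*
 * e.g. (illustrative): with a 4k block size and an 8k inode cluster,
 * only 8k inode buffers stay in the cache; a 4k inode buffer logged by
 * an older kernel is written out synchronously via xfs_bwrite() instead
 * of being queued for delayed write.
 */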
2777 if (XFS_DINODE_MAGIC ==
2778 be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) &&
2779 (BBTOB(bp->b_io_length) != MAX(log->l_mp->m_sb.sb_blocksize,
2780 (__uint32_t)log->l_mp->m_inode_cluster_size))) {
2782 error = xfs_bwrite(bp);
2784 ASSERT(bp->b_target->bt_mount == mp);
2785 bp->b_iodone = xlog_recover_iodone;
2786 xfs_buf_delwri_queue(bp, buffer_list);
2795 * Inode fork owner changes
2797 * If we have been told that we have to reparent the inode fork, it's because an
2798 * extent swap operation on a CRC enabled filesystem has been done and we are
2799 * replaying it. We need to walk the BMBT of the appropriate fork and change the owner of each block in it to that of the inode we are recovering.
2802 * The complexity here is that we don't have an inode context to work with, so
2803 * after we've replayed the inode we need to instantiate one. This is where the fun begins.
2806 * We are in the middle of log recovery, so we can't run transactions. That
2807 * means we cannot use cache coherent inode instantiation via xfs_iget(), as
2808 * that will result in the corresponding iput() running the inode through
2809 * xfs_inactive(). If we've just replayed an inode core that changes the link
2810 * count to zero (i.e. it's been unlinked), then xfs_inactive() will run
2811 * transactions (bad!).
2813 * So, to avoid this, we instantiate an inode directly from the inode core we've
2814 * just recovered. We have the buffer still locked, and all we really need to
2815 * instantiate is the inode core and the forks being modified. We can do this
2816 * manually, then run the inode btree owner change, and then tear down the
2817 * xfs_inode without having to run any transactions at all.
2819 * Also, we don't have a transaction context available here, but we still need
2820 * to gather all the buffers we modify for writeback, so we pass the buffer_list
2821 * to the operation to use instead.
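/*
 * Minimal sketch of that sequence (illustrative, mirroring the function
 * below):
 *
 *	ip = xfs_inode_alloc(mp, in_f->ilf_ino);	// no xfs_iget()
 *	xfs_dinode_from_disk(&ip->i_d, dip);		// core from the buffer
 *	xfs_iformat_fork(ip, dip);			// forks being modified
 *	xfs_bmbt_change_owner(NULL, ip, whichfork, ip->i_ino, buffer_list);
 *	... tear down ip without running any transactions ...
 */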
2825 xfs_recover_inode_owner_change(
2826 struct xfs_mount *mp,
2827 struct xfs_dinode *dip,
2828 struct xfs_inode_log_format *in_f,
2829 struct list_head *buffer_list)
2831 struct xfs_inode *ip;
2834 ASSERT(in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER));
2836 ip = xfs_inode_alloc(mp, in_f->ilf_ino);
2840 /* instantiate the inode */
2841 xfs_dinode_from_disk(&ip->i_d, dip);
2842 ASSERT(ip->i_d.di_version >= 3);
2844 error = xfs_iformat_fork(ip, dip);
2849 if (in_f->ilf_fields & XFS_ILOG_DOWNER) {
2850 ASSERT(in_f->ilf_fields & XFS_ILOG_DBROOT);
2851 error = xfs_bmbt_change_owner(NULL, ip, XFS_DATA_FORK,
2852 ip->i_ino, buffer_list);
2857 if (in_f->ilf_fields & XFS_ILOG_AOWNER) {
2858 ASSERT(in_f->ilf_fields & XFS_ILOG_ABROOT);
2859 error = xfs_bmbt_change_owner(NULL, ip, XFS_ATTR_FORK,
2860 ip->i_ino, buffer_list);
2871 xlog_recover_inode_pass2(
2873 struct list_head *buffer_list,
2874 struct xlog_recover_item *item,
2875 xfs_lsn_t current_lsn)
2877 xfs_inode_log_format_t *in_f;
2878 xfs_mount_t *mp = log->l_mp;
2887 xfs_icdinode_t *dicp;
2891 if (item->ri_buf[0].i_len == sizeof(xfs_inode_log_format_t)) {
2892 in_f = item->ri_buf[0].i_addr;
2894 in_f = kmem_alloc(sizeof(xfs_inode_log_format_t), KM_SLEEP);
2896 error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f);
2902 * Inode buffers can be freed; look out for that case
2903 * and do not replay the inode.
2905 if (xlog_check_buffer_cancelled(log, in_f->ilf_blkno,
2906 in_f->ilf_len, 0)) {
2908 trace_xfs_log_recover_inode_cancel(log, in_f);
2911 trace_xfs_log_recover_inode_recover(log, in_f);
2913 bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len, 0,
2914 &xfs_inode_buf_ops);
2919 error = bp->b_error;
2921 xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#2)");
2924 ASSERT(in_f->ilf_fields & XFS_ILOG_CORE);
2925 dip = xfs_buf_offset(bp, in_f->ilf_boffset);
2928 * Make sure the place we're flushing out to really looks like an inode!
2931 if (unlikely(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))) {
2933 "%s: Bad inode magic number, dip = 0x%p, dino bp = 0x%p, ino = %Ld",
2934 __func__, dip, bp, in_f->ilf_ino);
2935 XFS_ERROR_REPORT("xlog_recover_inode_pass2(1)",
2936 XFS_ERRLEVEL_LOW, mp);
2937 error = -EFSCORRUPTED;
2940 dicp = item->ri_buf[1].i_addr;
2941 if (unlikely(dicp->di_magic != XFS_DINODE_MAGIC)) {
2943 "%s: Bad inode log record, rec ptr 0x%p, ino %Ld",
2944 __func__, item, in_f->ilf_ino);
2945 XFS_ERROR_REPORT("xlog_recover_inode_pass2(2)",
2946 XFS_ERRLEVEL_LOW, mp);
2947 error = -EFSCORRUPTED;
2952 * If the inode has an LSN in it, recover the inode only if it's less
2953 * than the lsn of the transaction we are replaying. Note: we still
2954 * need to replay an owner change even though the inode is more recent
2955 * than the transaction as there is no guarantee that all the btree
2956 * blocks are also more recent than this transaction.
2958 if (dip->di_version >= 3) {
2959 xfs_lsn_t lsn = be64_to_cpu(dip->di_lsn);
2961 if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
2962 trace_xfs_log_recover_inode_skip(log, in_f);
2964 goto out_owner_change;
2969 * di_flushiter is only valid for v1/2 inodes. All changes for v3 inodes
2970 * are transactional and if ordering is necessary we can determine that
2971 * more accurately by the LSN field in the V3 inode core. Don't trust
2972 * the inode versions as we might be changing them here - use the
2973 * superblock flag to determine whether we need to look at di_flushiter
2974 * to skip replay when the on disk inode is newer than the log one.
2976 if (!xfs_sb_version_hascrc(&mp->m_sb) &&
2977 dicp->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
2979 * Deal with the wrap case: a flush iteration count of DI_MAX_FLUSH
2980 * on disk is logically older than the small values it wraps to
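/*
 * e.g. (illustrative): an on-disk di_flushiter of DI_MAX_FLUSH (0xffff)
 * with a logged value of 5 means the counter has wrapped, so the logged
 * copy is really newer and must still be replayed.
 */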
2982 if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH &&
2983 dicp->di_flushiter < (DI_MAX_FLUSH >> 1)) {
2986 trace_xfs_log_recover_inode_skip(log, in_f);
2992 /* Take the opportunity to reset the flush iteration count */
2993 dicp->di_flushiter = 0;
2995 if (unlikely(S_ISREG(dicp->di_mode))) {
2996 if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
2997 (dicp->di_format != XFS_DINODE_FMT_BTREE)) {
2998 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)",
2999 XFS_ERRLEVEL_LOW, mp, dicp);
3001 "%s: Bad regular inode log record, rec ptr 0x%p, "
3002 "ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
3003 __func__, item, dip, bp, in_f->ilf_ino);
3004 error = -EFSCORRUPTED;
3007 } else if (unlikely(S_ISDIR(dicp->di_mode))) {
3008 if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
3009 (dicp->di_format != XFS_DINODE_FMT_BTREE) &&
3010 (dicp->di_format != XFS_DINODE_FMT_LOCAL)) {
3011 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(4)",
3012 XFS_ERRLEVEL_LOW, mp, dicp);
3014 "%s: Bad dir inode log record, rec ptr 0x%p, "
3015 "ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
3016 __func__, item, dip, bp, in_f->ilf_ino);
3017 error = -EFSCORRUPTED;
3021 if (unlikely(dicp->di_nextents + dicp->di_anextents > dicp->di_nblocks)) {
3022 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(5)",
3023 XFS_ERRLEVEL_LOW, mp, dicp);
3025 "%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
3026 "dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld",
3027 __func__, item, dip, bp, in_f->ilf_ino,
3028 dicp->di_nextents + dicp->di_anextents,
3030 error = -EFSCORRUPTED;
3033 if (unlikely(dicp->di_forkoff > mp->m_sb.sb_inodesize)) {
3034 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(6)",
3035 XFS_ERRLEVEL_LOW, mp, dicp);
3037 "%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
3038 "dino bp 0x%p, ino %Ld, forkoff 0x%x", __func__,
3039 item, dip, bp, in_f->ilf_ino, dicp->di_forkoff);
3040 error = -EFSCORRUPTED;
3043 isize = xfs_icdinode_size(dicp->di_version);
3044 if (unlikely(item->ri_buf[1].i_len > isize)) {
3045 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)",
3046 XFS_ERRLEVEL_LOW, mp, dicp);
3048 "%s: Bad inode log record length %d, rec ptr 0x%p",
3049 __func__, item->ri_buf[1].i_len, item);
3050 error = -EFSCORRUPTED;
3054 /* The core is in in-core format */
3055 xfs_dinode_to_disk(dip, dicp);
3057 /* the rest is in on-disk format */
3058 if (item->ri_buf[1].i_len > isize) {
3059 memcpy((char *)dip + isize,
3060 item->ri_buf[1].i_addr + isize,
3061 item->ri_buf[1].i_len - isize);
3064 fields = in_f->ilf_fields;
3065 switch (fields & (XFS_ILOG_DEV | XFS_ILOG_UUID)) {
3067 xfs_dinode_put_rdev(dip, in_f->ilf_u.ilfu_rdev);
3070 memcpy(XFS_DFORK_DPTR(dip),
3071 &in_f->ilf_u.ilfu_uuid,
3076 if (in_f->ilf_size == 2)
3077 goto out_owner_change;
3078 len = item->ri_buf[2].i_len;
3079 src = item->ri_buf[2].i_addr;
3080 ASSERT(in_f->ilf_size <= 4);
3081 ASSERT((in_f->ilf_size == 3) || (fields & XFS_ILOG_AFORK));
3082 ASSERT(!(fields & XFS_ILOG_DFORK) ||
3083 (len == in_f->ilf_dsize));
3085 switch (fields & XFS_ILOG_DFORK) {
3086 case XFS_ILOG_DDATA:
3088 memcpy(XFS_DFORK_DPTR(dip), src, len);
3091 case XFS_ILOG_DBROOT:
3092 xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, len,
3093 (xfs_bmdr_block_t *)XFS_DFORK_DPTR(dip),
3094 XFS_DFORK_DSIZE(dip, mp));
3099 * There are no data fork flags set.
3101 ASSERT((fields & XFS_ILOG_DFORK) == 0);
3106 * If we logged any attribute data, recover it. There may or
3107 * may not have been any other non-core data logged in this transaction.
3110 if (in_f->ilf_fields & XFS_ILOG_AFORK) {
3111 if (in_f->ilf_fields & XFS_ILOG_DFORK) {
3116 len = item->ri_buf[attr_index].i_len;
3117 src = item->ri_buf[attr_index].i_addr;
3118 ASSERT(len == in_f->ilf_asize);
3120 switch (in_f->ilf_fields & XFS_ILOG_AFORK) {
3121 case XFS_ILOG_ADATA:
3123 dest = XFS_DFORK_APTR(dip);
3124 ASSERT(len <= XFS_DFORK_ASIZE(dip, mp));
3125 memcpy(dest, src, len);
3128 case XFS_ILOG_ABROOT:
3129 dest = XFS_DFORK_APTR(dip);
3130 xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src,
3131 len, (xfs_bmdr_block_t*)dest,
3132 XFS_DFORK_ASIZE(dip, mp));
3136 xfs_warn(log->l_mp, "%s: Invalid flag", __func__);
3144 if (in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER))
3145 error = xfs_recover_inode_owner_change(mp, dip, in_f,
3147 /* re-generate the checksum. */
3148 xfs_dinode_calc_crc(log->l_mp, dip);
3150 ASSERT(bp->b_target->bt_mount == mp);
3151 bp->b_iodone = xlog_recover_iodone;
3152 xfs_buf_delwri_queue(bp, buffer_list);
3163 * Recover QUOTAOFF records. We simply make a note of it in the xlog
3164 * structure, so that we know not to do any dquot item or dquot buffer recovery of that type.
3168 xlog_recover_quotaoff_pass1(
3170 struct xlog_recover_item *item)
3172 xfs_qoff_logformat_t *qoff_f = item->ri_buf[0].i_addr;
3176 * The logitem format's flag tells us if this was user quotaoff,
3177 * group/project quotaoff or both.
3179 if (qoff_f->qf_flags & XFS_UQUOTA_ACCT)
3180 log->l_quotaoffs_flag |= XFS_DQ_USER;
3181 if (qoff_f->qf_flags & XFS_PQUOTA_ACCT)
3182 log->l_quotaoffs_flag |= XFS_DQ_PROJ;
3183 if (qoff_f->qf_flags & XFS_GQUOTA_ACCT)
3184 log->l_quotaoffs_flag |= XFS_DQ_GROUP;
3190 * Recover a dquot record
3193 xlog_recover_dquot_pass2(
3195 struct list_head *buffer_list,
3196 struct xlog_recover_item *item,
3197 xfs_lsn_t current_lsn)
3199 xfs_mount_t *mp = log->l_mp;
3201 struct xfs_disk_dquot *ddq, *recddq;
3203 xfs_dq_logformat_t *dq_f;
3208 * Filesystems are required to send in quota flags at mount time.
3210 if (mp->m_qflags == 0)
3213 recddq = item->ri_buf[1].i_addr;
3214 if (recddq == NULL) {
3215 xfs_alert(log->l_mp, "NULL dquot in %s.", __func__);
3218 if (item->ri_buf[1].i_len < sizeof(xfs_disk_dquot_t)) {
3219 xfs_alert(log->l_mp, "dquot too small (%d) in %s.",
3220 item->ri_buf[1].i_len, __func__);
3225 * This type of quota was turned off, so ignore this record.
3227 type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
3229 if (log->l_quotaoffs_flag & type)
3233 * At this point we know that quota was _not_ turned off.
3234 * Since the mount flags are not indicating to us otherwise, this
3235 * must mean that quota is on, and the dquot needs to be replayed.
3236 * Remember that we may not have fully recovered the superblock yet,
3237 * so we can't do the usual trick of looking at the SB quota bits.
3239 * The other possibility, of course, is that the quota subsystem was
3240 * removed since the last mount - ENOSYS.
3242 dq_f = item->ri_buf[0].i_addr;
3244 error = xfs_dqcheck(mp, recddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
3245 "xlog_recover_dquot_pass2 (log copy)");
3248 ASSERT(dq_f->qlf_len == 1);
3251 * At this point we are assuming that the dquots have been allocated
3252 * and hence the buffer has valid dquots stamped in it. It should,
3253 * therefore, pass verifier validation. If the dquot is bad, then
3254 * we'll return an error here, so we don't need to specifically check
3255 * the dquot in the buffer after the verifier has run.
3257 error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dq_f->qlf_blkno,
3258 XFS_FSB_TO_BB(mp, dq_f->qlf_len), 0, &bp,
3259 &xfs_dquot_buf_ops);
3264 ddq = xfs_buf_offset(bp, dq_f->qlf_boffset);
3267 * If the dquot has an LSN in it, recover the dquot only if it's less
3268 * than the lsn of the transaction we are replaying.
3270 if (xfs_sb_version_hascrc(&mp->m_sb)) {
3271 struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddq;
3272 xfs_lsn_t lsn = be64_to_cpu(dqb->dd_lsn);
3274 if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
3279 memcpy(ddq, recddq, item->ri_buf[1].i_len);
3280 if (xfs_sb_version_hascrc(&mp->m_sb)) {
3281 xfs_update_cksum((char *)ddq, sizeof(struct xfs_dqblk),
3285 ASSERT(dq_f->qlf_size == 2);
3286 ASSERT(bp->b_target->bt_mount == mp);
3287 bp->b_iodone = xlog_recover_iodone;
3288 xfs_buf_delwri_queue(bp, buffer_list);
3296 * This routine is called to create an in-core extent free intent
3297 * item from the efi format structure which was logged on disk.
3298 * It allocates an in-core efi, copies the extents from the format
3299 * structure into it, and adds the efi to the AIL with the given LSN.
3303 xlog_recover_efi_pass2(
3305 struct xlog_recover_item *item,
3309 struct xfs_mount *mp = log->l_mp;
3310 struct xfs_efi_log_item *efip;
3311 struct xfs_efi_log_format *efi_formatp;
3313 efi_formatp = item->ri_buf[0].i_addr;
3315 efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
3316 error = xfs_efi_copy_format(&item->ri_buf[0], &efip->efi_format);
3318 xfs_efi_item_free(efip);
3321 atomic_set(&efip->efi_next_extent, efi_formatp->efi_nextents);
3323 spin_lock(&log->l_ailp->xa_lock);
3325 * The EFI has two references. One for the EFD and one for the EFI to ensure
3326 * it makes it into the AIL. Insert the EFI into the AIL directly and
3327 * drop the EFI reference. Note that xfs_trans_ail_update() drops the AIL lock.
3330 xfs_trans_ail_update(log->l_ailp, &efip->efi_item, lsn);
3331 xfs_efi_release(efip);
3337 * This routine is called when an EFD format structure is found in a committed
3338 * transaction in the log. Its purpose is to cancel the corresponding EFI if it
3339 * was still in the log. To do this it searches the AIL for the EFI with an id
3340 * equal to that in the EFD format structure. If we find it we drop the EFD
3341 * reference, which removes the EFI from the AIL and frees it.
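/*
 * Lifecycle summary (illustrative, assembled from the EFI/EFD comments in
 * this file):
 *
 *	EFI logged, no EFD  -> the EFI stays in the AIL and its extents are
 *			       freed later by xlog_recover_process_efis()
 *	EFI and EFD logged  -> the EFD found here releases the EFI, so the
 *			       extent free is not redone
 */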
3344 xlog_recover_efd_pass2(
3346 struct xlog_recover_item *item)
3348 xfs_efd_log_format_t *efd_formatp;
3349 xfs_efi_log_item_t *efip = NULL;
3350 xfs_log_item_t *lip;
3352 struct xfs_ail_cursor cur;
3353 struct xfs_ail *ailp = log->l_ailp;
3355 efd_formatp = item->ri_buf[0].i_addr;
3356 ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) +
3357 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) ||
3358 (item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) +
3359 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_64_t)))));
3360 efi_id = efd_formatp->efd_efi_id;
3363 * Search for the EFI with the id in the EFD format structure in the AIL.
3366 spin_lock(&ailp->xa_lock);
3367 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3368 while (lip != NULL) {
3369 if (lip->li_type == XFS_LI_EFI) {
3370 efip = (xfs_efi_log_item_t *)lip;
3371 if (efip->efi_format.efi_id == efi_id) {
3373 * Drop the EFD reference to the EFI. This
3374 * removes the EFI from the AIL and frees it.
3376 spin_unlock(&ailp->xa_lock);
3377 xfs_efi_release(efip);
3378 spin_lock(&ailp->xa_lock);
3382 lip = xfs_trans_ail_cursor_next(ailp, &cur);
3385 xfs_trans_ail_cursor_done(&cur);
3386 spin_unlock(&ailp->xa_lock);
3392 * This routine is called when an inode create format structure is found in a
3393 * committed transaction in the log. Its purpose is to initialise the inodes
3394 * being allocated on disk. This requires us to get inode cluster buffers that
3395 * match the range to be initialised, stamped with inode templates and written
3396 * by delayed write so that subsequent modifications will hit the cached buffer
3397 * and only need writing out at the end of recovery.
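/*
 * Rough shape of the replay below (illustrative):
 *
 *	validate icl_ag, icl_agbno, icl_isize, icl_count and icl_length;
 *	skip the item if the covered cluster buffers were cancelled;
 *	xfs_ialloc_inode_init(mp, NULL, buffer_list, count, agno, agbno,
 *			      length, gen);	// stamp inode templates
 */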
3400 xlog_recover_do_icreate_pass2(
3402 struct list_head *buffer_list,
3403 xlog_recover_item_t *item)
3405 struct xfs_mount *mp = log->l_mp;
3406 struct xfs_icreate_log *icl;
3407 xfs_agnumber_t agno;
3408 xfs_agblock_t agbno;
3411 xfs_agblock_t length;
3412 int blks_per_cluster;
3418 icl = (struct xfs_icreate_log *)item->ri_buf[0].i_addr;
3419 if (icl->icl_type != XFS_LI_ICREATE) {
3420 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad type");
3424 if (icl->icl_size != 1) {
3425 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad icl size");
3429 agno = be32_to_cpu(icl->icl_ag);
3430 if (agno >= mp->m_sb.sb_agcount) {
3431 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agno");
3434 agbno = be32_to_cpu(icl->icl_agbno);
3435 if (!agbno || agbno == NULLAGBLOCK || agbno >= mp->m_sb.sb_agblocks) {
3436 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agbno");
3439 isize = be32_to_cpu(icl->icl_isize);
3440 if (isize != mp->m_sb.sb_inodesize) {
3441 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad isize");
3444 count = be32_to_cpu(icl->icl_count);
3446 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count");
3449 length = be32_to_cpu(icl->icl_length);
3450 if (!length || length >= mp->m_sb.sb_agblocks) {
3451 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad length");
3456 * The inode chunk is either full or sparse and we only support
3457 * m_ialloc_min_blks sized sparse allocations at this time.
3459 if (length != mp->m_ialloc_blks &&
3460 length != mp->m_ialloc_min_blks) {
3462 "%s: unsupported chunk length", __FUNCTION__);
3466 /* verify inode count is consistent with extent length */
3467 if ((count >> mp->m_sb.sb_inopblog) != length) {
3469 "%s: inconsistent inode count and chunk length",
3475 * The icreate transaction can cover multiple cluster buffers and these
3476 * buffers could have been freed and reused. Check the individual
3477 * buffers for cancellation so we don't overwrite anything written after the cancellation.
3480 blks_per_cluster = xfs_icluster_size_fsb(mp);
3481 bb_per_cluster = XFS_FSB_TO_BB(mp, blks_per_cluster);
3482 nbufs = length / blks_per_cluster;
3483 for (i = 0, cancel_count = 0; i < nbufs; i++) {
3486 daddr = XFS_AGB_TO_DADDR(mp, agno,
3487 agbno + i * blks_per_cluster);
3488 if (xlog_check_buffer_cancelled(log, daddr, bb_per_cluster, 0))
3493 * We currently only use icreate for a single allocation at a time. This
3494 * means we should expect either all or none of the buffers to be
3495 * cancelled. Be conservative and skip replay if at least one buffer is
3496 * cancelled, but warn the user that something is awry if the buffers
3497 * are not consistent.
3499 * XXX: This must be refined to only skip cancelled clusters once we use
3500 * icreate for multiple chunk allocations.
3502 ASSERT(!cancel_count || cancel_count == nbufs);
3504 if (cancel_count != nbufs)
3506 "WARNING: partial inode chunk cancellation, skipped icreate.");
3507 trace_xfs_log_recover_icreate_cancel(log, icl);
3511 trace_xfs_log_recover_icreate_recover(log, icl);
3512 return xfs_ialloc_inode_init(mp, NULL, buffer_list, count, agno, agbno,
3513 length, be32_to_cpu(icl->icl_gen));
3517 xlog_recover_buffer_ra_pass2(
3519 struct xlog_recover_item *item)
3521 struct xfs_buf_log_format *buf_f = item->ri_buf[0].i_addr;
3522 struct xfs_mount *mp = log->l_mp;
3524 if (xlog_peek_buffer_cancelled(log, buf_f->blf_blkno,
3525 buf_f->blf_len, buf_f->blf_flags)) {
3529 xfs_buf_readahead(mp->m_ddev_targp, buf_f->blf_blkno,
3530 buf_f->blf_len, NULL);
3534 xlog_recover_inode_ra_pass2(
3536 struct xlog_recover_item *item)
3538 struct xfs_inode_log_format ilf_buf;
3539 struct xfs_inode_log_format *ilfp;
3540 struct xfs_mount *mp = log->l_mp;
3543 if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) {
3544 ilfp = item->ri_buf[0].i_addr;
3547 memset(ilfp, 0, sizeof(*ilfp));
3548 error = xfs_inode_item_format_convert(&item->ri_buf[0], ilfp);
3553 if (xlog_peek_buffer_cancelled(log, ilfp->ilf_blkno, ilfp->ilf_len, 0))
3556 xfs_buf_readahead(mp->m_ddev_targp, ilfp->ilf_blkno,
3557 ilfp->ilf_len, &xfs_inode_buf_ra_ops);
3561 xlog_recover_dquot_ra_pass2(
3563 struct xlog_recover_item *item)
3565 struct xfs_mount *mp = log->l_mp;
3566 struct xfs_disk_dquot *recddq;
3567 struct xfs_dq_logformat *dq_f;
3572 if (mp->m_qflags == 0)
3575 recddq = item->ri_buf[1].i_addr;
3578 if (item->ri_buf[1].i_len < sizeof(struct xfs_disk_dquot))
3581 type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
3583 if (log->l_quotaoffs_flag & type)
3586 dq_f = item->ri_buf[0].i_addr;
3588 ASSERT(dq_f->qlf_len == 1);
3590 len = XFS_FSB_TO_BB(mp, dq_f->qlf_len);
3591 if (xlog_peek_buffer_cancelled(log, dq_f->qlf_blkno, len, 0))
3594 xfs_buf_readahead(mp->m_ddev_targp, dq_f->qlf_blkno, len,
3595 &xfs_dquot_buf_ra_ops);
3599 xlog_recover_ra_pass2(
3601 struct xlog_recover_item *item)
3603 switch (ITEM_TYPE(item)) {
3605 xlog_recover_buffer_ra_pass2(log, item);
3608 xlog_recover_inode_ra_pass2(log, item);
3611 xlog_recover_dquot_ra_pass2(log, item);
3615 case XFS_LI_QUOTAOFF:
3622 xlog_recover_commit_pass1(
3624 struct xlog_recover *trans,
3625 struct xlog_recover_item *item)
3627 trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1);
3629 switch (ITEM_TYPE(item)) {
3631 return xlog_recover_buffer_pass1(log, item);
3632 case XFS_LI_QUOTAOFF:
3633 return xlog_recover_quotaoff_pass1(log, item);
3638 case XFS_LI_ICREATE:
3639 /* nothing to do in pass 1 */
3642 xfs_warn(log->l_mp, "%s: invalid item type (%d)",
3643 __func__, ITEM_TYPE(item));
3650 xlog_recover_commit_pass2(
3652 struct xlog_recover *trans,
3653 struct list_head *buffer_list,
3654 struct xlog_recover_item *item)
3656 trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2);
3658 switch (ITEM_TYPE(item)) {
3660 return xlog_recover_buffer_pass2(log, buffer_list, item,
3663 return xlog_recover_inode_pass2(log, buffer_list, item,
3666 return xlog_recover_efi_pass2(log, item, trans->r_lsn);
3668 return xlog_recover_efd_pass2(log, item);
3670 return xlog_recover_dquot_pass2(log, buffer_list, item,
3672 case XFS_LI_ICREATE:
3673 return xlog_recover_do_icreate_pass2(log, buffer_list, item);
3674 case XFS_LI_QUOTAOFF:
3675 /* nothing to do in pass2 */
3678 xfs_warn(log->l_mp, "%s: invalid item type (%d)",
3679 __func__, ITEM_TYPE(item));
3686 xlog_recover_items_pass2(
3688 struct xlog_recover *trans,
3689 struct list_head *buffer_list,
3690 struct list_head *item_list)
3692 struct xlog_recover_item *item;
3695 list_for_each_entry(item, item_list, ri_list) {
3696 error = xlog_recover_commit_pass2(log, trans,
3706 * Perform the transaction.
3708 * If the transaction modifies a buffer or inode, do it now. Otherwise,
3709 * EFIs and EFDs get queued up by adding entries into the AIL for them.
3712 xlog_recover_commit_trans(
3714 struct xlog_recover *trans,
3719 int items_queued = 0;
3720 struct xlog_recover_item *item;
3721 struct xlog_recover_item *next;
3722 LIST_HEAD (buffer_list);
3723 LIST_HEAD (ra_list);
3724 LIST_HEAD (done_list);
3726 #define XLOG_RECOVER_COMMIT_QUEUE_MAX 100
3728 hlist_del(&trans->r_list);
3730 error = xlog_recover_reorder_trans(log, trans, pass);
3734 list_for_each_entry_safe(item, next, &trans->r_itemq, ri_list) {
3736 case XLOG_RECOVER_PASS1:
3737 error = xlog_recover_commit_pass1(log, trans, item);
3739 case XLOG_RECOVER_PASS2:
3740 xlog_recover_ra_pass2(log, item);
3741 list_move_tail(&item->ri_list, &ra_list);
3743 if (items_queued >= XLOG_RECOVER_COMMIT_QUEUE_MAX) {
3744 error = xlog_recover_items_pass2(log, trans,
3745 &buffer_list, &ra_list);
3746 list_splice_tail_init(&ra_list, &done_list);
3760 if (!list_empty(&ra_list)) {
3762 error = xlog_recover_items_pass2(log, trans,
3763 &buffer_list, &ra_list);
3764 list_splice_tail_init(&ra_list, &done_list);
3767 if (!list_empty(&done_list))
3768 list_splice_init(&done_list, &trans->r_itemq);
3770 error2 = xfs_buf_delwri_submit(&buffer_list);
3771 return error ? error : error2;
3775 xlog_recover_add_item(
3776 struct list_head *head)
3778 xlog_recover_item_t *item;
3780 item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP);
3781 INIT_LIST_HEAD(&item->ri_list);
3782 list_add_tail(&item->ri_list, head);
3786 xlog_recover_add_to_cont_trans(
3788 struct xlog_recover *trans,
3792 xlog_recover_item_t *item;
3793 char *ptr, *old_ptr;
3797 * If the transaction is empty, the header was split across this and the
3798 * previous record. Copy the rest of the header.
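/*
 * e.g. (illustrative): if the previous record carried the first 12 bytes
 * of the 16 byte struct xfs_trans_header, then len is 4 here and those
 * 4 bytes are copied into the tail of trans->r_theader.
 */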
3800 if (list_empty(&trans->r_itemq)) {
3801 ASSERT(len <= sizeof(struct xfs_trans_header));
3802 if (len > sizeof(struct xfs_trans_header)) {
3803 xfs_warn(log->l_mp, "%s: bad header length", __func__);
3807 xlog_recover_add_item(&trans->r_itemq);
3808 ptr = (char *)&trans->r_theader +
3809 sizeof(struct xfs_trans_header) - len;
3810 memcpy(ptr, dp, len);
3814 /* take the tail entry */
3815 item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
3817 old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
3818 old_len = item->ri_buf[item->ri_cnt-1].i_len;
3820 ptr = kmem_realloc(old_ptr, len+old_len, old_len, KM_SLEEP);
3821 memcpy(&ptr[old_len], dp, len);
3822 item->ri_buf[item->ri_cnt-1].i_len += len;
3823 item->ri_buf[item->ri_cnt-1].i_addr = ptr;
3824 trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
3829 * The next region to add is the start of a new region. It could be
3830 * a whole region or it could be the first part of a new region. Because
3831 * of this, the assumption here is that the type and size fields of all
3832 * format structures fit into the first 32 bits of the structure.
3834 * This works because all regions must be 32 bit aligned. Therefore, we
3835 * either have both fields or we have neither field. In the case we have
3836 * neither field, the data part of the region is zero length. We only have
3837 * a log_op_header and can throw away the header since a new one will appear
3838 * later. If we have at least 4 bytes, then we can determine how many regions
3839 * will appear in the current log item.
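/*
 * Illustrative sketch of that layout assumption: log format headers
 * (xfs_inode_log_format is used below as the representative type) begin
 * with
 *
 *	__uint16_t	ilf_type;	// item type
 *	__uint16_t	ilf_size;	// number of regions in the item
 *
 * so the first 32 bits are enough to size the item's region array.
 */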
3842 xlog_recover_add_to_trans(
3844 struct xlog_recover *trans,
3848 xfs_inode_log_format_t *in_f; /* any will do */
3849 xlog_recover_item_t *item;
3854 if (list_empty(&trans->r_itemq)) {
3855 /* we need to catch log corruptions here */
3856 if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
3857 xfs_warn(log->l_mp, "%s: bad header magic number",
3863 if (len > sizeof(struct xfs_trans_header)) {
3864 xfs_warn(log->l_mp, "%s: bad header length", __func__);
3870 * The transaction header can be arbitrarily split across op
3871 * records. If we don't have the whole thing here, copy what we
3872 * do have and handle the rest in the next record.
3874 if (len == sizeof(struct xfs_trans_header))
3875 xlog_recover_add_item(&trans->r_itemq);
3876 memcpy(&trans->r_theader, dp, len);
3880 ptr = kmem_alloc(len, KM_SLEEP);
3881 memcpy(ptr, dp, len);
3882 in_f = (xfs_inode_log_format_t *)ptr;
3884 /* take the tail entry */
3885 item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
3886 if (item->ri_total != 0 &&
3887 item->ri_total == item->ri_cnt) {
3888 /* tail item is in use, get a new one */
3889 xlog_recover_add_item(&trans->r_itemq);
3890 item = list_entry(trans->r_itemq.prev,
3891 xlog_recover_item_t, ri_list);
3894 if (item->ri_total == 0) { /* first region to be added */
3895 if (in_f->ilf_size == 0 ||
3896 in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
3898 "bad number of regions (%d) in inode log format",
3905 item->ri_total = in_f->ilf_size;
3907 kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
3910 ASSERT(item->ri_total > item->ri_cnt);
3911 /* Description region is ri_buf[0] */
3912 item->ri_buf[item->ri_cnt].i_addr = ptr;
3913 item->ri_buf[item->ri_cnt].i_len = len;
3915 trace_xfs_log_recover_item_add(log, trans, item, 0);
3920 * Free up any resources allocated by the transaction
3922 * Remember that EFIs, EFDs, and IUNLINKs are handled later.
3925 xlog_recover_free_trans(
3926 struct xlog_recover *trans)
3928 xlog_recover_item_t *item, *n;
3931 list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
3932 /* Free the regions in the item. */
3933 list_del(&item->ri_list);
3934 for (i = 0; i < item->ri_cnt; i++)
3935 kmem_free(item->ri_buf[i].i_addr);
3936 /* Free the item itself */
3937 kmem_free(item->ri_buf);
3940 /* Free the transaction recover structure */
3945 * On error or completion, trans is freed.
3948 xlog_recovery_process_trans(
3950 struct xlog_recover *trans,
3957 bool freeit = false;
3959 /* mask off ophdr transaction container flags */
3960 flags &= ~XLOG_END_TRANS;
3961 if (flags & XLOG_WAS_CONT_TRANS)
3962 flags &= ~XLOG_CONTINUE_TRANS;
3965 * Callees must not free the trans structure. We'll decide if we need to
3966 * free it or not based on the operation being done and its result.
3969 /* expected flag values */
3971 case XLOG_CONTINUE_TRANS:
3972 error = xlog_recover_add_to_trans(log, trans, dp, len);
3974 case XLOG_WAS_CONT_TRANS:
3975 error = xlog_recover_add_to_cont_trans(log, trans, dp, len);
3977 case XLOG_COMMIT_TRANS:
3978 error = xlog_recover_commit_trans(log, trans, pass);
3979 /* success or fail, we are now done with this transaction. */
3983 /* unexpected flag values */
3984 case XLOG_UNMOUNT_TRANS:
3985 /* just skip trans */
3986 xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
3989 case XLOG_START_TRANS:
3991 xfs_warn(log->l_mp, "%s: bad flag 0x%x", __func__, flags);
3996 if (error || freeit)
3997 xlog_recover_free_trans(trans);
4002 * Lookup the transaction recovery structure associated with the ID in the
4003 * current ophdr. If the transaction doesn't exist and the start flag is set in
4004 * the ophdr, then allocate a new transaction for future ID matches to find.
4005 * Either way, return what we found during the lookup - an existing transaction or nothing.
4008 STATIC struct xlog_recover *
4009 xlog_recover_ophdr_to_trans(
4010 struct hlist_head rhash[],
4011 struct xlog_rec_header *rhead,
4012 struct xlog_op_header *ohead)
4014 struct xlog_recover *trans;
4016 struct hlist_head *rhp;
4018 tid = be32_to_cpu(ohead->oh_tid);
4019 rhp = &rhash[XLOG_RHASH(tid)];
4020 hlist_for_each_entry(trans, rhp, r_list) {
4021 if (trans->r_log_tid == tid)
4026 * skip over non-start transaction headers - we could be
4027 * processing slack space before the next transaction starts
4029 if (!(ohead->oh_flags & XLOG_START_TRANS))
4032 ASSERT(be32_to_cpu(ohead->oh_len) == 0);
4035 * This is a new transaction so allocate a new recovery container to
4036 * hold the recovery ops that will follow.
4038 trans = kmem_zalloc(sizeof(struct xlog_recover), KM_SLEEP);
4039 trans->r_log_tid = tid;
4040 trans->r_lsn = be64_to_cpu(rhead->h_lsn);
4041 INIT_LIST_HEAD(&trans->r_itemq);
4042 INIT_HLIST_NODE(&trans->r_list);
4043 hlist_add_head(&trans->r_list, rhp);
4046 * Nothing more to do for this ophdr. Items to be added to this new
4047 * transaction will be in subsequent ophdr containers.
4053 xlog_recover_process_ophdr(
4055 struct hlist_head rhash[],
4056 struct xlog_rec_header *rhead,
4057 struct xlog_op_header *ohead,
4062 struct xlog_recover *trans;
4065 /* Do we understand who wrote this op? */
4066 if (ohead->oh_clientid != XFS_TRANSACTION &&
4067 ohead->oh_clientid != XFS_LOG) {
4068 xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
4069 __func__, ohead->oh_clientid);
4075 * Check the ophdr contains all the data it is supposed to contain.
4077 len = be32_to_cpu(ohead->oh_len);
4078 if (dp + len > end) {
4079 xfs_warn(log->l_mp, "%s: bad length 0x%x", __func__, len);
4084 trans = xlog_recover_ophdr_to_trans(rhash, rhead, ohead);
4086 /* nothing to do, so skip over this ophdr */
4090 return xlog_recovery_process_trans(log, trans, dp, len,
4091 ohead->oh_flags, pass);
4095 * There are two valid states of the r_state field. 0 indicates that the
4096 * transaction structure is in a normal state. We have either seen the
4097 * start of the transaction or the last operation we added was not a partial
4098 * operation. If the last operation we added to the transaction was a
4099 * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS.
4101 * NOTE: skip LRs with 0 data length.
4104 xlog_recover_process_data(
4106 struct hlist_head rhash[],
4107 struct xlog_rec_header *rhead,
4111 struct xlog_op_header *ohead;
4116 end = dp + be32_to_cpu(rhead->h_len);
4117 num_logops = be32_to_cpu(rhead->h_num_logops);
4119 /* check the log format matches our own - else we can't recover */
4120 if (xlog_header_check_recover(log->l_mp, rhead))
4123 while ((dp < end) && num_logops) {
4125 ohead = (struct xlog_op_header *)dp;
4126 dp += sizeof(*ohead);
4129 /* errors will abort recovery */
4130 error = xlog_recover_process_ophdr(log, rhash, rhead, ohead,
4135 dp += be32_to_cpu(ohead->oh_len);
4142 * Process an extent free intent item that was recovered from
4143 * the log. We need to free the extents that it describes.
4146 xlog_recover_process_efi(
4148 xfs_efi_log_item_t *efip)
4150 xfs_efd_log_item_t *efdp;
4155 xfs_fsblock_t startblock_fsb;
4157 ASSERT(!test_bit(XFS_EFI_RECOVERED, &efip->efi_flags));
4160 * First check the validity of the extents described by the
4161 * EFI. If any are bad, then assume that all are bad and
4162 * just toss the EFI.
4164 for (i = 0; i < efip->efi_format.efi_nextents; i++) {
4165 extp = &(efip->efi_format.efi_extents[i]);
4166 startblock_fsb = XFS_BB_TO_FSB(mp,
4167 XFS_FSB_TO_DADDR(mp, extp->ext_start));
4168 if ((startblock_fsb == 0) ||
4169 (extp->ext_len == 0) ||
4170 (startblock_fsb >= mp->m_sb.sb_dblocks) ||
4171 (extp->ext_len >= mp->m_sb.sb_agblocks)) {
4173 * This will pull the EFI from the AIL and
4174 * free the memory associated with it.
4176 set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
4177 xfs_efi_release(efip);
4182 tp = xfs_trans_alloc(mp, 0);
4183 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
4186 efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents);
4188 for (i = 0; i < efip->efi_format.efi_nextents; i++) {
4189 extp = &(efip->efi_format.efi_extents[i]);
4190 error = xfs_trans_free_extent(tp, efdp, extp->ext_start,
4197 set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
4198 error = xfs_trans_commit(tp);
4202 xfs_trans_cancel(tp);
4207 * When this is called, all of the EFIs which did not have
4208 * corresponding EFDs should be in the AIL. What we do now
4209 * is free the extents associated with each one.
4211 * Since we process the EFIs in normal transactions, they
4212 * will be removed at some point after the commit. This prevents
4213 * us from just walking down the list processing each one.
4214 * We'll use a flag in the EFI to skip those that we've already
4215 * processed and use the AIL iteration mechanism's generation
4216 * count to try to speed this up at least a bit.
4218 * When we start, we know that the EFIs are the only things in
4219 * the AIL. As we process them, however, other items are added
4220 * to the AIL. Since everything added to the AIL must come after
4221 * everything already in the AIL, we stop processing as soon as
4222 * we see something other than an EFI in the AIL.
4225 xlog_recover_process_efis(
4228 struct xfs_log_item *lip;
4229 struct xfs_efi_log_item *efip;
4231 struct xfs_ail_cursor cur;
4232 struct xfs_ail *ailp;
4235 spin_lock(&ailp->xa_lock);
4236 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
4237 while (lip != NULL) {
4239 * We're done when we see something other than an EFI.
4240 * There should be no EFIs left in the AIL now.
4242 if (lip->li_type != XFS_LI_EFI) {
4244 for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
4245 ASSERT(lip->li_type != XFS_LI_EFI);
4251 * Skip EFIs that we've already processed.
4253 efip = container_of(lip, struct xfs_efi_log_item, efi_item);
4254 if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags)) {
4255 lip = xfs_trans_ail_cursor_next(ailp, &cur);
4259 spin_unlock(&ailp->xa_lock);
4260 error = xlog_recover_process_efi(log->l_mp, efip);
4261 spin_lock(&ailp->xa_lock);
4264 lip = xfs_trans_ail_cursor_next(ailp, &cur);
4267 xfs_trans_ail_cursor_done(&cur);
4268 spin_unlock(&ailp->xa_lock);
4273 * A cancel occurs when the mount has failed and we're bailing out. Release all
4274 * pending EFIs so they don't pin the AIL.
4277 xlog_recover_cancel_efis(
4280 struct xfs_log_item *lip;
4281 struct xfs_efi_log_item *efip;
4283 struct xfs_ail_cursor cur;
4284 struct xfs_ail *ailp;
4287 spin_lock(&ailp->xa_lock);
4288 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
4289 while (lip != NULL) {
4291 * We're done when we see something other than an EFI.
4292 * There should be no EFIs left in the AIL now.
4294 if (lip->li_type != XFS_LI_EFI) {
4296 for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
4297 ASSERT(lip->li_type != XFS_LI_EFI);
4302 efip = container_of(lip, struct xfs_efi_log_item, efi_item);
4304 spin_unlock(&ailp->xa_lock);
4305 xfs_efi_release(efip);
4306 spin_lock(&ailp->xa_lock);
4308 lip = xfs_trans_ail_cursor_next(ailp, &cur);
4311 xfs_trans_ail_cursor_done(&cur);
4312 spin_unlock(&ailp->xa_lock);
4317 * This routine performs a transaction to null out a bad inode pointer
4318 * in an agi unlinked inode hash bucket.
4321 xlog_recover_clear_agi_bucket(
4323 xfs_agnumber_t agno,
4332 tp = xfs_trans_alloc(mp, XFS_TRANS_CLEAR_AGI_BUCKET);
4333 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_clearagi, 0, 0);
4337 error = xfs_read_agi(mp, tp, agno, &agibp);
4341 agi = XFS_BUF_TO_AGI(agibp);
4342 agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
4343 offset = offsetof(xfs_agi_t, agi_unlinked) +
4344 (sizeof(xfs_agino_t) * bucket);
4345 xfs_trans_log_buf(tp, agibp, offset,
4346 (offset + sizeof(xfs_agino_t) - 1));
4348 error = xfs_trans_commit(tp);
4354 xfs_trans_cancel(tp);
4356 xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__, agno);
4361 xlog_recover_process_one_iunlink(
4362 struct xfs_mount *mp,
4363 xfs_agnumber_t agno,
4367 struct xfs_buf *ibp;
4368 struct xfs_dinode *dip;
4369 struct xfs_inode *ip;
4373 ino = XFS_AGINO_TO_INO(mp, agno, agino);
4374 error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
4379 * Get the on disk inode to find the next inode in the bucket.
4381 error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &ibp, 0, 0);
4385 ASSERT(ip->i_d.di_nlink == 0);
4386 ASSERT(ip->i_d.di_mode != 0);
4388 /* setup for the next pass */
4389 agino = be32_to_cpu(dip->di_next_unlinked);
4393 * Prevent any DMAPI event from being sent when the reference on
4394 * the inode is dropped.
4396 ip->i_d.di_dmevmask = 0;
4405 * We can't read in the inode this bucket points to, or this inode
4406 * is messed up. Just ditch this bucket of inodes. We will lose
4407 * some inodes and space, but at least we won't hang.
4409 * Call xlog_recover_clear_agi_bucket() to perform a transaction to
4410 * clear the inode pointer in the bucket.
4412 xlog_recover_clear_agi_bucket(mp, agno, bucket);
4417 * xlog_recover_process_iunlinks()
4419 * This is called during recovery to process any inodes which
4420 * we unlinked but not freed when the system crashed. These
4421 * inodes will be on the lists in the AGI blocks. What we do
4422 * here is scan all the AGIs and fully truncate and free any
4423 * inodes found on the lists. Each inode is removed from the
4424 * lists when it has been fully truncated and is freed. The
4425 * freeing of the inode and its removal from the list must be atomic.
4429 xlog_recover_process_iunlinks(
4433 xfs_agnumber_t agno;
4444 * Prevent any DMAPI event from being sent while in this function.
4446 mp_dmevmask = mp->m_dmevmask;
4449 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
4451 * Find the agi for this ag.
4453 error = xfs_read_agi(mp, NULL, agno, &agibp);
4456 * AGI is b0rked. Don't process it.
4458 * We should probably mark the filesystem as corrupt
4459 * after we've recovered all the ag's we can....
4464 * Unlock the buffer so that it can be acquired in the normal
4465 * course of the transaction to truncate and free each inode.
4466 * Because we are not racing with anyone else here for the AGI
4467 * buffer, we don't even need to hold it locked to read the
4468 * initial unlinked bucket entries out of the buffer. We keep a
4469 * buffer reference though, so that it stays pinned in memory
4470 * while we need the buffer.
4472 agi = XFS_BUF_TO_AGI(agibp);
4473 xfs_buf_unlock(agibp);
4475 for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
4476 agino = be32_to_cpu(agi->agi_unlinked[bucket]);
4477 while (agino != NULLAGINO) {
4478 agino = xlog_recover_process_one_iunlink(mp,
4479 agno, agino, bucket);
4482 xfs_buf_rele(agibp);
4485 mp->m_dmevmask = mp_dmevmask;
4490 struct xlog_rec_header *rhead,
4496 for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
4497 i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
4498 *(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
4502 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
4503 xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
4504 for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
4505 j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
4506 k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
4507 *(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
4516 * CRC check, unpack and process a log record.
4519 xlog_recover_process(
4521 struct hlist_head rhash[],
4522 struct xlog_rec_header *rhead,
4529 crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len));
4532 * Nothing else to do if this is a CRC verification pass. Just return
4533 * if this is a record with a non-zero crc. Unfortunately, mkfs always
4534 * sets h_crc to 0 so we must consider this valid even on v5 supers.
4535 * Otherwise, return EFSBADCRC on failure so the callers up the stack
4536 * know precisely what failed.
4538 if (pass == XLOG_RECOVER_CRCPASS) {
if (rhead->h_crc && crc != le32_to_cpu(rhead->h_crc))
	return -EFSBADCRC;
return 0;
}
4545 * We're in the normal recovery path. Issue a warning if and only if the
4546 * CRC in the header is non-zero. This is an advisory warning and the
4547 * zero CRC check prevents warnings from being emitted when upgrading
4548 * the kernel from one that does not add CRCs by default.
4550 if (crc != le32_to_cpu(rhead->h_crc)) {
4551 if (rhead->h_crc || xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
4552 xfs_alert(log->l_mp,
4553 "log record CRC mismatch: found 0x%x, expected 0x%x.",
le32_to_cpu(rhead->h_crc),
le32_to_cpu(crc));
4556 xfs_hex_dump(dp, 32);
4560 * If the filesystem is CRC enabled, this mismatch becomes a
4561 * fatal log corruption failure.
4563 if (xfs_sb_version_hascrc(&log->l_mp->m_sb))
4564 return -EFSCORRUPTED;
error = xlog_unpack_data(rhead, dp, log);
if (error)
	return error;

4571 return xlog_recover_process_data(log, rhash, rhead, dp, pass);
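
/*
* Sanity check a record header before trusting h_len and h_version:
* the magic number, version bits, record length and block number must
* all be plausible for this log.
*/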
4575 xlog_valid_rec_header(
4577 struct xlog_rec_header *rhead,
4582 if (unlikely(rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))) {
4583 XFS_ERROR_REPORT("xlog_valid_rec_header(1)",
4584 XFS_ERRLEVEL_LOW, log->l_mp);
4585 return -EFSCORRUPTED;
}
if (unlikely(
(!rhead->h_version ||
4589 (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) {
4590 xfs_warn(log->l_mp, "%s: unrecognised log version (%d).",
4591 __func__, be32_to_cpu(rhead->h_version));
4595 /* LR body must have data or it wouldn't have been written */
4596 hlen = be32_to_cpu(rhead->h_len);
if (unlikely(hlen <= 0 || hlen > INT_MAX)) {
4598 XFS_ERROR_REPORT("xlog_valid_rec_header(2)",
4599 XFS_ERRLEVEL_LOW, log->l_mp);
4600 return -EFSCORRUPTED;
if (unlikely(blkno > log->l_logBBsize || blkno > INT_MAX)) {
4603 XFS_ERROR_REPORT("xlog_valid_rec_header(3)",
4604 XFS_ERRLEVEL_LOW, log->l_mp);
4605 return -EFSCORRUPTED;
4611 * Read the log from tail to head and process the log records found.
4612 * Handle the two cases where the tail and head are in the same cycle
4613 * and where the active portion of the log wraps around the end of
4614 * the physical log separately. The pass parameter is passed through
* to the routines called to process the data and is not looked at
* by this routine.
4619 xlog_do_recovery_pass(
4621 xfs_daddr_t head_blk,
4622 xfs_daddr_t tail_blk,
4624 xfs_daddr_t *first_bad) /* out: first bad log rec */
4626 xlog_rec_header_t *rhead;
4628 xfs_daddr_t rhead_blk;
4630 xfs_buf_t *hbp, *dbp;
4631 int error = 0, h_size, h_len;
4632 int bblks, split_bblks;
4633 int hblks, split_hblks, wrapped_hblks;
4634 struct hlist_head rhash[XLOG_RHASH_SIZE];
4636 ASSERT(head_blk != tail_blk);
4640 * Read the header of the tail block and get the iclog buffer size from
4641 * h_size. Use this to tell how many sectors make up the log header.
4643 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
4645 * When using variable length iclogs, read first sector of
4646 * iclog header and extract the header size from it. Get a
4647 * new hbp that is the correct size.
4649 hbp = xlog_get_bp(log, 1);
4653 error = xlog_bread(log, tail_blk, 1, hbp, &offset);
4657 rhead = (xlog_rec_header_t *)offset;
4658 error = xlog_valid_rec_header(log, rhead, tail_blk);
4663 * xfsprogs has a bug where record length is based on lsunit but
4664 * h_size (iclog size) is hardcoded to 32k. Now that we
4665 * unconditionally CRC verify the unmount record, this means the
* log buffer can be too small for the record and cause an
* overrun.
*
4669 * Detect this condition here. Use lsunit for the buffer size as
4670 * long as this looks like the mkfs case. Otherwise, return an
4671 * error to avoid a buffer overrun.
4673 h_size = be32_to_cpu(rhead->h_size);
4674 h_len = be32_to_cpu(rhead->h_len);
4675 if (h_len > h_size) {
4676 if (h_len <= log->l_mp->m_logbsize &&
4677 be32_to_cpu(rhead->h_num_logops) == 1) {
4679 "invalid iclog size (%d bytes), using lsunit (%d bytes)",
4680 h_size, log->l_mp->m_logbsize);
4681 h_size = log->l_mp->m_logbsize;
} else
	return -EFSCORRUPTED;
}
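
/*
* Each log header block can only describe XLOG_HEADER_CYCLE_SIZE
* (32k) of record data, so a v2 log with a larger iclog needs
* multiple header blocks: a 256k iclog, for example, takes eight.
*/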
4686 if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) &&
4687 (h_size > XLOG_HEADER_CYCLE_SIZE)) {
4688 hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
if (h_size % XLOG_HEADER_CYCLE_SIZE)
	hblks++;
4692 hbp = xlog_get_bp(log, hblks);
4697 ASSERT(log->l_sectBBsize == 1);
4699 hbp = xlog_get_bp(log, 1);
4700 h_size = XLOG_BIG_RECORD_BSIZE;
4705 dbp = xlog_get_bp(log, BTOBB(h_size));
4711 memset(rhash, 0, sizeof(rhash));
4712 blk_no = rhead_blk = tail_blk;
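
/*
* rhead_blk follows the start of the record currently being
* processed, so a failure can be reported back through *first_bad.
*/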
4713 if (tail_blk > head_blk) {
4715 * Perform recovery around the end of the physical log.
4716 * When the head is not on the same cycle number as the tail,
4717 * we can't do a sequential recovery.
4719 while (blk_no < log->l_logBBsize) {
4721 * Check for header wrapping around physical end-of-log
offset = hbp->b_addr;
split_hblks = 0;
wrapped_hblks = 0;
4726 if (blk_no + hblks <= log->l_logBBsize) {
4727 /* Read header in one read */
error = xlog_bread(log, blk_no, hblks, hbp,
		&offset);
4733 /* This LR is split across physical log end */
4734 if (blk_no != log->l_logBBsize) {
4735 /* some data before physical log end */
4736 ASSERT(blk_no <= INT_MAX);
4737 split_hblks = log->l_logBBsize - (int)blk_no;
4738 ASSERT(split_hblks > 0);
error = xlog_bread(log, blk_no,
		split_hblks, hbp, &offset);
4747 * Note: this black magic still works with
4748 * large sector sizes (non-512) only because:
4749 * - we increased the buffer size originally
4750 * by 1 sector giving us enough extra space
4751 * for the second read;
* - the log start is guaranteed to be sector
*   aligned;
4754 * - we read the log end (LR header start)
4755 * _first_, then the log start (LR header end)
4756 * - order is important.
4758 wrapped_hblks = hblks - split_hblks;
error = xlog_bread_offset(log, 0,
		wrapped_hblks, hbp,
		offset + BBTOB(split_hblks));
4765 rhead = (xlog_rec_header_t *)offset;
4766 error = xlog_valid_rec_header(log, rhead,
4767 split_hblks ? blk_no : 0);
4771 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
4774 /* Read in data for log record */
4775 if (blk_no + bblks <= log->l_logBBsize) {
error = xlog_bread(log, blk_no, bblks, dbp,
		&offset);
4781 /* This log record is split across the
4782 * physical end of log */
offset = dbp->b_addr;
split_bblks = 0;
4785 if (blk_no != log->l_logBBsize) {
/* some data is before the physical
 * end of log */
4788 ASSERT(!wrapped_hblks);
4789 ASSERT(blk_no <= INT_MAX);
split_bblks = log->l_logBBsize - (int)blk_no;
4792 ASSERT(split_bblks > 0);
error = xlog_bread(log, blk_no,
		split_bblks, dbp, &offset);
4801 * Note: this black magic still works with
4802 * large sector sizes (non-512) only because:
4803 * - we increased the buffer size originally
4804 * by 1 sector giving us enough extra space
4805 * for the second read;
* - the log start is guaranteed to be sector
*   aligned;
4808 * - we read the log end (LR header start)
4809 * _first_, then the log start (LR header end)
4810 * - order is important.
4812 error = xlog_bread_offset(log, 0,
4813 bblks - split_bblks, dbp,
4814 offset + BBTOB(split_bblks));
error = xlog_recover_process(log, rhash, rhead, offset,
		pass);
4828 ASSERT(blk_no >= log->l_logBBsize);
4829 blk_no -= log->l_logBBsize;
4833 /* read first part of physical log */
4834 while (blk_no < head_blk) {
4835 error = xlog_bread(log, blk_no, hblks, hbp, &offset);
4839 rhead = (xlog_rec_header_t *)offset;
4840 error = xlog_valid_rec_header(log, rhead, blk_no);
4844 /* blocks in data section */
4845 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
error = xlog_bread(log, blk_no + hblks, bblks, dbp,
		&offset);
4851 error = xlog_recover_process(log, rhash, rhead, offset, pass);
4855 blk_no += bblks + hblks;
4864 if (error && first_bad)
4865 *first_bad = rhead_blk;
4871 * Do the recovery of the log. We actually do this in two phases.
* The two passes are necessary in order to implement the cancellation
* of records written into the log. The first pass
4874 * determines those things which have been cancelled, and the
4875 * second pass replays log items normally except for those which
4876 * have been cancelled. The handling of the replay and cancellations
4877 * takes place in the log item type specific routines.
4879 * The table of items which have cancel records in the log is allocated
4880 * and freed at this level, since only here do we know when all of
4881 * the log recovery has been completed.
4884 xlog_do_log_recovery(
4886 xfs_daddr_t head_blk,
4887 xfs_daddr_t tail_blk)
4891 ASSERT(head_blk != tail_blk);
4894 * First do a pass to find all of the cancelled buf log items.
4895 * Store them in the buf_cancel_table for use in the second pass.
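*
* The table is a small hash table: XLOG_BC_TABLE_SIZE list heads
* indexed by a hash of each buffer's start block, holding
* struct xfs_buf_cancel entries.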
4897 log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE *
sizeof(struct list_head),
KM_SLEEP);
4900 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
4901 INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);
4903 error = xlog_do_recovery_pass(log, head_blk, tail_blk,
4904 XLOG_RECOVER_PASS1, NULL);
if (error) {
	kmem_free(log->l_buf_cancel_table);
	log->l_buf_cancel_table = NULL;
	return error;
}
4911 * Then do a second pass to actually recover the items in the log.
4912 * When it is complete free the table of buf cancel items.
4914 error = xlog_do_recovery_pass(log, head_blk, tail_blk,
4915 XLOG_RECOVER_PASS2, NULL);
4920 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
4921 ASSERT(list_empty(&log->l_buf_cancel_table[i]));
4925 kmem_free(log->l_buf_cancel_table);
4926 log->l_buf_cancel_table = NULL;
4932 * Do the actual recovery
4937 xfs_daddr_t head_blk,
4938 xfs_daddr_t tail_blk)
4945 * First replay the images in the log.
4947 error = xlog_do_log_recovery(log, head_blk, tail_blk);
4952 * If IO errors happened during recovery, bail out.
4954 if (XFS_FORCED_SHUTDOWN(log->l_mp)) {
4959 * We now update the tail_lsn since much of the recovery has completed
* and there may be space available to use. If there were no extent frees
* or iunlinks, we can free up the entire log and set the tail_lsn to
4962 * be the last_sync_lsn. This was set in xlog_find_tail to be the
4963 * lsn of the last known good LR on disk. If there are extent frees
4964 * or iunlinks they will have some entries in the AIL; so we look at
4965 * the AIL to determine how to set the tail_lsn.
4967 xlog_assign_tail_lsn(log->l_mp);
4970 * Now that we've finished replaying all buffer and inode
4971 * updates, re-read in the superblock and reverify it.
4973 bp = xfs_getsb(log->l_mp, 0);
4975 ASSERT(!(XFS_BUF_ISWRITE(bp)));
XFS_BUF_READ(bp);
XFS_BUF_UNASYNC(bp);
4978 bp->b_ops = &xfs_sb_buf_ops;
4980 error = xfs_buf_submit_wait(bp);
if (error) {
	if (!XFS_FORCED_SHUTDOWN(log->l_mp)) {
		xfs_buf_ioerror_alert(bp, __func__);
		ASSERT(0);
	}
	xfs_buf_relse(bp);
	return error;
}
4990 /* Convert superblock from on-disk format */
4991 sbp = &log->l_mp->m_sb;
4992 xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));
4993 ASSERT(sbp->sb_magicnum == XFS_SB_MAGIC);
4994 ASSERT(xfs_sb_good_version(sbp));
4995 xfs_reinit_percpu_counters(log->l_mp);
5000 xlog_recover_check_summary(log);
5002 /* Normal transactions can now occur */
5003 log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
5008 * Perform recovery and re-initialize some log variables in xlog_find_tail.
5010 * Return error or zero.
xfs_daddr_t head_blk, tail_blk;
int error;
5019 /* find the tail of the log */
error = xlog_find_tail(log, &head_blk, &tail_blk);
if (error)
	return error;
5025 * The superblock was read before the log was available and thus the LSN
5026 * could not be verified. Check the superblock LSN against the current
5027 * LSN now that it's known.
5029 if (xfs_sb_version_hascrc(&log->l_mp->m_sb) &&
!xfs_log_check_lsn(log->l_mp, log->l_mp->m_sb.sb_lsn))
	return -EINVAL;
5033 if (tail_blk != head_blk) {
5034 /* There used to be a comment here:
5036 * disallow recovery on read-only mounts. note -- mount
* checks for ENOSPC and turns it into an intelligent
* error message.
5039 * ...but this is no longer true. Now, unless you specify
5040 * NORECOVERY (in which case this function would never be
5041 * called), we just go ahead and recover. We do this all
5042 * under the vfs layer, so we can get away with it unless
5043 * the device itself is read-only, in which case we fail.
if ((error = xfs_dev_is_read_only(log->l_mp, "recovery"))) {
	return error;
}
5050 * Version 5 superblock log feature mask validation. We know the
5051 * log is dirty so check if there are any unknown log features
5052 * in what we need to recover. If there are unknown features
* (e.g. unsupported transactions), then simply reject the
5054 * attempt at recovery before touching anything.
5056 if (XFS_SB_VERSION_NUM(&log->l_mp->m_sb) == XFS_SB_VERSION_5 &&
5057 xfs_sb_has_incompat_log_feature(&log->l_mp->m_sb,
5058 XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN)) {
5060 "Superblock has unknown incompatible log features (0x%x) enabled.",
5061 (log->l_mp->m_sb.sb_features_log_incompat &
5062 XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN));
5064 "The log can not be fully and/or safely recovered by this kernel.");
5066 "Please recover the log on a kernel that supports the unknown features.");
* Delay log recovery if the debug hook is set. This is debug
* instrumentation to coordinate simulation of I/O failures with
* log recovery.
5075 if (xfs_globals.log_recovery_delay) {
5076 xfs_notice(log->l_mp,
5077 "Delaying log recovery for %d seconds.",
5078 xfs_globals.log_recovery_delay);
5079 msleep(xfs_globals.log_recovery_delay * 1000);
5082 xfs_notice(log->l_mp, "Starting recovery (logdev: %s)",
log->l_mp->m_logname ? log->l_mp->m_logname
		: "internal");
5086 error = xlog_do_recover(log, head_blk, tail_blk);
5087 log->l_flags |= XLOG_RECOVERY_NEEDED;
5093 * In the first part of recovery we replay inodes and buffers and build
5094 * up the list of extent free items which need to be processed. Here
5095 * we process the extent free items and clean up the on disk unlinked
5096 * inode lists. This is separated from the first part of recovery so
5097 * that the root and real-time bitmap inodes can be read in from disk in
5098 * between the two stages. This is necessary so that we can free space
5099 * in the real-time portion of the file system.
5102 xlog_recover_finish(
5106 * Now we're ready to do the transactions needed for the
5107 * rest of recovery. Start with completing all the extent
5108 * free intent records and then process the unlinked inode
5109 * lists. At this point, we essentially run in normal mode
5110 * except that we're still performing recovery actions
5111 * rather than accepting new requests.
5113 if (log->l_flags & XLOG_RECOVERY_NEEDED) {
5115 error = xlog_recover_process_efis(log);
5117 xfs_alert(log->l_mp, "Failed to recover EFIs");
5121 * Sync the log to get all the EFIs out of the AIL.
5122 * This isn't absolutely necessary, but it helps in
5123 * case the unlink transactions would have problems
5124 * pushing the EFIs out of the way.
5126 xfs_log_force(log->l_mp, XFS_LOG_SYNC);
5128 xlog_recover_process_iunlinks(log);
5130 xlog_recover_check_summary(log);
5132 xfs_notice(log->l_mp, "Ending recovery (logdev: %s)",
log->l_mp->m_logname ? log->l_mp->m_logname
		: "internal");
5135 log->l_flags &= ~XLOG_RECOVERY_NEEDED;
5137 xfs_info(log->l_mp, "Ending clean mount");
5143 xlog_recover_cancel(
5148 if (log->l_flags & XLOG_RECOVERY_NEEDED)
5149 error = xlog_recover_cancel_efis(log);
5156 * Read all of the agf and agi counters and check that they
5157 * are consistent with the superblock counters.
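*
* This is debug instrumentation only; non-DEBUG builds compile the
* call away.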
5160 xlog_recover_check_summary(
5167 xfs_agnumber_t agno;
5168 __uint64_t freeblks;
5178 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
error = xfs_read_agf(mp, NULL, agno, 0, &agfbp);
if (error) {
	xfs_alert(mp, "%s agf read failed agno %d error %d",
			__func__, agno, error);
} else {
	agfp = XFS_BUF_TO_AGF(agfbp);
	freeblks += be32_to_cpu(agfp->agf_freeblks) +
		    be32_to_cpu(agfp->agf_flcount);
	xfs_buf_relse(agfbp);
}
error = xfs_read_agi(mp, NULL, agno, &agibp);
if (error) {
	xfs_alert(mp, "%s agi read failed agno %d error %d",
			__func__, agno, error);
} else {
	struct xfs_agi *agi = XFS_BUF_TO_AGI(agibp);

	itotal += be32_to_cpu(agi->agi_count);
	ifree += be32_to_cpu(agi->agi_freecount);
	xfs_buf_relse(agibp);
}