/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"
#include "xfs_inode_item.h"
#include "xfs_extfree_item.h"
#include "xfs_trans_priv.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_quota.h"
#include "xfs_cksum.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_bmap_btree.h"
#include "xfs_error.h"
#include "xfs_rmap_item.h"
#include "xfs_buf_item.h"
#include "xfs_refcount_item.h"
#include "xfs_bmap_item.h"
#define BLK_AVG(blk1, blk2)	((blk1+blk2) >> 1)

STATIC int
xlog_clear_stale_blocks(
	struct xlog	*,
	xfs_lsn_t);

#if defined(DEBUG)
STATIC void
xlog_recover_check_summary(
	struct xlog *);
#else
#define	xlog_recover_check_summary(log)
#endif

STATIC int
xlog_do_recovery_pass(
	struct xlog *, xfs_daddr_t, xfs_daddr_t, int, xfs_daddr_t *);
/*
 * This structure is used during recovery to record the buf log items which
 * have been canceled and should not be replayed.
 */
struct xfs_buf_cancel {
	xfs_daddr_t		bc_blkno;
	uint			bc_len;
	int			bc_refcount;
	struct list_head	bc_list;
};
/*
 * Sector aligned buffer routines for buffer create/read/write/access
 */

/*
 * Verify the log-relative block number and length in basic blocks are valid for
 * an operation involving the given XFS log buffer. Returns true if the fields
 * are valid, false otherwise.
 */
static inline bool
xlog_verify_bp(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		bbcount)
{
	if (blk_no < 0 || blk_no >= log->l_logBBsize)
		return false;
	if (bbcount <= 0 || (blk_no + bbcount) > log->l_logBBsize)
		return false;
	return true;
}
/*
 * Allocate a buffer to hold log data.  The buffer needs to be able
 * to map to a range of nbblks basic blocks at any valid (basic
 * block) offset within the log.
 */
STATIC xfs_buf_t *
xlog_get_bp(
	struct xlog	*log,
	int		nbblks)
{
	struct xfs_buf	*bp;

	/*
	 * Pass log block 0 since we don't have an addr yet, buffer will be
	 * verified on read.
	 */
	if (!xlog_verify_bp(log, 0, nbblks)) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
			nbblks);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
		return NULL;
	}

	/*
	 * We do log I/O in units of log sectors (a power-of-2
	 * multiple of the basic block size), so we round up the
	 * requested size to accommodate the basic blocks required
	 * for complete log sectors.
	 *
	 * In addition, the buffer may be used for a non-sector-
	 * aligned block offset, in which case an I/O of the
	 * requested size could extend beyond the end of the
	 * buffer.  If the requested size is only 1 basic block it
	 * will never straddle a sector boundary, so this won't be
	 * an issue.  Nor will this be a problem if the log I/O is
	 * done in basic blocks (sector size 1).  But otherwise we
	 * extend the buffer by one extra log sector to ensure
	 * there's space to accommodate this possibility.
	 */
	if (nbblks > 1 && log->l_sectBBsize > 1)
		nbblks += log->l_sectBBsize;
	nbblks = round_up(nbblks, log->l_sectBBsize);

	bp = xfs_buf_get_uncached(log->l_mp->m_logdev_targp, nbblks, 0);
	if (bp)
		xfs_buf_unlock(bp);
	return bp;
}
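/*
 * Illustrative sketch, not part of the original source: the sizing rule
 * above written out as a standalone helper.  The helper name and the
 * plain integer arithmetic are ours; the real code uses round_up() on
 * the value computed in xlog_get_bp().  With 512 byte basic blocks and
 * l_sectBBsize = 8 (4k log sectors), a request for nbblks = 3 becomes
 * 3 + 8 = 11, rounded up to 16 basic blocks, so a misaligned 3 block
 * read still fits entirely inside the buffer.
 */
static inline int
example_padded_bbcount(
	int	nbblks,		/* basic blocks requested */
	int	sectbb)		/* log sector size in basic blocks */
{
	if (nbblks > 1 && sectbb > 1)
		nbblks += sectbb;	/* allow for a non-aligned start */
	/* round up to whole log sectors */
	return ((nbblks + sectbb - 1) / sectbb) * sectbb;
}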
/*
 * Return the address of the start of the given block number's data
 * in a log buffer.  The buffer covers a log sector-aligned region.
 */
static char *
xlog_align(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp)
{
	xfs_daddr_t	offset = blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1);

	ASSERT(offset + nbblks <= bp->b_length);
	return bp->b_addr + BBTOB(offset);
}
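/*
 * Illustrative sketch, not part of the original source: the alignment
 * math used by xlog_align() with concrete numbers.  Because buffers are
 * read at sector aligned addresses, the data for block blk_no lives
 * (blk_no mod l_sectBBsize) basic blocks into the buffer.  For
 * blk_no = 13 and l_sectBBsize = 8 this is 13 & 7 = 5 basic blocks,
 * i.e. 5 * 512 bytes from the buffer start (BBSHIFT is 9 for 512 byte
 * basic blocks).  The helper name is ours.
 */
static inline long long
example_align_byte_offset(
	long long	blk_no,
	int		sectbb)		/* power of two */
{
	return (blk_no & (sectbb - 1)) << 9;	/* BBTOB() equivalent */
}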
/*
 * nbblks should be uint, but oh well.  Just want to catch that 32-bit length.
 */
STATIC int
xlog_bread_noalign(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp)
{
	int		error;

	if (!xlog_verify_bp(log, blk_no, nbblks)) {
		xfs_warn(log->l_mp,
			 "Invalid log block/length (0x%llx, 0x%x) for buffer",
			 blk_no, nbblks);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
		return -EFSCORRUPTED;
	}

	blk_no = round_down(blk_no, log->l_sectBBsize);
	nbblks = round_up(nbblks, log->l_sectBBsize);

	ASSERT(nbblks > 0);
	ASSERT(nbblks <= bp->b_length);

	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
	bp->b_flags |= XBF_READ;
	bp->b_io_length = nbblks;
	bp->b_error = 0;

	error = xfs_buf_submit_wait(bp);
	if (error && !XFS_FORCED_SHUTDOWN(log->l_mp))
		xfs_buf_ioerror_alert(bp, __func__);
	return error;
}

STATIC int
xlog_bread(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp,
	char		**offset)
{
	int		error;

	error = xlog_bread_noalign(log, blk_no, nbblks, bp);
	if (error)
		return error;

	*offset = xlog_align(log, blk_no, nbblks, bp);
	return 0;
}
/*
 * Read at an offset into the buffer.  Returns with the buffer in its original
 * state regardless of the result of the read.
 */
STATIC int
xlog_bread_offset(
	struct xlog	*log,
	xfs_daddr_t	blk_no,		/* block to read from */
	int		nbblks,		/* blocks to read */
	struct xfs_buf	*bp,
	char		*offset)
{
	char		*orig_offset = bp->b_addr;
	int		orig_len = BBTOB(bp->b_length);
	int		error, error2;

	error = xfs_buf_associate_memory(bp, offset, BBTOB(nbblks));
	if (error)
		return error;

	error = xlog_bread_noalign(log, blk_no, nbblks, bp);

	/* must reset buffer pointer even on error */
	error2 = xfs_buf_associate_memory(bp, orig_offset, orig_len);
	if (error)
		return error;
	return error2;
}
/*
 * Write out the buffer at the given block for the given number of blocks.
 * The buffer is kept locked across the write and is returned locked.
 * This can only be used for synchronous log writes.
 */
STATIC int
xlog_bwrite(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp)
{
	int		error;

	if (!xlog_verify_bp(log, blk_no, nbblks)) {
		xfs_warn(log->l_mp,
			 "Invalid log block/length (0x%llx, 0x%x) for buffer",
			 blk_no, nbblks);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
		return -EFSCORRUPTED;
	}

	blk_no = round_down(blk_no, log->l_sectBBsize);
	nbblks = round_up(nbblks, log->l_sectBBsize);

	ASSERT(nbblks > 0);
	ASSERT(nbblks <= bp->b_length);

	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
	xfs_buf_hold(bp);
	xfs_buf_lock(bp);
	bp->b_io_length = nbblks;
	bp->b_error = 0;

	error = xfs_bwrite(bp);
	if (error)
		xfs_buf_ioerror_alert(bp, __func__);
	xfs_buf_relse(bp);
	return error;
}
#ifdef DEBUG
/*
 * dump debug superblock and log record information
 */
STATIC void
xlog_header_check_dump(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	xfs_debug(mp, "%s:  SB : uuid = %pU, fmt = %d",
		__func__, &mp->m_sb.sb_uuid, XLOG_FMT);
	xfs_debug(mp, "    log : uuid = %pU, fmt = %d",
		&head->h_fs_uuid, be32_to_cpu(head->h_fmt));
}
#else
#define xlog_header_check_dump(mp, head)
#endif
/*
 * check log record header for recovery
 */
STATIC int
xlog_header_check_recover(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

	/*
	 * IRIX doesn't write the h_fmt field and leaves it zeroed
	 * (XLOG_FMT_UNKNOWN).  This stops us from trying to recover
	 * a dirty log created in IRIX.
	 */
	if (unlikely(head->h_fmt != cpu_to_be32(XLOG_FMT))) {
		xfs_warn(mp,
	"dirty log written in incompatible format - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_recover(1)",
				 XFS_ERRLEVEL_HIGH, mp);
		return -EFSCORRUPTED;
	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
		xfs_warn(mp,
	"dirty log entry has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_recover(2)",
				 XFS_ERRLEVEL_HIGH, mp);
		return -EFSCORRUPTED;
	}
	return 0;
}
/*
 * read the head block of the log and check the header
 */
STATIC int
xlog_header_check_mount(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

	if (uuid_is_null(&head->h_fs_uuid)) {
		/*
		 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
		 * h_fs_uuid is null, we assume this log was last mounted
		 * by IRIX and continue.
		 */
		xfs_warn(mp, "null uuid in log - IRIX style log");
	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
		xfs_warn(mp, "log has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_mount",
				 XFS_ERRLEVEL_HIGH, mp);
		return -EFSCORRUPTED;
	}
	return 0;
}
STATIC void
xlog_recover_iodone(
	struct xfs_buf	*bp)
{
	if (bp->b_error) {
		/*
		 * We're not going to bother about retrying
		 * this during recovery. One strike!
		 */
		if (!XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
			xfs_buf_ioerror_alert(bp, __func__);
			xfs_force_shutdown(bp->b_target->bt_mount,
						SHUTDOWN_META_IO_ERROR);
		}
	}

	/*
	 * On v5 supers, a bli could be attached to update the metadata LSN.
	 * Clean it up.
	 */
	if (bp->b_log_item)
		xfs_buf_item_relse(bp);
	ASSERT(bp->b_log_item == NULL);

	bp->b_iodone = NULL;
	xfs_buf_ioend(bp);
}
/*
 * This routine finds (to an approximation) the first block in the physical
 * log which contains the given cycle.  It uses a binary search algorithm.
 * Note that the algorithm cannot be perfect because the disk will not
 * necessarily be perfect.
 */
STATIC int
xlog_find_cycle_start(
	struct xlog	*log,
	struct xfs_buf	*bp,
	xfs_daddr_t	first_blk,
	xfs_daddr_t	*last_blk,
	uint		cycle)
{
	char		*offset;
	xfs_daddr_t	mid_blk;
	xfs_daddr_t	end_blk;
	uint		mid_cycle;
	int		error;

	end_blk = *last_blk;
	mid_blk = BLK_AVG(first_blk, end_blk);
	while (mid_blk != first_blk && mid_blk != end_blk) {
		error = xlog_bread(log, mid_blk, 1, bp, &offset);
		if (error)
			return error;
		mid_cycle = xlog_get_cycle(offset);
		if (mid_cycle == cycle)
			end_blk = mid_blk;	/* last_half_cycle == mid_cycle */
		else
			first_blk = mid_blk;	/* first_half_cycle == mid_cycle */
		mid_blk = BLK_AVG(first_blk, end_blk);
	}
	ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
	       (mid_blk == end_blk && mid_blk-1 == first_blk));

	*last_blk = end_blk;

	return 0;
}
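/*
 * Illustrative sketch, not part of the original source: the bisection
 * above run over an in-memory array of per-block cycle numbers.  The
 * invariant is the one xlog_find_cycle_start() maintains: first_blk has
 * the old cycle, end_blk has the target cycle, and the loop narrows the
 * gap to a single block boundary, returning the (approximate) first
 * block carrying @cycle.  The helper name and plain array are ours.
 */
static long long
example_find_cycle_start(
	const unsigned int	*cycles,	/* cycle # of each block */
	long long		first_blk,	/* known: cycle differs */
	long long		end_blk,	/* known: cycle matches */
	unsigned int		cycle)
{
	long long	mid_blk = (first_blk + end_blk) >> 1;

	while (mid_blk != first_blk && mid_blk != end_blk) {
		if (cycles[mid_blk] == cycle)
			end_blk = mid_blk;	/* keep match on the right */
		else
			first_blk = mid_blk;	/* keep mismatch on the left */
		mid_blk = (first_blk + end_blk) >> 1;
	}
	return end_blk;		/* first block seen with @cycle */
}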
/*
 * Check that a range of blocks does not contain stop_on_cycle_no.
 * Fill in *new_blk with the block offset where such a block is
 * found, or with -1 (an invalid block number) if there is no such
 * block in the range.  The scan needs to occur from front to back
 * and the pointer into the region must be updated since a later
 * routine will need to perform another test.
 */
STATIC int
xlog_find_verify_cycle(
	struct xlog	*log,
	xfs_daddr_t	start_blk,
	int		nbblks,
	uint		stop_on_cycle_no,
	xfs_daddr_t	*new_blk)
{
	xfs_daddr_t	i, j;
	uint		cycle;
	xfs_buf_t	*bp;
	xfs_daddr_t	bufblks;
	char		*buf = NULL;
	int		error = 0;

	/*
	 * Greedily allocate a buffer big enough to handle the full
	 * range of basic blocks we'll be examining.  If that fails,
	 * try a smaller size.  We need to be able to read at least
	 * a log sector, or we're out of luck.
	 */
	bufblks = 1 << ffs(nbblks);
	while (bufblks > log->l_logBBsize)
		bufblks >>= 1;
	while (!(bp = xlog_get_bp(log, bufblks))) {
		bufblks >>= 1;
		if (bufblks < log->l_sectBBsize)
			return -ENOMEM;
	}

	for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
		int	bcount;

		bcount = min(bufblks, (start_blk + nbblks - i));

		error = xlog_bread(log, i, bcount, bp, &buf);
		if (error)
			goto out;

		for (j = 0; j < bcount; j++) {
			cycle = xlog_get_cycle(buf);
			if (cycle == stop_on_cycle_no) {
				*new_blk = i+j;
				goto out;
			}

			buf += BBSIZE;
		}
	}

	*new_blk = -1;

out:
	xlog_put_bp(bp);
	return error;
}
/*
 * Potentially back up over a partial log record write.
 *
 * In the typical case, last_blk is the number of the block directly after
 * a good log record.  Therefore, we subtract one to get the block number
 * of the last block in the given buffer.  extra_bblks contains the number
 * of blocks we would have read on a previous read.  This happens when the
 * last log record is split over the end of the physical log.
 *
 * extra_bblks is the number of blocks potentially verified on a previous
 * call to this routine.
 */
STATIC int
xlog_find_verify_log_record(
	struct xlog		*log,
	xfs_daddr_t		start_blk,
	xfs_daddr_t		*last_blk,
	int			extra_bblks)
{
	xfs_daddr_t		i;
	xfs_buf_t		*bp;
	char			*offset = NULL;
	xlog_rec_header_t	*head = NULL;
	int			error = 0;
	int			smallmem = 0;
	int			num_blks = *last_blk - start_blk;
	int			xhdrs;

	ASSERT(start_blk != 0 || *last_blk != start_blk);

	if (!(bp = xlog_get_bp(log, num_blks))) {
		if (!(bp = xlog_get_bp(log, 1)))
			return -ENOMEM;
		smallmem = 1;
	} else {
		error = xlog_bread(log, start_blk, num_blks, bp, &offset);
		if (error)
			goto out;
		offset += ((num_blks - 1) << BBSHIFT);
	}

	for (i = (*last_blk) - 1; i >= 0; i--) {
		if (i < start_blk) {
			/* valid log record not found */
			xfs_warn(log->l_mp,
		"Log inconsistent (didn't find previous header)");
			ASSERT(0);
			error = -EIO;
			goto out;
		}

		if (smallmem) {
			error = xlog_bread(log, i, 1, bp, &offset);
			if (error)
				goto out;
		}

		head = (xlog_rec_header_t *)offset;

		if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
			break;

		if (!smallmem)
			offset -= BBSIZE;
	}

	/*
	 * We hit the beginning of the physical log & still no header.  Return
	 * to caller.  If caller can handle a return of -1, then this routine
	 * will be called again for the end of the physical log.
	 */
	if (i == -1) {
		error = 1;
		goto out;
	}

	/*
	 * We have the final block of the good log (the first block
	 * of the log record _before_ the head), so we check the uuid.
	 */
	if ((error = xlog_header_check_mount(log->l_mp, head)))
		goto out;

	/*
	 * We may have found a log record header before we expected one.
	 * last_blk will be the 1st block # with a given cycle #.  We may end
	 * up reading an entire log record.  In this case, we don't want to
	 * reset last_blk.  Only when last_blk points in the middle of a log
	 * record do we update last_blk.
	 */
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		uint	h_size = be32_to_cpu(head->h_size);

		xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE;
		if (h_size % XLOG_HEADER_CYCLE_SIZE)
			xhdrs++;
	} else {
		xhdrs = 1;
	}

	if (*last_blk - i + extra_bblks !=
	    BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
		*last_blk = i;

out:
	xlog_put_bp(bp);
	return error;
}
/*
 * Head is defined to be the point of the log where the next log write
 * could go.  This means that incomplete LR writes at the end are
 * eliminated when calculating the head.  We aren't guaranteed that previous
 * LRs have complete transactions.  We only know that a cycle number of
 * current cycle number -1 won't be present in the log if we start writing
 * from our current block number.
 *
 * last_blk contains the block number of the first block with a given
 * cycle #.
 *
 * Return: zero if normal, non-zero if error.
 */
STATIC int
xlog_find_head(
	struct xlog	*log,
	xfs_daddr_t	*return_head_blk)
{
	xfs_buf_t	*bp;
	char		*offset;
	xfs_daddr_t	new_blk, first_blk, start_blk, last_blk, head_blk;
	int		num_scan_bblks;
	uint		first_half_cycle, last_half_cycle;
	uint		stop_on_cycle;
	int		error, log_bbnum = log->l_logBBsize;

	/* Is the end of the log device zeroed? */
	error = xlog_find_zeroed(log, &first_blk);
	if (error < 0) {
		xfs_warn(log->l_mp, "empty log check failed");
		return error;
	}
	if (error == 1) {
		*return_head_blk = first_blk;

		/* Is the whole lot zeroed? */
		if (!first_blk) {
			/* Linux XFS shouldn't generate totally zeroed logs -
			 * mkfs etc write a dummy unmount record to a fresh
			 * log so we can store the uuid in there
			 */
			xfs_warn(log->l_mp, "totally zeroed log");
		}

		return 0;
	}

	first_blk = 0;			/* get cycle # of 1st block */
	bp = xlog_get_bp(log, 1);
	if (!bp)
		return -ENOMEM;

	error = xlog_bread(log, 0, 1, bp, &offset);
	if (error)
		goto bp_err;

	first_half_cycle = xlog_get_cycle(offset);

	last_blk = head_blk = log_bbnum - 1;	/* get cycle # of last block */
	error = xlog_bread(log, last_blk, 1, bp, &offset);
	if (error)
		goto bp_err;

	last_half_cycle = xlog_get_cycle(offset);
	ASSERT(last_half_cycle != 0);

	/*
	 * If the 1st half cycle number is equal to the last half cycle number,
	 * then the entire log is stamped with the same cycle number.  In this
	 * case, head_blk can't be set to zero (which makes sense).  The below
	 * math doesn't work out properly with head_blk equal to zero.  Instead,
	 * we set it to log_bbnum which is an invalid block number, but this
	 * value makes the math correct.  If head_blk doesn't change through
	 * all the tests below, *head_blk is set to zero at the very end rather
	 * than log_bbnum.  In a sense, log_bbnum and zero are the same block
	 * in a circular file.
	 */
	if (first_half_cycle == last_half_cycle) {
		/*
		 * In this case we believe that the entire log should have
		 * cycle number last_half_cycle.  We need to scan backwards
		 * from the end verifying that there are no holes still
		 * containing last_half_cycle - 1.  If we find such a hole,
		 * then the start of that hole will be the new head.  The
		 * simple case looks like
		 *        x | x ... | x - 1 | x
		 * Another case that fits this picture would be
		 *        x | x + 1 | x ... | x
		 * In this case the head really is somewhere at the end of the
		 * log, as one of the latest writes at the beginning was
		 * incomplete.
		 * One more case is
		 *        x | x + 1 | x ... | x - 1 | x
		 * This is really the combination of the above two cases, and
		 * the head has to end up at the start of the x-1 hole at the
		 * end of the log.
		 *
		 * In the 256k log case, we will read from the beginning to the
		 * end of the log and search for cycle numbers equal to x-1.
		 * We don't worry about the x+1 blocks that we encounter,
		 * because we know that they cannot be the head since the log
		 * started with x.
		 */
		head_blk = log_bbnum;
		stop_on_cycle = last_half_cycle - 1;
	} else {
		/*
		 * In this case we want to find the first block with cycle
		 * number matching last_half_cycle.  We expect the log to be
		 * some variation on
		 *        x + 1 ... | x ... | x
		 * The first block with cycle number x (last_half_cycle) will
		 * be where the new head belongs.  First we do a binary search
		 * for the first occurrence of last_half_cycle.  The binary
		 * search may not be totally accurate, so then we scan back
		 * from there looking for occurrences of last_half_cycle before
		 * us.  If that backwards scan wraps around the beginning of
		 * the log, then we look for occurrences of last_half_cycle - 1
		 * at the end of the log.  The cases we're looking for look
		 * like
		 *                               v binary search stopped here
		 *        x + 1 ... | x |  x + 1 | x ... | x
		 *                   ^ but we want to locate this spot
		 * or
		 *        <---------> less than scan distance
		 *        x + 1 ... | x ... | x - 1 | x
		 *                           ^ we want to locate this spot
		 */
		stop_on_cycle = last_half_cycle;
		if ((error = xlog_find_cycle_start(log, bp, first_blk,
						&head_blk, last_half_cycle)))
			goto bp_err;
	}

	/*
	 * Now validate the answer.  Scan back some number of maximum possible
	 * blocks and make sure each one has the expected cycle number.  The
	 * maximum is determined by the total possible amount of buffering
	 * in the in-core log.  The following number can be made tighter if
	 * we actually look at the block size of the filesystem.
	 */
	num_scan_bblks = min_t(int, log_bbnum, XLOG_TOTAL_REC_SHIFT(log));
	if (head_blk >= num_scan_bblks) {
		/*
		 * We are guaranteed that the entire check can be performed
		 * in one buffer.
		 */
		start_blk = head_blk - num_scan_bblks;
		if ((error = xlog_find_verify_cycle(log,
						start_blk, num_scan_bblks,
						stop_on_cycle, &new_blk)))
			goto bp_err;
		if (new_blk != -1)
			head_blk = new_blk;
	} else {		/* need to read 2 parts of log */
		/*
		 * We are going to scan backwards in the log in two parts.
		 * First we scan the physical end of the log.  In this part
		 * of the log, we are looking for blocks with cycle number
		 * last_half_cycle - 1.
		 * If we find one, then we know that the log starts there, as
		 * we've found a hole that didn't get written in going around
		 * the end of the physical log.  The simple case for this is
		 *        x + 1 ... | x ... | x - 1 | x
		 *        <---------> less than scan distance
		 * If all of the blocks at the end of the log have cycle number
		 * last_half_cycle, then we check the blocks at the start of
		 * the log looking for occurrences of last_half_cycle.  If we
		 * find one, then our current estimate for the location of the
		 * first occurrence of last_half_cycle is wrong and we move
		 * back to the hole we've found.  This case looks like
		 *        x + 1 ... | x | x + 1 | x ...
		 *                       ^ binary search stopped here
		 * Another case we need to handle that only occurs in 256k
		 * logs is
		 *        x + 1 ... | x ... | x+1 | x ...
		 *                   ^ binary search stops here
		 * In a 256k log, the scan at the end of the log will see the
		 * x + 1 blocks.  We need to skip past those since that is
		 * certainly not the head of the log.  By searching for
		 * last_half_cycle-1 we accomplish that.
		 */
		ASSERT(head_blk <= INT_MAX &&
			(xfs_daddr_t) num_scan_bblks >= head_blk);
		start_blk = log_bbnum - (num_scan_bblks - head_blk);
		if ((error = xlog_find_verify_cycle(log, start_blk,
					num_scan_bblks - (int)head_blk,
					(stop_on_cycle - 1), &new_blk)))
			goto bp_err;
		if (new_blk != -1) {
			head_blk = new_blk;
			goto validate_head;
		}

		/*
		 * Scan beginning of log now.  The last part of the physical
		 * log is good.  This scan needs to verify that it doesn't find
		 * the last_half_cycle.
		 */
		start_blk = 0;
		ASSERT(head_blk <= INT_MAX);
		if ((error = xlog_find_verify_cycle(log,
					start_blk, (int)head_blk,
					stop_on_cycle, &new_blk)))
			goto bp_err;
		if (new_blk != -1)
			head_blk = new_blk;
	}

validate_head:
	/*
	 * Now we need to make sure head_blk is not pointing to a block in
	 * the middle of a log record.
	 */
	num_scan_bblks = XLOG_REC_SHIFT(log);
	if (head_blk >= num_scan_bblks) {
		start_blk = head_blk - num_scan_bblks; /* don't read head_blk */

		/* start ptr at last block ptr before head_blk */
		error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
		if (error == 1)
			error = -EIO;
		if (error)
			goto bp_err;
	} else {
		start_blk = 0;
		ASSERT(head_blk <= INT_MAX);
		error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
		if (error < 0)
			goto bp_err;
		if (error == 1) {
			/* We hit the beginning of the log during our search */
			start_blk = log_bbnum - (num_scan_bblks - head_blk);
			new_blk = log_bbnum;
			ASSERT(start_blk <= INT_MAX &&
				(xfs_daddr_t) log_bbnum-start_blk >= 0);
			ASSERT(head_blk <= INT_MAX);
			error = xlog_find_verify_log_record(log, start_blk,
							&new_blk, (int)head_blk);
			if (error == 1)
				error = -EIO;
			if (error)
				goto bp_err;
			if (new_blk != log_bbnum)
				head_blk = new_blk;
		} else if (error)
			goto bp_err;
	}

	xlog_put_bp(bp);
	if (head_blk == log_bbnum)
		*return_head_blk = 0;
	else
		*return_head_blk = head_blk;
	/*
	 * When returning here, we have a good block number.  Bad block
	 * means that during a previous crash, we didn't have a clean break
	 * from cycle number N to cycle number N-1.  In this case, we need
	 * to find the first block with cycle number N-1.
	 */
	return 0;

 bp_err:
	xlog_put_bp(bp);

	if (error)
		xfs_warn(log->l_mp, "failed to find log head");
	return error;
}
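/*
 * Illustrative sketch, not part of the original source: the validation
 * idea behind xlog_find_verify_cycle() in miniature.  Scan a window of
 * blocks before the candidate head; if any block still carries the
 * cycle number we are supposed to have left behind, that hole marks the
 * real head and the candidate must move back to it.  The helper name
 * and the plain array are ours; wrap-around is handled by the caller in
 * the real code.
 */
static long long
example_verify_head(
	const unsigned int	*cycles,	/* cycle # of each block */
	long long		head_blk,	/* candidate head */
	int			num_scan_bblks,	/* window to re-check */
	unsigned int		stop_cycle)	/* cycle that must be gone */
{
	long long	i;

	for (i = head_blk - num_scan_bblks; i < head_blk; i++) {
		if (i >= 0 && cycles[i] == stop_cycle)
			return i;	/* hole found: new head */
	}
	return head_blk;		/* candidate confirmed */
}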
/*
 * Seek backwards in the log for log record headers.
 *
 * Given a starting log block, walk backwards until we find the provided number
 * of records or hit the provided tail block. The return value is the number of
 * records encountered or a negative error code. The log block and buffer
 * pointer of the last record seen are returned in rblk and rhead respectively.
 */
STATIC int
xlog_rseek_logrec_hdr(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		tail_blk,
	int			count,
	struct xfs_buf		*bp,
	xfs_daddr_t		*rblk,
	struct xlog_rec_header	**rhead,
	bool			*wrapped)
{
	int			i;
	int			error;
	int			found = 0;
	char			*offset = NULL;
	xfs_daddr_t		end_blk;

	*wrapped = false;

	/*
	 * Walk backwards from the head block until we hit the tail or the first
	 * block in the log.
	 */
	end_blk = head_blk > tail_blk ? tail_blk : 0;
	for (i = (int) head_blk - 1; i >= end_blk; i--) {
		error = xlog_bread(log, i, 1, bp, &offset);
		if (error)
			goto out_error;

		if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
			*rblk = i;
			*rhead = (struct xlog_rec_header *) offset;
			if (++found == count)
				break;
		}
	}

	/*
	 * If we haven't hit the tail block or the log record header count,
	 * start looking again from the end of the physical log. Note that
	 * callers can pass head == tail if the tail is not yet known.
	 */
	if (tail_blk >= head_blk && found != count) {
		for (i = log->l_logBBsize - 1; i >= (int) tail_blk; i--) {
			error = xlog_bread(log, i, 1, bp, &offset);
			if (error)
				goto out_error;

			if (*(__be32 *)offset ==
			    cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
				*wrapped = true;
				*rblk = i;
				*rhead = (struct xlog_rec_header *) offset;
				if (++found == count)
					break;
			}
		}
	}

	return found;

out_error:
	return error;
}
/*
 * Seek forward in the log for log record headers.
 *
 * Given head and tail blocks, walk forward from the tail block until we find
 * the provided number of records or hit the head block. The return value is the
 * number of records encountered or a negative error code. The log block and
 * buffer pointer of the last record seen are returned in rblk and rhead
 * respectively.
 */
STATIC int
xlog_seek_logrec_hdr(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		tail_blk,
	int			count,
	struct xfs_buf		*bp,
	xfs_daddr_t		*rblk,
	struct xlog_rec_header	**rhead,
	bool			*wrapped)
{
	int			i;
	int			error;
	int			found = 0;
	char			*offset = NULL;
	xfs_daddr_t		end_blk;

	*wrapped = false;

	/*
	 * Walk forward from the tail block until we hit the head or the last
	 * block in the log.
	 */
	end_blk = head_blk > tail_blk ? head_blk : log->l_logBBsize - 1;
	for (i = (int) tail_blk; i <= end_blk; i++) {
		error = xlog_bread(log, i, 1, bp, &offset);
		if (error)
			goto out_error;

		if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
			*rblk = i;
			*rhead = (struct xlog_rec_header *) offset;
			if (++found == count)
				break;
		}
	}

	/*
	 * If we haven't hit the head block or the log record header count,
	 * start looking again from the start of the physical log.
	 */
	if (tail_blk > head_blk && found != count) {
		for (i = 0; i < (int) head_blk; i++) {
			error = xlog_bread(log, i, 1, bp, &offset);
			if (error)
				goto out_error;

			if (*(__be32 *)offset ==
			    cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
				*wrapped = true;
				*rblk = i;
				*rhead = (struct xlog_rec_header *) offset;
				if (++found == count)
					break;
			}
		}
	}

	return found;

out_error:
	return error;
}
/*
 * Calculate distance from head to tail (i.e., unused space in the log).
 */
static inline int
xlog_tail_distance(
	struct xlog	*log,
	xfs_daddr_t	head_blk,
	xfs_daddr_t	tail_blk)
{
	if (head_blk < tail_blk)
		return tail_blk - head_blk;

	return tail_blk + (log->l_logBBsize - head_blk);
}
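/*
 * Illustrative sketch, not part of the original source: the circular
 * distance computed by xlog_tail_distance() with concrete numbers.  In
 * a 1000 block log with head_blk = 900 and tail_blk = 100, the head has
 * wrapped, so the unused space is 100 + (1000 - 900) = 200 blocks; with
 * head_blk = 100 and tail_blk = 900 it is simply 900 - 100 = 800.  The
 * helper name is ours.
 */
static inline long long
example_tail_distance(
	long long	log_bbsize,	/* physical log size in bblocks */
	long long	head_blk,
	long long	tail_blk)
{
	if (head_blk < tail_blk)
		return tail_blk - head_blk;
	return tail_blk + (log_bbsize - head_blk);
}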
/*
 * Verify the log tail. This is particularly important when torn or incomplete
 * writes have been detected near the front of the log and the head has been
 * walked back accordingly.
 *
 * We also have to handle the case where the tail was pinned and the head
 * blocked behind the tail right before a crash. If the tail had been pushed
 * immediately prior to the crash and the subsequent checkpoint was only
 * partially written, it's possible it overwrote the last referenced tail in the
 * log with garbage. This is not a coherency problem because the tail must have
 * been pushed before it can be overwritten, but appears as log corruption to
 * recovery because we have no way to know the tail was updated if the
 * subsequent checkpoint didn't write successfully.
 *
 * Therefore, CRC check the log from tail to head. If a failure occurs and the
 * offending record is within max iclog bufs from the head, walk the tail
 * forward and retry until a valid tail is found or corruption is detected out
 * of the range of a possible overwrite.
 */
STATIC int
xlog_verify_tail(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		*tail_blk,
	int			hsize)
{
	struct xlog_rec_header	*thead;
	struct xfs_buf		*bp;
	xfs_daddr_t		first_bad;
	int			error = 0;
	bool			wrapped;
	xfs_daddr_t		tmp_tail;
	xfs_daddr_t		orig_tail = *tail_blk;

	bp = xlog_get_bp(log, 1);
	if (!bp)
		return -ENOMEM;

	/*
	 * Make sure the tail points to a record (returns positive count on
	 * success).
	 */
	error = xlog_seek_logrec_hdr(log, head_blk, *tail_blk, 1, bp,
			&tmp_tail, &thead, &wrapped);
	if (error < 0)
		goto out;
	if (*tail_blk != tmp_tail)
		*tail_blk = tmp_tail;

	/*
	 * Run a CRC check from the tail to the head. We can't just check
	 * MAX_ICLOGS records past the tail because the tail may point to stale
	 * blocks cleared during the search for the head/tail. These blocks are
	 * overwritten with zero-length records and thus record count is not a
	 * reliable indicator of the iclog state before a crash.
	 */
	first_bad = 0;
	error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
				      XLOG_RECOVER_CRCPASS, &first_bad);
	while ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) {
		int	tail_distance;

		/*
		 * Is corruption within range of the head? If so, retry from
		 * the next record. Otherwise return an error.
		 */
		tail_distance = xlog_tail_distance(log, head_blk, first_bad);
		if (tail_distance > BTOBB(XLOG_MAX_ICLOGS * hsize))
			break;

		/* skip to the next record; returns positive count on success */
		error = xlog_seek_logrec_hdr(log, head_blk, first_bad, 2, bp,
				&tmp_tail, &thead, &wrapped);
		if (error < 0)
			goto out;

		*tail_blk = tmp_tail;
		first_bad = 0;
		error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
					      XLOG_RECOVER_CRCPASS, &first_bad);
	}

	if (!error && *tail_blk != orig_tail)
		xfs_warn(log->l_mp,
		"Tail block (0x%llx) overwrite detected. Updated to 0x%llx",
			 orig_tail, *tail_blk);
out:
	xlog_put_bp(bp);
	return error;
}
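/*
 * Illustrative sketch, not part of the original source: the policy test
 * xlog_verify_tail() applies when a CRC failure is found.  Only records
 * within the span that XLOG_MAX_ICLOGS in-flight log buffers could have
 * overwritten are treated as a stale tail; anything further out is real
 * corruption.  The helper name is ours; h_size is the iclog size in
 * bytes and the division mirrors BTOBB() (bytes to 512 byte blocks).
 */
static inline int
example_tail_overwrite_possible(
	long long	tail_distance,	/* head to bad record, in bblocks */
	int		max_iclogs,	/* XLOG_MAX_ICLOGS */
	int		h_size)		/* iclog size in bytes */
{
	/* BTOBB(max_iclogs * h_size), rounded up to whole basic blocks */
	return tail_distance <= ((long long)max_iclogs * h_size + 511) / 512;
}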
/*
 * Detect and trim torn writes from the head of the log.
 *
 * Storage without sector atomicity guarantees can result in torn writes in the
 * log in the event of a crash. Our only means to detect this scenario is via
 * CRC verification. While we can't always be certain that CRC verification
 * failure is due to a torn write vs. an unrelated corruption, we do know that
 * only a certain number (XLOG_MAX_ICLOGS) of log records can be written out at
 * one time. Therefore, CRC verify up to XLOG_MAX_ICLOGS records at the head of
 * the log and treat failures in this range as torn writes as a matter of
 * policy. In the event of CRC failure, the head is walked back to the last good
 * record in the log and the tail is updated from that record and verified.
 */
STATIC int
xlog_verify_head(
	struct xlog		*log,
	xfs_daddr_t		*head_blk,	/* in/out: unverified head */
	xfs_daddr_t		*tail_blk,	/* out: tail block */
	struct xfs_buf		*bp,
	xfs_daddr_t		*rhead_blk,	/* start blk of last record */
	struct xlog_rec_header	**rhead,	/* ptr to last record */
	bool			*wrapped)	/* last rec. wraps phys. log */
{
	struct xlog_rec_header	*tmp_rhead;
	struct xfs_buf		*tmp_bp;
	xfs_daddr_t		first_bad;
	xfs_daddr_t		tmp_rhead_blk;
	int			found;
	int			error;
	bool			tmp_wrapped;

	/*
	 * Check the head of the log for torn writes. Search backwards from the
	 * head until we hit the tail or the maximum number of log record I/Os
	 * that could have been in flight at one time. Use a temporary buffer so
	 * we don't trash the rhead/bp pointers from the caller.
	 */
	tmp_bp = xlog_get_bp(log, 1);
	if (!tmp_bp)
		return -ENOMEM;
	error = xlog_rseek_logrec_hdr(log, *head_blk, *tail_blk,
				      XLOG_MAX_ICLOGS, tmp_bp, &tmp_rhead_blk,
				      &tmp_rhead, &tmp_wrapped);
	xlog_put_bp(tmp_bp);
	if (error < 0)
		return error;

	/*
	 * Now run a CRC verification pass over the records starting at the
	 * block found above to the current head. If a CRC failure occurs, the
	 * log block of the first bad record is saved in first_bad.
	 */
	error = xlog_do_recovery_pass(log, *head_blk, tmp_rhead_blk,
				      XLOG_RECOVER_CRCPASS, &first_bad);
	if ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) {
		/*
		 * We've hit a potential torn write. Reset the error and warn
		 * about it.
		 */
		error = 0;
		xfs_warn(log->l_mp,
"Torn write (CRC failure) detected at log block 0x%llx. Truncating head block from 0x%llx.",
			 first_bad, *head_blk);

		/*
		 * Get the header block and buffer pointer for the last good
		 * record before the bad record.
		 *
		 * Note that xlog_find_tail() clears the blocks at the new head
		 * (i.e., the records with invalid CRC) if the cycle number
		 * matches the current cycle.
		 */
		found = xlog_rseek_logrec_hdr(log, first_bad, *tail_blk, 1, bp,
					      rhead_blk, rhead, wrapped);
		if (found < 0)
			return found;
		if (found == 0)		/* XXX: right thing to do here? */
			return -EIO;

		/*
		 * Reset the head block to the starting block of the first bad
		 * log record and set the tail block based on the last good
		 * record.
		 *
		 * Bail out if the updated head/tail match as this indicates
		 * possible corruption outside of the acceptable
		 * (XLOG_MAX_ICLOGS) range. This is a job for xfs_repair...
		 */
		*head_blk = first_bad;
		*tail_blk = BLOCK_LSN(be64_to_cpu((*rhead)->h_tail_lsn));
		if (*head_blk == *tail_blk) {
			ASSERT(0);
			return 0;
		}
	}
	if (error)
		return error;

	return xlog_verify_tail(log, *head_blk, tail_blk,
				be32_to_cpu((*rhead)->h_size));
}
/*
 * Check whether the head of the log points to an unmount record. In other
 * words, determine whether the log is clean. If so, update the in-core state
 * appropriately.
 */
static int
xlog_check_unmount_rec(
	struct xlog		*log,
	xfs_daddr_t		*head_blk,
	xfs_daddr_t		*tail_blk,
	struct xlog_rec_header	*rhead,
	xfs_daddr_t		rhead_blk,
	struct xfs_buf		*bp,
	bool			*clean)
{
	struct xlog_op_header	*op_head;
	xfs_daddr_t		umount_data_blk;
	xfs_daddr_t		after_umount_blk;
	int			hblks;
	int			error;
	char			*offset;

	*clean = false;

	/*
	 * Look for unmount record. If we find it, then we know there was a
	 * clean unmount. Since 'i' could be the last block in the physical
	 * log, we convert to a log block before comparing to the head_blk.
	 *
	 * Save the current tail lsn to use to pass to xlog_clear_stale_blocks()
	 * below. We won't want to clear the unmount record if there is one, so
	 * we pass the lsn of the unmount record rather than the block after it.
	 */
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		int	h_size = be32_to_cpu(rhead->h_size);
		int	h_version = be32_to_cpu(rhead->h_version);

		if ((h_version & XLOG_VERSION_2) &&
		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
			if (h_size % XLOG_HEADER_CYCLE_SIZE)
				hblks++;
		} else {
			hblks = 1;
		}
	} else {
		hblks = 1;
	}
	after_umount_blk = rhead_blk + hblks + BTOBB(be32_to_cpu(rhead->h_len));
	after_umount_blk = do_mod(after_umount_blk, log->l_logBBsize);
	if (*head_blk == after_umount_blk &&
	    be32_to_cpu(rhead->h_num_logops) == 1) {
		umount_data_blk = rhead_blk + hblks;
		umount_data_blk = do_mod(umount_data_blk, log->l_logBBsize);
		error = xlog_bread(log, umount_data_blk, 1, bp, &offset);
		if (error)
			return error;

		op_head = (struct xlog_op_header *)offset;
		if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
			/*
			 * Set tail and last sync so that newly written log
			 * records will point recovery to after the current
			 * unmount record.
			 */
			xlog_assign_atomic_lsn(&log->l_tail_lsn,
					log->l_curr_cycle, after_umount_blk);
			xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
					log->l_curr_cycle, after_umount_blk);
			*tail_blk = after_umount_blk;

			*clean = true;
		}
	}

	return 0;
}
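/*
 * Illustrative sketch, not part of the original source: the wrap-around
 * arithmetic used above.  The unmount record sits in the block right
 * after the hblks header blocks of the record at rhead_blk, and both
 * that block number and the block after the whole record must be taken
 * modulo the physical log size, which is what do_mod() does in the real
 * code.  The helper name is ours.
 */
static void
example_umount_blocks(
	long long	rhead_blk,	/* block of the record header */
	int		hblks,		/* record header size in bblocks */
	int		len_bblocks,	/* record body length in bblocks */
	long long	log_bbsize,	/* physical log size in bblocks */
	long long	*umount_data_blk,
	long long	*after_umount_blk)
{
	*umount_data_blk = (rhead_blk + hblks) % log_bbsize;
	*after_umount_blk = (rhead_blk + hblks + len_bblocks) % log_bbsize;
}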
static void
xlog_set_state(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	struct xlog_rec_header	*rhead,
	xfs_daddr_t		rhead_blk,
	bool			bump_cycle)
{
	/*
	 * Reset log values according to the state of the log when we
	 * crashed.  In the case where head_blk == 0, we bump curr_cycle
	 * one because the next write starts a new cycle rather than
	 * continuing the cycle of the last good log record.  At this
	 * point we have guaranteed that all partial log records have been
	 * accounted for.  Therefore, we know that the last good log record
	 * written was complete and ended exactly on the end boundary
	 * of the physical log.
	 */
	log->l_prev_block = rhead_blk;
	log->l_curr_block = (int)head_blk;
	log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
	if (bump_cycle)
		log->l_curr_cycle++;
	atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
	atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
	xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
					BBTOB(log->l_curr_block));
	xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
					BBTOB(log->l_curr_block));
}
/*
 * Find the sync block number or the tail of the log.
 *
 * This will be the block number of the last record to have its
 * associated buffers synced to disk.  Every log record header has
 * a sync lsn embedded in it.  LSNs hold block numbers, so it is easy
 * to get a sync block number.  The only concern is to figure out which
 * log record header to believe.
 *
 * The following algorithm uses the log record header with the largest
 * lsn.  The entire log record does not need to be valid.  We only care
 * that the header is valid.
 *
 * We could speed up search by using current head_blk buffer, but it is not
 * available.
 */
STATIC int
xlog_find_tail(
	struct xlog		*log,
	xfs_daddr_t		*head_blk,
	xfs_daddr_t		*tail_blk)
{
	xlog_rec_header_t	*rhead;
	char			*offset = NULL;
	xfs_buf_t		*bp;
	int			error;
	xfs_daddr_t		rhead_blk;
	xfs_lsn_t		tail_lsn;
	bool			wrapped = false;
	bool			clean = false;

	/*
	 * Find previous log record
	 */
	if ((error = xlog_find_head(log, head_blk)))
		return error;
	ASSERT(*head_blk < INT_MAX);

	bp = xlog_get_bp(log, 1);
	if (!bp)
		return -ENOMEM;
	if (*head_blk == 0) {				/* special case */
		error = xlog_bread(log, 0, 1, bp, &offset);
		if (error)
			goto done;

		if (xlog_get_cycle(offset) == 0) {
			*tail_blk = 0;
			/* leave all other log inited values alone */
			goto done;
		}
	}

	/*
	 * Search backwards through the log looking for the log record header
	 * block. This wraps all the way back around to the head so something is
	 * seriously wrong if we can't find it.
	 */
	error = xlog_rseek_logrec_hdr(log, *head_blk, *head_blk, 1, bp,
				      &rhead_blk, &rhead, &wrapped);
	if (error < 0)
		return error;
	if (!error) {
		xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
		return -EIO;
	}
	*tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));

	/*
	 * Set the log state based on the current head record.
	 */
	xlog_set_state(log, *head_blk, rhead, rhead_blk, wrapped);
	tail_lsn = atomic64_read(&log->l_tail_lsn);

	/*
	 * Look for an unmount record at the head of the log. This sets the log
	 * state to determine whether recovery is necessary.
	 */
	error = xlog_check_unmount_rec(log, head_blk, tail_blk, rhead,
				       rhead_blk, bp, &clean);
	if (error)
		goto done;

	/*
	 * Verify the log head if the log is not clean (e.g., we have anything
	 * but an unmount record at the head). This uses CRC verification to
	 * detect and trim torn writes. If discovered, CRC failures are
	 * considered torn writes and the log head is trimmed accordingly.
	 *
	 * Note that we can only run CRC verification when the log is dirty
	 * because there's no guarantee that the log data behind an unmount
	 * record is compatible with the current architecture.
	 */
	if (!clean) {
		xfs_daddr_t	orig_head = *head_blk;

		error = xlog_verify_head(log, head_blk, tail_blk, bp,
					 &rhead_blk, &rhead, &wrapped);
		if (error)
			goto done;

		/* update in-core state again if the head changed */
		if (*head_blk != orig_head) {
			xlog_set_state(log, *head_blk, rhead, rhead_blk,
				       wrapped);
			tail_lsn = atomic64_read(&log->l_tail_lsn);
			error = xlog_check_unmount_rec(log, head_blk, tail_blk,
						       rhead, rhead_blk, bp,
						       &clean);
			if (error)
				goto done;
		}
	}

	/*
	 * Note that the unmount was clean. If the unmount was not clean, we
	 * need to know this to rebuild the superblock counters from the perag
	 * headers if we have a filesystem using non-persistent counters.
	 */
	if (clean)
		log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;

	/*
	 * Make sure that there are no blocks in front of the head
	 * with the same cycle number as the head.  This can happen
	 * because we allow multiple outstanding log writes concurrently,
	 * and the later writes might make it out before earlier ones.
	 *
	 * We use the lsn from before modifying it so that we'll never
	 * overwrite the unmount record after a clean unmount.
	 *
	 * Do this only if we are going to recover the filesystem
	 *
	 * NOTE: This used to say "if (!readonly)"
	 * However on Linux, we can & do recover a read-only filesystem.
	 * We only skip recovery if NORECOVERY is specified on mount,
	 * in which case we would not be here.
	 *
	 * But... if the -device- itself is readonly, just skip this.
	 * We can't recover this device anyway, so it won't matter.
	 */
	if (!xfs_readonly_buftarg(log->l_mp->m_logdev_targp))
		error = xlog_clear_stale_blocks(log, tail_lsn);

done:
	xlog_put_bp(bp);

	if (error)
		xfs_warn(log->l_mp, "failed to locate log tail");
	return error;
}
/*
 * Is the log zeroed at all?
 *
 * The last binary search should be changed to perform an X block read
 * once X becomes small enough.  You can then search linearly through
 * the X blocks.  This will cut down on the number of reads we need to do.
 *
 * If the log is partially zeroed, this routine will pass back the blkno
 * of the first block with cycle number 0.  It won't have a complete LR
 * even in that block.
 *
 * Return:
 *	0  => the log is completely written to
 *	1  => use *blk_no as the first block of the log
 *	<0 => error has occurred
 */
STATIC int
xlog_find_zeroed(
	struct xlog	*log,
	xfs_daddr_t	*blk_no)
{
	xfs_buf_t	*bp;
	char		*offset;
	uint		first_cycle, last_cycle;
	xfs_daddr_t	new_blk, last_blk, start_blk;
	xfs_daddr_t	num_scan_bblks;
	int		error, log_bbnum = log->l_logBBsize;

	*blk_no = 0;

	/* check totally zeroed log */
	bp = xlog_get_bp(log, 1);
	if (!bp)
		return -ENOMEM;
	error = xlog_bread(log, 0, 1, bp, &offset);
	if (error)
		goto bp_err;

	first_cycle = xlog_get_cycle(offset);
	if (first_cycle == 0) {		/* completely zeroed log */
		*blk_no = 0;
		xlog_put_bp(bp);
		return 1;
	}

	/* check partially zeroed log */
	error = xlog_bread(log, log_bbnum-1, 1, bp, &offset);
	if (error)
		goto bp_err;

	last_cycle = xlog_get_cycle(offset);
	if (last_cycle != 0) {		/* log completely written to */
		xlog_put_bp(bp);
		return 0;
	} else if (first_cycle != 1) {
		/*
		 * If the cycle of the last block is zero, the cycle of
		 * the first block must be 1. If it's not, maybe we're
		 * not looking at a log... Bail out.
		 */
		xfs_warn(log->l_mp,
			"Log inconsistent or not a log (last==0, first!=1)");
		error = -EINVAL;
		goto bp_err;
	}

	/* we have a partially zeroed log */
	last_blk = log_bbnum-1;
	if ((error = xlog_find_cycle_start(log, bp, 0, &last_blk, 0)))
		goto bp_err;

	/*
	 * Validate the answer.  Because there is no way to guarantee that
	 * the entire log is made up of log records which are the same size,
	 * we scan over the defined maximum blocks.  At this point, the maximum
	 * is not chosen to mean anything special.   XXXmiken
	 */
	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
	ASSERT(num_scan_bblks <= INT_MAX);

	if (last_blk < num_scan_bblks)
		num_scan_bblks = last_blk;
	start_blk = last_blk - num_scan_bblks;

	/*
	 * We search for any instances of cycle number 0 that occur before
	 * our current estimate of the head.  What we're trying to detect is
	 *        1 ... | 0 | 1 | 0...
	 *                       ^ binary search ends here
	 */
	if ((error = xlog_find_verify_cycle(log, start_blk,
					 (int)num_scan_bblks, 0, &new_blk)))
		goto bp_err;
	if (new_blk != -1)
		last_blk = new_blk;

	/*
	 * Potentially back up over a partial log record write.  We don't need
	 * to search the end of the log because we know it is zero.
	 */
	error = xlog_find_verify_log_record(log, start_blk, &last_blk, 0);
	if (error == 1)
		error = -EIO;
	if (error)
		goto bp_err;

	*blk_no = last_blk;
bp_err:
	xlog_put_bp(bp);
	if (error)
		return error;
	return 1;
}
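/*
 * Illustrative sketch, not part of the original source: how a caller is
 * expected to consume xlog_find_zeroed()'s three-way return, mirroring
 * the use in xlog_find_head() above.  The helper name and out-parameters
 * are ours.
 */
static int
example_consume_find_zeroed(
	int		ret,		/* return of xlog_find_zeroed() */
	long long	first_zero_blk,	/* *blk_no from that call */
	long long	*head_blk,	/* set when ret == 1 */
	int		*need_search)	/* set when ret == 0 */
{
	*need_search = 0;
	if (ret < 0)
		return ret;		/* I/O error or "not a log" */
	if (ret == 1)
		*head_blk = first_zero_blk;	/* log ends at first zero block */
	else
		*need_search = 1;	/* fully written: do the cycle search */
	return 0;
}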
/*
 * These are simple subroutines used by xlog_clear_stale_blocks() below
 * to initialize a buffer full of empty log record headers and write
 * them into the log.
 */
STATIC void
xlog_add_record(
	struct xlog		*log,
	char			*buf,
	int			cycle,
	int			block,
	int			tail_cycle,
	int			tail_block)
{
	xlog_rec_header_t	*recp = (xlog_rec_header_t *)buf;

	memset(buf, 0, BBSIZE);
	recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
	recp->h_cycle = cpu_to_be32(cycle);
	recp->h_version = cpu_to_be32(
			xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
	recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
	recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
	recp->h_fmt = cpu_to_be32(XLOG_FMT);
	memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
}
STATIC int
xlog_write_log_records(
	struct xlog	*log,
	int		cycle,
	int		start_block,
	int		blocks,
	int		tail_cycle,
	int		tail_block)
{
	char		*offset;
	xfs_buf_t	*bp;
	int		balign, ealign;
	int		sectbb = log->l_sectBBsize;
	int		end_block = start_block + blocks;
	int		bufblks;
	int		error = 0;
	int		i, j = 0;

	/*
	 * Greedily allocate a buffer big enough to handle the full
	 * range of basic blocks to be written.  If that fails, try
	 * a smaller size.  We need to be able to write at least a
	 * log sector, or we're out of luck.
	 */
	bufblks = 1 << ffs(blocks);
	while (bufblks > log->l_logBBsize)
		bufblks >>= 1;
	while (!(bp = xlog_get_bp(log, bufblks))) {
		bufblks >>= 1;
		if (bufblks < sectbb)
			return -ENOMEM;
	}

	/* We may need to do a read at the start to fill in part of
	 * the buffer in the starting sector not covered by the first
	 * write below.
	 */
	balign = round_down(start_block, sectbb);
	if (balign != start_block) {
		error = xlog_bread_noalign(log, start_block, 1, bp);
		if (error)
			goto out_put_bp;

		j = start_block - balign;
	}

	for (i = start_block; i < end_block; i += bufblks) {
		int		bcount, endcount;

		bcount = min(bufblks, end_block - start_block);
		endcount = bcount - j;

		/* We may need to do a read at the end to fill in part of
		 * the buffer in the final sector not covered by the write.
		 * If this is the same sector as the above read, skip it.
		 */
		ealign = round_down(end_block, sectbb);
		if (j == 0 && (start_block + endcount > ealign)) {
			offset = bp->b_addr + BBTOB(ealign - start_block);
			error = xlog_bread_offset(log, ealign, sectbb,
							bp, offset);
			if (error)
				break;
		}

		offset = xlog_align(log, start_block, endcount, bp);
		for (; j < endcount; j++) {
			xlog_add_record(log, offset, cycle, i+j,
					tail_cycle, tail_block);
			offset += BBSIZE;
		}
		error = xlog_bwrite(log, start_block, endcount, bp);
		if (error)
			break;
		start_block += endcount;
		j = 0;
	}

 out_put_bp:
	xlog_put_bp(bp);
	return error;
}
/*
 * This routine is called to blow away any incomplete log writes out
 * in front of the log head.  We do this so that we won't become confused
 * if we come up, write only a little bit more, and then crash again.
 * If we leave the partial log records out there, this situation could
 * cause us to think those partial writes are valid blocks since they
 * have the current cycle number.  We get rid of them by overwriting them
 * with empty log records with the old cycle number rather than the
 * current one.
 *
 * The tail lsn is passed in rather than taken from
 * the log so that we will not write over the unmount record after a
 * clean unmount in a 512 block log.  Doing so would leave the log without
 * any valid log records in it until a new one was written.  If we crashed
 * during that time we would not be able to recover.
 */
STATIC int
xlog_clear_stale_blocks(
	struct xlog	*log,
	xfs_lsn_t	tail_lsn)
{
	int		tail_cycle, head_cycle;
	int		tail_block, head_block;
	int		tail_distance, max_distance;
	int		distance;
	int		error;

	tail_cycle = CYCLE_LSN(tail_lsn);
	tail_block = BLOCK_LSN(tail_lsn);
	head_cycle = log->l_curr_cycle;
	head_block = log->l_curr_block;

	/*
	 * Figure out the distance between the new head of the log
	 * and the tail.  We want to write over any blocks beyond the
	 * head that we may have written just before the crash, but
	 * we don't want to overwrite the tail of the log.
	 */
	if (head_cycle == tail_cycle) {
		/*
		 * The tail is behind the head in the physical log,
		 * so the distance from the head to the tail is the
		 * distance from the head to the end of the log plus
		 * the distance from the beginning of the log to the
		 * tail.
		 */
		if (unlikely(head_block < tail_block || head_block >= log->l_logBBsize)) {
			XFS_ERROR_REPORT("xlog_clear_stale_blocks(1)",
					 XFS_ERRLEVEL_LOW, log->l_mp);
			return -EFSCORRUPTED;
		}
		tail_distance = tail_block + (log->l_logBBsize - head_block);
	} else {
		/*
		 * The head is behind the tail in the physical log,
		 * so the distance from the head to the tail is just
		 * the tail block minus the head block.
		 */
		if (unlikely(head_block >= tail_block || head_cycle != (tail_cycle + 1))){
			XFS_ERROR_REPORT("xlog_clear_stale_blocks(2)",
					 XFS_ERRLEVEL_LOW, log->l_mp);
			return -EFSCORRUPTED;
		}
		tail_distance = tail_block - head_block;
	}

	/*
	 * If the head is right up against the tail, we can't clear
	 * anything.
	 */
	if (tail_distance <= 0) {
		ASSERT(tail_distance == 0);
		return 0;
	}

	max_distance = XLOG_TOTAL_REC_SHIFT(log);
	/*
	 * Take the smaller of the maximum amount of outstanding I/O
	 * we could have and the distance to the tail to clear out.
	 * We take the smaller so that we don't overwrite the tail and
	 * we don't waste all day writing from the head to the tail
	 * for no reason.
	 */
	max_distance = MIN(max_distance, tail_distance);

	if ((head_block + max_distance) <= log->l_logBBsize) {
		/*
		 * We can stomp all the blocks we need to without
		 * wrapping around the end of the log.  Just do it
		 * in a single write.  Use the cycle number of the
		 * current cycle minus one so that the log will look like:
		 *     n ... | n - 1 ...
		 */
		error = xlog_write_log_records(log, (head_cycle - 1),
				head_block, max_distance, tail_cycle,
				tail_block);
		if (error)
			return error;
	} else {
		/*
		 * We need to wrap around the end of the physical log in
		 * order to clear all the blocks.  Do it in two separate
		 * I/Os.  The first write should be from the head to the
		 * end of the physical log, and it should use the current
		 * cycle number minus one just like above.
		 */
		distance = log->l_logBBsize - head_block;
		error = xlog_write_log_records(log, (head_cycle - 1),
				head_block, distance, tail_cycle,
				tail_block);

		if (error)
			return error;

		/*
		 * Now write the blocks at the start of the physical log.
		 * This writes the remainder of the blocks we want to clear.
		 * It uses the current cycle number since we're now on the
		 * same cycle as the head so that we get:
		 *    n ... n ... | n - 1 ...
		 *    ^^^^^ blocks we're writing
		 */
		distance = max_distance - (log->l_logBBsize - head_block);
		error = xlog_write_log_records(log, head_cycle, 0, distance,
				tail_cycle, tail_block);
		if (error)
			return error;
	}

	return 0;
}
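/*
 * Illustrative sketch, not part of the original source: the split of the
 * clearing range performed above when it wraps the end of the physical
 * log.  A 1000 block log with head_block = 950 and max_distance = 120 is
 * cleared as 50 blocks (950..999) stamped with cycle - 1, then 70 blocks
 * (0..69) stamped with the current cycle.  The helper name is ours.
 */
static void
example_split_clear_range(
	long long	log_bbsize,	/* physical log size in bblocks */
	long long	head_block,
	long long	max_distance,
	long long	*first_len,	/* written with head cycle - 1 */
	long long	*second_len)	/* written with head cycle */
{
	if (head_block + max_distance <= log_bbsize) {
		*first_len = max_distance;	/* no wrap: single write */
		*second_len = 0;
	} else {
		*first_len = log_bbsize - head_block;
		*second_len = max_distance - *first_len;
	}
}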
/******************************************************************************
 *
 *		Log recover routines
 *
 ******************************************************************************
 */
/*
 * Sort the log items in the transaction.
 *
 * The ordering constraints are defined by the inode allocation and unlink
 * behaviour. The rules are:
 *
 *	1. Every item is only logged once in a given transaction. Hence it
 *	   represents the last logged state of the item. Hence ordering is
 *	   dependent on the order in which operations need to be performed so
 *	   required initial conditions are always met.
 *
 *	2. Cancelled buffers are recorded in pass 1 in a separate table and
 *	   there's nothing to replay from them so we can simply cull them
 *	   from the transaction. However, we can't do that until after we've
 *	   replayed all the other items because they may be dependent on the
 *	   cancelled buffer and replaying the cancelled buffer can remove it
 *	   from the cancelled buffer table. Hence they have to be done last.
 *
 *	3. Inode allocation buffers must be replayed before inode items that
 *	   read the buffer and replay changes into it. For filesystems using the
 *	   ICREATE transactions, this means XFS_LI_ICREATE objects need to get
 *	   treated the same as inode allocation buffers as they create and
 *	   initialise the buffers directly.
 *
 *	4. Inode unlink buffers must be replayed after inode items are replayed.
 *	   This ensures that inodes are completely flushed to the inode buffer
 *	   in a "free" state before we remove the unlinked inode list pointer.
 *
 * Hence the ordering needs to be inode allocation buffers first, inode items
 * second, inode unlink buffers third and cancelled buffers last.
 *
 * But there's a problem with that - we can't tell an inode allocation buffer
 * apart from a regular buffer, so we can't separate them. We can, however,
 * tell an inode unlink buffer from the others, and so we can separate them out
 * from all the other buffers and move them to last.
 *
 * Hence, 4 lists, in order from head to tail:
 *	- buffer_list for all buffers except cancelled/inode unlink buffers
 *	- item_list for all non-buffer items
 *	- inode_buffer_list for inode unlink buffers
 *	- cancel_list for the cancelled buffers
 *
 * Note that we add objects to the tail of the lists so that first-to-last
 * ordering is preserved within the lists. Adding objects to the head of the
 * list means when we traverse from the head we walk them in last-to-first
 * order. For cancelled buffers and inode unlink buffers this doesn't matter,
 * but for all other items there may be specific ordering that we need to
 * preserve.
 */
STATIC int
xlog_recover_reorder_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	int			pass)
{
	xlog_recover_item_t	*item, *n;
	int			error = 0;
	LIST_HEAD(sort_list);
	LIST_HEAD(cancel_list);
	LIST_HEAD(buffer_list);
	LIST_HEAD(inode_buffer_list);
	LIST_HEAD(inode_list);

	list_splice_init(&trans->r_itemq, &sort_list);
	list_for_each_entry_safe(item, n, &sort_list, ri_list) {
		xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;

		switch (ITEM_TYPE(item)) {
		case XFS_LI_ICREATE:
			list_move_tail(&item->ri_list, &buffer_list);
			break;
		case XFS_LI_BUF:
			if (buf_f->blf_flags & XFS_BLF_CANCEL) {
				trace_xfs_log_recover_item_reorder_head(log,
							trans, item, pass);
				list_move(&item->ri_list, &cancel_list);
				break;
			}
			if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
				list_move(&item->ri_list, &inode_buffer_list);
				break;
			}
			list_move_tail(&item->ri_list, &buffer_list);
			break;
		case XFS_LI_INODE:
		case XFS_LI_DQUOT:
		case XFS_LI_QUOTAOFF:
		case XFS_LI_EFD:
		case XFS_LI_EFI:
		case XFS_LI_RUI:
		case XFS_LI_RUD:
		case XFS_LI_CUI:
		case XFS_LI_CUD:
		case XFS_LI_BUI:
		case XFS_LI_BUD:
			trace_xfs_log_recover_item_reorder_tail(log,
							trans, item, pass);
			list_move_tail(&item->ri_list, &inode_list);
			break;
		default:
			xfs_warn(log->l_mp,
				"%s: unrecognized type of log operation",
				__func__);
			ASSERT(0);
			/*
			 * return the remaining items back to the transaction
			 * item list so they can be freed in caller.
			 */
			if (!list_empty(&sort_list))
				list_splice_init(&sort_list, &trans->r_itemq);
			error = -EIO;
			goto out;
		}
	}
out:
	ASSERT(list_empty(&sort_list));
	if (!list_empty(&buffer_list))
		list_splice(&buffer_list, &trans->r_itemq);
	if (!list_empty(&inode_list))
		list_splice_tail(&inode_list, &trans->r_itemq);
	if (!list_empty(&inode_buffer_list))
		list_splice_tail(&inode_buffer_list, &trans->r_itemq);
	if (!list_empty(&cancel_list))
		list_splice_tail(&cancel_list, &trans->r_itemq);
	return error;
}
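/*
 * Illustrative sketch, not part of the original source: the four-way,
 * order-preserving partition performed above, reduced to plain arrays.
 * Items are appended to their class in input order, then the classes
 * are emitted buffers first, non-buffer items second, inode unlink
 * buffers third and cancelled buffers last, matching the splice order
 * above.  The names are ours.
 */
enum example_class {
	EX_BUFFER,		/* ordinary/inode allocation buffers */
	EX_ITEM,		/* non-buffer items */
	EX_INODE_UNLINK,	/* inode unlink buffers */
	EX_CANCEL,		/* cancelled buffers */
};

static int
example_reorder(
	const enum example_class	*class,	/* class of each item */
	int				n,
	int				*out)	/* receives item indexes */
{
	int	pos = 0;
	int	want, i;

	for (want = EX_BUFFER; want <= EX_CANCEL; want++)
		for (i = 0; i < n; i++)
			if (class[i] == want)
				out[pos++] = i;	/* stable within a class */
	return pos;
}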
/*
 * Build up the table of buf cancel records so that we don't replay
 * cancelled data in the second pass.  For buffer records that are
 * not cancel records, there is nothing to do here so we just return.
 *
 * If we get a cancel record which is already in the table, this indicates
 * that the buffer was cancelled multiple times.  In order to ensure
 * that during pass 2 we keep the record in the table until we reach its
 * last occurrence in the log, we keep a reference count in the cancel
 * record in the table to tell us how many times we expect to see this
 * record during the second pass.
 */
STATIC int
xlog_recover_buffer_pass1(
	struct xlog			*log,
	struct xlog_recover_item	*item)
{
	xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
	struct list_head	*bucket;
	struct xfs_buf_cancel	*bcp;

	/*
	 * If this isn't a cancel buffer item, then just return.
	 */
	if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) {
		trace_xfs_log_recover_buf_not_cancel(log, buf_f);
		return 0;
	}

	/*
	 * Insert an xfs_buf_cancel record into the hash table of them.
	 * If there is already an identical record, bump its reference count.
	 */
	bucket = XLOG_BUF_CANCEL_BUCKET(log, buf_f->blf_blkno);
	list_for_each_entry(bcp, bucket, bc_list) {
		if (bcp->bc_blkno == buf_f->blf_blkno &&
		    bcp->bc_len == buf_f->blf_len) {
			bcp->bc_refcount++;
			trace_xfs_log_recover_buf_cancel_ref_inc(log, buf_f);
			return 0;
		}
	}

	bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), KM_SLEEP);
	bcp->bc_blkno = buf_f->blf_blkno;
	bcp->bc_len = buf_f->blf_len;
	bcp->bc_refcount = 1;
	list_add_tail(&bcp->bc_list, bucket);

	trace_xfs_log_recover_buf_cancel_add(log, buf_f);
	return 0;
}
/*
 * Check to see whether the buffer being recovered has a corresponding
 * entry in the buffer cancel record table. If it does, return the cancel
 * buffer structure to the caller.
 */
STATIC struct xfs_buf_cancel *
xlog_peek_buffer_cancelled(
	struct xlog		*log,
	xfs_daddr_t		blkno,
	uint			len,
	unsigned short		flags)
{
	struct list_head	*bucket;
	struct xfs_buf_cancel	*bcp;

	if (!log->l_buf_cancel_table) {
		/* empty table means no cancelled buffers in the log */
		ASSERT(!(flags & XFS_BLF_CANCEL));
		return NULL;
	}

	bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
	list_for_each_entry(bcp, bucket, bc_list) {
		if (bcp->bc_blkno == blkno && bcp->bc_len == len)
			return bcp;
	}

	/*
	 * We didn't find a corresponding entry in the table, so return 0 so
	 * that the buffer is NOT cancelled.
	 */
	ASSERT(!(flags & XFS_BLF_CANCEL));
	return NULL;
}
/*
 * If the buffer is being cancelled then return 1 so that it will be cancelled,
 * otherwise return 0. If the buffer is actually a buffer cancel item
 * (XFS_BLF_CANCEL is set), then decrement the refcount on the entry in the
 * table and remove it from the table if this is the last reference.
 *
 * We remove the cancel record from the table when we encounter its last
 * occurrence in the log so that if the same buffer is re-used again after its
 * last cancellation we actually replay the changes made at that point.
 */
STATIC int
xlog_check_buffer_cancelled(
	struct xlog		*log,
	xfs_daddr_t		blkno,
	uint			len,
	unsigned short		flags)
{
	struct xfs_buf_cancel	*bcp;

	bcp = xlog_peek_buffer_cancelled(log, blkno, len, flags);
	if (!bcp)
		return 0;

	/*
	 * We've got a match, so return 1 so that the recovery of this buffer
	 * is cancelled.  If this buffer is actually a buffer cancel log
	 * item, then decrement the refcount on the one in the table and
	 * remove it if this is the last reference.
	 */
	if (flags & XFS_BLF_CANCEL) {
		if (--bcp->bc_refcount == 0) {
			list_del(&bcp->bc_list);
			kmem_free(bcp);
		}
	}
	return 1;
}
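/*
 * Illustrative sketch, not part of the original source: the reference
 * counting protocol of the buf cancel table in miniature.  Pass 1 bumps
 * the count once per cancel record seen; pass 2 decrements on each
 * cancel record it meets and drops the entry at zero, so a buffer
 * re-used after its last cancellation is replayed normally.  The names
 * and the flat array are ours.
 */
struct example_cancel {
	long long	blkno;
	int		refcount;	/* cancel records still expected */
};

/* pass 2 check: returns 1 if the buffer at @blkno must not be replayed */
static int
example_check_cancelled(
	struct example_cancel	*table,
	int			nr,
	long long		blkno,
	int			is_cancel_record)
{
	int	i;

	for (i = 0; i < nr; i++) {
		if (table[i].blkno != blkno || table[i].refcount == 0)
			continue;
		if (is_cancel_record)
			table[i].refcount--;	/* zero == entry retired */
		return 1;			/* cancelled: skip replay */
	}
	return 0;				/* not cancelled: replay it */
}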
/*
 * Perform recovery for a buffer full of inodes.  In these buffers, the only
 * data which should be recovered is that which corresponds to the
 * di_next_unlinked pointers in the on disk inode structures.  The rest of the
 * data for the inodes is always logged through the inodes themselves rather
 * than the inode buffer and is recovered in xlog_recover_inode_pass2().
 *
 * The only time when buffers full of inodes are fully recovered is when the
 * buffer is full of newly allocated inodes.  In this case the buffer will
 * not be marked as an inode buffer and so will be sent to
 * xlog_recover_do_reg_buffer() below during recovery.
 */
STATIC int
xlog_recover_do_inode_buffer(
	struct xfs_mount	*mp,
	xlog_recover_item_t	*item,
	struct xfs_buf		*bp,
	xfs_buf_log_format_t	*buf_f)
{
	int			i;
	int			item_index = 0;
	int			bit = 0;
	int			nbits = 0;
	int			reg_buf_offset = 0;
	int			reg_buf_bytes = 0;
	int			next_unlinked_offset;
	int			inodes_per_buf;
	xfs_agino_t		*logged_nextp;
	xfs_agino_t		*buffer_nextp;

	trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);

	/*
	 * Post recovery validation only works properly on CRC enabled
	 * filesystems.
	 */
	if (xfs_sb_version_hascrc(&mp->m_sb))
		bp->b_ops = &xfs_inode_buf_ops;

	inodes_per_buf = BBTOB(bp->b_io_length) >> mp->m_sb.sb_inodelog;
	for (i = 0; i < inodes_per_buf; i++) {
		next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
			offsetof(xfs_dinode_t, di_next_unlinked);

		while (next_unlinked_offset >=
		       (reg_buf_offset + reg_buf_bytes)) {
			/*
			 * The next di_next_unlinked field is beyond
			 * the current logged region.  Find the next
			 * logged region that contains or is beyond
			 * the current di_next_unlinked field.
			 */
			bit += nbits;
			bit = xfs_next_bit(buf_f->blf_data_map,
					   buf_f->blf_map_size, bit);

			/*
			 * If there are no more logged regions in the
			 * buffer, then we're done.
			 */
			if (bit == -1)
				return 0;

			nbits = xfs_contig_bits(buf_f->blf_data_map,
						buf_f->blf_map_size, bit);
			ASSERT(nbits > 0);
			reg_buf_offset = bit << XFS_BLF_SHIFT;
			reg_buf_bytes = nbits << XFS_BLF_SHIFT;
			item_index++;
		}

		/*
		 * If the current logged region starts after the current
		 * di_next_unlinked field, then move on to the next
		 * di_next_unlinked field.
		 */
		if (next_unlinked_offset < reg_buf_offset)
			continue;

		ASSERT(item->ri_buf[item_index].i_addr != NULL);
		ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0);
		ASSERT((reg_buf_offset + reg_buf_bytes) <=
							BBTOB(bp->b_io_length));

		/*
		 * The current logged region contains a copy of the
		 * current di_next_unlinked field.  Extract its value
		 * and copy it to the buffer copy.
		 */
		logged_nextp = item->ri_buf[item_index].i_addr +
				next_unlinked_offset - reg_buf_offset;
		if (unlikely(*logged_nextp == 0)) {
			xfs_alert(mp,
		"Bad inode buffer log record (ptr = "PTR_FMT", bp = "PTR_FMT"). "
		"Trying to replay bad (0) inode di_next_unlinked field.",
				item, bp);
			XFS_ERROR_REPORT("xlog_recover_do_inode_buf",
					 XFS_ERRLEVEL_LOW, mp);
			return -EFSCORRUPTED;
		}

		buffer_nextp = xfs_buf_offset(bp, next_unlinked_offset);
		*buffer_nextp = *logged_nextp;

		/*
		 * If necessary, recalculate the CRC in the on-disk inode. We
		 * have to leave the inode in a consistent state for whoever
		 * reads it next....
		 */
		xfs_dinode_calc_crc(mp,
				xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize));

	}

	return 0;
}
2246 * V5 filesystems know the age of the buffer on disk being recovered. We can
2247 * have newer objects on disk than we are replaying, and so for these cases we
2248 * don't want to replay the current change as that will make the buffer contents
2249 * temporarily invalid on disk.
2251 * The magic number might not match the buffer type we are going to recover
2252 * (e.g. reallocated blocks), so we ignore the xfs_buf_log_format flags. Hence
2253 * extract the LSN of the existing object in the buffer based on its current
2254 * magic number. If we don't recognise the magic number in the buffer, then
2255 * return a LSN of -1 so that the caller knows it was an unrecognised block and
2256 * so can recover the buffer.
2258 * Note: we cannot rely solely on magic number matches to determine that the
2259 * buffer has a valid LSN - we also need to verify that it belongs to this
2260 * filesystem, so we need to extract the object's UUID and compare it to that
2261 * which we read from the superblock. If the UUIDs don't match, then we've got a
2262 * stale metadata block from an old filesystem instance that we need to recover over the top of.
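*
* A minimal sketch of how the caller consumes the result (the real logic
* is in xlog_recover_buffer_pass2() below):
*
*	lsn = xlog_recover_get_buf_lsn(mp, bp);
*	if (lsn != (xfs_lsn_t)-1 && XFS_LSN_CMP(lsn, current_lsn) >= 0)
*		-> skip replay, the on-disk object is newer than the log;
*	else
*		-> replay, the object is unrecognised or older.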
2266 xlog_recover_get_buf_lsn(
2267 struct xfs_mount *mp,
2273 void *blk = bp->b_addr;
2277 /* v4 filesystems always recover immediately */
2278 if (!xfs_sb_version_hascrc(&mp->m_sb))
2279 goto recover_immediately;
2281 magic32 = be32_to_cpu(*(__be32 *)blk);
2283 case XFS_ABTB_CRC_MAGIC:
2284 case XFS_ABTC_CRC_MAGIC:
2285 case XFS_ABTB_MAGIC:
2286 case XFS_ABTC_MAGIC:
2287 case XFS_RMAP_CRC_MAGIC:
2288 case XFS_REFC_CRC_MAGIC:
2289 case XFS_IBT_CRC_MAGIC:
2290 case XFS_IBT_MAGIC: {
2291 struct xfs_btree_block *btb = blk;
2293 lsn = be64_to_cpu(btb->bb_u.s.bb_lsn);
2294 uuid = &btb->bb_u.s.bb_uuid;
2297 case XFS_BMAP_CRC_MAGIC:
2298 case XFS_BMAP_MAGIC: {
2299 struct xfs_btree_block *btb = blk;
2301 lsn = be64_to_cpu(btb->bb_u.l.bb_lsn);
2302 uuid = &btb->bb_u.l.bb_uuid;
2306 lsn = be64_to_cpu(((struct xfs_agf *)blk)->agf_lsn);
2307 uuid = &((struct xfs_agf *)blk)->agf_uuid;
2309 case XFS_AGFL_MAGIC:
2310 lsn = be64_to_cpu(((struct xfs_agfl *)blk)->agfl_lsn);
2311 uuid = &((struct xfs_agfl *)blk)->agfl_uuid;
2314 lsn = be64_to_cpu(((struct xfs_agi *)blk)->agi_lsn);
2315 uuid = &((struct xfs_agi *)blk)->agi_uuid;
2317 case XFS_SYMLINK_MAGIC:
2318 lsn = be64_to_cpu(((struct xfs_dsymlink_hdr *)blk)->sl_lsn);
2319 uuid = &((struct xfs_dsymlink_hdr *)blk)->sl_uuid;
2321 case XFS_DIR3_BLOCK_MAGIC:
2322 case XFS_DIR3_DATA_MAGIC:
2323 case XFS_DIR3_FREE_MAGIC:
2324 lsn = be64_to_cpu(((struct xfs_dir3_blk_hdr *)blk)->lsn);
2325 uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid;
2327 case XFS_ATTR3_RMT_MAGIC:
2329 * Remote attr blocks are written synchronously, rather than
2330 * being logged. That means they do not contain a valid
2331 * (i.e. transactionally ordered) LSN, and hence any time we
2332 * see a buffer to replay over the top of a remote attribute
2333 * block we should simply do so.
2335 goto recover_immediately;
2338 * superblock uuids are magic. We may or may not have a
2339 * sb_meta_uuid on disk, but it will be set in the in-core
2340 * superblock. We set the uuid pointer for verification
2341 * according to the superblock feature mask to ensure we check
2342 * the relevant UUID in the superblock.
2344 lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn);
2345 if (xfs_sb_version_hasmetauuid(&mp->m_sb))
2346 uuid = &((struct xfs_dsb *)blk)->sb_meta_uuid;
2348 uuid = &((struct xfs_dsb *)blk)->sb_uuid;
2354 if (lsn != (xfs_lsn_t)-1) {
2355 if (!uuid_equal(&mp->m_sb.sb_meta_uuid, uuid))
2356 goto recover_immediately;
2360 magicda = be16_to_cpu(((struct xfs_da_blkinfo *)blk)->magic);
2362 case XFS_DIR3_LEAF1_MAGIC:
2363 case XFS_DIR3_LEAFN_MAGIC:
2364 case XFS_DA3_NODE_MAGIC:
2365 lsn = be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn);
2366 uuid = &((struct xfs_da3_blkinfo *)blk)->uuid;
2372 if (lsn != (xfs_lsn_t)-1) {
2373 if (!uuid_equal(&mp->m_sb.sb_uuid, uuid))
2374 goto recover_immediately;
2379 * We do individual object checks on dquot and inode buffers as they
2380 * have their own individual LSN records. Also, we could have a stale
2381 * buffer here, so we have to at least recognise these buffer types.
2383 * A noted complexity here is inode unlinked list processing - it logs
2384 * the inode directly in the buffer, but we don't know which inodes have
2385 * been modified, and there is no global buffer LSN. Hence we need to
2386 * recover all inode buffer types immediately. This problem will be
2387 * fixed by logical logging of the unlinked list modifications.
2389 magic16 = be16_to_cpu(*(__be16 *)blk);
2391 case XFS_DQUOT_MAGIC:
2392 case XFS_DINODE_MAGIC:
2393 goto recover_immediately;
2398 /* unknown buffer contents, recover immediately */
2400 recover_immediately:
2401 return (xfs_lsn_t)-1;
2406 * Validate the recovered buffer is of the correct type and attach the
2407 * appropriate buffer operations to it for writeback. Magic numbers are in a few different places:
2409 * the first 16 bits of the buffer (inode buffer, dquot buffer),
2410 * the first 32 bits of the buffer (most blocks),
2411 * inside a struct xfs_da_blkinfo at the start of the buffer.
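*
* For example, an AGF buffer carries the 32 bit XFS_AGF_MAGIC at offset
* zero, a dquot buffer is identified by the 16 bit XFS_DQUOT_MAGIC, and
* dir leaf and da node blocks keep their magic inside the
* struct xfs_da_blkinfo header.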
2414 xlog_recover_validate_buf_type(
2415 struct xfs_mount *mp,
2417 xfs_buf_log_format_t *buf_f,
2418 xfs_lsn_t current_lsn)
2420 struct xfs_da_blkinfo *info = bp->b_addr;
2424 char *warnmsg = NULL;
2427 * We can only do post recovery validation on items on CRC enabled
2428 * filesystems as we need to know when the buffer was written to be able
2429 * to determine if we should have replayed the item. If we replay old
2430 * metadata over a newer buffer, then it will enter a temporarily
2431 * inconsistent state resulting in verification failures. Hence for now
2432 * just avoid the verification stage for non-CRC filesystems.
2434 if (!xfs_sb_version_hascrc(&mp->m_sb))
2437 magic32 = be32_to_cpu(*(__be32 *)bp->b_addr);
2438 magic16 = be16_to_cpu(*(__be16*)bp->b_addr);
2439 magicda = be16_to_cpu(info->magic);
2440 switch (xfs_blft_from_flags(buf_f)) {
2441 case XFS_BLFT_BTREE_BUF:
2443 case XFS_ABTB_CRC_MAGIC:
2444 case XFS_ABTC_CRC_MAGIC:
2445 case XFS_ABTB_MAGIC:
2446 case XFS_ABTC_MAGIC:
2447 bp->b_ops = &xfs_allocbt_buf_ops;
2449 case XFS_IBT_CRC_MAGIC:
2450 case XFS_FIBT_CRC_MAGIC:
2452 case XFS_FIBT_MAGIC:
2453 bp->b_ops = &xfs_inobt_buf_ops;
2455 case XFS_BMAP_CRC_MAGIC:
2456 case XFS_BMAP_MAGIC:
2457 bp->b_ops = &xfs_bmbt_buf_ops;
2459 case XFS_RMAP_CRC_MAGIC:
2460 bp->b_ops = &xfs_rmapbt_buf_ops;
2462 case XFS_REFC_CRC_MAGIC:
2463 bp->b_ops = &xfs_refcountbt_buf_ops;
2466 warnmsg = "Bad btree block magic!";
2470 case XFS_BLFT_AGF_BUF:
2471 if (magic32 != XFS_AGF_MAGIC) {
2472 warnmsg = "Bad AGF block magic!";
2475 bp->b_ops = &xfs_agf_buf_ops;
2477 case XFS_BLFT_AGFL_BUF:
2478 if (magic32 != XFS_AGFL_MAGIC) {
2479 warnmsg = "Bad AGFL block magic!";
2482 bp->b_ops = &xfs_agfl_buf_ops;
2484 case XFS_BLFT_AGI_BUF:
2485 if (magic32 != XFS_AGI_MAGIC) {
2486 warnmsg = "Bad AGI block magic!";
2489 bp->b_ops = &xfs_agi_buf_ops;
2491 case XFS_BLFT_UDQUOT_BUF:
2492 case XFS_BLFT_PDQUOT_BUF:
2493 case XFS_BLFT_GDQUOT_BUF:
2494 #ifdef CONFIG_XFS_QUOTA
2495 if (magic16 != XFS_DQUOT_MAGIC) {
2496 warnmsg = "Bad DQUOT block magic!";
2499 bp->b_ops = &xfs_dquot_buf_ops;
2502 "Trying to recover dquots without QUOTA support built in!");
2506 case XFS_BLFT_DINO_BUF:
2507 if (magic16 != XFS_DINODE_MAGIC) {
2508 warnmsg = "Bad INODE block magic!";
2511 bp->b_ops = &xfs_inode_buf_ops;
2513 case XFS_BLFT_SYMLINK_BUF:
2514 if (magic32 != XFS_SYMLINK_MAGIC) {
2515 warnmsg = "Bad symlink block magic!";
2518 bp->b_ops = &xfs_symlink_buf_ops;
2520 case XFS_BLFT_DIR_BLOCK_BUF:
2521 if (magic32 != XFS_DIR2_BLOCK_MAGIC &&
2522 magic32 != XFS_DIR3_BLOCK_MAGIC) {
2523 warnmsg = "Bad dir block magic!";
2526 bp->b_ops = &xfs_dir3_block_buf_ops;
2528 case XFS_BLFT_DIR_DATA_BUF:
2529 if (magic32 != XFS_DIR2_DATA_MAGIC &&
2530 magic32 != XFS_DIR3_DATA_MAGIC) {
2531 warnmsg = "Bad dir data magic!";
2534 bp->b_ops = &xfs_dir3_data_buf_ops;
2536 case XFS_BLFT_DIR_FREE_BUF:
2537 if (magic32 != XFS_DIR2_FREE_MAGIC &&
2538 magic32 != XFS_DIR3_FREE_MAGIC) {
2539 warnmsg = "Bad dir3 free magic!";
2542 bp->b_ops = &xfs_dir3_free_buf_ops;
2544 case XFS_BLFT_DIR_LEAF1_BUF:
2545 if (magicda != XFS_DIR2_LEAF1_MAGIC &&
2546 magicda != XFS_DIR3_LEAF1_MAGIC) {
2547 warnmsg = "Bad dir leaf1 magic!";
2550 bp->b_ops = &xfs_dir3_leaf1_buf_ops;
2552 case XFS_BLFT_DIR_LEAFN_BUF:
2553 if (magicda != XFS_DIR2_LEAFN_MAGIC &&
2554 magicda != XFS_DIR3_LEAFN_MAGIC) {
2555 warnmsg = "Bad dir leafn magic!";
2558 bp->b_ops = &xfs_dir3_leafn_buf_ops;
2560 case XFS_BLFT_DA_NODE_BUF:
2561 if (magicda != XFS_DA_NODE_MAGIC &&
2562 magicda != XFS_DA3_NODE_MAGIC) {
2563 warnmsg = "Bad da node magic!";
2566 bp->b_ops = &xfs_da3_node_buf_ops;
2568 case XFS_BLFT_ATTR_LEAF_BUF:
2569 if (magicda != XFS_ATTR_LEAF_MAGIC &&
2570 magicda != XFS_ATTR3_LEAF_MAGIC) {
2571 warnmsg = "Bad attr leaf magic!";
2574 bp->b_ops = &xfs_attr3_leaf_buf_ops;
2576 case XFS_BLFT_ATTR_RMT_BUF:
2577 if (magic32 != XFS_ATTR3_RMT_MAGIC) {
2578 warnmsg = "Bad attr remote magic!";
2581 bp->b_ops = &xfs_attr3_rmt_buf_ops;
2583 case XFS_BLFT_SB_BUF:
2584 if (magic32 != XFS_SB_MAGIC) {
2585 warnmsg = "Bad SB block magic!";
2588 bp->b_ops = &xfs_sb_buf_ops;
2590 #ifdef CONFIG_XFS_RT
2591 case XFS_BLFT_RTBITMAP_BUF:
2592 case XFS_BLFT_RTSUMMARY_BUF:
2593 /* no magic numbers for verification of RT buffers */
2594 bp->b_ops = &xfs_rtbuf_ops;
2596 #endif /* CONFIG_XFS_RT */
2598 xfs_warn(mp, "Unknown buffer type %d!",
2599 xfs_blft_from_flags(buf_f));
2604 * Nothing else to do in the case of a NULL current LSN as this means
2605 * the buffer is more recent than the change in the log and will be skipped.
2608 if (current_lsn == NULLCOMMITLSN)
2612 xfs_warn(mp, warnmsg);
2617 * We must update the metadata LSN of the buffer as it is written out to
2618 * ensure that older transactions never replay over this one and corrupt
2619 * the buffer. This can occur if log recovery is interrupted at some
2620 * point after the current transaction completes, at which point a
2621 * subsequent mount starts recovery from the beginning.
2623 * Write verifiers update the metadata LSN from log items attached to
2624 * the buffer. Therefore, initialize a bli purely to carry the LSN to
2625 * the verifier. We'll clean it up in our ->iodone() callback.
2628 struct xfs_buf_log_item *bip;
2630 ASSERT(!bp->b_iodone || bp->b_iodone == xlog_recover_iodone);
2631 bp->b_iodone = xlog_recover_iodone;
2632 xfs_buf_item_init(bp, mp);
2633 bip = bp->b_log_item;
2634 bip->bli_item.li_lsn = current_lsn;
2639 * Perform a 'normal' buffer recovery. Each logged region of the
2640 * buffer should be copied over the corresponding region in the
2641 * given buffer. The bitmap in the buf log format structure indicates
2642 * where to place the logged data.
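*
* Worked example of the bitmap arithmetic used below: log chunks are
* XFS_BLF_CHUNK (128) bytes, so XFS_BLF_SHIFT is 7. A run of set bits
* starting at bit 3 with nbits = 2 therefore describes the dirty byte
* range [3 << 7, 5 << 7) = [384, 640) of the buffer.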
2645 xlog_recover_do_reg_buffer(
2646 struct xfs_mount *mp,
2647 xlog_recover_item_t *item,
2649 xfs_buf_log_format_t *buf_f,
2650 xfs_lsn_t current_lsn)
2657 trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);
2660 i = 1; /* 0 is the buf format structure */
2662 bit = xfs_next_bit(buf_f->blf_data_map,
2663 buf_f->blf_map_size, bit);
2666 nbits = xfs_contig_bits(buf_f->blf_data_map,
2667 buf_f->blf_map_size, bit);
2669 ASSERT(item->ri_buf[i].i_addr != NULL);
2670 ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0);
2671 ASSERT(BBTOB(bp->b_io_length) >=
2672 ((uint)bit << XFS_BLF_SHIFT) + (nbits << XFS_BLF_SHIFT));
2675 * The dirty regions logged in the buffer, even though
2676 * contiguous, may span multiple chunks. This is because the
2677 * dirty region may span a physical page boundary in a buffer
2678 * and hence be split into two separate vectors for writing into
2679 * the log. Hence we need to trim nbits back to the length of
2680 * the current region being copied out of the log.
2682 if (item->ri_buf[i].i_len < (nbits << XFS_BLF_SHIFT))
2683 nbits = item->ri_buf[i].i_len >> XFS_BLF_SHIFT;
2686 * Do a sanity check if this is a dquot buffer. Just checking
2687 * the first dquot in the buffer should do. XXX: This is
2688 * probably a good thing to do for other buf types also.
2691 if (buf_f->blf_flags &
2692 (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
2693 if (item->ri_buf[i].i_addr == NULL) {
2695 "XFS: NULL dquot in %s.", __func__);
2698 if (item->ri_buf[i].i_len < sizeof(xfs_disk_dquot_t)) {
2700 "XFS: dquot too small (%d) in %s.",
2701 item->ri_buf[i].i_len, __func__);
2704 fa = xfs_dquot_verify(mp, item->ri_buf[i].i_addr,
2708 "dquot corrupt at %pS trying to replay into block 0x%llx",
2714 memcpy(xfs_buf_offset(bp,
2715 (uint)bit << XFS_BLF_SHIFT), /* dest */
2716 item->ri_buf[i].i_addr, /* source */
2717 nbits<<XFS_BLF_SHIFT); /* length */
2723 /* Shouldn't be any more regions */
2724 ASSERT(i == item->ri_total);
2726 xlog_recover_validate_buf_type(mp, bp, buf_f, current_lsn);
2730 * Perform a dquot buffer recovery.
2731 * Simple algorithm: if we have found a QUOTAOFF log item of the same type
2732 * (i.e. USR or GRP), then just toss this buffer away; don't recover it.
2733 * Else, treat it as a regular buffer and do recovery.
2735 * Return false if the buffer was tossed and true if we recovered the
2736 * buffer, to indicate to the caller whether the buffer needs writing.
2739 xlog_recover_do_dquot_buffer(
2740 struct xfs_mount *mp,
2742 struct xlog_recover_item *item,
2744 struct xfs_buf_log_format *buf_f)
2748 trace_xfs_log_recover_buf_dquot_buf(log, buf_f);
2751 * Filesystems are required to send in quota flags at mount time.
2757 if (buf_f->blf_flags & XFS_BLF_UDQUOT_BUF)
2758 type |= XFS_DQ_USER;
2759 if (buf_f->blf_flags & XFS_BLF_PDQUOT_BUF)
2760 type |= XFS_DQ_PROJ;
2761 if (buf_f->blf_flags & XFS_BLF_GDQUOT_BUF)
2762 type |= XFS_DQ_GROUP;
2764 * This type of quotas was turned off, so ignore this buffer
2766 if (log->l_quotaoffs_flag & type)
2769 xlog_recover_do_reg_buffer(mp, item, bp, buf_f, NULLCOMMITLSN);
2774 * This routine replays a modification made to a buffer at runtime.
2775 * There are actually two types of buffer, regular and inode, which
2776 * are handled differently. For inode buffers we only recover a
2777 * specific set of data, namely
2778 * the inode di_next_unlinked fields. This is because all other inode
2779 * data is actually logged via inode records and any data we replay
2780 * here which overlaps that may be stale.
2782 * When meta-data buffers are freed at run time we log a buffer item
2783 * with the XFS_BLF_CANCEL bit set to indicate that previous copies
2784 * of the buffer in the log should not be replayed at recovery time.
2785 * This is so that if the blocks covered by the buffer are reused for
2786 * file data before we crash we don't end up replaying old, freed
2787 * meta-data into a user's file.
2789 * To handle the cancellation of buffer log items, we make two passes
2790 * over the log during recovery. During the first we build a table of
2791 * those buffers which have been cancelled, and during the second we
2792 * only replay those buffers which do not have corresponding cancel
2793 * records in the table. See xlog_recover_buffer_pass[1,2] above
2794 * for more details on the implementation of the table of cancel records.
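*
* Rough shape of the two passes (illustrative pseudo-code only):
*
*	pass 1: for each buf item with XFS_BLF_CANCEL set
*			record (blkno, len) in the cancel table
*	pass 2: for each buf item
*			if (blkno, len) matches a cancel record
*				skip replay
*			else
*				replay the logged regions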
2797 xlog_recover_buffer_pass2(
2799 struct list_head *buffer_list,
2800 struct xlog_recover_item *item,
2801 xfs_lsn_t current_lsn)
2803 xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr;
2804 xfs_mount_t *mp = log->l_mp;
2811 * In this pass we only want to recover the buffers which have
2812 * not been cancelled and are not cancellation buffers themselves.
2814 if (xlog_check_buffer_cancelled(log, buf_f->blf_blkno,
2815 buf_f->blf_len, buf_f->blf_flags)) {
2816 trace_xfs_log_recover_buf_cancel(log, buf_f);
2820 trace_xfs_log_recover_buf_recover(log, buf_f);
2823 if (buf_f->blf_flags & XFS_BLF_INODE_BUF)
2824 buf_flags |= XBF_UNMAPPED;
2826 bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len,
2830 error = bp->b_error;
2832 xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#1)");
2837 * Recover the buffer only if we get an LSN from it and it's less than
2838 * the lsn of the transaction we are replaying.
2840 * Note that we have to be extremely careful of readahead here.
2841 * Readahead does not attach verifiers to the buffers, so if we don't
2842 * actually do any replay after readahead because the LSN we found in
2843 * the buffer is more recent than the current transaction, then we need
2844 * to attach the verifier directly. Failure to do so can lead to future
2845 * recovery actions (e.g. EFI and unlinked list recovery) operating on
2846 * the buffers without the verifier attached, which leaves blocks on
2847 * disk with the correct content but a stale metadata verifier attached.
2850 * It is safe to assume these clean buffers are currently up to date.
2851 * If the buffer is dirtied by a later transaction being replayed, then
2852 * the verifier will be reset to match whatever recovery turns that buffer into.
2855 lsn = xlog_recover_get_buf_lsn(mp, bp);
2856 if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
2857 trace_xfs_log_recover_buf_skip(log, buf_f);
2858 xlog_recover_validate_buf_type(mp, bp, buf_f, NULLCOMMITLSN);
2862 if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
2863 error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
2866 } else if (buf_f->blf_flags &
2867 (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
2870 dirty = xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
2874 xlog_recover_do_reg_buffer(mp, item, bp, buf_f, current_lsn);
2878 * Perform delayed write on the buffer. Asynchronous writes will be
2879 * slower when taking into account all the buffers to be flushed.
2881 * Also make sure that only inode buffers with good sizes stay in
2882 * the buffer cache. The kernel moves inodes in buffers of 1 block
2883 * or mp->m_inode_cluster_size bytes, whichever is bigger. The inode
2884 * buffers in the log can be a different size if the log was generated
2885 * by an older kernel using unclustered inode buffers or a newer kernel
2886 * running with a different inode cluster size. Regardless, if
2887 * the inode buffer size isn't MAX(blocksize, mp->m_inode_cluster_size)
2888 * for *our* value of mp->m_inode_cluster_size, then we need to keep
2889 * the buffer out of the buffer cache so that the buffer won't
2890 * overlap with future reads of those inodes.
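*
* Example of the size check below (geometry assumed): with a 4k block
* size and an 8k inode cluster size, the expected inode buffer size is
* MAX(4096, 8192) = 8192 bytes, so a recovered inode buffer of any
* other size is written synchronously and kept out of the cache.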
2892 if (XFS_DINODE_MAGIC ==
2893 be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) &&
2894 (BBTOB(bp->b_io_length) != MAX(log->l_mp->m_sb.sb_blocksize,
2895 (uint32_t)log->l_mp->m_inode_cluster_size))) {
2897 error = xfs_bwrite(bp);
2899 ASSERT(bp->b_target->bt_mount == mp);
2900 bp->b_iodone = xlog_recover_iodone;
2901 xfs_buf_delwri_queue(bp, buffer_list);
2910 * Inode fork owner changes
2912 * If we have been told that we have to reparent the inode fork, it's because an
2913 * extent swap operation on a CRC enabled filesystem has been done and we are
2914 * replaying it. We need to walk the BMBT of the appropriate fork and change the owner recorded in each of its blocks.
2917 * The complexity here is that we don't have an inode context to work with, so
2918 * after we've replayed the inode we need to instantiate one. This is where the fun begins.
2921 * We are in the middle of log recovery, so we can't run transactions. That
2922 * means we cannot use cache coherent inode instantiation via xfs_iget(), as
2923 * that will result in the corresponding iput() running the inode through
2924 * xfs_inactive(). If we've just replayed an inode core that changes the link
2925 * count to zero (i.e. it's been unlinked), then xfs_inactive() will run
2926 * transactions (bad!).
2928 * So, to avoid this, we instantiate an inode directly from the inode core we've
2929 * just recovered. We have the buffer still locked, and all we really need to
2930 * instantiate is the inode core and the forks being modified. We can do this
2931 * manually, then run the inode btree owner change, and then tear down the
2932 * xfs_inode without having to run any transactions at all.
2934 * Also, we don't have a transaction context available here, but we still
2935 * need to gather all the buffers we modify for writeback, so we pass the
2936 * buffer_list to the operation to use instead.
2940 xfs_recover_inode_owner_change(
2941 struct xfs_mount *mp,
2942 struct xfs_dinode *dip,
2943 struct xfs_inode_log_format *in_f,
2944 struct list_head *buffer_list)
2946 struct xfs_inode *ip;
2949 ASSERT(in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER));
2951 ip = xfs_inode_alloc(mp, in_f->ilf_ino);
2955 /* instantiate the inode */
2956 xfs_inode_from_disk(ip, dip);
2957 ASSERT(ip->i_d.di_version >= 3);
2959 error = xfs_iformat_fork(ip, dip);
2963 if (!xfs_inode_verify_forks(ip)) {
2964 error = -EFSCORRUPTED;
2968 if (in_f->ilf_fields & XFS_ILOG_DOWNER) {
2969 ASSERT(in_f->ilf_fields & XFS_ILOG_DBROOT);
2970 error = xfs_bmbt_change_owner(NULL, ip, XFS_DATA_FORK,
2971 ip->i_ino, buffer_list);
2976 if (in_f->ilf_fields & XFS_ILOG_AOWNER) {
2977 ASSERT(in_f->ilf_fields & XFS_ILOG_ABROOT);
2978 error = xfs_bmbt_change_owner(NULL, ip, XFS_ATTR_FORK,
2979 ip->i_ino, buffer_list);
2990 xlog_recover_inode_pass2(
2992 struct list_head *buffer_list,
2993 struct xlog_recover_item *item,
2994 xfs_lsn_t current_lsn)
2996 struct xfs_inode_log_format *in_f;
2997 xfs_mount_t *mp = log->l_mp;
3006 struct xfs_log_dinode *ldip;
3010 if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) {
3011 in_f = item->ri_buf[0].i_addr;
3013 in_f = kmem_alloc(sizeof(struct xfs_inode_log_format), KM_SLEEP);
3015 error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f);
3021 * Inode buffers can be freed; look out for that
3022 * and do not replay the inode.
3024 if (xlog_check_buffer_cancelled(log, in_f->ilf_blkno,
3025 in_f->ilf_len, 0)) {
3027 trace_xfs_log_recover_inode_cancel(log, in_f);
3030 trace_xfs_log_recover_inode_recover(log, in_f);
3032 bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len, 0,
3033 &xfs_inode_buf_ops);
3038 error = bp->b_error;
3040 xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#2)");
3043 ASSERT(in_f->ilf_fields & XFS_ILOG_CORE);
3044 dip = xfs_buf_offset(bp, in_f->ilf_boffset);
3047 * Make sure the place we're flushing out to really looks like an inode.
3050 if (unlikely(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))) {
3052 "%s: Bad inode magic number, dip = "PTR_FMT", dino bp = "PTR_FMT", ino = %Ld",
3053 __func__, dip, bp, in_f->ilf_ino);
3054 XFS_ERROR_REPORT("xlog_recover_inode_pass2(1)",
3055 XFS_ERRLEVEL_LOW, mp);
3056 error = -EFSCORRUPTED;
3059 ldip = item->ri_buf[1].i_addr;
3060 if (unlikely(ldip->di_magic != XFS_DINODE_MAGIC)) {
3062 "%s: Bad inode log record, rec ptr "PTR_FMT", ino %Ld",
3063 __func__, item, in_f->ilf_ino);
3064 XFS_ERROR_REPORT("xlog_recover_inode_pass2(2)",
3065 XFS_ERRLEVEL_LOW, mp);
3066 error = -EFSCORRUPTED;
3071 * If the inode has an LSN in it, recover the inode only if it's less
3072 * than the lsn of the transaction we are replaying. Note: we still
3073 * need to replay an owner change even though the inode is more recent
3074 * than the transaction as there is no guarantee that all the btree
3075 * blocks are more recent than this transaction, too.
3077 if (dip->di_version >= 3) {
3078 xfs_lsn_t lsn = be64_to_cpu(dip->di_lsn);
3080 if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
3081 trace_xfs_log_recover_inode_skip(log, in_f);
3083 goto out_owner_change;
3088 * di_flushiter is only valid for v1/2 inodes. All changes for v3 inodes
3089 * are transactional and if ordering is necessary we can determine that
3090 * more accurately by the LSN field in the V3 inode core. Don't trust
3091 * the inode versions, as we might be changing them here; use the
3092 * superblock flag to determine whether we need to look at di_flushiter
3093 * to skip replay when the on-disk inode is newer than the log one.
3095 if (!xfs_sb_version_hascrc(&mp->m_sb) &&
3096 ldip->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
3098 * Deal with the wrap case, where DI_MAX_FLUSH compares as older than
3099 * the smaller numbers it wraps around to.
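*
* Example: an on-disk di_flushiter of DI_MAX_FLUSH (0xffff) with a
* logged value of, say, 2 means the counter wrapped, so the log copy
* is the newer one and must be replayed.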
3101 if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH &&
3102 ldip->di_flushiter < (DI_MAX_FLUSH >> 1)) {
3105 trace_xfs_log_recover_inode_skip(log, in_f);
3111 /* Take the opportunity to reset the flush iteration count */
3112 ldip->di_flushiter = 0;
3114 if (unlikely(S_ISREG(ldip->di_mode))) {
3115 if ((ldip->di_format != XFS_DINODE_FMT_EXTENTS) &&
3116 (ldip->di_format != XFS_DINODE_FMT_BTREE)) {
3117 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)",
3118 XFS_ERRLEVEL_LOW, mp, ldip);
3120 "%s: Bad regular inode log record, rec ptr "PTR_FMT", "
3121 "ino ptr = "PTR_FMT", ino bp = "PTR_FMT", ino %Ld",
3122 __func__, item, dip, bp, in_f->ilf_ino);
3123 error = -EFSCORRUPTED;
3126 } else if (unlikely(S_ISDIR(ldip->di_mode))) {
3127 if ((ldip->di_format != XFS_DINODE_FMT_EXTENTS) &&
3128 (ldip->di_format != XFS_DINODE_FMT_BTREE) &&
3129 (ldip->di_format != XFS_DINODE_FMT_LOCAL)) {
3130 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(4)",
3131 XFS_ERRLEVEL_LOW, mp, ldip);
3133 "%s: Bad dir inode log record, rec ptr "PTR_FMT", "
3134 "ino ptr = "PTR_FMT", ino bp = "PTR_FMT", ino %Ld",
3135 __func__, item, dip, bp, in_f->ilf_ino);
3136 error = -EFSCORRUPTED;
3140 if (unlikely(ldip->di_nextents + ldip->di_anextents > ldip->di_nblocks)){
3141 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(5)",
3142 XFS_ERRLEVEL_LOW, mp, ldip);
3144 "%s: Bad inode log record, rec ptr "PTR_FMT", dino ptr "PTR_FMT", "
3145 "dino bp "PTR_FMT", ino %Ld, total extents = %d, nblocks = %Ld",
3146 __func__, item, dip, bp, in_f->ilf_ino,
3147 ldip->di_nextents + ldip->di_anextents,
3149 error = -EFSCORRUPTED;
3152 if (unlikely(ldip->di_forkoff > mp->m_sb.sb_inodesize)) {
3153 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(6)",
3154 XFS_ERRLEVEL_LOW, mp, ldip);
3156 "%s: Bad inode log record, rec ptr "PTR_FMT", dino ptr "PTR_FMT", "
3157 "dino bp "PTR_FMT", ino %Ld, forkoff 0x%x", __func__,
3158 item, dip, bp, in_f->ilf_ino, ldip->di_forkoff);
3159 error = -EFSCORRUPTED;
3162 isize = xfs_log_dinode_size(ldip->di_version);
3163 if (unlikely(item->ri_buf[1].i_len > isize)) {
3164 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)",
3165 XFS_ERRLEVEL_LOW, mp, ldip);
3167 "%s: Bad inode log record length %d, rec ptr "PTR_FMT,
3168 __func__, item->ri_buf[1].i_len, item);
3169 error = -EFSCORRUPTED;
3173 /* recover the log dinode into the on-disk inode */
3174 xfs_log_dinode_to_disk(ldip, dip);
3176 fields = in_f->ilf_fields;
3177 if (fields & XFS_ILOG_DEV)
3178 xfs_dinode_put_rdev(dip, in_f->ilf_u.ilfu_rdev);
3180 if (in_f->ilf_size == 2)
3181 goto out_owner_change;
3182 len = item->ri_buf[2].i_len;
3183 src = item->ri_buf[2].i_addr;
3184 ASSERT(in_f->ilf_size <= 4);
3185 ASSERT((in_f->ilf_size == 3) || (fields & XFS_ILOG_AFORK));
3186 ASSERT(!(fields & XFS_ILOG_DFORK) ||
3187 (len == in_f->ilf_dsize));
3189 switch (fields & XFS_ILOG_DFORK) {
3190 case XFS_ILOG_DDATA:
3192 memcpy(XFS_DFORK_DPTR(dip), src, len);
3195 case XFS_ILOG_DBROOT:
3196 xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, len,
3197 (xfs_bmdr_block_t *)XFS_DFORK_DPTR(dip),
3198 XFS_DFORK_DSIZE(dip, mp));
3203 * There are no data fork flags set.
3205 ASSERT((fields & XFS_ILOG_DFORK) == 0);
3210 * If we logged any attribute data, recover it. There may or
3211 * may not have been any other non-core data logged in this transaction.
3214 if (in_f->ilf_fields & XFS_ILOG_AFORK) {
3215 if (in_f->ilf_fields & XFS_ILOG_DFORK) {
3220 len = item->ri_buf[attr_index].i_len;
3221 src = item->ri_buf[attr_index].i_addr;
3222 ASSERT(len == in_f->ilf_asize);
3224 switch (in_f->ilf_fields & XFS_ILOG_AFORK) {
3225 case XFS_ILOG_ADATA:
3227 dest = XFS_DFORK_APTR(dip);
3228 ASSERT(len <= XFS_DFORK_ASIZE(dip, mp));
3229 memcpy(dest, src, len);
3232 case XFS_ILOG_ABROOT:
3233 dest = XFS_DFORK_APTR(dip);
3234 xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src,
3235 len, (xfs_bmdr_block_t*)dest,
3236 XFS_DFORK_ASIZE(dip, mp));
3240 xfs_warn(log->l_mp, "%s: Invalid flag", __func__);
3248 /* Recover the swapext owner change unless the inode has been deleted */
3249 if ((in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER)) &&
3250 (dip->di_mode != 0))
3251 error = xfs_recover_inode_owner_change(mp, dip, in_f,
3253 /* re-generate the checksum. */
3254 xfs_dinode_calc_crc(log->l_mp, dip);
3256 ASSERT(bp->b_target->bt_mount == mp);
3257 bp->b_iodone = xlog_recover_iodone;
3258 xfs_buf_delwri_queue(bp, buffer_list);
3269 * Recover QUOTAOFF records. We simply make a note of it in the xlog
3270 * structure, so that we know not to do any dquot item or dquot buffer recovery of that type.
3274 xlog_recover_quotaoff_pass1(
3276 struct xlog_recover_item *item)
3278 xfs_qoff_logformat_t *qoff_f = item->ri_buf[0].i_addr;
3282 * The logitem format's flag tells us if this was user quotaoff,
3283 * group/project quotaoff or both.
3285 if (qoff_f->qf_flags & XFS_UQUOTA_ACCT)
3286 log->l_quotaoffs_flag |= XFS_DQ_USER;
3287 if (qoff_f->qf_flags & XFS_PQUOTA_ACCT)
3288 log->l_quotaoffs_flag |= XFS_DQ_PROJ;
3289 if (qoff_f->qf_flags & XFS_GQUOTA_ACCT)
3290 log->l_quotaoffs_flag |= XFS_DQ_GROUP;
3296 * Recover a dquot record
3299 xlog_recover_dquot_pass2(
3301 struct list_head *buffer_list,
3302 struct xlog_recover_item *item,
3303 xfs_lsn_t current_lsn)
3305 xfs_mount_t *mp = log->l_mp;
3307 struct xfs_disk_dquot *ddq, *recddq;
3310 xfs_dq_logformat_t *dq_f;
3315 * Filesystems are required to send in quota flags at mount time.
3317 if (mp->m_qflags == 0)
3320 recddq = item->ri_buf[1].i_addr;
3321 if (recddq == NULL) {
3322 xfs_alert(log->l_mp, "NULL dquot in %s.", __func__);
3325 if (item->ri_buf[1].i_len < sizeof(xfs_disk_dquot_t)) {
3326 xfs_alert(log->l_mp, "dquot too small (%d) in %s.",
3327 item->ri_buf[1].i_len, __func__);
3332 * This type of quotas was turned off, so ignore this record.
3334 type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
3336 if (log->l_quotaoffs_flag & type)
3340 * At this point we know that quota was _not_ turned off.
3341 * Since the mount flags are not indicating to us otherwise, this
3342 * must mean that quota is on, and the dquot needs to be replayed.
3343 * Remember that we may not have fully recovered the superblock yet,
3344 * so we can't do the usual trick of looking at the SB quota bits.
3346 * The other possibility, of course, is that the quota subsystem was
3347 * removed since the last mount - ENOSYS.
3349 dq_f = item->ri_buf[0].i_addr;
3351 fa = xfs_dquot_verify(mp, recddq, dq_f->qlf_id, 0, 0);
3353 xfs_alert(mp, "corrupt dquot ID 0x%x in log at %pS",
3357 ASSERT(dq_f->qlf_len == 1);
3360 * At this point we are assuming that the dquots have been allocated
3361 * and hence the buffer has valid dquots stamped in it. It should,
3362 * therefore, pass verifier validation. If the dquot is bad, then
3363 * we'll return an error here, so we don't need to specifically check
3364 * the dquot in the buffer after the verifier has run.
3366 error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dq_f->qlf_blkno,
3367 XFS_FSB_TO_BB(mp, dq_f->qlf_len), 0, &bp,
3368 &xfs_dquot_buf_ops);
3373 ddq = xfs_buf_offset(bp, dq_f->qlf_boffset);
3376 * If the dquot has an LSN in it, recover the dquot only if it's less
3377 * than the lsn of the transaction we are replaying.
3379 if (xfs_sb_version_hascrc(&mp->m_sb)) {
3380 struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddq;
3381 xfs_lsn_t lsn = be64_to_cpu(dqb->dd_lsn);
3383 if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
3388 memcpy(ddq, recddq, item->ri_buf[1].i_len);
3389 if (xfs_sb_version_hascrc(&mp->m_sb)) {
3390 xfs_update_cksum((char *)ddq, sizeof(struct xfs_dqblk),
3394 ASSERT(dq_f->qlf_size == 2);
3395 ASSERT(bp->b_target->bt_mount == mp);
3396 bp->b_iodone = xlog_recover_iodone;
3397 xfs_buf_delwri_queue(bp, buffer_list);
3405 * This routine is called to create an in-core extent free intent
3406 * item from the efi format structure which was logged on disk.
3407 * It allocates an in-core efi, copies the extents from the format
3408 * structure into it, and adds the efi to the AIL with the given LSN.
3412 xlog_recover_efi_pass2(
3414 struct xlog_recover_item *item,
3418 struct xfs_mount *mp = log->l_mp;
3419 struct xfs_efi_log_item *efip;
3420 struct xfs_efi_log_format *efi_formatp;
3422 efi_formatp = item->ri_buf[0].i_addr;
3424 efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
3425 error = xfs_efi_copy_format(&item->ri_buf[0], &efip->efi_format);
3427 xfs_efi_item_free(efip);
3430 atomic_set(&efip->efi_next_extent, efi_formatp->efi_nextents);
3432 spin_lock(&log->l_ailp->ail_lock);
3434 * The EFI has two references: one for the EFD and one to ensure the EFI
3435 * makes it into the AIL. Insert the EFI into the AIL directly and drop
3436 * the EFI reference. Note that xfs_trans_ail_update() drops the AIL lock.
3439 xfs_trans_ail_update(log->l_ailp, &efip->efi_item, lsn);
3440 xfs_efi_release(efip);
3446 * This routine is called when an EFD format structure is found in a committed
3447 * transaction in the log. Its purpose is to cancel the corresponding EFI if it
3448 * was still in the log. To do this it searches the AIL for the EFI with an id
3449 * equal to that in the EFD format structure. If we find it we drop the EFD
3450 * reference, which removes the EFI from the AIL and frees it.
3453 xlog_recover_efd_pass2(
3455 struct xlog_recover_item *item)
3457 xfs_efd_log_format_t *efd_formatp;
3458 xfs_efi_log_item_t *efip = NULL;
3459 xfs_log_item_t *lip;
3461 struct xfs_ail_cursor cur;
3462 struct xfs_ail *ailp = log->l_ailp;
3464 efd_formatp = item->ri_buf[0].i_addr;
3465 ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) +
3466 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) ||
3467 (item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) +
3468 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_64_t)))));
3469 efi_id = efd_formatp->efd_efi_id;
3472 * Search for the EFI with the id in the EFD format structure in the
3475 spin_lock(&ailp->ail_lock);
3476 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3477 while (lip != NULL) {
3478 if (lip->li_type == XFS_LI_EFI) {
3479 efip = (xfs_efi_log_item_t *)lip;
3480 if (efip->efi_format.efi_id == efi_id) {
3482 * Drop the EFD reference to the EFI. This
3483 * removes the EFI from the AIL and frees it.
3485 spin_unlock(&ailp->ail_lock);
3486 xfs_efi_release(efip);
3487 spin_lock(&ailp->ail_lock);
3491 lip = xfs_trans_ail_cursor_next(ailp, &cur);
3494 xfs_trans_ail_cursor_done(&cur);
3495 spin_unlock(&ailp->ail_lock);
3501 * This routine is called to create an in-core extent rmap update
3502 * item from the rui format structure which was logged on disk.
3503 * It allocates an in-core rui, copies the extents from the format
3504 * structure into it, and adds the rui to the AIL with the given LSN.
3508 xlog_recover_rui_pass2(
3510 struct xlog_recover_item *item,
3514 struct xfs_mount *mp = log->l_mp;
3515 struct xfs_rui_log_item *ruip;
3516 struct xfs_rui_log_format *rui_formatp;
3518 rui_formatp = item->ri_buf[0].i_addr;
3520 ruip = xfs_rui_init(mp, rui_formatp->rui_nextents);
3521 error = xfs_rui_copy_format(&item->ri_buf[0], &ruip->rui_format);
3523 xfs_rui_item_free(ruip);
3526 atomic_set(&ruip->rui_next_extent, rui_formatp->rui_nextents);
3528 spin_lock(&log->l_ailp->ail_lock);
3530 * The RUI has two references: one for the RUD and one to ensure the RUI
3531 * makes it into the AIL. Insert the RUI into the AIL directly and drop
3532 * the RUI reference. Note that xfs_trans_ail_update() drops the AIL lock.
3535 xfs_trans_ail_update(log->l_ailp, &ruip->rui_item, lsn);
3536 xfs_rui_release(ruip);
3542 * This routine is called when an RUD format structure is found in a committed
3543 * transaction in the log. Its purpose is to cancel the corresponding RUI if it
3544 * was still in the log. To do this it searches the AIL for the RUI with an id
3545 * equal to that in the RUD format structure. If we find it we drop the RUD
3546 * reference, which removes the RUI from the AIL and frees it.
3549 xlog_recover_rud_pass2(
3551 struct xlog_recover_item *item)
3553 struct xfs_rud_log_format *rud_formatp;
3554 struct xfs_rui_log_item *ruip = NULL;
3555 struct xfs_log_item *lip;
3557 struct xfs_ail_cursor cur;
3558 struct xfs_ail *ailp = log->l_ailp;
3560 rud_formatp = item->ri_buf[0].i_addr;
3561 ASSERT(item->ri_buf[0].i_len == sizeof(struct xfs_rud_log_format));
3562 rui_id = rud_formatp->rud_rui_id;
3565 * Search for the RUI with the id in the RUD format structure in the
3568 spin_lock(&ailp->ail_lock);
3569 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3570 while (lip != NULL) {
3571 if (lip->li_type == XFS_LI_RUI) {
3572 ruip = (struct xfs_rui_log_item *)lip;
3573 if (ruip->rui_format.rui_id == rui_id) {
3575 * Drop the RUD reference to the RUI. This
3576 * removes the RUI from the AIL and frees it.
3578 spin_unlock(&ailp->ail_lock);
3579 xfs_rui_release(ruip);
3580 spin_lock(&ailp->ail_lock);
3584 lip = xfs_trans_ail_cursor_next(ailp, &cur);
3587 xfs_trans_ail_cursor_done(&cur);
3588 spin_unlock(&ailp->ail_lock);
3594 * Copy a CUI format buffer from the given buf, and into the destination
3595 * CUI format structure. The CUI/CUD items were designed not to need any
3596 * special alignment handling.
3599 xfs_cui_copy_format(
3600 struct xfs_log_iovec *buf,
3601 struct xfs_cui_log_format *dst_cui_fmt)
3603 struct xfs_cui_log_format *src_cui_fmt;
3606 src_cui_fmt = buf->i_addr;
3607 len = xfs_cui_log_format_sizeof(src_cui_fmt->cui_nextents);
3609 if (buf->i_len == len) {
3610 memcpy(dst_cui_fmt, src_cui_fmt, len);
3613 return -EFSCORRUPTED;
3617 * This routine is called to create an in-core extent refcount update
3618 * item from the cui format structure which was logged on disk.
3619 * It allocates an in-core cui, copies the extents from the format
3620 * structure into it, and adds the cui to the AIL with the given LSN.
3624 xlog_recover_cui_pass2(
3626 struct xlog_recover_item *item,
3630 struct xfs_mount *mp = log->l_mp;
3631 struct xfs_cui_log_item *cuip;
3632 struct xfs_cui_log_format *cui_formatp;
3634 cui_formatp = item->ri_buf[0].i_addr;
3636 cuip = xfs_cui_init(mp, cui_formatp->cui_nextents);
3637 error = xfs_cui_copy_format(&item->ri_buf[0], &cuip->cui_format);
3639 xfs_cui_item_free(cuip);
3642 atomic_set(&cuip->cui_next_extent, cui_formatp->cui_nextents);
3644 spin_lock(&log->l_ailp->ail_lock);
3646 * The CUI has two references: one for the CUD and one to ensure the CUI
3647 * makes it into the AIL. Insert the CUI into the AIL directly and drop
3648 * the CUI reference. Note that xfs_trans_ail_update() drops the AIL lock.
3651 xfs_trans_ail_update(log->l_ailp, &cuip->cui_item, lsn);
3652 xfs_cui_release(cuip);
3658 * This routine is called when a CUD format structure is found in a committed
3659 * transaction in the log. Its purpose is to cancel the corresponding CUI if it
3660 * was still in the log. To do this it searches the AIL for the CUI with an id
3661 * equal to that in the CUD format structure. If we find it we drop the CUD
3662 * reference, which removes the CUI from the AIL and frees it.
3665 xlog_recover_cud_pass2(
3667 struct xlog_recover_item *item)
3669 struct xfs_cud_log_format *cud_formatp;
3670 struct xfs_cui_log_item *cuip = NULL;
3671 struct xfs_log_item *lip;
3673 struct xfs_ail_cursor cur;
3674 struct xfs_ail *ailp = log->l_ailp;
3676 cud_formatp = item->ri_buf[0].i_addr;
3677 if (item->ri_buf[0].i_len != sizeof(struct xfs_cud_log_format))
3678 return -EFSCORRUPTED;
3679 cui_id = cud_formatp->cud_cui_id;
3682 * Search for the CUI with the id in the CUD format structure in the
3685 spin_lock(&ailp->ail_lock);
3686 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3687 while (lip != NULL) {
3688 if (lip->li_type == XFS_LI_CUI) {
3689 cuip = (struct xfs_cui_log_item *)lip;
3690 if (cuip->cui_format.cui_id == cui_id) {
3692 * Drop the CUD reference to the CUI. This
3693 * removes the CUI from the AIL and frees it.
3695 spin_unlock(&ailp->ail_lock);
3696 xfs_cui_release(cuip);
3697 spin_lock(&ailp->ail_lock);
3701 lip = xfs_trans_ail_cursor_next(ailp, &cur);
3704 xfs_trans_ail_cursor_done(&cur);
3705 spin_unlock(&ailp->ail_lock);
3711 * Copy a BUI format buffer from the given buf, and into the destination
3712 * BUI format structure. The BUI/BUD items were designed not to need any
3713 * special alignment handling.
3716 xfs_bui_copy_format(
3717 struct xfs_log_iovec *buf,
3718 struct xfs_bui_log_format *dst_bui_fmt)
3720 struct xfs_bui_log_format *src_bui_fmt;
3723 src_bui_fmt = buf->i_addr;
3724 len = xfs_bui_log_format_sizeof(src_bui_fmt->bui_nextents);
3726 if (buf->i_len == len) {
3727 memcpy(dst_bui_fmt, src_bui_fmt, len);
3730 return -EFSCORRUPTED;
3734 * This routine is called to create an in-core extent bmap update
3735 * item from the bui format structure which was logged on disk.
3736 * It allocates an in-core bui, copies the extents from the format
3737 * structure into it, and adds the bui to the AIL with the given LSN.
3741 xlog_recover_bui_pass2(
3743 struct xlog_recover_item *item,
3747 struct xfs_mount *mp = log->l_mp;
3748 struct xfs_bui_log_item *buip;
3749 struct xfs_bui_log_format *bui_formatp;
3751 bui_formatp = item->ri_buf[0].i_addr;
3753 if (bui_formatp->bui_nextents != XFS_BUI_MAX_FAST_EXTENTS)
3754 return -EFSCORRUPTED;
3755 buip = xfs_bui_init(mp);
3756 error = xfs_bui_copy_format(&item->ri_buf[0], &buip->bui_format);
3758 xfs_bui_item_free(buip);
3761 atomic_set(&buip->bui_next_extent, bui_formatp->bui_nextents);
3763 spin_lock(&log->l_ailp->ail_lock);
3765 * The BUI has two references: one for the BUD and one to ensure the BUI
3766 * makes it into the AIL. Insert the BUI into the AIL directly and drop
3767 * the BUI reference. Note that xfs_trans_ail_update() drops the AIL lock.
3770 xfs_trans_ail_update(log->l_ailp, &buip->bui_item, lsn);
3771 xfs_bui_release(buip);
3777 * This routine is called when a BUD format structure is found in a committed
3778 * transaction in the log. Its purpose is to cancel the corresponding BUI if it
3779 * was still in the log. To do this it searches the AIL for the BUI with an id
3780 * equal to that in the BUD format structure. If we find it we drop the BUD
3781 * reference, which removes the BUI from the AIL and frees it.
3784 xlog_recover_bud_pass2(
3786 struct xlog_recover_item *item)
3788 struct xfs_bud_log_format *bud_formatp;
3789 struct xfs_bui_log_item *buip = NULL;
3790 struct xfs_log_item *lip;
3792 struct xfs_ail_cursor cur;
3793 struct xfs_ail *ailp = log->l_ailp;
3795 bud_formatp = item->ri_buf[0].i_addr;
3796 if (item->ri_buf[0].i_len != sizeof(struct xfs_bud_log_format))
3797 return -EFSCORRUPTED;
3798 bui_id = bud_formatp->bud_bui_id;
3801 * Search for the BUI with the id in the BUD format structure in the
3804 spin_lock(&ailp->ail_lock);
3805 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3806 while (lip != NULL) {
3807 if (lip->li_type == XFS_LI_BUI) {
3808 buip = (struct xfs_bui_log_item *)lip;
3809 if (buip->bui_format.bui_id == bui_id) {
3811 * Drop the BUD reference to the BUI. This
3812 * removes the BUI from the AIL and frees it.
3814 spin_unlock(&ailp->ail_lock);
3815 xfs_bui_release(buip);
3816 spin_lock(&ailp->ail_lock);
3820 lip = xfs_trans_ail_cursor_next(ailp, &cur);
3823 xfs_trans_ail_cursor_done(&cur);
3824 spin_unlock(&ailp->ail_lock);
3830 * This routine is called when an inode create format structure is found in a
3831 * committed transaction in the log. Its purpose is to initialise the inodes
3832 * being allocated on disk. This requires us to get inode cluster buffers that
3833 * match the range to be initialised, stamp them with inode templates and write them
3834 * by delayed write so that subsequent modifications will hit the cached buffer
3835 * and only need writing out at the end of recovery.
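*
* Example of the count/length consistency check below (geometry
* assumed): with 4096 byte blocks and 512 byte inodes, sb_inopblog is 3
* (8 inodes per block), so a 64 inode chunk must be logged with an
* extent length of 64 >> 3 = 8 blocks.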
3838 xlog_recover_do_icreate_pass2(
3840 struct list_head *buffer_list,
3841 xlog_recover_item_t *item)
3843 struct xfs_mount *mp = log->l_mp;
3844 struct xfs_icreate_log *icl;
3845 xfs_agnumber_t agno;
3846 xfs_agblock_t agbno;
3849 xfs_agblock_t length;
3850 int blks_per_cluster;
3856 icl = (struct xfs_icreate_log *)item->ri_buf[0].i_addr;
3857 if (icl->icl_type != XFS_LI_ICREATE) {
3858 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad type");
3862 if (icl->icl_size != 1) {
3863 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad icl size");
3867 agno = be32_to_cpu(icl->icl_ag);
3868 if (agno >= mp->m_sb.sb_agcount) {
3869 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agno");
3872 agbno = be32_to_cpu(icl->icl_agbno);
3873 if (!agbno || agbno == NULLAGBLOCK || agbno >= mp->m_sb.sb_agblocks) {
3874 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agbno");
3877 isize = be32_to_cpu(icl->icl_isize);
3878 if (isize != mp->m_sb.sb_inodesize) {
3879 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad isize");
3882 count = be32_to_cpu(icl->icl_count);
3884 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count");
3887 length = be32_to_cpu(icl->icl_length);
3888 if (!length || length >= mp->m_sb.sb_agblocks) {
3889 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad length");
3894 * The inode chunk is either full or sparse and we only support
3895 * m_ialloc_min_blks sized sparse allocations at this time.
3897 if (length != mp->m_ialloc_blks &&
3898 length != mp->m_ialloc_min_blks) {
3900 "%s: unsupported chunk length", __FUNCTION__);
3904 /* verify inode count is consistent with extent length */
3905 if ((count >> mp->m_sb.sb_inopblog) != length) {
3907 "%s: inconsistent inode count and chunk length",
3913 * The icreate transaction can cover multiple cluster buffers and these
3914 * buffers could have been freed and reused. Check the individual
3915 * buffers for cancellation so we don't overwrite anything written after cancellation.
3918 blks_per_cluster = xfs_icluster_size_fsb(mp);
3919 bb_per_cluster = XFS_FSB_TO_BB(mp, blks_per_cluster);
3920 nbufs = length / blks_per_cluster;
3921 for (i = 0, cancel_count = 0; i < nbufs; i++) {
3924 daddr = XFS_AGB_TO_DADDR(mp, agno,
3925 agbno + i * blks_per_cluster);
3926 if (xlog_check_buffer_cancelled(log, daddr, bb_per_cluster, 0))
3931 * We currently only use icreate for a single allocation at a time. This
3932 * means we should expect either all or none of the buffers to be
3933 * cancelled. Be conservative and skip replay if at least one buffer is
3934 * cancelled, but warn the user that something is awry if the buffers
3935 * are not consistent.
3937 * XXX: This must be refined to only skip cancelled clusters once we use
3938 * icreate for multiple chunk allocations.
3940 ASSERT(!cancel_count || cancel_count == nbufs);
3942 if (cancel_count != nbufs)
3944 "WARNING: partial inode chunk cancellation, skipped icreate.");
3945 trace_xfs_log_recover_icreate_cancel(log, icl);
3949 trace_xfs_log_recover_icreate_recover(log, icl);
3950 return xfs_ialloc_inode_init(mp, NULL, buffer_list, count, agno, agbno,
3951 length, be32_to_cpu(icl->icl_gen));
3955 xlog_recover_buffer_ra_pass2(
3957 struct xlog_recover_item *item)
3959 struct xfs_buf_log_format *buf_f = item->ri_buf[0].i_addr;
3960 struct xfs_mount *mp = log->l_mp;
3962 if (xlog_peek_buffer_cancelled(log, buf_f->blf_blkno,
3963 buf_f->blf_len, buf_f->blf_flags)) {
3967 xfs_buf_readahead(mp->m_ddev_targp, buf_f->blf_blkno,
3968 buf_f->blf_len, NULL);
3972 xlog_recover_inode_ra_pass2(
3974 struct xlog_recover_item *item)
3976 struct xfs_inode_log_format ilf_buf;
3977 struct xfs_inode_log_format *ilfp;
3978 struct xfs_mount *mp = log->l_mp;
3981 if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) {
3982 ilfp = item->ri_buf[0].i_addr;
3985 memset(ilfp, 0, sizeof(*ilfp));
3986 error = xfs_inode_item_format_convert(&item->ri_buf[0], ilfp);
3991 if (xlog_peek_buffer_cancelled(log, ilfp->ilf_blkno, ilfp->ilf_len, 0))
3994 xfs_buf_readahead(mp->m_ddev_targp, ilfp->ilf_blkno,
3995 ilfp->ilf_len, &xfs_inode_buf_ra_ops);
3999 xlog_recover_dquot_ra_pass2(
4001 struct xlog_recover_item *item)
4003 struct xfs_mount *mp = log->l_mp;
4004 struct xfs_disk_dquot *recddq;
4005 struct xfs_dq_logformat *dq_f;
4010 if (mp->m_qflags == 0)
4013 recddq = item->ri_buf[1].i_addr;
4016 if (item->ri_buf[1].i_len < sizeof(struct xfs_disk_dquot))
4019 type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
4021 if (log->l_quotaoffs_flag & type)
4024 dq_f = item->ri_buf[0].i_addr;
4026 ASSERT(dq_f->qlf_len == 1);
4028 len = XFS_FSB_TO_BB(mp, dq_f->qlf_len);
4029 if (xlog_peek_buffer_cancelled(log, dq_f->qlf_blkno, len, 0))
4032 xfs_buf_readahead(mp->m_ddev_targp, dq_f->qlf_blkno, len,
4033 &xfs_dquot_buf_ra_ops);
4037 xlog_recover_ra_pass2(
4039 struct xlog_recover_item *item)
4041 switch (ITEM_TYPE(item)) {
4043 xlog_recover_buffer_ra_pass2(log, item);
4046 xlog_recover_inode_ra_pass2(log, item);
4049 xlog_recover_dquot_ra_pass2(log, item);
4053 case XFS_LI_QUOTAOFF:
4066 xlog_recover_commit_pass1(
4068 struct xlog_recover *trans,
4069 struct xlog_recover_item *item)
4071 trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1);
4073 switch (ITEM_TYPE(item)) {
4075 return xlog_recover_buffer_pass1(log, item);
4076 case XFS_LI_QUOTAOFF:
4077 return xlog_recover_quotaoff_pass1(log, item);
4082 case XFS_LI_ICREATE:
4089 /* nothing to do in pass 1 */
4092 xfs_warn(log->l_mp, "%s: invalid item type (%d)",
4093 __func__, ITEM_TYPE(item));
4100 xlog_recover_commit_pass2(
4102 struct xlog_recover *trans,
4103 struct list_head *buffer_list,
4104 struct xlog_recover_item *item)
4106 trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2);
4108 switch (ITEM_TYPE(item)) {
4110 return xlog_recover_buffer_pass2(log, buffer_list, item,
4113 return xlog_recover_inode_pass2(log, buffer_list, item,
4116 return xlog_recover_efi_pass2(log, item, trans->r_lsn);
4118 return xlog_recover_efd_pass2(log, item);
4120 return xlog_recover_rui_pass2(log, item, trans->r_lsn);
4122 return xlog_recover_rud_pass2(log, item);
4124 return xlog_recover_cui_pass2(log, item, trans->r_lsn);
4126 return xlog_recover_cud_pass2(log, item);
4128 return xlog_recover_bui_pass2(log, item, trans->r_lsn);
4130 return xlog_recover_bud_pass2(log, item);
4132 return xlog_recover_dquot_pass2(log, buffer_list, item,
4134 case XFS_LI_ICREATE:
4135 return xlog_recover_do_icreate_pass2(log, buffer_list, item);
4136 case XFS_LI_QUOTAOFF:
4137 /* nothing to do in pass2 */
4140 xfs_warn(log->l_mp, "%s: invalid item type (%d)",
4141 __func__, ITEM_TYPE(item));
4148 xlog_recover_items_pass2(
4150 struct xlog_recover *trans,
4151 struct list_head *buffer_list,
4152 struct list_head *item_list)
4154 struct xlog_recover_item *item;
4157 list_for_each_entry(item, item_list, ri_list) {
4158 error = xlog_recover_commit_pass2(log, trans,
4168 * Perform the transaction.
4170 * If the transaction modifies a buffer or inode, do it now. Otherwise,
4171 * EFIs and EFDs get queued up by adding entries into the AIL for them.
4174 xlog_recover_commit_trans(
4176 struct xlog_recover *trans,
4178 struct list_head *buffer_list)
4181 int items_queued = 0;
4182 struct xlog_recover_item *item;
4183 struct xlog_recover_item *next;
4184 LIST_HEAD (ra_list);
4185 LIST_HEAD (done_list);
4187 #define XLOG_RECOVER_COMMIT_QUEUE_MAX 100
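/*
 * Pass 2 items are queued onto ra_list in batches of up to
 * XLOG_RECOVER_COMMIT_QUEUE_MAX so readahead for a whole batch is issued
 * before any item in it is recovered; done_list collects the processed
 * items so they can be spliced back onto the transaction for freeing.
 */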
4189 hlist_del_init(&trans->r_list);
4191 error = xlog_recover_reorder_trans(log, trans, pass);
4195 list_for_each_entry_safe(item, next, &trans->r_itemq, ri_list) {
4197 case XLOG_RECOVER_PASS1:
4198 error = xlog_recover_commit_pass1(log, trans, item);
4200 case XLOG_RECOVER_PASS2:
4201 xlog_recover_ra_pass2(log, item);
4202 list_move_tail(&item->ri_list, &ra_list);
4204 if (items_queued >= XLOG_RECOVER_COMMIT_QUEUE_MAX) {
4205 error = xlog_recover_items_pass2(log, trans,
4206 buffer_list, &ra_list);
4207 list_splice_tail_init(&ra_list, &done_list);
4221 if (!list_empty(&ra_list)) {
4223 error = xlog_recover_items_pass2(log, trans,
4224 buffer_list, &ra_list);
4225 list_splice_tail_init(&ra_list, &done_list);
4228 if (!list_empty(&done_list))
4229 list_splice_init(&done_list, &trans->r_itemq);
4235 xlog_recover_add_item(
4236 struct list_head *head)
4238 xlog_recover_item_t *item;
4240 item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP);
4241 INIT_LIST_HEAD(&item->ri_list);
4242 list_add_tail(&item->ri_list, head);
4246 xlog_recover_add_to_cont_trans(
4248 struct xlog_recover *trans,
4252 xlog_recover_item_t *item;
4253 char *ptr, *old_ptr;
4257 * If the transaction is empty, the header was split across this and the
4258 * previous record. Copy the rest of the header.
4260 if (list_empty(&trans->r_itemq)) {
4261 ASSERT(len <= sizeof(struct xfs_trans_header));
4262 if (len > sizeof(struct xfs_trans_header)) {
4263 xfs_warn(log->l_mp, "%s: bad header length", __func__);
4267 xlog_recover_add_item(&trans->r_itemq);
4268 ptr = (char *)&trans->r_theader +
4269 sizeof(struct xfs_trans_header) - len;
4270 memcpy(ptr, dp, len);
4274 /* take the tail entry */
4275 item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
4277 old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
4278 old_len = item->ri_buf[item->ri_cnt-1].i_len;
4280 ptr = kmem_realloc(old_ptr, len + old_len, KM_SLEEP);
4281 memcpy(&ptr[old_len], dp, len);
4282 item->ri_buf[item->ri_cnt-1].i_len += len;
4283 item->ri_buf[item->ri_cnt-1].i_addr = ptr;
4284 trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
4289 * The next region to add is the start of a new region. It could be
4290 * a whole region or it could be the first part of a new region. Because
4291 * of this, the assumption here is that the type and size fields of all
4292 * format structures fit into the first 32 bits of the structure.
4294 * This works because all regions must be 32 bit aligned. Therefore, we
4295 * either have both fields or we have neither field. In the case we have
4296 * neither field, the data part of the region is zero length. We only have
4297 * a log_op_header and can throw away the header since a new one will appear
4298 * later. If we have at least 4 bytes, then we can determine how many regions
4299 * will appear in the current log item.
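*
* Concretely, using struct xfs_inode_log_format (the "any will do" cast
* below) as the template: ilf_type and ilf_size are the two leading
* 16 bit fields, so peeking at the first 4 bytes of a region is enough
* to read in_f->ilf_size and size the item's region array.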
4302 xlog_recover_add_to_trans(
4304 struct xlog_recover *trans,
4308 struct xfs_inode_log_format *in_f; /* any will do */
4309 xlog_recover_item_t *item;
4314 if (list_empty(&trans->r_itemq)) {
4315 /* we need to catch log corruptions here */
4316 if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
4317 xfs_warn(log->l_mp, "%s: bad header magic number",
4323 if (len > sizeof(struct xfs_trans_header)) {
4324 xfs_warn(log->l_mp, "%s: bad header length", __func__);
4330 * The transaction header can be arbitrarily split across op
4331 * records. If we don't have the whole thing here, copy what we
4332 * do have and handle the rest in the next record.
4334 if (len == sizeof(struct xfs_trans_header))
4335 xlog_recover_add_item(&trans->r_itemq);
4336 memcpy(&trans->r_theader, dp, len);
4340 ptr = kmem_alloc(len, KM_SLEEP);
4341 memcpy(ptr, dp, len);
4342 in_f = (struct xfs_inode_log_format *)ptr;
4344 /* take the tail entry */
4345 item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
4346 if (item->ri_total != 0 &&
4347 item->ri_total == item->ri_cnt) {
4348 /* tail item is in use, get a new one */
4349 xlog_recover_add_item(&trans->r_itemq);
4350 item = list_entry(trans->r_itemq.prev,
4351 xlog_recover_item_t, ri_list);
4354 if (item->ri_total == 0) { /* first region to be added */
4355 if (in_f->ilf_size == 0 ||
4356 in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
4358 "bad number of regions (%d) in inode log format",
4365 item->ri_total = in_f->ilf_size;
4367 kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
4370 ASSERT(item->ri_total > item->ri_cnt);
4371 /* Description region is ri_buf[0] */
4372 item->ri_buf[item->ri_cnt].i_addr = ptr;
4373 item->ri_buf[item->ri_cnt].i_len = len;
4375 trace_xfs_log_recover_item_add(log, trans, item, 0);
4380 * Free up any resources allocated by the transaction.
4382 * Remember that EFIs, EFDs, and IUNLINKs are handled later.
4385 xlog_recover_free_trans(
4386 struct xlog_recover *trans)
4388 xlog_recover_item_t *item, *n;
4391 hlist_del_init(&trans->r_list);
4393 list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
4394 /* Free the regions in the item. */
4395 list_del(&item->ri_list);
4396 for (i = 0; i < item->ri_cnt; i++)
4397 kmem_free(item->ri_buf[i].i_addr);
4398 /* Free the item itself */
4399 kmem_free(item->ri_buf);
4402 /* Free the transaction recover structure */
4407 * On error or completion, trans is freed.
4410 xlog_recovery_process_trans(
4412 struct xlog_recover *trans,
4417 struct list_head *buffer_list)
4420 bool freeit = false;
4422 /* mask off ophdr transaction container flags */
4423 flags &= ~XLOG_END_TRANS;
4424 if (flags & XLOG_WAS_CONT_TRANS)
4425 flags &= ~XLOG_CONTINUE_TRANS;
4428 * Callees must not free the trans structure. We'll decide if we need to
4429 * free it or not based on the operation being done and its result.
4432 /* expected flag values */
4434 case XLOG_CONTINUE_TRANS:
4435 error = xlog_recover_add_to_trans(log, trans, dp, len);
4437 case XLOG_WAS_CONT_TRANS:
4438 error = xlog_recover_add_to_cont_trans(log, trans, dp, len);
4440 case XLOG_COMMIT_TRANS:
4441 error = xlog_recover_commit_trans(log, trans, pass,
4443 /* success or fail, we are now done with this transaction. */
4447 /* unexpected flag values */
4448 case XLOG_UNMOUNT_TRANS:
4449 /* just skip trans */
4450 xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
4453 case XLOG_START_TRANS:
4455 xfs_warn(log->l_mp, "%s: bad flag 0x%x", __func__, flags);
4460 if (error || freeit)
4461 xlog_recover_free_trans(trans);
4466	 * Look up the transaction recovery structure associated with the ID in the
4467 * current ophdr. If the transaction doesn't exist and the start flag is set in
4468 * the ophdr, then allocate a new transaction for future ID matches to find.
4469	 * Either way, return what we found during the lookup - an existing transaction
4470	 * or nothing.
4472 STATIC struct xlog_recover *
4473 xlog_recover_ophdr_to_trans(
4474 struct hlist_head rhash[],
4475 struct xlog_rec_header *rhead,
4476 struct xlog_op_header *ohead)
4478 struct xlog_recover *trans;
4480 struct hlist_head *rhp;
4482 tid = be32_to_cpu(ohead->oh_tid);
4483 rhp = &rhash[XLOG_RHASH(tid)];
4484 hlist_for_each_entry(trans, rhp, r_list) {
4485 if (trans->r_log_tid == tid)
4490 * skip over non-start transaction headers - we could be
4491 * processing slack space before the next transaction starts
4493 if (!(ohead->oh_flags & XLOG_START_TRANS))
4496 ASSERT(be32_to_cpu(ohead->oh_len) == 0);
4499 * This is a new transaction so allocate a new recovery container to
4500 * hold the recovery ops that will follow.
4502 trans = kmem_zalloc(sizeof(struct xlog_recover), KM_SLEEP);
4503 trans->r_log_tid = tid;
4504 trans->r_lsn = be64_to_cpu(rhead->h_lsn);
4505 INIT_LIST_HEAD(&trans->r_itemq);
4506 INIT_HLIST_NODE(&trans->r_list);
4507 hlist_add_head(&trans->r_list, rhp);
4510 * Nothing more to do for this ophdr. Items to be added to this new
4511 * transaction will be in subsequent ophdr containers.
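/*
 * The lookup above in miniature, assuming a plain singly linked
 * bucket array instead of the kernel hlist machinery. All names and
 * sizes here are illustrative only.
 */
#include <stdint.h>
#include <stdlib.h>

#define NR_RHASH	16	/* must be a power of two */

struct rtrans {
	uint32_t	tid;
	struct rtrans	*next;
};

static struct rtrans *
rtrans_lookup(struct rtrans **rhash, uint32_t tid, int is_start)
{
	struct rtrans **head = &rhash[tid & (NR_RHASH - 1)];
	struct rtrans *t;

	for (t = *head; t != NULL; t = t->next)
		if (t->tid == tid)
			return t;	/* existing transaction */

	if (!is_start)			/* slack space, nothing to do */
		return NULL;

	t = calloc(1, sizeof(*t));	/* new recovery container */
	if (t != NULL) {
		t->tid = tid;
		t->next = *head;
		*head = t;
	}
	return t;
}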
4517 xlog_recover_process_ophdr(
4519 struct hlist_head rhash[],
4520 struct xlog_rec_header *rhead,
4521 struct xlog_op_header *ohead,
4525 struct list_head *buffer_list)
4527 struct xlog_recover *trans;
4531 /* Do we understand who wrote this op? */
4532 if (ohead->oh_clientid != XFS_TRANSACTION &&
4533 ohead->oh_clientid != XFS_LOG) {
4534 xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
4535 __func__, ohead->oh_clientid);
4541 * Check the ophdr contains all the data it is supposed to contain.
4543 len = be32_to_cpu(ohead->oh_len);
4544 if (dp + len > end) {
4545 xfs_warn(log->l_mp, "%s: bad length 0x%x", __func__, len);
4550 trans = xlog_recover_ophdr_to_trans(rhash, rhead, ohead);
4552 /* nothing to do, so skip over this ophdr */
4557 * The recovered buffer queue is drained only once we know that all
4558	 * recovery items for the current LSN have been processed. This is
4559	 * required because:
4561 * - Buffer write submission updates the metadata LSN of the buffer.
4562 * - Log recovery skips items with a metadata LSN >= the current LSN of
4563 * the recovery item.
4564 * - Separate recovery items against the same metadata buffer can share
4565 * a current LSN. I.e., consider that the LSN of a recovery item is
4566 * defined as the starting LSN of the first record in which its
4567 * transaction appears, that a record can hold multiple transactions,
4568 * and/or that a transaction can span multiple records.
4570 * In other words, we are allowed to submit a buffer from log recovery
4571 * once per current LSN. Otherwise, we may incorrectly skip recovery
4572 * items and cause corruption.
4574 * We don't know up front whether buffers are updated multiple times per
4575 * LSN. Therefore, track the current LSN of each commit log record as it
4576 * is processed and drain the queue when it changes. Use commit records
4577 * because they are ordered correctly by the logging code.
4579 if (log->l_recovery_lsn != trans->r_lsn &&
4580 ohead->oh_flags & XLOG_COMMIT_TRANS) {
4581 error = xfs_buf_delwri_submit(buffer_list);
4584 log->l_recovery_lsn = trans->r_lsn;
4587 return xlog_recovery_process_trans(log, trans, dp, len,
4588 ohead->oh_flags, pass, buffer_list);
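/*
 * The "drain once per LSN" rule above, reduced to a sketch with a
 * hypothetical flush callback standing in for xfs_buf_delwri_submit.
 * Only a commit record with a new LSN triggers a drain, so a buffer
 * updated by several items sharing one LSN is submitted exactly once.
 */
#include <stdint.h>

struct drain_state {
	uint64_t	recovery_lsn;	/* LSN of the last drain */
	int		(*flush)(void *queue);
	void		*queue;
};

static int
drain_on_lsn_change(struct drain_state *ds, uint64_t trans_lsn,
		    int is_commit)
{
	int error = 0;

	if (is_commit && ds->recovery_lsn != trans_lsn) {
		error = ds->flush(ds->queue);
		if (!error)
			ds->recovery_lsn = trans_lsn;
	}
	return error;
}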
4592 * There are two valid states of the r_state field. 0 indicates that the
4593 * transaction structure is in a normal state. We have either seen the
4594 * start of the transaction or the last operation we added was not a partial
4595 * operation. If the last operation we added to the transaction was a
4596 * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS.
4598 * NOTE: skip LRs with 0 data length.
4601 xlog_recover_process_data(
4603 struct hlist_head rhash[],
4604 struct xlog_rec_header *rhead,
4607 struct list_head *buffer_list)
4609 struct xlog_op_header *ohead;
4614 end = dp + be32_to_cpu(rhead->h_len);
4615 num_logops = be32_to_cpu(rhead->h_num_logops);
4617 /* check the log format matches our own - else we can't recover */
4618 if (xlog_header_check_recover(log->l_mp, rhead))
4621 trace_xfs_log_recover_record(log, rhead, pass);
4622 while ((dp < end) && num_logops) {
4624 ohead = (struct xlog_op_header *)dp;
4625 dp += sizeof(*ohead);
4628 /* errors will abort recovery */
4629 error = xlog_recover_process_ophdr(log, rhash, rhead, ohead,
4630 dp, end, pass, buffer_list);
4634 dp += be32_to_cpu(ohead->oh_len);
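/*
 * Standalone sketch of the walk above: op headers and their payloads
 * are packed back to back, so iteration is pointer arithmetic bounded
 * by both the record end and the logop count. The header layout is a
 * simplified stand-in for xlog_op_header and endian conversion of
 * oh_len is elided.
 */
#include <stdint.h>

struct op_hdr {
	uint32_t	oh_tid;		/* transaction id */
	uint32_t	oh_len;		/* payload bytes that follow */
	uint8_t		oh_clientid;
	uint8_t		oh_flags;
	uint16_t	oh_res2;
};

static int
walk_ops(char *dp, char *end, uint32_t num_logops,
	 int (*process)(struct op_hdr *oh, char *payload))
{
	while (dp < end && num_logops) {
		struct op_hdr *oh = (struct op_hdr *)dp;
		int error;

		dp += sizeof(*oh);
		if (dp + oh->oh_len > end)
			return -1;	/* bad length, abort */
		error = process(oh, dp);
		if (error)
			return error;	/* errors abort recovery */
		dp += oh->oh_len;
		num_logops--;
	}
	return 0;
}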
4640 /* Recover the EFI if necessary. */
4642 xlog_recover_process_efi(
4643 struct xfs_mount *mp,
4644 struct xfs_ail *ailp,
4645 struct xfs_log_item *lip)
4647 struct xfs_efi_log_item *efip;
4651 * Skip EFIs that we've already processed.
4653 efip = container_of(lip, struct xfs_efi_log_item, efi_item);
4654 if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags))
4657 spin_unlock(&ailp->ail_lock);
4658 error = xfs_efi_recover(mp, efip);
4659 spin_lock(&ailp->ail_lock);
4664 /* Release the EFI since we're cancelling everything. */
4666 xlog_recover_cancel_efi(
4667 struct xfs_mount *mp,
4668 struct xfs_ail *ailp,
4669 struct xfs_log_item *lip)
4671 struct xfs_efi_log_item *efip;
4673 efip = container_of(lip, struct xfs_efi_log_item, efi_item);
4675 spin_unlock(&ailp->ail_lock);
4676 xfs_efi_release(efip);
4677 spin_lock(&ailp->ail_lock);
4680 /* Recover the RUI if necessary. */
4682 xlog_recover_process_rui(
4683 struct xfs_mount *mp,
4684 struct xfs_ail *ailp,
4685 struct xfs_log_item *lip)
4687 struct xfs_rui_log_item *ruip;
4691 * Skip RUIs that we've already processed.
4693 ruip = container_of(lip, struct xfs_rui_log_item, rui_item);
4694 if (test_bit(XFS_RUI_RECOVERED, &ruip->rui_flags))
4697 spin_unlock(&ailp->ail_lock);
4698 error = xfs_rui_recover(mp, ruip);
4699 spin_lock(&ailp->ail_lock);
4704 /* Release the RUI since we're cancelling everything. */
4706 xlog_recover_cancel_rui(
4707 struct xfs_mount *mp,
4708 struct xfs_ail *ailp,
4709 struct xfs_log_item *lip)
4711 struct xfs_rui_log_item *ruip;
4713 ruip = container_of(lip, struct xfs_rui_log_item, rui_item);
4715 spin_unlock(&ailp->ail_lock);
4716 xfs_rui_release(ruip);
4717 spin_lock(&ailp->ail_lock);
4720 /* Recover the CUI if necessary. */
4722 xlog_recover_process_cui(
4723 struct xfs_mount *mp,
4724 struct xfs_ail *ailp,
4725 struct xfs_log_item *lip,
4726 struct xfs_defer_ops *dfops)
4728 struct xfs_cui_log_item *cuip;
4732 * Skip CUIs that we've already processed.
4734 cuip = container_of(lip, struct xfs_cui_log_item, cui_item);
4735 if (test_bit(XFS_CUI_RECOVERED, &cuip->cui_flags))
4738 spin_unlock(&ailp->ail_lock);
4739 error = xfs_cui_recover(mp, cuip, dfops);
4740 spin_lock(&ailp->ail_lock);
4745 /* Release the CUI since we're cancelling everything. */
4747 xlog_recover_cancel_cui(
4748 struct xfs_mount *mp,
4749 struct xfs_ail *ailp,
4750 struct xfs_log_item *lip)
4752 struct xfs_cui_log_item *cuip;
4754 cuip = container_of(lip, struct xfs_cui_log_item, cui_item);
4756 spin_unlock(&ailp->ail_lock);
4757 xfs_cui_release(cuip);
4758 spin_lock(&ailp->ail_lock);
4761 /* Recover the BUI if necessary. */
4763 xlog_recover_process_bui(
4764 struct xfs_mount *mp,
4765 struct xfs_ail *ailp,
4766 struct xfs_log_item *lip,
4767 struct xfs_defer_ops *dfops)
4769 struct xfs_bui_log_item *buip;
4773 * Skip BUIs that we've already processed.
4775 buip = container_of(lip, struct xfs_bui_log_item, bui_item);
4776 if (test_bit(XFS_BUI_RECOVERED, &buip->bui_flags))
4779 spin_unlock(&ailp->ail_lock);
4780 error = xfs_bui_recover(mp, buip, dfops);
4781 spin_lock(&ailp->ail_lock);
4786 /* Release the BUI since we're cancelling everything. */
4788 xlog_recover_cancel_bui(
4789 struct xfs_mount *mp,
4790 struct xfs_ail *ailp,
4791 struct xfs_log_item *lip)
4793 struct xfs_bui_log_item *buip;
4795 buip = container_of(lip, struct xfs_bui_log_item, bui_item);
4797 spin_unlock(&ailp->ail_lock);
4798 xfs_bui_release(buip);
4799 spin_lock(&ailp->ail_lock);
4802 /* Is this log item a deferred action intent? */
4803 static inline bool xlog_item_is_intent(struct xfs_log_item *lip)
4805 switch (lip->li_type) {
4816 /* Take all the collected deferred ops and finish them in order. */
4818 xlog_finish_defer_ops(
4819 struct xfs_mount *mp,
4820 struct xfs_defer_ops *dfops)
4822 struct xfs_trans *tp;
4828 * We're finishing the defer_ops that accumulated as a result of
4829 * recovering unfinished intent items during log recovery. We
4830 * reserve an itruncate transaction because it is the largest
4831 * permanent transaction type. Since we're the only user of the fs
4832 * right now, take 93% (15/16) of the available free blocks. Use
4833 * weird math to avoid a 64-bit division.
4835 freeblks = percpu_counter_sum(&mp->m_fdblocks);
4838 resblks = min_t(int64_t, UINT_MAX, freeblks);
4839 resblks = (resblks * 15) >> 4;
4840 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, resblks,
4841 0, XFS_TRANS_RESERVE, &tp);
4845 error = xfs_defer_finish(&tp, dfops);
4849 return xfs_trans_commit(tp);
4852 xfs_trans_cancel(tp);
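/*
 * The reservation sizing above as a standalone helper: clamping the
 * free block count to 32 bits first means "* 15 >> 4" (15/16, ~93%)
 * fits comfortably in 64 bits and needs no 64-bit division. Purely
 * illustrative; the kernel does this inline with min_t().
 */
#include <stdint.h>
#include <limits.h>

static uint32_t
recovery_resblks(int64_t freeblks)
{
	int64_t resblks;

	if (freeblks < 0)
		freeblks = 0;
	resblks = freeblks < UINT_MAX ? freeblks : UINT_MAX;
	return (uint32_t)((resblks * 15) >> 4);
}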
4857 * When this is called, all of the log intent items which did not have
4858 * corresponding log done items should be in the AIL. What we do now
4859 * is update the data structures associated with each one.
4861 * Since we process the log intent items in normal transactions, they
4862 * will be removed at some point after the commit. This prevents us
4863 * from just walking down the list processing each one. We'll use a
4864 * flag in the intent item to skip those that we've already processed
4865 * and use the AIL iteration mechanism's generation count to try to
4866 * speed this up at least a bit.
4868 * When we start, we know that the intents are the only things in the
4869	 * AIL. As we process them, however, other items are added to the
4870	 * AIL.
4873 xlog_recover_process_intents(
4876 struct xfs_defer_ops dfops;
4877 struct xfs_ail_cursor cur;
4878 struct xfs_log_item *lip;
4879 struct xfs_ail *ailp;
4880 xfs_fsblock_t firstfsb;
4882 #if defined(DEBUG) || defined(XFS_WARN)
4887 spin_lock(&ailp->ail_lock);
4888 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
4889 #if defined(DEBUG) || defined(XFS_WARN)
4890 last_lsn = xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block);
4892 xfs_defer_init(&dfops, &firstfsb);
4893 while (lip != NULL) {
4895 * We're done when we see something other than an intent.
4896 * There should be no intents left in the AIL now.
4898 if (!xlog_item_is_intent(lip)) {
4900 for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
4901 ASSERT(!xlog_item_is_intent(lip));
4907	 * We should never see a redo item with an LSN higher than
4908	 * the last transaction we found in the log at the start
4909	 * of recovery.
4911 ASSERT(XFS_LSN_CMP(last_lsn, lip->li_lsn) >= 0);
4914 * NOTE: If your intent processing routine can create more
4915 * deferred ops, you /must/ attach them to the dfops in this
4916 * routine or else those subsequent intents will get
4917 * replayed in the wrong order!
4919 switch (lip->li_type) {
4921 error = xlog_recover_process_efi(log->l_mp, ailp, lip);
4924 error = xlog_recover_process_rui(log->l_mp, ailp, lip);
4927 error = xlog_recover_process_cui(log->l_mp, ailp, lip,
4931 error = xlog_recover_process_bui(log->l_mp, ailp, lip,
4937 lip = xfs_trans_ail_cursor_next(ailp, &cur);
4940 xfs_trans_ail_cursor_done(&cur);
4941 spin_unlock(&ailp->ail_lock);
4943 xfs_defer_cancel(&dfops);
4945 error = xlog_finish_defer_ops(log->l_mp, &dfops);
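/*
 * Shape of the intent walk above with the AIL reduced to an array:
 * intents sort to the front, so the first non-intent ends the scan,
 * already-recovered intents are skipped, and each type dispatches to
 * its own replay handler. Types and callbacks are hypothetical.
 */
enum intent_type { INTENT_EFI, INTENT_RUI, INTENT_CUI, INTENT_BUI,
		   NOT_AN_INTENT };

struct ail_entry {
	enum intent_type	type;
	int			recovered;
};

static int
replay_intents(struct ail_entry *entries, int nentries,
	       int (*replay)(struct ail_entry *))
{
	int i, error = 0;

	for (i = 0; i < nentries; i++) {
		if (entries[i].type == NOT_AN_INTENT)
			break;		/* nothing but intents up front */
		if (entries[i].recovered)
			continue;	/* processed on an earlier pass */
		error = replay(&entries[i]);
		if (error)
			break;
		entries[i].recovered = 1;
	}
	return error;
}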
4951 * A cancel occurs when the mount has failed and we're bailing out.
4952 * Release all pending log intent items so they don't pin the AIL.
4955 xlog_recover_cancel_intents(
4958 struct xfs_log_item *lip;
4960 struct xfs_ail_cursor cur;
4961 struct xfs_ail *ailp;
4964 spin_lock(&ailp->ail_lock);
4965 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
4966 while (lip != NULL) {
4968 * We're done when we see something other than an intent.
4969 * There should be no intents left in the AIL now.
4971 if (!xlog_item_is_intent(lip)) {
4973 for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
4974 ASSERT(!xlog_item_is_intent(lip));
4979 switch (lip->li_type) {
4981 xlog_recover_cancel_efi(log->l_mp, ailp, lip);
4984 xlog_recover_cancel_rui(log->l_mp, ailp, lip);
4987 xlog_recover_cancel_cui(log->l_mp, ailp, lip);
4990 xlog_recover_cancel_bui(log->l_mp, ailp, lip);
4994 lip = xfs_trans_ail_cursor_next(ailp, &cur);
4997 xfs_trans_ail_cursor_done(&cur);
4998 spin_unlock(&ailp->ail_lock);
5003 * This routine performs a transaction to null out a bad inode pointer
5004 * in an agi unlinked inode hash bucket.
5007 xlog_recover_clear_agi_bucket(
5009 xfs_agnumber_t agno,
5018 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_clearagi, 0, 0, 0, &tp);
5022 error = xfs_read_agi(mp, tp, agno, &agibp);
5026 agi = XFS_BUF_TO_AGI(agibp);
5027 agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
5028 offset = offsetof(xfs_agi_t, agi_unlinked) +
5029 (sizeof(xfs_agino_t) * bucket);
5030 xfs_trans_log_buf(tp, agibp, offset,
5031 (offset + sizeof(xfs_agino_t) - 1));
5033 error = xfs_trans_commit(tp);
5039 xfs_trans_cancel(tp);
5041 xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__, agno);
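/*
 * The dirty-range computation above, standalone: logging one slot of
 * an on-disk array means logging the inclusive byte range
 * [offset, offset + slotsize - 1] relative to the structure start.
 * The struct is a cut-down, hypothetical stand-in for xfs_agi.
 */
#include <stdint.h>
#include <stddef.h>

#define FAKE_UNLINKED_BUCKETS	64

struct fake_agi {
	uint32_t	agi_magicnum;
	/* ... remaining header fields elided ... */
	uint32_t	agi_unlinked[FAKE_UNLINKED_BUCKETS];
};

static void
bucket_log_range(int bucket, unsigned int *first, unsigned int *last)
{
	unsigned int offset = offsetof(struct fake_agi, agi_unlinked) +
			      sizeof(uint32_t) * bucket;

	*first = offset;
	*last = offset + sizeof(uint32_t) - 1;
}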
5046 xlog_recover_process_one_iunlink(
5047 struct xfs_mount *mp,
5048 xfs_agnumber_t agno,
5052 struct xfs_buf *ibp;
5053 struct xfs_dinode *dip;
5054 struct xfs_inode *ip;
5058 ino = XFS_AGINO_TO_INO(mp, agno, agino);
5059 error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
5064 * Get the on disk inode to find the next inode in the bucket.
5066 error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &ibp, 0, 0);
5070 xfs_iflags_clear(ip, XFS_IRECOVERY);
5071 ASSERT(VFS_I(ip)->i_nlink == 0);
5072 ASSERT(VFS_I(ip)->i_mode != 0);
5074 /* setup for the next pass */
5075 agino = be32_to_cpu(dip->di_next_unlinked);
5079 * Prevent any DMAPI event from being sent when the reference on
5080 * the inode is dropped.
5082 ip->i_d.di_dmevmask = 0;
5091 * We can't read in the inode this bucket points to, or this inode
5092 * is messed up. Just ditch this bucket of inodes. We will lose
5093 * some inodes and space, but at least we won't hang.
5095 * Call xlog_recover_clear_agi_bucket() to perform a transaction to
5096 * clear the inode pointer in the bucket.
5098 xlog_recover_clear_agi_bucket(mp, agno, bucket);
5103 * xlog_iunlink_recover
5105 * This is called during recovery to process any inodes which
5106 * we unlinked but not freed when the system crashed. These
5107 * inodes will be on the lists in the AGI blocks. What we do
5108 * here is scan all the AGIs and fully truncate and free any
5109 * inodes found on the lists. Each inode is removed from the
5110 * lists when it has been fully truncated and is freed. The
5111	 * freeing of the inode and its removal from the list must be
5112	 * atomic.
5115 xlog_recover_process_iunlinks(
5119 xfs_agnumber_t agno;
5128 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
5130 * Find the agi for this ag.
5132 error = xfs_read_agi(mp, NULL, agno, &agibp);
5135 * AGI is b0rked. Don't process it.
5137 * We should probably mark the filesystem as corrupt
5138	 * after we've recovered all the AGs we can....
5143 * Unlock the buffer so that it can be acquired in the normal
5144 * course of the transaction to truncate and free each inode.
5145 * Because we are not racing with anyone else here for the AGI
5146 * buffer, we don't even need to hold it locked to read the
5147 * initial unlinked bucket entries out of the buffer. We keep
5148	 * a buffer reference, though, so that it stays pinned in memory
5149 * while we need the buffer.
5151 agi = XFS_BUF_TO_AGI(agibp);
5152 xfs_buf_unlock(agibp);
5154 for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
5155 agino = be32_to_cpu(agi->agi_unlinked[bucket]);
5156 while (agino != NULLAGINO) {
5157 agino = xlog_recover_process_one_iunlink(mp,
5158 agno, agino, bucket);
5161 xfs_buf_rele(agibp);
5167 struct xlog_rec_header *rhead,
5173 for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
5174 i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
5175 *(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
5179 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
5180 xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
5181 for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
5182 j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
5183 k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
5184 *(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
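/*
 * Userspace rendering of the cycle restore above: at write time the
 * first 32 bits of every basic block were overwritten with the cycle
 * number and the originals stashed in the record header(s). Each
 * header covers XLOG_HEADER_CYCLE_SIZE / BBSIZE blocks, hence the
 * div/mod pair for extended headers. Constants are the real values;
 * the flat hdrs[][] view is an assumption of this sketch.
 */
#include <stdint.h>
#include <string.h>

#define SKETCH_BBSIZE		512
#define SKETCH_CYCLE_SIZE	32768	/* XLOG_HEADER_CYCLE_SIZE */
#define BLKS_PER_HDR		(SKETCH_CYCLE_SIZE / SKETCH_BBSIZE)

static void
restore_cycle_words(char *dp, int nblocks, uint32_t hdrs[][BLKS_PER_HDR])
{
	int i;

	for (i = 0; i < nblocks; i++) {
		int j = i / BLKS_PER_HDR;	/* which header */
		int k = i % BLKS_PER_HDR;	/* slot within it */

		memcpy(dp, &hdrs[j][k], sizeof(uint32_t));
		dp += SKETCH_BBSIZE;
	}
}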
5193 * CRC check, unpack and process a log record.
5196 xlog_recover_process(
5198 struct hlist_head rhash[],
5199 struct xlog_rec_header *rhead,
5202 struct list_head *buffer_list)
5205 __le32 old_crc = rhead->h_crc;
5209 crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len));
5212 * Nothing else to do if this is a CRC verification pass. Just return
5213	 * if this is a record with a non-zero crc. Unfortunately, mkfs always
5214 * sets old_crc to 0 so we must consider this valid even on v5 supers.
5215 * Otherwise, return EFSBADCRC on failure so the callers up the stack
5216 * know precisely what failed.
5218 if (pass == XLOG_RECOVER_CRCPASS) {
5219 if (old_crc && crc != old_crc)
5225 * We're in the normal recovery path. Issue a warning if and only if the
5226 * CRC in the header is non-zero. This is an advisory warning and the
5227 * zero CRC check prevents warnings from being emitted when upgrading
5228 * the kernel from one that does not add CRCs by default.
5230 if (crc != old_crc) {
5231 if (old_crc || xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
5232 xfs_alert(log->l_mp,
5233 "log record CRC mismatch: found 0x%x, expected 0x%x.",
5234 le32_to_cpu(old_crc),
5236 xfs_hex_dump(dp, 32);
5240 * If the filesystem is CRC enabled, this mismatch becomes a
5241 * fatal log corruption failure.
5243 if (xfs_sb_version_hascrc(&log->l_mp->m_sb))
5244 return -EFSCORRUPTED;
5247 error = xlog_unpack_data(rhead, dp, log);
5251 return xlog_recover_process_data(log, rhash, rhead, dp, pass,
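/*
 * The CRC policy above as a hypothetical decision helper: during the
 * CRC pass a mismatch only counts when the on-disk CRC is non-zero
 * (old mkfs wrote 0); during a real pass a mismatch is advisory
 * unless the filesystem has CRCs enabled, where it is fatal. Errno
 * values are placeholders for the kernel's.
 */
#define SK_EFSBADCRC	1001
#define SK_EFSCORRUPTED	1002

static int
crc_verdict(unsigned int disk_crc, unsigned int calc_crc,
	    int crc_pass, int fs_has_crc)
{
	if (crc_pass)
		return (disk_crc && calc_crc != disk_crc) ?
			-SK_EFSBADCRC : 0;

	if (calc_crc != disk_crc && fs_has_crc)
		return -SK_EFSCORRUPTED;	/* fatal on v5 */
	return 0;				/* warn and continue */
}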
5256 xlog_valid_rec_header(
5258 struct xlog_rec_header *rhead,
5263 if (unlikely(rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))) {
5264 XFS_ERROR_REPORT("xlog_valid_rec_header(1)",
5265 XFS_ERRLEVEL_LOW, log->l_mp);
5266 return -EFSCORRUPTED;
5269 (!rhead->h_version ||
5270 (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) {
5271 xfs_warn(log->l_mp, "%s: unrecognised log version (%d).",
5272 __func__, be32_to_cpu(rhead->h_version));
5276 /* LR body must have data or it wouldn't have been written */
5277 hlen = be32_to_cpu(rhead->h_len);
5278	if (unlikely(hlen <= 0 || hlen > INT_MAX)) {
5279 XFS_ERROR_REPORT("xlog_valid_rec_header(2)",
5280 XFS_ERRLEVEL_LOW, log->l_mp);
5281 return -EFSCORRUPTED;
5283	if (unlikely(blkno > log->l_logBBsize || blkno > INT_MAX)) {
5284 XFS_ERROR_REPORT("xlog_valid_rec_header(3)",
5285 XFS_ERRLEVEL_LOW, log->l_mp);
5286 return -EFSCORRUPTED;
5292 * Read the log from tail to head and process the log records found.
5293 * Handle the two cases where the tail and head are in the same cycle
5294 * and where the active portion of the log wraps around the end of
5295 * the physical log separately. The pass parameter is passed through
5296	 * to the routines called to process the data and is not looked at
5297	 * by this routine.
5300 xlog_do_recovery_pass(
5302 xfs_daddr_t head_blk,
5303 xfs_daddr_t tail_blk,
5305 xfs_daddr_t *first_bad) /* out: first bad log rec */
5307 xlog_rec_header_t *rhead;
5308 xfs_daddr_t blk_no, rblk_no;
5309 xfs_daddr_t rhead_blk;
5311 xfs_buf_t *hbp, *dbp;
5312 int error = 0, h_size, h_len;
5314 int bblks, split_bblks;
5315 int hblks, split_hblks, wrapped_hblks;
5317 struct hlist_head rhash[XLOG_RHASH_SIZE];
5318	LIST_HEAD(buffer_list);
5320 ASSERT(head_blk != tail_blk);
5321 blk_no = rhead_blk = tail_blk;
5323 for (i = 0; i < XLOG_RHASH_SIZE; i++)
5324 INIT_HLIST_HEAD(&rhash[i]);
5327 * Read the header of the tail block and get the iclog buffer size from
5328 * h_size. Use this to tell how many sectors make up the log header.
5330 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
5332 * When using variable length iclogs, read first sector of
5333 * iclog header and extract the header size from it. Get a
5334 * new hbp that is the correct size.
5336 hbp = xlog_get_bp(log, 1);
5340 error = xlog_bread(log, tail_blk, 1, hbp, &offset);
5344 rhead = (xlog_rec_header_t *)offset;
5345 error = xlog_valid_rec_header(log, rhead, tail_blk);
5350 * xfsprogs has a bug where record length is based on lsunit but
5351 * h_size (iclog size) is hardcoded to 32k. Now that we
5352 * unconditionally CRC verify the unmount record, this means the
5353	 * log buffer can be too small for the record and cause an
5354	 * overrun.
5356 * Detect this condition here. Use lsunit for the buffer size as
5357 * long as this looks like the mkfs case. Otherwise, return an
5358 * error to avoid a buffer overrun.
5360 h_size = be32_to_cpu(rhead->h_size);
5361 h_len = be32_to_cpu(rhead->h_len);
5362 if (h_len > h_size) {
5363 if (h_len <= log->l_mp->m_logbsize &&
5364 be32_to_cpu(rhead->h_num_logops) == 1) {
5366 "invalid iclog size (%d bytes), using lsunit (%d bytes)",
5367 h_size, log->l_mp->m_logbsize);
5368 h_size = log->l_mp->m_logbsize;
5370 return -EFSCORRUPTED;
5373 if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) &&
5374 (h_size > XLOG_HEADER_CYCLE_SIZE)) {
5375 hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
5376 if (h_size % XLOG_HEADER_CYCLE_SIZE)
5379 hbp = xlog_get_bp(log, hblks);
5384 ASSERT(log->l_sectBBsize == 1);
5386 hbp = xlog_get_bp(log, 1);
5387 h_size = XLOG_BIG_RECORD_BSIZE;
5392 dbp = xlog_get_bp(log, BTOBB(h_size));
5398 memset(rhash, 0, sizeof(rhash));
5399 if (tail_blk > head_blk) {
5401 * Perform recovery around the end of the physical log.
5402 * When the head is not on the same cycle number as the tail,
5403 * we can't do a sequential recovery.
5405 while (blk_no < log->l_logBBsize) {
5407 * Check for header wrapping around physical end-of-log
5409 offset = hbp->b_addr;
5412 if (blk_no + hblks <= log->l_logBBsize) {
5413 /* Read header in one read */
5414 error = xlog_bread(log, blk_no, hblks, hbp,
5419 /* This LR is split across physical log end */
5420 if (blk_no != log->l_logBBsize) {
5421 /* some data before physical log end */
5422 ASSERT(blk_no <= INT_MAX);
5423 split_hblks = log->l_logBBsize - (int)blk_no;
5424 ASSERT(split_hblks > 0);
5425 error = xlog_bread(log, blk_no,
5433 * Note: this black magic still works with
5434 * large sector sizes (non-512) only because:
5435 * - we increased the buffer size originally
5436 * by 1 sector giving us enough extra space
5437 * for the second read;
5438	 * - the log start is guaranteed to be sector
5439	 *   aligned;
5440 * - we read the log end (LR header start)
5441 * _first_, then the log start (LR header end)
5442 * - order is important.
5444 wrapped_hblks = hblks - split_hblks;
5445 error = xlog_bread_offset(log, 0,
5447 offset + BBTOB(split_hblks));
5451 rhead = (xlog_rec_header_t *)offset;
5452 error = xlog_valid_rec_header(log, rhead,
5453 split_hblks ? blk_no : 0);
5457 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
5461 * Read the log record data in multiple reads if it
5462 * wraps around the end of the log. Note that if the
5463 * header already wrapped, blk_no could point past the
5464 * end of the log. The record data is contiguous in
5467 if (blk_no + bblks <= log->l_logBBsize ||
5468 blk_no >= log->l_logBBsize) {
5469 /* mod blk_no in case the header wrapped and
5470 * pushed it beyond the end of the log */
5471 rblk_no = do_mod(blk_no, log->l_logBBsize);
5472 error = xlog_bread(log, rblk_no, bblks, dbp,
5477 /* This log record is split across the
5478 * physical end of log */
5479 offset = dbp->b_addr;
5481 if (blk_no != log->l_logBBsize) {
5482	/* some data is before the physical
5483	 * end of log */
5484 ASSERT(!wrapped_hblks);
5485 ASSERT(blk_no <= INT_MAX);
5487 log->l_logBBsize - (int)blk_no;
5488 ASSERT(split_bblks > 0);
5489 error = xlog_bread(log, blk_no,
5497 * Note: this black magic still works with
5498 * large sector sizes (non-512) only because:
5499 * - we increased the buffer size originally
5500 * by 1 sector giving us enough extra space
5501 * for the second read;
5502	 * - the log start is guaranteed to be sector
5503	 *   aligned;
5504 * - we read the log end (LR header start)
5505 * _first_, then the log start (LR header end)
5506 * - order is important.
5508 error = xlog_bread_offset(log, 0,
5509 bblks - split_bblks, dbp,
5510 offset + BBTOB(split_bblks));
5515 error = xlog_recover_process(log, rhash, rhead, offset,
5516 pass, &buffer_list);
5524 ASSERT(blk_no >= log->l_logBBsize);
5525 blk_no -= log->l_logBBsize;
5529 /* read first part of physical log */
5530 while (blk_no < head_blk) {
5531 error = xlog_bread(log, blk_no, hblks, hbp, &offset);
5535 rhead = (xlog_rec_header_t *)offset;
5536 error = xlog_valid_rec_header(log, rhead, blk_no);
5540 /* blocks in data section */
5541 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
5542 error = xlog_bread(log, blk_no+hblks, bblks, dbp,
5547 error = xlog_recover_process(log, rhash, rhead, offset, pass,
5552 blk_no += bblks + hblks;
5562 * Submit buffers that have been added from the last record processed,
5563 * regardless of error status.
5565 if (!list_empty(&buffer_list))
5566 error2 = xfs_buf_delwri_submit(&buffer_list);
5568 if (error && first_bad)
5569 *first_bad = rhead_blk;
5572 * Transactions are freed at commit time but transactions without commit
5573	 * records on disk are never committed. Free any that may be left in the
5574	 * hash table.
5576 for (i = 0; i < XLOG_RHASH_SIZE; i++) {
5577 struct hlist_node *tmp;
5578 struct xlog_recover *trans;
5580 hlist_for_each_entry_safe(trans, tmp, &rhash[i], r_list)
5581 xlog_recover_free_trans(trans);
5584 return error ? error : error2;
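/*
 * The wrap handling in the pass above, reduced to its arithmetic:
 * when a run of blocks straddles the physical end of the log, read
 * the piece up to the end first, then the remainder from block 0
 * into the same buffer just after it. read_blocks() is a
 * hypothetical I/O callback; counts are in 512-byte basic blocks and
 * blk_no is assumed to be inside the physical log.
 */
#include <stdint.h>

#define SK_BBSHIFT	9	/* basic blocks -> bytes */

static int
read_wrapped(uint64_t blk_no, int nbblks, uint64_t log_bblks, char *buf,
	     int (*read_blocks)(uint64_t blk, int cnt, char *dst))
{
	int split, error;

	if (blk_no + nbblks <= log_bblks)
		return read_blocks(blk_no, nbblks, buf);

	split = (int)(log_bblks - blk_no);	/* piece before the end */
	error = read_blocks(blk_no, split, buf);
	if (error)
		return error;

	/* remainder wraps to the start of the physical log */
	return read_blocks(0, nbblks - split,
			   buf + (split << SK_BBSHIFT));
}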
5588 * Do the recovery of the log. We actually do this in two phases.
5589 * The two passes are necessary in order to implement the function
5590 * of cancelling a record written into the log. The first pass
5591 * determines those things which have been cancelled, and the
5592 * second pass replays log items normally except for those which
5593 * have been cancelled. The handling of the replay and cancellations
5594 * takes place in the log item type specific routines.
5596 * The table of items which have cancel records in the log is allocated
5597 * and freed at this level, since only here do we know when all of
5598 * the log recovery has been completed.
5601 xlog_do_log_recovery(
5603 xfs_daddr_t head_blk,
5604 xfs_daddr_t tail_blk)
5608 ASSERT(head_blk != tail_blk);
5611 * First do a pass to find all of the cancelled buf log items.
5612 * Store them in the buf_cancel_table for use in the second pass.
5614 log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE *
5615 sizeof(struct list_head),
5617 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
5618 INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);
5620 error = xlog_do_recovery_pass(log, head_blk, tail_blk,
5621 XLOG_RECOVER_PASS1, NULL);
5623 kmem_free(log->l_buf_cancel_table);
5624 log->l_buf_cancel_table = NULL;
5628 * Then do a second pass to actually recover the items in the log.
5629 * When it is complete free the table of buf cancel items.
5631 error = xlog_do_recovery_pass(log, head_blk, tail_blk,
5632 XLOG_RECOVER_PASS2, NULL);
5637 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
5638 ASSERT(list_empty(&log->l_buf_cancel_table[i]));
5642 kmem_free(log->l_buf_cancel_table);
5643 log->l_buf_cancel_table = NULL;
5649 * Do the actual recovery
5654 xfs_daddr_t head_blk,
5655 xfs_daddr_t tail_blk)
5657 struct xfs_mount *mp = log->l_mp;
5662 trace_xfs_log_recover(log, head_blk, tail_blk);
5665 * First replay the images in the log.
5667 error = xlog_do_log_recovery(log, head_blk, tail_blk);
5672 * If IO errors happened during recovery, bail out.
5674 if (XFS_FORCED_SHUTDOWN(mp)) {
5679 * We now update the tail_lsn since much of the recovery has completed
5680 * and there may be space available to use. If there were no extent
5681	 * frees or iunlinks, we can free up the entire log and set the tail_lsn to
5682 * be the last_sync_lsn. This was set in xlog_find_tail to be the
5683 * lsn of the last known good LR on disk. If there are extent frees
5684 * or iunlinks they will have some entries in the AIL; so we look at
5685 * the AIL to determine how to set the tail_lsn.
5687 xlog_assign_tail_lsn(mp);
5690 * Now that we've finished replaying all buffer and inode
5691 * updates, re-read in the superblock and reverify it.
5693 bp = xfs_getsb(mp, 0);
5694 bp->b_flags &= ~(XBF_DONE | XBF_ASYNC);
5695 ASSERT(!(bp->b_flags & XBF_WRITE));
5696 bp->b_flags |= XBF_READ;
5697 bp->b_ops = &xfs_sb_buf_ops;
5699 error = xfs_buf_submit_wait(bp);
5701 if (!XFS_FORCED_SHUTDOWN(mp)) {
5702 xfs_buf_ioerror_alert(bp, __func__);
5709 /* Convert superblock from on-disk format */
5711 xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));
5714 /* re-initialise in-core superblock and geometry structures */
5715 xfs_reinit_percpu_counters(mp);
5716 error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi);
5718 xfs_warn(mp, "Failed post-recovery per-ag init: %d", error);
5721 mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);
5723 xlog_recover_check_summary(log);
5725 /* Normal transactions can now occur */
5726 log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
5731 * Perform recovery and re-initialize some log variables in xlog_find_tail.
5733 * Return error or zero.
5739 xfs_daddr_t head_blk, tail_blk;
5742 /* find the tail of the log */
5743 error = xlog_find_tail(log, &head_blk, &tail_blk);
5748 * The superblock was read before the log was available and thus the LSN
5749 * could not be verified. Check the superblock LSN against the current
5750 * LSN now that it's known.
5752 if (xfs_sb_version_hascrc(&log->l_mp->m_sb) &&
5753 !xfs_log_check_lsn(log->l_mp, log->l_mp->m_sb.sb_lsn))
5756 if (tail_blk != head_blk) {
5757 /* There used to be a comment here:
5759 * disallow recovery on read-only mounts. note -- mount
5760	 * checks for ENOSPC and turns it into an intelligent
5761	 * error message.
5762 * ...but this is no longer true. Now, unless you specify
5763 * NORECOVERY (in which case this function would never be
5764 * called), we just go ahead and recover. We do this all
5765 * under the vfs layer, so we can get away with it unless
5766 * the device itself is read-only, in which case we fail.
5768 if ((error = xfs_dev_is_read_only(log->l_mp, "recovery"))) {
5773 * Version 5 superblock log feature mask validation. We know the
5774 * log is dirty so check if there are any unknown log features
5775 * in what we need to recover. If there are unknown features
5776	 * (e.g. unsupported transactions), then simply reject the
5777 * attempt at recovery before touching anything.
5779 if (XFS_SB_VERSION_NUM(&log->l_mp->m_sb) == XFS_SB_VERSION_5 &&
5780 xfs_sb_has_incompat_log_feature(&log->l_mp->m_sb,
5781 XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN)) {
5783 "Superblock has unknown incompatible log features (0x%x) enabled.",
5784 (log->l_mp->m_sb.sb_features_log_incompat &
5785 XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN));
5787 "The log can not be fully and/or safely recovered by this kernel.");
5789 "Please recover the log on a kernel that supports the unknown features.");
5794 * Delay log recovery if the debug hook is set. This is debug
5795	 * instrumentation to coordinate simulation of I/O failures with
5796	 * log recovery.
5798 if (xfs_globals.log_recovery_delay) {
5799 xfs_notice(log->l_mp,
5800 "Delaying log recovery for %d seconds.",
5801 xfs_globals.log_recovery_delay);
5802 msleep(xfs_globals.log_recovery_delay * 1000);
5805 xfs_notice(log->l_mp, "Starting recovery (logdev: %s)",
5806 log->l_mp->m_logname ? log->l_mp->m_logname
5809 error = xlog_do_recover(log, head_blk, tail_blk);
5810 log->l_flags |= XLOG_RECOVERY_NEEDED;
5816 * In the first part of recovery we replay inodes and buffers and build
5817 * up the list of extent free items which need to be processed. Here
5818 * we process the extent free items and clean up the on disk unlinked
5819 * inode lists. This is separated from the first part of recovery so
5820 * that the root and real-time bitmap inodes can be read in from disk in
5821 * between the two stages. This is necessary so that we can free space
5822 * in the real-time portion of the file system.
5825 xlog_recover_finish(
5829 * Now we're ready to do the transactions needed for the
5830 * rest of recovery. Start with completing all the extent
5831 * free intent records and then process the unlinked inode
5832 * lists. At this point, we essentially run in normal mode
5833 * except that we're still performing recovery actions
5834 * rather than accepting new requests.
5836 if (log->l_flags & XLOG_RECOVERY_NEEDED) {
5838 error = xlog_recover_process_intents(log);
5840 xfs_alert(log->l_mp, "Failed to recover intents");
5845 * Sync the log to get all the intents out of the AIL.
5846 * This isn't absolutely necessary, but it helps in
5847 * case the unlink transactions would have problems
5848 * pushing the intents out of the way.
5850 xfs_log_force(log->l_mp, XFS_LOG_SYNC);
5852 xlog_recover_process_iunlinks(log);
5854 xlog_recover_check_summary(log);
5856 xfs_notice(log->l_mp, "Ending recovery (logdev: %s)",
5857 log->l_mp->m_logname ? log->l_mp->m_logname
5859 log->l_flags &= ~XLOG_RECOVERY_NEEDED;
5861 xfs_info(log->l_mp, "Ending clean mount");
5867 xlog_recover_cancel(
5872 if (log->l_flags & XLOG_RECOVERY_NEEDED)
5873 error = xlog_recover_cancel_intents(log);
5880 * Read all of the agf and agi counters and check that they
5881 * are consistent with the superblock counters.
5884 xlog_recover_check_summary(
5891 xfs_agnumber_t agno;
5902 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
5903 error = xfs_read_agf(mp, NULL, agno, 0, &agfbp);
5905 xfs_alert(mp, "%s agf read failed agno %d error %d",
5906 __func__, agno, error);
5908 agfp = XFS_BUF_TO_AGF(agfbp);
5909 freeblks += be32_to_cpu(agfp->agf_freeblks) +
5910 be32_to_cpu(agfp->agf_flcount);
5911 xfs_buf_relse(agfbp);
5914 error = xfs_read_agi(mp, NULL, agno, &agibp);
5916 xfs_alert(mp, "%s agi read failed agno %d error %d",
5917 __func__, agno, error);
5919 struct xfs_agi *agi = XFS_BUF_TO_AGI(agibp);
5921 itotal += be32_to_cpu(agi->agi_count);
5922 ifree += be32_to_cpu(agi->agi_freecount);
5923 xfs_buf_relse(agibp);