 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#include "xfs_types.h"
#include "xfs_trans.h"
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_log_priv.h"
#include "xfs_buf_item.h"
#include "xfs_log_recover.h"
#include "xfs_extfree_item.h"
#include "xfs_trans_priv.h"
#include "xfs_quota.h"
#include "xfs_utils.h"
#include "xfs_cksum.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_symlink.h"
xlog_clear_stale_blocks(
xlog_recover_check_summary(
#define	xlog_recover_check_summary(log)
 * This structure is used during recovery to record the buf log items which
 * have been canceled and should not be replayed.
struct xfs_buf_cancel {
	struct list_head	bc_list;
 * Sector aligned buffer routines for buffer create/read/write/access
 * Verify that the given count of basic blocks is a valid number of blocks
 * to specify for an operation involving the given XFS log buffer.
 * Returns nonzero if the count is valid, 0 otherwise.
xlog_buf_bbcount_valid(
	return bbcount > 0 && bbcount <= log->l_logBBsize;
 * Allocate a buffer to hold log data. The buffer needs to be able
 * to map to a range of nbblks basic blocks at any valid (basic
 * block) offset within the log.
	if (!xlog_buf_bbcount_valid(log, nbblks)) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
	 * We do log I/O in units of log sectors (a power-of-2
	 * multiple of the basic block size), so we round up the
	 * requested size to accommodate the basic blocks required
	 * for complete log sectors.
	 * In addition, the buffer may be used for a non-sector-
	 * aligned block offset, in which case an I/O of the
	 * requested size could extend beyond the end of the
	 * buffer. If the requested size is only 1 basic block it
	 * will never straddle a sector boundary, so this won't be
	 * an issue. Nor will this be a problem if the log I/O is
	 * done in basic blocks (sector size 1). But otherwise we
	 * extend the buffer by one extra log sector to ensure
	 * there's space to accommodate this possibility.
	if (nbblks > 1 && log->l_sectBBsize > 1)
		nbblks += log->l_sectBBsize;
	nbblks = round_up(nbblks, log->l_sectBBsize);
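	/*
	 * Example: assuming 4k log sectors over 512 byte basic blocks
	 * (l_sectBBsize == 8), a request for nbblks = 10 first grows by
	 * one sector to 18 and then rounds up to 24 basic blocks. The
	 * extra sector guarantees that a 10 block read starting at any
	 * sector-unaligned offset still fits inside the buffer.
	 */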
	bp = xfs_buf_get_uncached(log->l_mp->m_logdev_targp, nbblks, 0);
 * Return the address of the start of the given block number's data
 * in a log buffer. The buffer covers a log sector-aligned region.
	xfs_daddr_t offset = blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1);
	ASSERT(offset + nbblks <= bp->b_length);
	return bp->b_addr + BBTOB(offset);
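/*
 * Example: assuming l_sectBBsize == 8, a caller that asked for block 19
 * got a buffer whose first block is the rounded-down block 16, so
 * xlog_align() returns b_addr + BBTOB(19 & 7), i.e. b_addr + 3 * 512.
 */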
 * nbblks should be uint, but oh well. Just want to catch that 32-bit length.
	if (!xlog_buf_bbcount_valid(log, nbblks)) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
	blk_no = round_down(blk_no, log->l_sectBBsize);
	nbblks = round_up(nbblks, log->l_sectBBsize);
	ASSERT(nbblks <= bp->b_length);
	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
	bp->b_io_length = nbblks;
	xfsbdstrat(log->l_mp, bp);
	error = xfs_buf_iowait(bp);
		xfs_buf_ioerror_alert(bp, __func__);
	error = xlog_bread_noalign(log, blk_no, nbblks, bp);
	*offset = xlog_align(log, blk_no, nbblks, bp);
 * Read at an offset into the buffer. Returns with the buffer in its original
 * state regardless of the result of the read.
	xfs_daddr_t	blk_no,		/* block to read from */
	int		nbblks,		/* blocks to read */
	xfs_caddr_t	orig_offset = bp->b_addr;
	int		orig_len = BBTOB(bp->b_length);
	error = xfs_buf_associate_memory(bp, offset, BBTOB(nbblks));
	error = xlog_bread_noalign(log, blk_no, nbblks, bp);
	/* must reset buffer pointer even on error */
	error2 = xfs_buf_associate_memory(bp, orig_offset, orig_len);
 * Write out the buffer at the given block for the given number of blocks.
 * The buffer is kept locked across the write and is returned locked.
 * This can only be used for synchronous log writes.
	if (!xlog_buf_bbcount_valid(log, nbblks)) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
	blk_no = round_down(blk_no, log->l_sectBBsize);
	nbblks = round_up(nbblks, log->l_sectBBsize);
	ASSERT(nbblks <= bp->b_length);
	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
	XFS_BUF_ZEROFLAGS(bp);
	bp->b_io_length = nbblks;
	error = xfs_bwrite(bp);
		xfs_buf_ioerror_alert(bp, __func__);
 * dump debug superblock and log record information
xlog_header_check_dump(
	xlog_rec_header_t	*head)
	xfs_debug(mp, "%s:  SB : uuid = %pU, fmt = %d\n",
		__func__, &mp->m_sb.sb_uuid, XLOG_FMT);
	xfs_debug(mp, "    log : uuid = %pU, fmt = %d\n",
		&head->h_fs_uuid, be32_to_cpu(head->h_fmt));
#define xlog_header_check_dump(mp, head)
 * check log record header for recovery
xlog_header_check_recover(
	xlog_rec_header_t	*head)
	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));
	 * IRIX doesn't write the h_fmt field and leaves it zeroed
	 * (XLOG_FMT_UNKNOWN). This stops us from trying to recover
	 * a dirty log created in IRIX.
	if (unlikely(head->h_fmt != cpu_to_be32(XLOG_FMT))) {
		"dirty log written in incompatible format - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_recover(1)",
				 XFS_ERRLEVEL_HIGH, mp);
		return XFS_ERROR(EFSCORRUPTED);
	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
		"dirty log entry has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_recover(2)",
				 XFS_ERRLEVEL_HIGH, mp);
		return XFS_ERROR(EFSCORRUPTED);
 * read the head block of the log and check the header
xlog_header_check_mount(
	xlog_rec_header_t	*head)
	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));
	if (uuid_is_nil(&head->h_fs_uuid)) {
		 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
		 * h_fs_uuid is nil, we assume this log was last mounted
		 * by IRIX and continue.
		xfs_warn(mp, "nil uuid in log - IRIX style log");
	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
		xfs_warn(mp, "log has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_mount",
				 XFS_ERRLEVEL_HIGH, mp);
		return XFS_ERROR(EFSCORRUPTED);
		 * We're not going to bother about retrying
		 * this during recovery. One strike!
		xfs_buf_ioerror_alert(bp, __func__);
		xfs_force_shutdown(bp->b_target->bt_mount,
					SHUTDOWN_META_IO_ERROR);
	xfs_buf_ioend(bp, 0);
 * This routine finds (to an approximation) the first block in the physical
 * log which contains the given cycle. It uses a binary search algorithm.
 * Note that the algorithm cannot be perfect because the disk will not
 * necessarily be perfect.
xlog_find_cycle_start(
	xfs_daddr_t	first_blk,
	xfs_daddr_t	*last_blk,
	mid_blk = BLK_AVG(first_blk, end_blk);
	while (mid_blk != first_blk && mid_blk != end_blk) {
		error = xlog_bread(log, mid_blk, 1, bp, &offset);
		mid_cycle = xlog_get_cycle(offset);
		if (mid_cycle == cycle)
			end_blk = mid_blk;   /* last_half_cycle == mid_cycle */
			first_blk = mid_blk; /* first_half_cycle == mid_cycle */
		mid_blk = BLK_AVG(first_blk, end_blk);
	ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
	       (mid_blk == end_blk && mid_blk-1 == first_blk));
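	/*
	 * Example: searching for the first block with cycle 1 in a log
	 * whose per-block cycles are { 2, 2, 2, 1, 1 }, starting with
	 * first_blk = 0 and end_blk = 4:
	 *
	 *	mid_blk = BLK_AVG(0, 4) = 2, cycle 2 -> first_blk = 2
	 *	mid_blk = BLK_AVG(2, 4) = 3, cycle 1 -> end_blk = 3
	 *	mid_blk = BLK_AVG(2, 3) = 2 == first_blk -> loop ends
	 *
	 * end_blk (3) is the first block stamped with the target cycle,
	 * and the ASSERT above holds with the two blocks adjacent.
	 */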
 * Check that a range of blocks does not contain stop_on_cycle_no.
 * Fill in *new_blk with the block offset where such a block is
 * found, or with -1 (an invalid block number) if there is no such
 * block in the range. The scan needs to occur from front to back
 * and the pointer into the region must be updated since a later
 * routine will need to perform another test.
xlog_find_verify_cycle(
	xfs_daddr_t	start_blk,
	uint		stop_on_cycle_no,
	xfs_daddr_t	*new_blk)
	xfs_caddr_t	buf = NULL;
	 * Greedily allocate a buffer big enough to handle the full
	 * range of basic blocks we'll be examining. If that fails,
	 * try a smaller size. We need to be able to read at least
	 * a log sector, or we're out of luck.
	bufblks = 1 << ffs(nbblks);
	while (bufblks > log->l_logBBsize)
	while (!(bp = xlog_get_bp(log, bufblks))) {
		if (bufblks < log->l_sectBBsize)
	for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
		bcount = min(bufblks, (start_blk + nbblks - i));
		error = xlog_bread(log, i, bcount, bp, &buf);
		for (j = 0; j < bcount; j++) {
			cycle = xlog_get_cycle(buf);
			if (cycle == stop_on_cycle_no) {
 * Potentially backup over partial log record write.
 * In the typical case, last_blk is the number of the block directly after
 * a good log record. Therefore, we subtract one to get the block number
 * of the last block in the given buffer. extra_bblks contains the number
 * of blocks we would have read on a previous read. This happens when the
 * last log record is split over the end of the physical log.
 * extra_bblks is the number of blocks potentially verified on a previous
 * call to this routine.
xlog_find_verify_log_record(
	xfs_daddr_t		start_blk,
	xfs_daddr_t		*last_blk,
	xfs_caddr_t		offset = NULL;
	xlog_rec_header_t	*head = NULL;
	int			num_blks = *last_blk - start_blk;
	ASSERT(start_blk != 0 || *last_blk != start_blk);
	if (!(bp = xlog_get_bp(log, num_blks))) {
		if (!(bp = xlog_get_bp(log, 1)))
		error = xlog_bread(log, start_blk, num_blks, bp, &offset);
		offset += ((num_blks - 1) << BBSHIFT);
	for (i = (*last_blk) - 1; i >= 0; i--) {
			/* valid log record not found */
				"Log inconsistent (didn't find previous header)");
			error = XFS_ERROR(EIO);
			error = xlog_bread(log, i, 1, bp, &offset);
		head = (xlog_rec_header_t *)offset;
		if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
	 * We hit the beginning of the physical log & still no header. Return
	 * to caller. If caller can handle a return of -1, then this routine
	 * will be called again for the end of the physical log.
	 * We have the final block of the good log (the first block
	 * of the log record _before_ the head). So we check the uuid.
	if ((error = xlog_header_check_mount(log->l_mp, head)))
	 * We may have found a log record header before we expected one.
	 * last_blk will be the 1st block # with a given cycle #. We may end
	 * up reading an entire log record. In this case, we don't want to
	 * reset last_blk. Only when last_blk points in the middle of a log
	 * record do we update last_blk.
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		uint	h_size = be32_to_cpu(head->h_size);
		xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE;
		if (h_size % XLOG_HEADER_CYCLE_SIZE)
	if (*last_blk - i + extra_bblks !=
	    BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
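	/*
	 * Example of the length check: assuming XLOG_HEADER_CYCLE_SIZE is
	 * 32k, a v2 log record written from a 64k iclog carries xhdrs = 2
	 * header blocks, so for h_len = 60k of payload the header at
	 * block i and *last_blk should sit BTOBB(60k) + 2 = 122 basic
	 * blocks apart.
	 */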
 * Head is defined to be the point of the log where the next log write
 * could go. This means that incomplete LR writes at the end are
 * eliminated when calculating the head. We aren't guaranteed that previous
 * LR have complete transactions. We only know that a cycle number of
 * current cycle number -1 won't be present in the log if we start writing
 * from our current block number.
 * last_blk contains the block number of the first block with a given
 * Return: zero if normal, non-zero if error.
	xfs_daddr_t	*return_head_blk)
	xfs_daddr_t	new_blk, first_blk, start_blk, last_blk, head_blk;
	uint		first_half_cycle, last_half_cycle;
	int		error, log_bbnum = log->l_logBBsize;
	/* Is the end of the log device zeroed? */
	if ((error = xlog_find_zeroed(log, &first_blk)) == -1) {
		*return_head_blk = first_blk;
		/* Is the whole lot zeroed? */
			/* Linux XFS shouldn't generate totally zeroed logs -
			 * mkfs etc write a dummy unmount record to a fresh
			 * log so we can store the uuid in there
			xfs_warn(log->l_mp, "totally zeroed log");
		xfs_warn(log->l_mp, "empty log check failed");
	first_blk = 0;			/* get cycle # of 1st block */
	bp = xlog_get_bp(log, 1);
	error = xlog_bread(log, 0, 1, bp, &offset);
	first_half_cycle = xlog_get_cycle(offset);
	last_blk = head_blk = log_bbnum - 1;	/* get cycle # of last block */
	error = xlog_bread(log, last_blk, 1, bp, &offset);
	last_half_cycle = xlog_get_cycle(offset);
	ASSERT(last_half_cycle != 0);
	 * If the 1st half cycle number is equal to the last half cycle number,
	 * then the entire log is stamped with the same cycle number. In this
	 * case, head_blk can't be set to zero (which makes sense). The below
	 * math doesn't work out properly with head_blk equal to zero. Instead,
	 * we set it to log_bbnum which is an invalid block number, but this
	 * value makes the math correct. If head_blk doesn't change through
	 * all the tests below, *head_blk is set to zero at the very end rather
	 * than log_bbnum. In a sense, log_bbnum and zero are the same block
	 * in a circular file.
	if (first_half_cycle == last_half_cycle) {
		 * In this case we believe that the entire log should have
		 * cycle number last_half_cycle. We need to scan backwards
		 * from the end verifying that there are no holes still
		 * containing last_half_cycle - 1. If we find such a hole,
		 * then the start of that hole will be the new head. The
		 * simple case looks like
		 *        x | x ... | x - 1 | x
		 * Another case that fits this picture would be
		 *        x | x + 1 | x ... | x
		 * In this case the head really is somewhere at the end of the
		 * log, as one of the latest writes at the beginning was
		 *        x | x + 1 | x ... | x - 1 | x
		 * This is really the combination of the above two cases, and
		 * the head has to end up at the start of the x-1 hole at the
		 * In the 256k log case, we will read from the beginning to the
		 * end of the log and search for cycle numbers equal to x-1.
		 * We don't worry about the x+1 blocks that we encounter,
		 * because we know that they cannot be the head since the log
		head_blk = log_bbnum;
		stop_on_cycle = last_half_cycle - 1;
		 * In this case we want to find the first block with cycle
		 * number matching last_half_cycle. We expect the log to be
		 *        x + 1 ... | x ... | x
		 * The first block with cycle number x (last_half_cycle) will
		 * be where the new head belongs. First we do a binary search
		 * for the first occurrence of last_half_cycle. The binary
		 * search may not be totally accurate, so then we scan back
		 * from there looking for occurrences of last_half_cycle before
		 * us. If that backwards scan wraps around the beginning of
		 * the log, then we look for occurrences of last_half_cycle - 1
		 * at the end of the log. The cases we're looking for look
		 *                               v binary search stopped here
		 *        x + 1 ... | x | x + 1 | x ... | x
		 *                   ^ but we want to locate this spot
		 *        <---------> less than scan distance
		 *        x + 1 ... | x ... | x - 1 | x
		 *                           ^ we want to locate this spot
		stop_on_cycle = last_half_cycle;
		if ((error = xlog_find_cycle_start(log, bp, first_blk,
						&head_blk, last_half_cycle)))
	 * Now validate the answer. Scan back some number of maximum possible
	 * blocks and make sure each one has the expected cycle number. The
	 * maximum is determined by the total possible amount of buffering
	 * in the in-core log. The following number can be made tighter if
	 * we actually look at the block size of the filesystem.
	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
	if (head_blk >= num_scan_bblks) {
		 * We are guaranteed that the entire check can be performed
		start_blk = head_blk - num_scan_bblks;
		if ((error = xlog_find_verify_cycle(log,
						start_blk, num_scan_bblks,
						stop_on_cycle, &new_blk)))
	} else {		/* need to read 2 parts of log */
		 * We are going to scan backwards in the log in two parts.
		 * First we scan the physical end of the log. In this part
		 * of the log, we are looking for blocks with cycle number
		 * last_half_cycle - 1.
		 * If we find one, then we know that the log starts there, as
		 * we've found a hole that didn't get written in going around
		 * the end of the physical log. The simple case for this is
		 *        x + 1 ... | x ... | x - 1 | x
		 *        <---------> less than scan distance
		 * If all of the blocks at the end of the log have cycle number
		 * last_half_cycle, then we check the blocks at the start of
		 * the log looking for occurrences of last_half_cycle. If we
		 * find one, then our current estimate for the location of the
		 * first occurrence of last_half_cycle is wrong and we move
		 * back to the hole we've found. This case looks like
		 *        x + 1 ... | x | x + 1 | x ...
		 *                       ^ binary search stopped here
		 * Another case we need to handle that only occurs in 256k
		 *        x + 1 ... | x ... | x+1 | x ...
		 *                   ^ binary search stops here
		 * In a 256k log, the scan at the end of the log will see the
		 * x + 1 blocks. We need to skip past those since that is
		 * certainly not the head of the log. By searching for
		 * last_half_cycle-1 we accomplish that.
		ASSERT(head_blk <= INT_MAX &&
			(xfs_daddr_t) num_scan_bblks >= head_blk);
		start_blk = log_bbnum - (num_scan_bblks - head_blk);
		if ((error = xlog_find_verify_cycle(log, start_blk,
					num_scan_bblks - (int)head_blk,
					(stop_on_cycle - 1), &new_blk)))
		 * Scan beginning of log now. The last part of the physical
		 * log is good. This scan needs to verify that it doesn't find
		 * the last_half_cycle.
		ASSERT(head_blk <= INT_MAX);
		if ((error = xlog_find_verify_cycle(log,
					start_blk, (int)head_blk,
					stop_on_cycle, &new_blk)))
	 * Now we need to make sure head_blk is not pointing to a block in
	 * the middle of a log record.
	num_scan_bblks = XLOG_REC_SHIFT(log);
	if (head_blk >= num_scan_bblks) {
		start_blk = head_blk - num_scan_bblks; /* don't read head_blk */
		/* start ptr at last block ptr before head_blk */
		if ((error = xlog_find_verify_log_record(log, start_blk,
							&head_blk, 0)) == -1) {
			error = XFS_ERROR(EIO);
		ASSERT(head_blk <= INT_MAX);
		if ((error = xlog_find_verify_log_record(log, start_blk,
							&head_blk, 0)) == -1) {
			/* We hit the beginning of the log during our search */
			start_blk = log_bbnum - (num_scan_bblks - head_blk);
			ASSERT(start_blk <= INT_MAX &&
				(xfs_daddr_t) log_bbnum-start_blk >= 0);
			ASSERT(head_blk <= INT_MAX);
			if ((error = xlog_find_verify_log_record(log,
							(int)head_blk)) == -1) {
				error = XFS_ERROR(EIO);
	if (new_blk != log_bbnum)
	if (head_blk == log_bbnum)
		*return_head_blk = 0;
		*return_head_blk = head_blk;
	 * When returning here, we have a good block number. Bad block
	 * means that during a previous crash, we didn't have a clean break
	 * from cycle number N to cycle number N-1. In this case, we need
	 * to find the first block with cycle number N-1.
	xfs_warn(log->l_mp, "failed to find log head");
 * Find the sync block number or the tail of the log.
 * This will be the block number of the last record to have its
 * associated buffers synced to disk. Every log record header has
 * a sync lsn embedded in it. LSNs hold block numbers, so it is easy
 * to get a sync block number. The only concern is to figure out which
 * log record header to believe.
 * The following algorithm uses the log record header with the largest
 * lsn. The entire log record does not need to be valid. We only care
 * that the header is valid.
 * We could speed up search by using current head_blk buffer, but it is not
	xfs_daddr_t	*head_blk,
	xfs_daddr_t	*tail_blk)
	xlog_rec_header_t	*rhead;
	xlog_op_header_t	*op_head;
	xfs_caddr_t		offset = NULL;
	xfs_daddr_t		umount_data_blk;
	xfs_daddr_t		after_umount_blk;
	 * Find previous log record
	if ((error = xlog_find_head(log, head_blk)))
	bp = xlog_get_bp(log, 1);
	if (*head_blk == 0) {				/* special case */
		error = xlog_bread(log, 0, 1, bp, &offset);
		if (xlog_get_cycle(offset) == 0) {
			/* leave all other log inited values alone */
	 * Search backwards looking for log record header block
	ASSERT(*head_blk < INT_MAX);
	for (i = (int)(*head_blk) - 1; i >= 0; i--) {
		error = xlog_bread(log, i, 1, bp, &offset);
		if (*(__be32 *)offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
	 * If we haven't found the log record header block, start looking
	 * again from the end of the physical log. XXXmiken: There should be
	 * a check here to make sure we didn't search more than N blocks in
		for (i = log->l_logBBsize - 1; i >= (int)(*head_blk); i--) {
			error = xlog_bread(log, i, 1, bp, &offset);
			if (*(__be32 *)offset ==
			    cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
		xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
		return XFS_ERROR(EIO);
	/* find blk_no of tail of log */
	rhead = (xlog_rec_header_t *)offset;
	*tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));
	 * Reset log values according to the state of the log when we
	 * crashed. In the case where head_blk == 0, we bump curr_cycle
	 * one because the next write starts a new cycle rather than
	 * continuing the cycle of the last good log record. At this
	 * point we have guaranteed that all partial log records have been
	 * accounted for. Therefore, we know that the last good log record
	 * written was complete and ended exactly on the end boundary
	 * of the physical log.
	log->l_prev_block = i;
	log->l_curr_block = (int)*head_blk;
	log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
	atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
	atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
	xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
					BBTOB(log->l_curr_block));
	xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
					BBTOB(log->l_curr_block));
	 * Look for unmount record. If we find it, then we know there
	 * was a clean unmount. Since 'i' could be the last block in
	 * the physical log, we convert to a log block before comparing
	 * Save the current tail lsn to use to pass to
	 * xlog_clear_stale_blocks() below. We won't want to clear the
	 * unmount record if there is one, so we pass the lsn of the
	 * unmount record rather than the block after it.
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		int	h_size = be32_to_cpu(rhead->h_size);
		int	h_version = be32_to_cpu(rhead->h_version);
		if ((h_version & XLOG_VERSION_2) &&
		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
			if (h_size % XLOG_HEADER_CYCLE_SIZE)
	after_umount_blk = (i + hblks + (int)
		BTOBB(be32_to_cpu(rhead->h_len))) % log->l_logBBsize;
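	/*
	 * Example of the wrap arithmetic: assuming a 1000 block log with
	 * the record header at i = 990, hblks = 1 and h_len = 8k
	 * (BTOBB(8k) = 16 basic blocks), the block after the record is
	 * (990 + 1 + 16) % 1000 = 7, i.e. the record wrapped around the
	 * physical end of the log.
	 */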
	tail_lsn = atomic64_read(&log->l_tail_lsn);
	if (*head_blk == after_umount_blk &&
	    be32_to_cpu(rhead->h_num_logops) == 1) {
		umount_data_blk = (i + hblks) % log->l_logBBsize;
		error = xlog_bread(log, umount_data_blk, 1, bp, &offset);
		op_head = (xlog_op_header_t *)offset;
		if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
			 * Set tail and last sync so that newly written
			 * log records will point recovery to after the
			 * current unmount record.
			xlog_assign_atomic_lsn(&log->l_tail_lsn,
					log->l_curr_cycle, after_umount_blk);
			xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
					log->l_curr_cycle, after_umount_blk);
			*tail_blk = after_umount_blk;
			 * Note that the unmount was clean. If the unmount
			 * was not clean, we need to know this to rebuild the
			 * superblock counters from the perag headers if we
			 * have a filesystem using non-persistent counters.
			log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;
	 * Make sure that there are no blocks in front of the head
	 * with the same cycle number as the head. This can happen
	 * because we allow multiple outstanding log writes concurrently,
	 * and the later writes might make it out before earlier ones.
	 * We use the lsn from before modifying it so that we'll never
	 * overwrite the unmount record after a clean unmount.
	 * Do this only if we are going to recover the filesystem
	 * NOTE: This used to say "if (!readonly)"
	 * However on Linux, we can & do recover a read-only filesystem.
	 * We only skip recovery if NORECOVERY is specified on mount,
	 * in which case we would not be here.
	 * But... if the -device- itself is readonly, just skip this.
	 * We can't recover this device anyway, so it won't matter.
	if (!xfs_readonly_buftarg(log->l_mp->m_logdev_targp))
		error = xlog_clear_stale_blocks(log, tail_lsn);
	xfs_warn(log->l_mp, "failed to locate log tail");
 * Is the log zeroed at all?
 * The last binary search should be changed to perform an X block read
 * once X becomes small enough. You can then search linearly through
 * the X blocks. This will cut down on the number of reads we need to do.
 * If the log is partially zeroed, this routine will pass back the blkno
 * of the first block with cycle number 0. It won't have a complete LR
 *	0  => the log is completely written to
 *	-1 => use *blk_no as the first block of the log
 *	>0 => error has occurred
	xfs_daddr_t	*blk_no)
	uint		first_cycle, last_cycle;
	xfs_daddr_t	new_blk, last_blk, start_blk;
	xfs_daddr_t	num_scan_bblks;
	int		error, log_bbnum = log->l_logBBsize;
	/* check totally zeroed log */
	bp = xlog_get_bp(log, 1);
	error = xlog_bread(log, 0, 1, bp, &offset);
	first_cycle = xlog_get_cycle(offset);
	if (first_cycle == 0) {		/* completely zeroed log */
	/* check partially zeroed log */
	error = xlog_bread(log, log_bbnum-1, 1, bp, &offset);
	last_cycle = xlog_get_cycle(offset);
	if (last_cycle != 0) {		/* log completely written to */
	} else if (first_cycle != 1) {
		 * If the cycle of the last block is zero, the cycle of
		 * the first block must be 1. If it's not, maybe we're
		 * not looking at a log... Bail out.
		"Log inconsistent or not a log (last==0, first!=1)");
		return XFS_ERROR(EINVAL);
	/* we have a partially zeroed log */
	last_blk = log_bbnum-1;
	if ((error = xlog_find_cycle_start(log, bp, 0, &last_blk, 0)))
	 * Validate the answer. Because there is no way to guarantee that
	 * the entire log is made up of log records which are the same size,
	 * we scan over the defined maximum blocks. At this point, the maximum
	 * is not chosen to mean anything special. XXXmiken
	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
	ASSERT(num_scan_bblks <= INT_MAX);
	if (last_blk < num_scan_bblks)
		num_scan_bblks = last_blk;
	start_blk = last_blk - num_scan_bblks;
	 * We search for any instances of cycle number 0 that occur before
	 * our current estimate of the head. What we're trying to detect is
	 *        1 ... | 0 | 1 | 0...
	 *                       ^ binary search ends here
	if ((error = xlog_find_verify_cycle(log, start_blk,
					 (int)num_scan_bblks, 0, &new_blk)))
	 * Potentially backup over partial log record write. We don't need
	 * to search the end of the log because we know it is zero.
	if ((error = xlog_find_verify_log_record(log, start_blk,
				&last_blk, 0)) == -1) {
		error = XFS_ERROR(EIO);
 * These are simple subroutines used by xlog_clear_stale_blocks() below
 * to initialize a buffer full of empty log record headers and write
 * them into the log.
	xlog_rec_header_t	*recp = (xlog_rec_header_t *)buf;
	memset(buf, 0, BBSIZE);
	recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
	recp->h_cycle = cpu_to_be32(cycle);
	recp->h_version = cpu_to_be32(
			xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
	recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
	recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
	recp->h_fmt = cpu_to_be32(XLOG_FMT);
	memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
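	/*
	 * Example: an LSN packs the cycle into the upper 32 bits and the
	 * block number into the lower 32 bits, so assuming cycle = 5 and
	 * block = 0x80, xlog_assign_lsn(5, 0x80) stores the 64-bit value
	 * 0x0000000500000080 in h_lsn.
	 */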
xlog_write_log_records(
	int		sectbb = log->l_sectBBsize;
	int		end_block = start_block + blocks;
	 * Greedily allocate a buffer big enough to handle the full
	 * range of basic blocks to be written. If that fails, try
	 * a smaller size. We need to be able to write at least a
	 * log sector, or we're out of luck.
	bufblks = 1 << ffs(blocks);
	while (bufblks > log->l_logBBsize)
	while (!(bp = xlog_get_bp(log, bufblks))) {
		if (bufblks < sectbb)
	/* We may need to do a read at the start to fill in part of
	 * the buffer in the starting sector not covered by the first
	balign = round_down(start_block, sectbb);
	if (balign != start_block) {
		error = xlog_bread_noalign(log, start_block, 1, bp);
		j = start_block - balign;
	for (i = start_block; i < end_block; i += bufblks) {
		int		bcount, endcount;
		bcount = min(bufblks, end_block - start_block);
		endcount = bcount - j;
		/* We may need to do a read at the end to fill in part of
		 * the buffer in the final sector not covered by the write.
		 * If this is the same sector as the above read, skip it.
		ealign = round_down(end_block, sectbb);
		if (j == 0 && (start_block + endcount > ealign)) {
			offset = bp->b_addr + BBTOB(ealign - start_block);
			error = xlog_bread_offset(log, ealign, sectbb,
		offset = xlog_align(log, start_block, endcount, bp);
		for (; j < endcount; j++) {
			xlog_add_record(log, offset, cycle, i+j,
					tail_cycle, tail_block);
		error = xlog_bwrite(log, start_block, endcount, bp);
		start_block += endcount;
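		/*
		 * Example of the partial-sector handling: assuming sectbb = 8
		 * and start_block = 10, balign rounds down to 8 and j = 2, so
		 * the leading read preserves blocks 8-9 on disk and dummy
		 * records are stamped starting at index 2 of the buffer. The
		 * ealign read does the same for a write ending mid-sector.
		 */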
 * This routine is called to blow away any incomplete log writes out
 * in front of the log head. We do this so that we won't become confused
 * if we come up, write only a little bit more, and then crash again.
 * If we leave the partial log records out there, this situation could
 * cause us to think those partial writes are valid blocks since they
 * have the current cycle number. We get rid of them by overwriting them
 * with empty log records with the old cycle number rather than the
 * The tail lsn is passed in rather than taken from
 * the log so that we will not write over the unmount record after a
 * clean unmount in a 512 block log. Doing so would leave the log without
 * any valid log records in it until a new one was written. If we crashed
 * during that time we would not be able to recover.
xlog_clear_stale_blocks(
	int		tail_cycle, head_cycle;
	int		tail_block, head_block;
	int		tail_distance, max_distance;
	tail_cycle = CYCLE_LSN(tail_lsn);
	tail_block = BLOCK_LSN(tail_lsn);
	head_cycle = log->l_curr_cycle;
	head_block = log->l_curr_block;
	 * Figure out the distance between the new head of the log
	 * and the tail. We want to write over any blocks beyond the
	 * head that we may have written just before the crash, but
	 * we don't want to overwrite the tail of the log.
	if (head_cycle == tail_cycle) {
		 * The tail is behind the head in the physical log,
		 * so the distance from the head to the tail is the
		 * distance from the head to the end of the log plus
		 * the distance from the beginning of the log to the
		if (unlikely(head_block < tail_block || head_block >= log->l_logBBsize)) {
			XFS_ERROR_REPORT("xlog_clear_stale_blocks(1)",
					 XFS_ERRLEVEL_LOW, log->l_mp);
			return XFS_ERROR(EFSCORRUPTED);
		tail_distance = tail_block + (log->l_logBBsize - head_block);
		 * The head is behind the tail in the physical log,
		 * so the distance from the head to the tail is just
		 * the tail block minus the head block.
		if (unlikely(head_block >= tail_block || head_cycle != (tail_cycle + 1))){
			XFS_ERROR_REPORT("xlog_clear_stale_blocks(2)",
					 XFS_ERRLEVEL_LOW, log->l_mp);
			return XFS_ERROR(EFSCORRUPTED);
		tail_distance = tail_block - head_block;
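		/*
		 * Example: assuming a 1000 block log. With head and tail on
		 * the same cycle, head_block = 900 and tail_block = 100,
		 * tail_distance = 100 + (1000 - 900) = 200 blocks of
		 * clearable space ahead of the head. With the head one
		 * cycle ahead, head_block = 100 and tail_block = 900,
		 * tail_distance = 900 - 100 = 800.
		 */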
	 * If the head is right up against the tail, we can't clear
	if (tail_distance <= 0) {
		ASSERT(tail_distance == 0);
	max_distance = XLOG_TOTAL_REC_SHIFT(log);
	 * Take the smaller of the maximum amount of outstanding I/O
	 * we could have and the distance to the tail to clear out.
	 * We take the smaller so that we don't overwrite the tail and
	 * we don't waste all day writing from the head to the tail
	max_distance = MIN(max_distance, tail_distance);
	if ((head_block + max_distance) <= log->l_logBBsize) {
		 * We can stomp all the blocks we need to without
		 * wrapping around the end of the log. Just do it
		 * in a single write. Use the cycle number of the
		 * current cycle minus one so that the log will look like:
		error = xlog_write_log_records(log, (head_cycle - 1),
				head_block, max_distance, tail_cycle,
		 * We need to wrap around the end of the physical log in
		 * order to clear all the blocks. Do it in two separate
		 * I/Os. The first write should be from the head to the
		 * end of the physical log, and it should use the current
		 * cycle number minus one just like above.
		distance = log->l_logBBsize - head_block;
		error = xlog_write_log_records(log, (head_cycle - 1),
				head_block, distance, tail_cycle,
		 * Now write the blocks at the start of the physical log.
		 * This writes the remainder of the blocks we want to clear.
		 * It uses the current cycle number since we're now on the
		 * same cycle as the head so that we get:
		 *    n ... n ... | n - 1 ...
		 *    ^^^^^ blocks we're writing
		distance = max_distance - (log->l_logBBsize - head_block);
		error = xlog_write_log_records(log, head_cycle, 0, distance,
				tail_cycle, tail_block);
/******************************************************************************
 *		Log recover routines
 ******************************************************************************
STATIC xlog_recover_t *
xlog_recover_find_tid(
	struct hlist_head	*head,
	xlog_recover_t		*trans;
	hlist_for_each_entry(trans, head, r_list) {
		if (trans->r_log_tid == tid)
xlog_recover_new_tid(
	struct hlist_head	*head,
	xlog_recover_t		*trans;
	trans = kmem_zalloc(sizeof(xlog_recover_t), KM_SLEEP);
	trans->r_log_tid = tid;
	INIT_LIST_HEAD(&trans->r_itemq);
	INIT_HLIST_NODE(&trans->r_list);
	hlist_add_head(&trans->r_list, head);
xlog_recover_add_item(
	struct list_head	*head)
	xlog_recover_item_t	*item;
	item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP);
	INIT_LIST_HEAD(&item->ri_list);
	list_add_tail(&item->ri_list, head);
xlog_recover_add_to_cont_trans(
	struct xlog_recover	*trans,
	xlog_recover_item_t	*item;
	xfs_caddr_t		ptr, old_ptr;
	if (list_empty(&trans->r_itemq)) {
		/* finish copying rest of trans header */
		xlog_recover_add_item(&trans->r_itemq);
		ptr = (xfs_caddr_t) &trans->r_theader +
				sizeof(xfs_trans_header_t) - len;
		memcpy(ptr, dp, len); /* d, s, l */
	/* take the tail entry */
	item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
	old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
	old_len = item->ri_buf[item->ri_cnt-1].i_len;
	ptr = kmem_realloc(old_ptr, len+old_len, old_len, KM_SLEEP);
	memcpy(&ptr[old_len], dp, len); /* d, s, l */
	item->ri_buf[item->ri_cnt-1].i_len += len;
	item->ri_buf[item->ri_cnt-1].i_addr = ptr;
	trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
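	/*
	 * Example: if the tail region already holds 24 bytes and this
	 * continuation carries len = 8 more, the region is reallocated to
	 * 32 bytes, the new data lands at offset 24 and i_len becomes 32.
	 */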
 * The next region to add is the start of a new region. It could be
 * a whole region or it could be the first part of a new region. Because
 * of this, the assumption here is that the type and size fields of all
 * format structures fit into the first 32 bits of the structure.
 * This works because all regions must be 32 bit aligned. Therefore, we
 * either have both fields or we have neither field. In the case we have
 * neither field, the data part of the region is zero length. We only have
 * a log_op_header and can throw away the header since a new one will appear
 * later. If we have at least 4 bytes, then we can determine how many regions
 * will appear in the current log item.
xlog_recover_add_to_trans(
	struct xlog_recover	*trans,
	xfs_inode_log_format_t	*in_f;			/* any will do */
	xlog_recover_item_t	*item;
	if (list_empty(&trans->r_itemq)) {
		/* we need to catch log corruptions here */
		if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
			xfs_warn(log->l_mp, "%s: bad header magic number",
			return XFS_ERROR(EIO);
		if (len == sizeof(xfs_trans_header_t))
			xlog_recover_add_item(&trans->r_itemq);
		memcpy(&trans->r_theader, dp, len); /* d, s, l */
	ptr = kmem_alloc(len, KM_SLEEP);
	memcpy(ptr, dp, len);
	in_f = (xfs_inode_log_format_t *)ptr;
	/* take the tail entry */
	item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
	if (item->ri_total != 0 &&
	     item->ri_total == item->ri_cnt) {
		/* tail item is in use, get a new one */
		xlog_recover_add_item(&trans->r_itemq);
		item = list_entry(trans->r_itemq.prev,
					xlog_recover_item_t, ri_list);
	if (item->ri_total == 0) {		/* first region to be added */
		if (in_f->ilf_size == 0 ||
		    in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
			"bad number of regions (%d) in inode log format",
			return XFS_ERROR(EIO);
		item->ri_total = in_f->ilf_size;
			kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
	ASSERT(item->ri_total > item->ri_cnt);
	/* Description region is ri_buf[0] */
	item->ri_buf[item->ri_cnt].i_addr = ptr;
	item->ri_buf[item->ri_cnt].i_len  = len;
	trace_xfs_log_recover_item_add(log, trans, item, 0);
 * Sort the log items in the transaction. Cancelled buffers need
 * to be put first so they are processed before any items that might
 * modify the buffers. If they are cancelled, then the modifications
 * don't need to be replayed.
xlog_recover_reorder_trans(
	struct xlog_recover	*trans,
	xlog_recover_item_t	*item, *n;
	LIST_HEAD(sort_list);
	list_splice_init(&trans->r_itemq, &sort_list);
	list_for_each_entry_safe(item, n, &sort_list, ri_list) {
		xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
		switch (ITEM_TYPE(item)) {
			if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) {
				trace_xfs_log_recover_item_reorder_head(log,
				list_move(&item->ri_list, &trans->r_itemq);
		case XFS_LI_QUOTAOFF:
			trace_xfs_log_recover_item_reorder_tail(log,
			list_move_tail(&item->ri_list, &trans->r_itemq);
				"%s: unrecognized type of log operation",
			return XFS_ERROR(EIO);
	ASSERT(list_empty(&sort_list));
 * Build up the table of buf cancel records so that we don't replay
 * cancelled data in the second pass. For buffer records that are
 * not cancel records, there is nothing to do here so we just return.
 * If we get a cancel record which is already in the table, this indicates
 * that the buffer was cancelled multiple times. In order to ensure
 * that during pass 2 we keep the record in the table until we reach its
 * last occurrence in the log, we keep a reference count in the cancel
 * record in the table to tell us how many times we expect to see this
 * record during the second pass.
xlog_recover_buffer_pass1(
	struct xlog_recover_item	*item)
	xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
	struct list_head	*bucket;
	struct xfs_buf_cancel	*bcp;
	 * If this isn't a cancel buffer item, then just return.
	if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) {
		trace_xfs_log_recover_buf_not_cancel(log, buf_f);
	 * Insert an xfs_buf_cancel record into the hash table of cancel
	 * records. If there is already an identical record, bump its
	 * reference count.
	bucket = XLOG_BUF_CANCEL_BUCKET(log, buf_f->blf_blkno);
	list_for_each_entry(bcp, bucket, bc_list) {
		if (bcp->bc_blkno == buf_f->blf_blkno &&
		    bcp->bc_len == buf_f->blf_len) {
			trace_xfs_log_recover_buf_cancel_ref_inc(log, buf_f);
	bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), KM_SLEEP);
	bcp->bc_blkno = buf_f->blf_blkno;
	bcp->bc_len = buf_f->blf_len;
	bcp->bc_refcount = 1;
	list_add_tail(&bcp->bc_list, bucket);
	trace_xfs_log_recover_buf_cancel_add(log, buf_f);
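	/*
	 * Example: a buffer cancelled twice in the log leaves one table
	 * entry with bc_refcount == 2 after pass 1. Pass 2 decrements it
	 * once per cancel item and removes the entry only at the last
	 * one, so a later re-use of those blocks is replayed normally.
	 */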
 * Check to see whether the buffer being recovered has a corresponding
 * entry in the buffer cancel record table. If it does then return 1
 * so that it will be cancelled, otherwise return 0. If the buffer is
 * actually a buffer cancel item (XFS_BLF_CANCEL is set), then decrement
 * the refcount on the entry in the table and remove it from the table
 * if this is the last reference.
 * We remove the cancel record from the table when we encounter its
 * last occurrence in the log so that if the same buffer is re-used
 * again after its last cancellation we actually replay the changes
 * made at that point.
xlog_check_buffer_cancelled(
	struct list_head	*bucket;
	struct xfs_buf_cancel	*bcp;
	if (log->l_buf_cancel_table == NULL) {
		 * There is nothing in the table built in pass one,
		 * so this buffer must not be cancelled.
		ASSERT(!(flags & XFS_BLF_CANCEL));
	 * Search for an entry in the cancel table that matches our buffer.
	bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
	list_for_each_entry(bcp, bucket, bc_list) {
		if (bcp->bc_blkno == blkno && bcp->bc_len == len)
	 * We didn't find a corresponding entry in the table, so return 0 so
	 * that the buffer is NOT cancelled.
	ASSERT(!(flags & XFS_BLF_CANCEL));
	 * We've got a match, so return 1 so that the recovery of this buffer
	 * is cancelled. If this buffer is actually a buffer cancel log
	 * item, then decrement the refcount on the one in the table and
	 * remove it if this is the last reference.
	if (flags & XFS_BLF_CANCEL) {
		if (--bcp->bc_refcount == 0) {
			list_del(&bcp->bc_list);
 * Perform recovery for a buffer full of inodes. In these buffers, the only
 * data which should be recovered is that which corresponds to the
 * di_next_unlinked pointers in the on disk inode structures. The rest of the
 * data for the inodes is always logged through the inodes themselves rather
 * than the inode buffer and is recovered in xlog_recover_inode_pass2().
 * The only time when buffers full of inodes are fully recovered is when the
 * buffer is full of newly allocated inodes. In this case the buffer will
 * not be marked as an inode buffer and so will be sent to
 * xlog_recover_do_reg_buffer() below during recovery.
xlog_recover_do_inode_buffer(
	struct xfs_mount	*mp,
	xlog_recover_item_t	*item,
	xfs_buf_log_format_t	*buf_f)
	int			reg_buf_offset = 0;
	int			reg_buf_bytes = 0;
	int			next_unlinked_offset;
	xfs_agino_t		*logged_nextp;
	xfs_agino_t		*buffer_nextp;
	trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);
	bp->b_ops = &xfs_inode_buf_ops;
	inodes_per_buf = BBTOB(bp->b_io_length) >> mp->m_sb.sb_inodelog;
	for (i = 0; i < inodes_per_buf; i++) {
		next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
			offsetof(xfs_dinode_t, di_next_unlinked);
		while (next_unlinked_offset >=
		       (reg_buf_offset + reg_buf_bytes)) {
			 * The next di_next_unlinked field is beyond
			 * the current logged region. Find the next
			 * logged region that contains or is beyond
			 * the current di_next_unlinked field.
			bit = xfs_next_bit(buf_f->blf_data_map,
					   buf_f->blf_map_size, bit);
			 * If there are no more logged regions in the
			 * buffer, then we're done.
			nbits = xfs_contig_bits(buf_f->blf_data_map,
						buf_f->blf_map_size, bit);
			reg_buf_offset = bit << XFS_BLF_SHIFT;
			reg_buf_bytes = nbits << XFS_BLF_SHIFT;
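			/*
			 * Example: each bit in blf_data_map covers one
			 * 128 byte chunk (assuming XFS_BLF_SHIFT == 7),
			 * so a run starting at bit 2 with nbits = 3
			 * describes a logged region at byte offset 256
			 * that is 384 bytes long.
			 */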
		 * If the current logged region starts after the current
		 * di_next_unlinked field, then move on to the next
		 * di_next_unlinked field.
		if (next_unlinked_offset < reg_buf_offset)
		ASSERT(item->ri_buf[item_index].i_addr != NULL);
		ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0);
		ASSERT((reg_buf_offset + reg_buf_bytes) <=
							BBTOB(bp->b_io_length));
		 * The current logged region contains a copy of the
		 * current di_next_unlinked field. Extract its value
		 * and copy it to the buffer copy.
		logged_nextp = item->ri_buf[item_index].i_addr +
				next_unlinked_offset - reg_buf_offset;
		if (unlikely(*logged_nextp == 0)) {
			"Bad inode buffer log record (ptr = 0x%p, bp = 0x%p). "
			"Trying to replay bad (0) inode di_next_unlinked field.",
			XFS_ERROR_REPORT("xlog_recover_do_inode_buf",
					 XFS_ERRLEVEL_LOW, mp);
			return XFS_ERROR(EFSCORRUPTED);
		buffer_nextp = (xfs_agino_t *)xfs_buf_offset(bp,
					      next_unlinked_offset);
		*buffer_nextp = *logged_nextp;
 * Perform a 'normal' buffer recovery. Each logged region of the
 * buffer should be copied over the corresponding region in the
 * given buffer. The bitmap in the buf log format structure indicates
 * where to place the logged data.
xlog_recover_do_reg_buffer(
	struct xfs_mount	*mp,
	xlog_recover_item_t	*item,
	xfs_buf_log_format_t	*buf_f)
	trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);
	i = 1;  /* 0 is the buf format structure */
		bit = xfs_next_bit(buf_f->blf_data_map,
				   buf_f->blf_map_size, bit);
		nbits = xfs_contig_bits(buf_f->blf_data_map,
					buf_f->blf_map_size, bit);
		ASSERT(item->ri_buf[i].i_addr != NULL);
		ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0);
		ASSERT(BBTOB(bp->b_io_length) >=
		       ((uint)bit << XFS_BLF_SHIFT) + (nbits << XFS_BLF_SHIFT));
		 * Do a sanity check if this is a dquot buffer. Just checking
		 * the first dquot in the buffer should do. XXX This is
		 * probably a good thing to do for other buf types also.
		if (buf_f->blf_flags &
			(XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
			if (item->ri_buf[i].i_addr == NULL) {
					"XFS: NULL dquot in %s.", __func__);
			if (item->ri_buf[i].i_len < sizeof(xfs_disk_dquot_t)) {
					"XFS: dquot too small (%d) in %s.",
					item->ri_buf[i].i_len, __func__);
			error = xfs_qm_dqcheck(mp, item->ri_buf[i].i_addr,
					       -1, 0, XFS_QMOPT_DOWARN,
					       "dquot_buf_recover");
		memcpy(xfs_buf_offset(bp,
			(uint)bit << XFS_BLF_SHIFT),	/* dest */
			item->ri_buf[i].i_addr,		/* source */
			nbits<<XFS_BLF_SHIFT);		/* length */
	/* Shouldn't be any more regions */
	ASSERT(i == item->ri_total);
	switch (buf_f->blf_flags & XFS_BLF_TYPE_MASK) {
	case XFS_BLF_BTREE_BUF:
		switch (be32_to_cpu(*(__be32 *)bp->b_addr)) {
		case XFS_ABTB_CRC_MAGIC:
		case XFS_ABTC_CRC_MAGIC:
		case XFS_ABTB_MAGIC:
		case XFS_ABTC_MAGIC:
			bp->b_ops = &xfs_allocbt_buf_ops;
		case XFS_IBT_CRC_MAGIC:
			bp->b_ops = &xfs_inobt_buf_ops;
		case XFS_BMAP_CRC_MAGIC:
		case XFS_BMAP_MAGIC:
			bp->b_ops = &xfs_bmbt_buf_ops;
			xfs_warn(mp, "Bad btree block magic!");
	case XFS_BLF_AGF_BUF:
		if (*(__be32 *)bp->b_addr != cpu_to_be32(XFS_AGF_MAGIC)) {
			xfs_warn(mp, "Bad AGF block magic!");
		bp->b_ops = &xfs_agf_buf_ops;
	case XFS_BLF_AGFL_BUF:
		if (!xfs_sb_version_hascrc(&mp->m_sb))
		if (*(__be32 *)bp->b_addr != cpu_to_be32(XFS_AGFL_MAGIC)) {
			xfs_warn(mp, "Bad AGFL block magic!");
		bp->b_ops = &xfs_agfl_buf_ops;
	case XFS_BLF_AGI_BUF:
		if (*(__be32 *)bp->b_addr != cpu_to_be32(XFS_AGI_MAGIC)) {
			xfs_warn(mp, "Bad AGI block magic!");
		bp->b_ops = &xfs_agi_buf_ops;
	case XFS_BLF_UDQUOT_BUF:
	case XFS_BLF_PDQUOT_BUF:
	case XFS_BLF_GDQUOT_BUF:
		if (*(__be16 *)bp->b_addr != cpu_to_be16(XFS_DQUOT_MAGIC)) {
			xfs_warn(mp, "Bad DQUOT block magic!");
		bp->b_ops = &xfs_dquot_buf_ops;
	case XFS_BLF_DINO_BUF:
		 * we get here with inode allocation buffers, not buffers that
		 * track unlinked list changes.
		if (*(__be16 *)bp->b_addr != cpu_to_be16(XFS_DINODE_MAGIC)) {
			xfs_warn(mp, "Bad INODE block magic!");
		bp->b_ops = &xfs_inode_buf_ops;
	case XFS_BLF_SYMLINK_BUF:
		if (*(__be32 *)bp->b_addr != cpu_to_be32(XFS_SYMLINK_MAGIC)) {
			xfs_warn(mp, "Bad symlink block magic!");
		bp->b_ops = &xfs_symlink_buf_ops;
 * Do some primitive error checking on ondisk dquot data structures.
	struct xfs_mount	*mp,
	xfs_disk_dquot_t	*ddq,
	uint			type,	/* used only when IO_dorepair is true */
	xfs_dqblk_t		*d = (xfs_dqblk_t *)ddq;
	 * We can encounter an uninitialized dquot buffer for 2 reasons:
	 * 1. If we crash while deleting the quotainode(s), and those blks got
	 *    used for user data. This is because we take the path of regular
	 *    file deletion; however, the size field of quotainodes is never
	 *    updated, so all the tricks that we play in itruncate_finish
	 *    don't quite matter.
	 * 2. We don't replay the quota buffers when there's a quotaoff logitem.
	 *    But the allocation will be replayed so we'll end up with an
	 *    uninitialized quota block.
	 * This is all fine; things are still consistent, and we haven't lost
	 * any quota information. Just don't complain about bad dquot blks.
	if (ddq->d_magic != cpu_to_be16(XFS_DQUOT_MAGIC)) {
		if (flags & XFS_QMOPT_DOWARN)
			"%s : XFS dquot ID 0x%x, magic 0x%x != 0x%x",
			str, id, be16_to_cpu(ddq->d_magic), XFS_DQUOT_MAGIC);
	if (ddq->d_version != XFS_DQUOT_VERSION) {
		if (flags & XFS_QMOPT_DOWARN)
			"%s : XFS dquot ID 0x%x, version 0x%x != 0x%x",
			str, id, ddq->d_version, XFS_DQUOT_VERSION);
	if (ddq->d_flags != XFS_DQ_USER &&
	    ddq->d_flags != XFS_DQ_PROJ &&
	    ddq->d_flags != XFS_DQ_GROUP) {
		if (flags & XFS_QMOPT_DOWARN)
			"%s : XFS dquot ID 0x%x, unknown flags 0x%x",
			str, id, ddq->d_flags);
	if (id != -1 && id != be32_to_cpu(ddq->d_id)) {
		if (flags & XFS_QMOPT_DOWARN)
			"%s : ondisk-dquot 0x%p, ID mismatch: "
			"0x%x expected, found id 0x%x",
			str, ddq, id, be32_to_cpu(ddq->d_id));
	if (!errs && ddq->d_id) {
		if (ddq->d_blk_softlimit &&
		    be64_to_cpu(ddq->d_bcount) >
				be64_to_cpu(ddq->d_blk_softlimit)) {
			if (!ddq->d_btimer) {
				if (flags & XFS_QMOPT_DOWARN)
					"%s : Dquot ID 0x%x (0x%p) BLK TIMER NOT STARTED",
					str, (int)be32_to_cpu(ddq->d_id), ddq);
		if (ddq->d_ino_softlimit &&
		    be64_to_cpu(ddq->d_icount) >
				be64_to_cpu(ddq->d_ino_softlimit)) {
			if (!ddq->d_itimer) {
				if (flags & XFS_QMOPT_DOWARN)
					"%s : Dquot ID 0x%x (0x%p) INODE TIMER NOT STARTED",
					str, (int)be32_to_cpu(ddq->d_id), ddq);
		if (ddq->d_rtb_softlimit &&
		    be64_to_cpu(ddq->d_rtbcount) >
				be64_to_cpu(ddq->d_rtb_softlimit)) {
			if (!ddq->d_rtbtimer) {
				if (flags & XFS_QMOPT_DOWARN)
					"%s : Dquot ID 0x%x (0x%p) RTBLK TIMER NOT STARTED",
					str, (int)be32_to_cpu(ddq->d_id), ddq);
	if (!errs || !(flags & XFS_QMOPT_DQREPAIR))
	if (flags & XFS_QMOPT_DOWARN)
		xfs_notice(mp, "Re-initializing dquot ID 0x%x", id);
	 * Typically, a repair is only requested by quotacheck.
	ASSERT(flags & XFS_QMOPT_DQREPAIR);
	memset(d, 0, sizeof(xfs_dqblk_t));
	d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
	d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
	d->dd_diskdq.d_flags = type;
	d->dd_diskdq.d_id = cpu_to_be32(id);
 * Perform a dquot buffer recovery.
 * Simple algorithm: if we have found a QUOTAOFF logitem of the same type
 * (ie. USR or GRP), then just toss this buffer away; don't recover it.
 * Else, treat it as a regular buffer and do recovery.
xlog_recover_do_dquot_buffer(
	struct xfs_mount		*mp,
	struct xlog_recover_item	*item,
	struct xfs_buf_log_format	*buf_f)
	trace_xfs_log_recover_buf_dquot_buf(log, buf_f);
	 * Filesystems are required to send in quota flags at mount time.
	if (mp->m_qflags == 0) {
	if (buf_f->blf_flags & XFS_BLF_UDQUOT_BUF)
		type |= XFS_DQ_USER;
	if (buf_f->blf_flags & XFS_BLF_PDQUOT_BUF)
		type |= XFS_DQ_PROJ;
	if (buf_f->blf_flags & XFS_BLF_GDQUOT_BUF)
		type |= XFS_DQ_GROUP;
	 * This type of quota was turned off, so ignore this buffer
	if (log->l_quotaoffs_flag & type)
	xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
 * This routine replays a modification made to a buffer at runtime.
 * There are actually two types of buffer, regular and inode, which
 * are handled differently. Inode buffers are handled differently
 * in that we only recover a specific set of data from them, namely
 * the inode di_next_unlinked fields. This is because all other inode
 * data is actually logged via inode records and any data we replay
 * here which overlaps that may be stale.
 * When meta-data buffers are freed at run time we log a buffer item
 * with the XFS_BLF_CANCEL bit set to indicate that previous copies
 * of the buffer in the log should not be replayed at recovery time.
 * This is so that if the blocks covered by the buffer are reused for
 * file data before we crash we don't end up replaying old, freed
 * meta-data into a user's file.
 * To handle the cancellation of buffer log items, we make two passes
 * over the log during recovery. During the first we build a table of
 * those buffers which have been cancelled, and during the second we
 * only replay those buffers which do not have corresponding cancel
 * records in the table. See xlog_recover_do_buffer_pass[1,2] above
 * for more details on the implementation of the table of cancel records.
xlog_recover_buffer_pass2(
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item)
	xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
	xfs_mount_t		*mp = log->l_mp;
	 * In this pass we only want to recover all the buffers which have
	 * not been cancelled and are not cancellation buffers themselves.
	if (xlog_check_buffer_cancelled(log, buf_f->blf_blkno,
			buf_f->blf_len, buf_f->blf_flags)) {
		trace_xfs_log_recover_buf_cancel(log, buf_f);
	trace_xfs_log_recover_buf_recover(log, buf_f);
	if (buf_f->blf_flags & XFS_BLF_INODE_BUF)
		buf_flags |= XBF_UNMAPPED;
	bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len,
		return XFS_ERROR(ENOMEM);
	error = bp->b_error;
		xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#1)");
	if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
		error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
	} else if (buf_f->blf_flags &
		  (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
		xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
		xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
	return XFS_ERROR(error);
	 * Perform delayed write on the buffer. Asynchronous writes will be
	 * slower when taking into account all the buffers to be flushed.
	 * Also make sure that only inode buffers with good sizes stay in
	 * the buffer cache. The kernel moves inodes in buffers of 1 block
	 * or XFS_INODE_CLUSTER_SIZE bytes, whichever is bigger. The inode
	 * buffers in the log can be a different size if the log was generated
	 * by an older kernel using unclustered inode buffers or a newer kernel
	 * running with a different inode cluster size. Regardless, if the
	 * inode buffer size isn't MAX(blocksize, XFS_INODE_CLUSTER_SIZE)
	 * for *our* value of XFS_INODE_CLUSTER_SIZE, then we need to keep
	 * the buffer out of the buffer cache so that the buffer won't
	 * overlap with future reads of those inodes.
2269 if (XFS_DINODE_MAGIC ==
2270 be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) &&
2271 (BBTOB(bp->b_io_length) != MAX(log->l_mp->m_sb.sb_blocksize,
2272 (__uint32_t)XFS_INODE_CLUSTER_SIZE(log->l_mp)))) {
2274 error = xfs_bwrite(bp);
2276 ASSERT(bp->b_target->bt_mount == mp);
2277 bp->b_iodone = xlog_recover_iodone;
2278 xfs_buf_delwri_queue(bp, buffer_list);
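/*
 * Illustrative sketch only (not the kernel code): the core of the
 * two-pass cancel scheme described above.  Pass 1 records the block
 * number and length of every buffer logged with XFS_BLF_CANCEL; pass 2
 * looks each buffer up before replaying it.  All demo_* names are
 * hypothetical, and the flat array stands in for the real hashed list
 * of struct xfs_buf_cancel entries with per-entry reference counts.
 */
struct demo_bcancel {
	unsigned long long	blkno;
	unsigned int		len;
};

static struct demo_bcancel	demo_bctable[64];
static int			demo_bcnr;

static void
demo_pass1_note_cancel(unsigned long long blkno, unsigned int len)
{
	if (demo_bcnr < 64) {
		demo_bctable[demo_bcnr].blkno = blkno;
		demo_bctable[demo_bcnr].len = len;
		demo_bcnr++;
	}
}

static int
demo_pass2_cancelled(unsigned long long blkno, unsigned int len)
{
	int	i;

	for (i = 0; i < demo_bcnr; i++)
		if (demo_bctable[i].blkno == blkno &&
		    demo_bctable[i].len == len)
			return 1;	/* cancelled: do not replay */
	return 0;			/* not cancelled: replay */
}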
2286 xlog_recover_inode_pass2(
2288 struct list_head *buffer_list,
2289 struct xlog_recover_item *item)
2291 xfs_inode_log_format_t *in_f;
2292 xfs_mount_t *mp = log->l_mp;
2301 xfs_icdinode_t *dicp;
2305 if (item->ri_buf[0].i_len == sizeof(xfs_inode_log_format_t)) {
2306 in_f = item->ri_buf[0].i_addr;
2308 in_f = kmem_alloc(sizeof(xfs_inode_log_format_t), KM_SLEEP);
2310 error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f);
2316  * Inode buffers can be freed; look out for that case
2317  * and do not replay the inode.
2319 if (xlog_check_buffer_cancelled(log, in_f->ilf_blkno,
2320 in_f->ilf_len, 0)) {
2322 trace_xfs_log_recover_inode_cancel(log, in_f);
2325 trace_xfs_log_recover_inode_recover(log, in_f);
2327 bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len, 0,
2328 &xfs_inode_buf_ops);
2333 error = bp->b_error;
2335 xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#2)");
2339 ASSERT(in_f->ilf_fields & XFS_ILOG_CORE);
2340 dip = (xfs_dinode_t *)xfs_buf_offset(bp, in_f->ilf_boffset);
2343  * Make sure the place we're flushing out to really looks like an inode!
2346 if (unlikely(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))) {
2349 "%s: Bad inode magic number, dip = 0x%p, dino bp = 0x%p, ino = %Ld",
2350 __func__, dip, bp, in_f->ilf_ino);
2351 XFS_ERROR_REPORT("xlog_recover_inode_pass2(1)",
2352 XFS_ERRLEVEL_LOW, mp);
2353 error = EFSCORRUPTED;
2356 dicp = item->ri_buf[1].i_addr;
2357 if (unlikely(dicp->di_magic != XFS_DINODE_MAGIC)) {
2360 "%s: Bad inode log record, rec ptr 0x%p, ino %Ld",
2361 __func__, item, in_f->ilf_ino);
2362 XFS_ERROR_REPORT("xlog_recover_inode_pass2(2)",
2363 XFS_ERRLEVEL_LOW, mp);
2364 error = EFSCORRUPTED;
2368 /* Skip replay when the on disk inode is newer than the log one */
2369 if (dicp->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
2371  * Deal with the wrap case: an on-disk value of DI_MAX_FLUSH
2372  * is older than small values that have wrapped past it
2374 if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH &&
2375 dicp->di_flushiter < (DI_MAX_FLUSH >> 1)) {
2379 trace_xfs_log_recover_inode_skip(log, in_f);
2384 /* Take the opportunity to reset the flush iteration count */
2385 dicp->di_flushiter = 0;
2387 if (unlikely(S_ISREG(dicp->di_mode))) {
2388 if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
2389 (dicp->di_format != XFS_DINODE_FMT_BTREE)) {
2390 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)",
2391 XFS_ERRLEVEL_LOW, mp, dicp);
2394 "%s: Bad regular inode log record, rec ptr 0x%p, "
2395 "ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
2396 __func__, item, dip, bp, in_f->ilf_ino);
2397 error = EFSCORRUPTED;
2400 } else if (unlikely(S_ISDIR(dicp->di_mode))) {
2401 if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
2402 (dicp->di_format != XFS_DINODE_FMT_BTREE) &&
2403 (dicp->di_format != XFS_DINODE_FMT_LOCAL)) {
2404 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(4)",
2405 XFS_ERRLEVEL_LOW, mp, dicp);
2408 "%s: Bad dir inode log record, rec ptr 0x%p, "
2409 "ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
2410 __func__, item, dip, bp, in_f->ilf_ino);
2411 error = EFSCORRUPTED;
2415 if (unlikely(dicp->di_nextents + dicp->di_anextents > dicp->di_nblocks)){
2416 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(5)",
2417 XFS_ERRLEVEL_LOW, mp, dicp);
2420 "%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
2421 "dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld",
2422 __func__, item, dip, bp, in_f->ilf_ino,
2423 dicp->di_nextents + dicp->di_anextents,
2425 error = EFSCORRUPTED;
2428 if (unlikely(dicp->di_forkoff > mp->m_sb.sb_inodesize)) {
2429 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(6)",
2430 XFS_ERRLEVEL_LOW, mp, dicp);
2433 "%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
2434 "dino bp 0x%p, ino %Ld, forkoff 0x%x", __func__,
2435 item, dip, bp, in_f->ilf_ino, dicp->di_forkoff);
2436 error = EFSCORRUPTED;
2439 isize = xfs_icdinode_size(dicp->di_version);
2440 if (unlikely(item->ri_buf[1].i_len > isize)) {
2441 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)",
2442 XFS_ERRLEVEL_LOW, mp, dicp);
2445 "%s: Bad inode log record length %d, rec ptr 0x%p",
2446 __func__, item->ri_buf[1].i_len, item);
2447 error = EFSCORRUPTED;
2451 /* The core is in in-core format */
2452 xfs_dinode_to_disk(dip, dicp);
2454 /* the rest is in on-disk format */
2455 if (item->ri_buf[1].i_len > isize) {
2456 memcpy((char *)dip + isize,
2457 item->ri_buf[1].i_addr + isize,
2458 item->ri_buf[1].i_len - isize);
2461 fields = in_f->ilf_fields;
2462 switch (fields & (XFS_ILOG_DEV | XFS_ILOG_UUID)) {
2464 xfs_dinode_put_rdev(dip, in_f->ilf_u.ilfu_rdev);
2467 memcpy(XFS_DFORK_DPTR(dip),
2468 &in_f->ilf_u.ilfu_uuid,
2473 if (in_f->ilf_size == 2)
2474 goto write_inode_buffer;
2475 len = item->ri_buf[2].i_len;
2476 src = item->ri_buf[2].i_addr;
2477 ASSERT(in_f->ilf_size <= 4);
2478 ASSERT((in_f->ilf_size == 3) || (fields & XFS_ILOG_AFORK));
2479 ASSERT(!(fields & XFS_ILOG_DFORK) ||
2480 (len == in_f->ilf_dsize));
2482 switch (fields & XFS_ILOG_DFORK) {
2483 case XFS_ILOG_DDATA:
2485 memcpy(XFS_DFORK_DPTR(dip), src, len);
2488 case XFS_ILOG_DBROOT:
2489 xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, len,
2490 (xfs_bmdr_block_t *)XFS_DFORK_DPTR(dip),
2491 XFS_DFORK_DSIZE(dip, mp));
2496 * There are no data fork flags set.
2498 ASSERT((fields & XFS_ILOG_DFORK) == 0);
2503 * If we logged any attribute data, recover it. There may or
2504  * may not have been any other non-core data logged in this record.
2507 if (in_f->ilf_fields & XFS_ILOG_AFORK) {
2508 if (in_f->ilf_fields & XFS_ILOG_DFORK) {
2513 len = item->ri_buf[attr_index].i_len;
2514 src = item->ri_buf[attr_index].i_addr;
2515 ASSERT(len == in_f->ilf_asize);
2517 switch (in_f->ilf_fields & XFS_ILOG_AFORK) {
2518 case XFS_ILOG_ADATA:
2520 dest = XFS_DFORK_APTR(dip);
2521 ASSERT(len <= XFS_DFORK_ASIZE(dip, mp));
2522 memcpy(dest, src, len);
2525 case XFS_ILOG_ABROOT:
2526 dest = XFS_DFORK_APTR(dip);
2527 xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src,
2528 len, (xfs_bmdr_block_t*)dest,
2529 XFS_DFORK_ASIZE(dip, mp));
2533 xfs_warn(log->l_mp, "%s: Invalid flag", __func__);
2542 /* re-generate the checksum. */
2543 xfs_dinode_calc_crc(log->l_mp, dip);
2545 ASSERT(bp->b_target->bt_mount == mp);
2546 bp->b_iodone = xlog_recover_iodone;
2547 xfs_buf_delwri_queue(bp, buffer_list);
2552 return XFS_ERROR(error);
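/*
 * Sketch of the di_flushiter comparison made above, with hypothetical
 * demo_* names.  The flush iteration counter is 16 bits wide, so an
 * on-disk value of DI_MAX_FLUSH together with a small logged value
 * means the counter wrapped and the logged inode is in fact newer.
 */
#define DEMO_MAX_FLUSH	0xffff

static int
demo_log_inode_is_stale(unsigned int disk_iter, unsigned int log_iter)
{
	if (log_iter >= disk_iter)
		return 0;	/* logged copy is as new or newer: replay */
	if (disk_iter == DEMO_MAX_FLUSH && log_iter < (DEMO_MAX_FLUSH >> 1))
		return 0;	/* counter wrapped: logged copy is newer */
	return 1;		/* on-disk inode is newer: skip replay */
}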
2556  * Recover QUOTAOFF records. We simply make a note of it in the xlog
2557  * structure, so that we know not to do any dquot item or dquot buffer recovery of this type.
2561 xlog_recover_quotaoff_pass1(
2563 struct xlog_recover_item *item)
2565 xfs_qoff_logformat_t *qoff_f = item->ri_buf[0].i_addr;
2569 * The logitem format's flag tells us if this was user quotaoff,
2570 * group/project quotaoff or both.
2572 if (qoff_f->qf_flags & XFS_UQUOTA_ACCT)
2573 log->l_quotaoffs_flag |= XFS_DQ_USER;
2574 if (qoff_f->qf_flags & XFS_PQUOTA_ACCT)
2575 log->l_quotaoffs_flag |= XFS_DQ_PROJ;
2576 if (qoff_f->qf_flags & XFS_GQUOTA_ACCT)
2577 log->l_quotaoffs_flag |= XFS_DQ_GROUP;
2583 * Recover a dquot record
2586 xlog_recover_dquot_pass2(
2588 struct list_head *buffer_list,
2589 struct xlog_recover_item *item)
2591 xfs_mount_t *mp = log->l_mp;
2593 struct xfs_disk_dquot *ddq, *recddq;
2595 xfs_dq_logformat_t *dq_f;
2600 * Filesystems are required to send in quota flags at mount time.
2602 if (mp->m_qflags == 0)
2605 recddq = item->ri_buf[1].i_addr;
2606 if (recddq == NULL) {
2607 xfs_alert(log->l_mp, "NULL dquot in %s.", __func__);
2608 return XFS_ERROR(EIO);
2610 if (item->ri_buf[1].i_len < sizeof(xfs_disk_dquot_t)) {
2611 xfs_alert(log->l_mp, "dquot too small (%d) in %s.",
2612 item->ri_buf[1].i_len, __func__);
2613 return XFS_ERROR(EIO);
2617  * This type of quota was turned off, so ignore this record.
2619 type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
2621 if (log->l_quotaoffs_flag & type)
2625 * At this point we know that quota was _not_ turned off.
2626 * Since the mount flags are not indicating to us otherwise, this
2627 * must mean that quota is on, and the dquot needs to be replayed.
2628 * Remember that we may not have fully recovered the superblock yet,
2629 * so we can't do the usual trick of looking at the SB quota bits.
2631 * The other possibility, of course, is that the quota subsystem was
2632 * removed since the last mount - ENOSYS.
2634 dq_f = item->ri_buf[0].i_addr;
2636 error = xfs_qm_dqcheck(mp, recddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
2637 "xlog_recover_dquot_pass2 (log copy)");
2639 return XFS_ERROR(EIO);
2640 ASSERT(dq_f->qlf_len == 1);
2642 error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dq_f->qlf_blkno,
2643 XFS_FSB_TO_BB(mp, dq_f->qlf_len), 0, &bp,
2649 ddq = (xfs_disk_dquot_t *)xfs_buf_offset(bp, dq_f->qlf_boffset);
2652 * At least the magic num portion should be on disk because this
2653 * was among a chunk of dquots created earlier, and we did some
2654 * minimal initialization then.
2656 error = xfs_qm_dqcheck(mp, ddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
2657 "xlog_recover_dquot_pass2");
2660 return XFS_ERROR(EIO);
2663 memcpy(ddq, recddq, item->ri_buf[1].i_len);
2665 ASSERT(dq_f->qlf_size == 2);
2666 ASSERT(bp->b_target->bt_mount == mp);
2667 bp->b_iodone = xlog_recover_iodone;
2668 xfs_buf_delwri_queue(bp, buffer_list);
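/*
 * Sketch of the quotaoff filtering used above (hypothetical demo_*
 * names and flag values).  Pass 1 accumulates the quota types that a
 * QUOTAOFF record switched off; pass 2 then drops any dquot whose type
 * bits intersect that mask, just as xlog_recover_dquot_pass2() does
 * with log->l_quotaoffs_flag.
 */
#define DEMO_DQ_USER	0x1
#define DEMO_DQ_PROJ	0x2
#define DEMO_DQ_GROUP	0x4

static unsigned int	demo_quotaoffs;		/* built during pass 1 */

static int
demo_skip_dquot(unsigned int d_flags)
{
	unsigned int	type;

	type = d_flags & (DEMO_DQ_USER | DEMO_DQ_PROJ | DEMO_DQ_GROUP);
	return (demo_quotaoffs & type) != 0;	/* quota was turned off */
}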
2675 * This routine is called to create an in-core extent free intent
2676 * item from the efi format structure which was logged on disk.
2677 * It allocates an in-core efi, copies the extents from the format
2678  * structure into it, and adds the efi to the AIL with the given LSN.
2682 xlog_recover_efi_pass2(
2684 struct xlog_recover_item *item,
2688 xfs_mount_t *mp = log->l_mp;
2689 xfs_efi_log_item_t *efip;
2690 xfs_efi_log_format_t *efi_formatp;
2692 efi_formatp = item->ri_buf[0].i_addr;
2694 efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
2695 if ((error = xfs_efi_copy_format(&(item->ri_buf[0]),
2696 &(efip->efi_format)))) {
2697 xfs_efi_item_free(efip);
2700 atomic_set(&efip->efi_next_extent, efi_formatp->efi_nextents);
2702 spin_lock(&log->l_ailp->xa_lock);
2704 * xfs_trans_ail_update() drops the AIL lock.
2706 xfs_trans_ail_update(log->l_ailp, &efip->efi_item, lsn);
2712 * This routine is called when an efd format structure is found in
2713  * a committed transaction in the log. Its purpose is to cancel
2714 * the corresponding efi if it was still in the log. To do this
2715 * it searches the AIL for the efi with an id equal to that in the
2716  * efd format structure. If we find it, we remove the efi from the AIL and free it.
2720 xlog_recover_efd_pass2(
2722 struct xlog_recover_item *item)
2724 xfs_efd_log_format_t *efd_formatp;
2725 xfs_efi_log_item_t *efip = NULL;
2726 xfs_log_item_t *lip;
2728 struct xfs_ail_cursor cur;
2729 struct xfs_ail *ailp = log->l_ailp;
2731 efd_formatp = item->ri_buf[0].i_addr;
2732 ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) +
2733 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) ||
2734 (item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) +
2735 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_64_t)))));
2736 efi_id = efd_formatp->efd_efi_id;
2739  * Search the AIL for the efi with the id in the efd format structure.
2742 spin_lock(&ailp->xa_lock);
2743 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
2744 while (lip != NULL) {
2745 if (lip->li_type == XFS_LI_EFI) {
2746 efip = (xfs_efi_log_item_t *)lip;
2747 if (efip->efi_format.efi_id == efi_id) {
2749  * xfs_trans_ail_delete() drops the AIL lock.
2752 xfs_trans_ail_delete(ailp, lip,
2753 SHUTDOWN_CORRUPT_INCORE);
2754 xfs_efi_item_free(efip);
2755 spin_lock(&ailp->xa_lock);
2759 lip = xfs_trans_ail_cursor_next(ailp, &cur);
2761 xfs_trans_ail_cursor_done(ailp, &cur);
2762 spin_unlock(&ailp->xa_lock);
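/*
 * Sketch of the intent/done pairing implemented above, with
 * hypothetical demo_* types.  An EFD cancels the EFI carrying the same
 * id; any EFI left unmatched at the end of recovery still describes
 * extents that must be freed (see xlog_recover_process_efis() below).
 */
struct demo_efi {
	unsigned long long	id;
	int			done;
};

static void
demo_apply_efd(struct demo_efi *efis, int nr, unsigned long long efd_id)
{
	int	i;

	for (i = 0; i < nr; i++) {
		if (efis[i].id == efd_id) {
			efis[i].done = 1;	/* intent completed */
			return;
		}
	}
}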
2768  * Free up any resources allocated by the transaction.
2770  * Remember that EFIs, EFDs, and IUNLINKs are handled later.
2773 xlog_recover_free_trans(
2774 struct xlog_recover *trans)
2776 xlog_recover_item_t *item, *n;
2779 list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
2780 /* Free the regions in the item. */
2781 list_del(&item->ri_list);
2782 for (i = 0; i < item->ri_cnt; i++)
2783 kmem_free(item->ri_buf[i].i_addr);
2784 /* Free the item itself */
2785 kmem_free(item->ri_buf);
2788 /* Free the transaction recover structure */
2793 xlog_recover_commit_pass1(
2795 struct xlog_recover *trans,
2796 struct xlog_recover_item *item)
2798 trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1);
2800 switch (ITEM_TYPE(item)) {
2802 return xlog_recover_buffer_pass1(log, item);
2803 case XFS_LI_QUOTAOFF:
2804 return xlog_recover_quotaoff_pass1(log, item);
2809 /* nothing to do in pass 1 */
2812 xfs_warn(log->l_mp, "%s: invalid item type (%d)",
2813 __func__, ITEM_TYPE(item));
2815 return XFS_ERROR(EIO);
2820 xlog_recover_commit_pass2(
2822 struct xlog_recover *trans,
2823 struct list_head *buffer_list,
2824 struct xlog_recover_item *item)
2826 trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2);
2828 switch (ITEM_TYPE(item)) {
2830 return xlog_recover_buffer_pass2(log, buffer_list, item);
2832 return xlog_recover_inode_pass2(log, buffer_list, item);
2834 return xlog_recover_efi_pass2(log, item, trans->r_lsn);
2836 return xlog_recover_efd_pass2(log, item);
2838 return xlog_recover_dquot_pass2(log, buffer_list, item);
2839 case XFS_LI_QUOTAOFF:
2840 /* nothing to do in pass2 */
2843 xfs_warn(log->l_mp, "%s: invalid item type (%d)",
2844 __func__, ITEM_TYPE(item));
2846 return XFS_ERROR(EIO);
2851 * Perform the transaction.
2853 * If the transaction modifies a buffer or inode, do it now. Otherwise,
2854 * EFIs and EFDs get queued up by adding entries into the AIL for them.
2857 xlog_recover_commit_trans(
2859 struct xlog_recover *trans,
2862 int error = 0, error2;
2863 xlog_recover_item_t *item;
2864 LIST_HEAD(buffer_list);
2866 hlist_del(&trans->r_list);
2868 error = xlog_recover_reorder_trans(log, trans, pass);
2872 list_for_each_entry(item, &trans->r_itemq, ri_list) {
2874 case XLOG_RECOVER_PASS1:
2875 error = xlog_recover_commit_pass1(log, trans, item);
2877 case XLOG_RECOVER_PASS2:
2878 error = xlog_recover_commit_pass2(log, trans,
2879 &buffer_list, item);
2889 xlog_recover_free_trans(trans);
2892 error2 = xfs_buf_delwri_submit(&buffer_list);
2893 return error ? error : error2;
2897 xlog_recover_unmount_trans(
2899 struct xlog_recover *trans)
2901 /* Do nothing now */
2902 xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
2907 * There are two valid states of the r_state field. 0 indicates that the
2908 * transaction structure is in a normal state. We have either seen the
2909 * start of the transaction or the last operation we added was not a partial
2910 * operation. If the last operation we added to the transaction was a
2911 * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS.
2913 * NOTE: skip LRs with 0 data length.
2916 xlog_recover_process_data(
2918 struct hlist_head rhash[],
2919 struct xlog_rec_header *rhead,
2925 xlog_op_header_t *ohead;
2926 xlog_recover_t *trans;
2932 lp = dp + be32_to_cpu(rhead->h_len);
2933 num_logops = be32_to_cpu(rhead->h_num_logops);
2935 /* check the log format matches our own - else we can't recover */
2936 if (xlog_header_check_recover(log->l_mp, rhead))
2937 return (XFS_ERROR(EIO));
2939 while ((dp < lp) && num_logops) {
2940 ASSERT(dp + sizeof(xlog_op_header_t) <= lp);
2941 ohead = (xlog_op_header_t *)dp;
2942 dp += sizeof(xlog_op_header_t);
2943 if (ohead->oh_clientid != XFS_TRANSACTION &&
2944 ohead->oh_clientid != XFS_LOG) {
2945 xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
2946 __func__, ohead->oh_clientid);
2948 return (XFS_ERROR(EIO));
2950 tid = be32_to_cpu(ohead->oh_tid);
2951 hash = XLOG_RHASH(tid);
2952 trans = xlog_recover_find_tid(&rhash[hash], tid);
2953 if (trans == NULL) { /* not found; add new tid */
2954 if (ohead->oh_flags & XLOG_START_TRANS)
2955 xlog_recover_new_tid(&rhash[hash], tid,
2956 be64_to_cpu(rhead->h_lsn));
2958 if (dp + be32_to_cpu(ohead->oh_len) > lp) {
2959 xfs_warn(log->l_mp, "%s: bad length 0x%x",
2960 __func__, be32_to_cpu(ohead->oh_len));
2962 return (XFS_ERROR(EIO));
2964 flags = ohead->oh_flags & ~XLOG_END_TRANS;
2965 if (flags & XLOG_WAS_CONT_TRANS)
2966 flags &= ~XLOG_CONTINUE_TRANS;
2968 case XLOG_COMMIT_TRANS:
2969 error = xlog_recover_commit_trans(log,
2972 case XLOG_UNMOUNT_TRANS:
2973 error = xlog_recover_unmount_trans(log, trans);
2975 case XLOG_WAS_CONT_TRANS:
2976 error = xlog_recover_add_to_cont_trans(log,
2978 be32_to_cpu(ohead->oh_len));
2980 case XLOG_START_TRANS:
2981 xfs_warn(log->l_mp, "%s: bad transaction",
2984 error = XFS_ERROR(EIO);
2987 case XLOG_CONTINUE_TRANS:
2988 error = xlog_recover_add_to_trans(log, trans,
2989 dp, be32_to_cpu(ohead->oh_len));
2992 xfs_warn(log->l_mp, "%s: bad flag 0x%x",
2995 error = XFS_ERROR(EIO);
3001 dp += be32_to_cpu(ohead->oh_len);
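/*
 * Sketch of the op-header walk performed above (hypothetical demo_*
 * types).  A log record body is a sequence of operation headers, each
 * followed by its payload; recovery steps header -> payload -> header
 * until the record ends or the logop count runs out, validating each
 * length against the end of the record as it goes.
 */
struct demo_op_header {
	unsigned int	oh_len;
	unsigned char	oh_flags;
};

static int
demo_walk_record(unsigned char *dp, unsigned char *lp, int num_logops)
{
	while (dp < lp && num_logops--) {
		struct demo_op_header	*ohead = (struct demo_op_header *)dp;

		dp += sizeof(*ohead);
		if (dp + ohead->oh_len > lp)
			return -1;	/* bad length: record is corrupt */
		/* dispatch on ohead->oh_flags here (start/continue/commit) */
		dp += ohead->oh_len;
	}
	return 0;
}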
3008 * Process an extent free intent item that was recovered from
3009 * the log. We need to free the extents that it describes.
3012 xlog_recover_process_efi(
3014 xfs_efi_log_item_t *efip)
3016 xfs_efd_log_item_t *efdp;
3021 xfs_fsblock_t startblock_fsb;
3023 ASSERT(!test_bit(XFS_EFI_RECOVERED, &efip->efi_flags));
3026 * First check the validity of the extents described by the
3027 * EFI. If any are bad, then assume that all are bad and
3028 * just toss the EFI.
3030 for (i = 0; i < efip->efi_format.efi_nextents; i++) {
3031 extp = &(efip->efi_format.efi_extents[i]);
3032 startblock_fsb = XFS_BB_TO_FSB(mp,
3033 XFS_FSB_TO_DADDR(mp, extp->ext_start));
3034 if ((startblock_fsb == 0) ||
3035 (extp->ext_len == 0) ||
3036 (startblock_fsb >= mp->m_sb.sb_dblocks) ||
3037 (extp->ext_len >= mp->m_sb.sb_agblocks)) {
3039 * This will pull the EFI from the AIL and
3040 * free the memory associated with it.
3042 set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
3043 xfs_efi_release(efip, efip->efi_format.efi_nextents);
3044 return XFS_ERROR(EIO);
3048 tp = xfs_trans_alloc(mp, 0);
3049 error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, 0, 0);
3052 efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents);
3054 for (i = 0; i < efip->efi_format.efi_nextents; i++) {
3055 extp = &(efip->efi_format.efi_extents[i]);
3056 error = xfs_free_extent(tp, extp->ext_start, extp->ext_len);
3059 xfs_trans_log_efd_extent(tp, efdp, extp->ext_start,
3063 set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
3064 error = xfs_trans_commit(tp, 0);
3068 xfs_trans_cancel(tp, XFS_TRANS_ABORT);
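/*
 * Sketch of the EFI extent sanity checks made above, with hypothetical
 * demo_* names.  An extent is rejected when it starts at block zero,
 * has zero length, starts beyond the end of the filesystem, or is
 * longer than an allocation group; one bad extent discredits the
 * whole EFI.
 */
static int
demo_efi_extent_valid(unsigned long long start_fsb, unsigned int len,
		      unsigned long long fs_blocks, unsigned int ag_blocks)
{
	if (start_fsb == 0 || len == 0)
		return 0;			/* degenerate extent */
	if (start_fsb >= fs_blocks || len >= ag_blocks)
		return 0;			/* out of bounds */
	return 1;
}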
3073 * When this is called, all of the EFIs which did not have
3074 * corresponding EFDs should be in the AIL. What we do now
3075 * is free the extents associated with each one.
3077 * Since we process the EFIs in normal transactions, they
3078 * will be removed at some point after the commit. This prevents
3079 * us from just walking down the list processing each one.
3080 * We'll use a flag in the EFI to skip those that we've already
3081 * processed and use the AIL iteration mechanism's generation
3082 * count to try to speed this up at least a bit.
3084 * When we start, we know that the EFIs are the only things in
3085 * the AIL. As we process them, however, other items are added
3086 * to the AIL. Since everything added to the AIL must come after
3087 * everything already in the AIL, we stop processing as soon as
3088 * we see something other than an EFI in the AIL.
3091 xlog_recover_process_efis(
3094 xfs_log_item_t *lip;
3095 xfs_efi_log_item_t *efip;
3097 struct xfs_ail_cursor cur;
3098 struct xfs_ail *ailp;
3101 spin_lock(&ailp->xa_lock);
3102 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3103 while (lip != NULL) {
3105 * We're done when we see something other than an EFI.
3106 * There should be no EFIs left in the AIL now.
3108 if (lip->li_type != XFS_LI_EFI) {
3110 for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
3111 ASSERT(lip->li_type != XFS_LI_EFI);
3117 * Skip EFIs that we've already processed.
3119 efip = (xfs_efi_log_item_t *)lip;
3120 if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags)) {
3121 lip = xfs_trans_ail_cursor_next(ailp, &cur);
3125 spin_unlock(&ailp->xa_lock);
3126 error = xlog_recover_process_efi(log->l_mp, efip);
3127 spin_lock(&ailp->xa_lock);
3130 lip = xfs_trans_ail_cursor_next(ailp, &cur);
3133 xfs_trans_ail_cursor_done(ailp, &cur);
3134 spin_unlock(&ailp->xa_lock);
3139 * This routine performs a transaction to null out a bad inode pointer
3140 * in an agi unlinked inode hash bucket.
3143 xlog_recover_clear_agi_bucket(
3145 xfs_agnumber_t agno,
3154 tp = xfs_trans_alloc(mp, XFS_TRANS_CLEAR_AGI_BUCKET);
3155 error = xfs_trans_reserve(tp, 0, XFS_CLEAR_AGI_BUCKET_LOG_RES(mp),
3160 error = xfs_read_agi(mp, tp, agno, &agibp);
3164 agi = XFS_BUF_TO_AGI(agibp);
3165 agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
3166 offset = offsetof(xfs_agi_t, agi_unlinked) +
3167 (sizeof(xfs_agino_t) * bucket);
3168 xfs_trans_log_buf(tp, agibp, offset,
3169 (offset + sizeof(xfs_agino_t) - 1));
3171 error = xfs_trans_commit(tp, 0);
3177 xfs_trans_cancel(tp, XFS_TRANS_ABORT);
3179 xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__, agno);
3184 xlog_recover_process_one_iunlink(
3185 struct xfs_mount *mp,
3186 xfs_agnumber_t agno,
3190 struct xfs_buf *ibp;
3191 struct xfs_dinode *dip;
3192 struct xfs_inode *ip;
3196 ino = XFS_AGINO_TO_INO(mp, agno, agino);
3197 error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
3202 * Get the on disk inode to find the next inode in the bucket.
3204 error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &ibp, 0, 0);
3208 ASSERT(ip->i_d.di_nlink == 0);
3209 ASSERT(ip->i_d.di_mode != 0);
3211 /* setup for the next pass */
3212 agino = be32_to_cpu(dip->di_next_unlinked);
3216 * Prevent any DMAPI event from being sent when the reference on
3217 * the inode is dropped.
3219 ip->i_d.di_dmevmask = 0;
3228 * We can't read in the inode this bucket points to, or this inode
3229 * is messed up. Just ditch this bucket of inodes. We will lose
3230 * some inodes and space, but at least we won't hang.
3232 * Call xlog_recover_clear_agi_bucket() to perform a transaction to
3233 * clear the inode pointer in the bucket.
3235 xlog_recover_clear_agi_bucket(mp, agno, bucket);
3240  * xlog_recover_process_iunlinks
3242 * This is called during recovery to process any inodes which
3243 * we unlinked but not freed when the system crashed. These
3244 * inodes will be on the lists in the AGI blocks. What we do
3245 * here is scan all the AGIs and fully truncate and free any
3246 * inodes found on the lists. Each inode is removed from the
3247  * lists when it has been fully truncated and is freed. The
3248  * freeing of the inode and its removal from the list must be atomic.
3252 xlog_recover_process_iunlinks(
3256 xfs_agnumber_t agno;
3267 * Prevent any DMAPI event from being sent while in this function.
3269 mp_dmevmask = mp->m_dmevmask;
3272 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
3274 * Find the agi for this ag.
3276 error = xfs_read_agi(mp, NULL, agno, &agibp);
3279 * AGI is b0rked. Don't process it.
3281 * We should probably mark the filesystem as corrupt
3282 * after we've recovered all the ag's we can....
3287 * Unlock the buffer so that it can be acquired in the normal
3288 * course of the transaction to truncate and free each inode.
3289 * Because we are not racing with anyone else here for the AGI
3290 * buffer, we don't even need to hold it locked to read the
3291  * initial unlinked bucket entries out of the buffer. We keep the
3292  * buffer reference though, so that it stays pinned in memory
3293 * while we need the buffer.
3295 agi = XFS_BUF_TO_AGI(agibp);
3296 xfs_buf_unlock(agibp);
3298 for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
3299 agino = be32_to_cpu(agi->agi_unlinked[bucket]);
3300 while (agino != NULLAGINO) {
3301 agino = xlog_recover_process_one_iunlink(mp,
3302 agno, agino, bucket);
3305 xfs_buf_rele(agibp);
3308 mp->m_dmevmask = mp_dmevmask;
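/*
 * Sketch of the unlinked-list walk above (hypothetical demo_* names).
 * Each AGI bucket heads a singly linked chain of unlinked inodes,
 * threaded through di_next_unlinked; recovery pops entries until it
 * reaches the NULLAGINO terminator, freeing each inode as it goes.
 */
#define DEMO_NULLAGINO	((unsigned int)-1)

/* returns the next agino in the chain, or DEMO_NULLAGINO when done */
typedef unsigned int (*demo_pop_fn)(unsigned int agino);

static void
demo_drain_bucket(unsigned int head_agino, demo_pop_fn pop)
{
	unsigned int	agino = head_agino;

	while (agino != DEMO_NULLAGINO)
		agino = pop(agino);
}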
3312  * Unpack the log buffer data and CRC check it. If the check fails, issue a
3313 * warning if and only if the CRC in the header is non-zero. This makes the
3314 * check an advisory warning, and the zero CRC check will prevent failure
3315 * warnings from being emitted when upgrading the kernel from one that does not
3316 * add CRCs by default.
3318 * When filesystems are CRC enabled, this CRC mismatch becomes a fatal log
3319  * corruption failure.
3322 xlog_unpack_data_crc(
3323 struct xlog_rec_header *rhead,
3329 crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len));
3330 if (crc != rhead->h_crc) {
3331 if (rhead->h_crc || xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
3332 xfs_alert(log->l_mp,
3333 "log record CRC mismatch: found 0x%x, expected 0x%x.\n",
3334 le32_to_cpu(rhead->h_crc),
3336 xfs_hex_dump(dp, 32);
3340 * If we've detected a log record corruption, then we can't
3341 * recover past this point. Abort recovery if we are enforcing
3342 * CRC protection by punting an error back up the stack.
3344 if (xfs_sb_version_hascrc(&log->l_mp->m_sb))
3345 return EFSCORRUPTED;
3353 struct xlog_rec_header *rhead,
3360 error = xlog_unpack_data_crc(rhead, dp, log);
3364 for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
3365 i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
3366 *(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
3370 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
3371 xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
3372 for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
3373 j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3374 k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3375 *(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
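/*
 * Sketch of the advisory CRC policy described above (hypothetical
 * demo_* names).  A mismatch is reported only when the stored CRC is
 * non-zero or the filesystem enforces CRCs, so logs written by older,
 * non-CRC kernels stay quiet; the mismatch is fatal only when the CRC
 * feature bit is set.
 */
static int
demo_check_record_crc(unsigned int computed, unsigned int stored,
		      int crc_enforced)
{
	if (computed == stored)
		return 0;		/* match: nothing to do */
	if (stored != 0 || crc_enforced) {
		/* warn: log record CRC mismatch */
	}
	return crc_enforced ? -1 : 0;	/* fatal only when enforcing */
}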
3384 xlog_valid_rec_header(
3386 struct xlog_rec_header *rhead,
3391 if (unlikely(rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))) {
3392 XFS_ERROR_REPORT("xlog_valid_rec_header(1)",
3393 XFS_ERRLEVEL_LOW, log->l_mp);
3394 return XFS_ERROR(EFSCORRUPTED);
3397 (!rhead->h_version ||
3398 (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) {
3399 xfs_warn(log->l_mp, "%s: unrecognised log version (%d).",
3400 __func__, be32_to_cpu(rhead->h_version));
3401 return XFS_ERROR(EIO);
3404 /* LR body must have data or it wouldn't have been written */
3405 hlen = be32_to_cpu(rhead->h_len);
3406 if (unlikely(hlen <= 0 || hlen > INT_MAX)) {
3407 XFS_ERROR_REPORT("xlog_valid_rec_header(2)",
3408 XFS_ERRLEVEL_LOW, log->l_mp);
3409 return XFS_ERROR(EFSCORRUPTED);
3411 if (unlikely( blkno > log->l_logBBsize || blkno > INT_MAX )) {
3412 XFS_ERROR_REPORT("xlog_valid_rec_header(3)",
3413 XFS_ERRLEVEL_LOW, log->l_mp);
3414 return XFS_ERROR(EFSCORRUPTED);
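/*
 * Sketch of the bounds checks above (hypothetical demo_* names),
 * mirroring xlog_valid_rec_header(): a record body must carry data
 * and the header block number must land inside the log.
 */
static int
demo_rec_bounds_ok(int hlen, long long blkno, long long log_bblocks)
{
	if (hlen <= 0)
		return 0;	/* LR body must have data */
	if (blkno > log_bblocks)
		return 0;	/* header beyond the end of the log */
	return 1;
}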
3420 * Read the log from tail to head and process the log records found.
3421 * Handle the two cases where the tail and head are in the same cycle
3422 * and where the active portion of the log wraps around the end of
3423 * the physical log separately. The pass parameter is passed through
3424 * to the routines called to process the data and is not looked at
3428 xlog_do_recovery_pass(
3430 xfs_daddr_t head_blk,
3431 xfs_daddr_t tail_blk,
3434 xlog_rec_header_t *rhead;
3437 xfs_buf_t *hbp, *dbp;
3438 int error = 0, h_size;
3439 int bblks, split_bblks;
3440 int hblks, split_hblks, wrapped_hblks;
3441 struct hlist_head rhash[XLOG_RHASH_SIZE];
3443 ASSERT(head_blk != tail_blk);
3446 * Read the header of the tail block and get the iclog buffer size from
3447 * h_size. Use this to tell how many sectors make up the log header.
3449 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
3451 * When using variable length iclogs, read first sector of
3452 * iclog header and extract the header size from it. Get a
3453 * new hbp that is the correct size.
3455 hbp = xlog_get_bp(log, 1);
3459 error = xlog_bread(log, tail_blk, 1, hbp, &offset);
3463 rhead = (xlog_rec_header_t *)offset;
3464 error = xlog_valid_rec_header(log, rhead, tail_blk);
3467 h_size = be32_to_cpu(rhead->h_size);
3468 if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) &&
3469 (h_size > XLOG_HEADER_CYCLE_SIZE)) {
3470 hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
3471 if (h_size % XLOG_HEADER_CYCLE_SIZE)
3474 hbp = xlog_get_bp(log, hblks);
3479 ASSERT(log->l_sectBBsize == 1);
3481 hbp = xlog_get_bp(log, 1);
3482 h_size = XLOG_BIG_RECORD_BSIZE;
3487 dbp = xlog_get_bp(log, BTOBB(h_size));
3493 memset(rhash, 0, sizeof(rhash));
3494 if (tail_blk <= head_blk) {
3495 for (blk_no = tail_blk; blk_no < head_blk; ) {
3496 error = xlog_bread(log, blk_no, hblks, hbp, &offset);
3500 rhead = (xlog_rec_header_t *)offset;
3501 error = xlog_valid_rec_header(log, rhead, blk_no);
3505 /* blocks in data section */
3506 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3507 error = xlog_bread(log, blk_no + hblks, bblks, dbp,
3512 error = xlog_unpack_data(rhead, offset, log);
3516 error = xlog_recover_process_data(log,
3517 rhash, rhead, offset, pass);
3520 blk_no += bblks + hblks;
3524 * Perform recovery around the end of the physical log.
3525 * When the head is not on the same cycle number as the tail,
3526 * we can't do a sequential recovery as above.
3529 while (blk_no < log->l_logBBsize) {
3531 * Check for header wrapping around physical end-of-log
3533 offset = hbp->b_addr;
3536 if (blk_no + hblks <= log->l_logBBsize) {
3537 /* Read header in one read */
3538 error = xlog_bread(log, blk_no, hblks, hbp,
3543 /* This LR is split across physical log end */
3544 if (blk_no != log->l_logBBsize) {
3545 /* some data before physical log end */
3546 ASSERT(blk_no <= INT_MAX);
3547 split_hblks = log->l_logBBsize - (int)blk_no;
3548 ASSERT(split_hblks > 0);
3549 error = xlog_bread(log, blk_no,
3557 * Note: this black magic still works with
3558 * large sector sizes (non-512) only because:
3559 * - we increased the buffer size originally
3560 * by 1 sector giving us enough extra space
3561 * for the second read;
3562  * - the log start is guaranteed to be sector aligned;
3564 * - we read the log end (LR header start)
3565 * _first_, then the log start (LR header end)
3566 * - order is important.
3568 wrapped_hblks = hblks - split_hblks;
3569 error = xlog_bread_offset(log, 0,
3571 offset + BBTOB(split_hblks));
3575 rhead = (xlog_rec_header_t *)offset;
3576 error = xlog_valid_rec_header(log, rhead,
3577 split_hblks ? blk_no : 0);
3581 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3584 /* Read in data for log record */
3585 if (blk_no + bblks <= log->l_logBBsize) {
3586 error = xlog_bread(log, blk_no, bblks, dbp,
3591 /* This log record is split across the
3592 * physical end of log */
3593 offset = dbp->b_addr;
3595 if (blk_no != log->l_logBBsize) {
3596 /* some data is before the physical end of log */
3598 ASSERT(!wrapped_hblks);
3599 ASSERT(blk_no <= INT_MAX);
3601 log->l_logBBsize - (int)blk_no;
3602 ASSERT(split_bblks > 0);
3603 error = xlog_bread(log, blk_no,
3611 * Note: this black magic still works with
3612 * large sector sizes (non-512) only because:
3613 * - we increased the buffer size originally
3614 * by 1 sector giving us enough extra space
3615 * for the second read;
3616  * - the log start is guaranteed to be sector aligned;
3618 * - we read the log end (LR header start)
3619 * _first_, then the log start (LR header end)
3620 * - order is important.
3622 error = xlog_bread_offset(log, 0,
3623 bblks - split_bblks, dbp,
3624 offset + BBTOB(split_bblks));
3629 error = xlog_unpack_data(rhead, offset, log);
3633 error = xlog_recover_process_data(log, rhash,
3634 rhead, offset, pass);
3640 ASSERT(blk_no >= log->l_logBBsize);
3641 blk_no -= log->l_logBBsize;
3643 /* read first part of physical log */
3644 while (blk_no < head_blk) {
3645 error = xlog_bread(log, blk_no, hblks, hbp, &offset);
3649 rhead = (xlog_rec_header_t *)offset;
3650 error = xlog_valid_rec_header(log, rhead, blk_no);
3654 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3655 error = xlog_bread(log, blk_no+hblks, bblks, dbp,
3660 error = xlog_unpack_data(rhead, offset, log);
3664 error = xlog_recover_process_data(log, rhash,
3665 rhead, offset, pass);
3668 blk_no += bblks + hblks;
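/*
 * Sketch of the wrapped read performed above when a record straddles
 * the physical end of the log (hypothetical demo_* names).  The blocks
 * up to the end of the device are read first, then the remainder is
 * read from block zero into the tail of the same buffer; the buffer
 * was sized with one extra sector for exactly this case.
 */
typedef int (*demo_bread_fn)(long long blk, int nbblks, char *dst);

static int
demo_read_wrapped(long long blk, int nbblks, long long log_bblocks,
		  char *buf, demo_bread_fn bread)
{
	int	before = (int)(log_bblocks - blk);

	if (nbblks <= before)
		return bread(blk, nbblks, buf);	/* no wrap needed */
	if (bread(blk, before, buf))
		return -1;
	return bread(0, nbblks - before,
		     buf + before * 512 /* BBSIZE */);
}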
3680 * Do the recovery of the log. We actually do this in two phases.
3681 * The two passes are necessary in order to implement the function
3682 * of cancelling a record written into the log. The first pass
3683 * determines those things which have been cancelled, and the
3684 * second pass replays log items normally except for those which
3685 * have been cancelled. The handling of the replay and cancellations
3686 * takes place in the log item type specific routines.
3688 * The table of items which have cancel records in the log is allocated
3689 * and freed at this level, since only here do we know when all of
3690 * the log recovery has been completed.
3693 xlog_do_log_recovery(
3695 xfs_daddr_t head_blk,
3696 xfs_daddr_t tail_blk)
3700 ASSERT(head_blk != tail_blk);
3703 * First do a pass to find all of the cancelled buf log items.
3704 * Store them in the buf_cancel_table for use in the second pass.
3706 log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE *
3707 sizeof(struct list_head),
3709 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
3710 INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);
3712 error = xlog_do_recovery_pass(log, head_blk, tail_blk,
3713 XLOG_RECOVER_PASS1);
3715 kmem_free(log->l_buf_cancel_table);
3716 log->l_buf_cancel_table = NULL;
3720 * Then do a second pass to actually recover the items in the log.
3721 * When it is complete free the table of buf cancel items.
3723 error = xlog_do_recovery_pass(log, head_blk, tail_blk,
3724 XLOG_RECOVER_PASS2);
3729 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
3730 ASSERT(list_empty(&log->l_buf_cancel_table[i]));
3734 kmem_free(log->l_buf_cancel_table);
3735 log->l_buf_cancel_table = NULL;
3741 * Do the actual recovery
3746 xfs_daddr_t head_blk,
3747 xfs_daddr_t tail_blk)
3754 * First replay the images in the log.
3756 error = xlog_do_log_recovery(log, head_blk, tail_blk);
3761 * If IO errors happened during recovery, bail out.
3763 if (XFS_FORCED_SHUTDOWN(log->l_mp)) {
3768 * We now update the tail_lsn since much of the recovery has completed
3769  * and there may be space available to use. If there were no extent
3770  * frees or iunlinks, we can free up the entire log and set the tail_lsn to
3771 * be the last_sync_lsn. This was set in xlog_find_tail to be the
3772 * lsn of the last known good LR on disk. If there are extent frees
3773 * or iunlinks they will have some entries in the AIL; so we look at
3774 * the AIL to determine how to set the tail_lsn.
3776 xlog_assign_tail_lsn(log->l_mp);
3779 * Now that we've finished replaying all buffer and inode
3780 * updates, re-read in the superblock and reverify it.
3782 bp = xfs_getsb(log->l_mp, 0);
3784 ASSERT(!(XFS_BUF_ISWRITE(bp)));
3786 XFS_BUF_UNASYNC(bp);
3787 bp->b_ops = &xfs_sb_buf_ops;
3788 xfsbdstrat(log->l_mp, bp);
3789 error = xfs_buf_iowait(bp);
3791 xfs_buf_ioerror_alert(bp, __func__);
3797 /* Convert superblock from on-disk format */
3798 sbp = &log->l_mp->m_sb;
3799 xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));
3800 ASSERT(sbp->sb_magicnum == XFS_SB_MAGIC);
3801 ASSERT(xfs_sb_good_version(sbp));
3804 /* We've re-read the superblock so re-initialize per-cpu counters */
3805 xfs_icsb_reinit_counters(log->l_mp);
3807 xlog_recover_check_summary(log);
3809 /* Normal transactions can now occur */
3810 log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
3815 * Perform recovery and re-initialize some log variables in xlog_find_tail.
3817 * Return error or zero.
3823 xfs_daddr_t head_blk, tail_blk;
3826 /* find the tail of the log */
3827 if ((error = xlog_find_tail(log, &head_blk, &tail_blk)))
3830 if (tail_blk != head_blk) {
3831 /* There used to be a comment here:
3833 * disallow recovery on read-only mounts. note -- mount
3834 * checks for ENOSPC and turns it into an intelligent
3836 * ...but this is no longer true. Now, unless you specify
3837 * NORECOVERY (in which case this function would never be
3838 * called), we just go ahead and recover. We do this all
3839 * under the vfs layer, so we can get away with it unless
3840 * the device itself is read-only, in which case we fail.
3842 if ((error = xfs_dev_is_read_only(log->l_mp, "recovery"))) {
3846 xfs_notice(log->l_mp, "Starting recovery (logdev: %s)",
3847 log->l_mp->m_logname ? log->l_mp->m_logname
3850 error = xlog_do_recover(log, head_blk, tail_blk);
3851 log->l_flags |= XLOG_RECOVERY_NEEDED;
3857 * In the first part of recovery we replay inodes and buffers and build
3858 * up the list of extent free items which need to be processed. Here
3859 * we process the extent free items and clean up the on disk unlinked
3860 * inode lists. This is separated from the first part of recovery so
3861 * that the root and real-time bitmap inodes can be read in from disk in
3862 * between the two stages. This is necessary so that we can free space
3863 * in the real-time portion of the file system.
3866 xlog_recover_finish(
3870 * Now we're ready to do the transactions needed for the
3871 * rest of recovery. Start with completing all the extent
3872 * free intent records and then process the unlinked inode
3873 * lists. At this point, we essentially run in normal mode
3874 * except that we're still performing recovery actions
3875 * rather than accepting new requests.
3877 if (log->l_flags & XLOG_RECOVERY_NEEDED) {
3879 error = xlog_recover_process_efis(log);
3881 xfs_alert(log->l_mp, "Failed to recover EFIs");
3885 * Sync the log to get all the EFIs out of the AIL.
3886 * This isn't absolutely necessary, but it helps in
3887 * case the unlink transactions would have problems
3888 * pushing the EFIs out of the way.
3890 xfs_log_force(log->l_mp, XFS_LOG_SYNC);
3892 xlog_recover_process_iunlinks(log);
3894 xlog_recover_check_summary(log);
3896 xfs_notice(log->l_mp, "Ending recovery (logdev: %s)",
3897 log->l_mp->m_logname ? log->l_mp->m_logname
3899 log->l_flags &= ~XLOG_RECOVERY_NEEDED;
3901 xfs_info(log->l_mp, "Ending clean mount");
3909 * Read all of the agf and agi counters and check that they
3910 * are consistent with the superblock counters.
3913 xlog_recover_check_summary(
3920 xfs_agnumber_t agno;
3921 __uint64_t freeblks;
3931 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
3932 error = xfs_read_agf(mp, NULL, agno, 0, &agfbp);
3934 xfs_alert(mp, "%s agf read failed agno %d error %d",
3935 __func__, agno, error);
3937 agfp = XFS_BUF_TO_AGF(agfbp);
3938 freeblks += be32_to_cpu(agfp->agf_freeblks) +
3939 be32_to_cpu(agfp->agf_flcount);
3940 xfs_buf_relse(agfbp);
3943 error = xfs_read_agi(mp, NULL, agno, &agibp);
3945 xfs_alert(mp, "%s agi read failed agno %d error %d",
3946 __func__, agno, error);
3948 struct xfs_agi *agi = XFS_BUF_TO_AGI(agibp);
3950 itotal += be32_to_cpu(agi->agi_count);
3951 ifree += be32_to_cpu(agi->agi_freecount);
3952 xfs_buf_relse(agibp);