/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"
#include "xfs_inode_item.h"
#include "xfs_extfree_item.h"
#include "xfs_trans_priv.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_quota.h"
#include "xfs_cksum.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_bmap_btree.h"
#include "xfs_error.h"
#include "xfs_dir2.h"

#define BLK_AVG(blk1, blk2)	((blk1+blk2) >> 1)

STATIC int
xlog_find_zeroed(
	struct xlog	*,
	xfs_daddr_t	*);
STATIC int
xlog_clear_stale_blocks(
	struct xlog	*,
	xfs_lsn_t);
#if defined(DEBUG)
STATIC void
xlog_recover_check_summary(
	struct xlog *);
#else
#define	xlog_recover_check_summary(log)
#endif

/*
 * This structure is used during recovery to record the buf log items which
 * have been canceled and should not be replayed.
 */
struct xfs_buf_cancel {
	xfs_daddr_t		bc_blkno;
	uint			bc_len;
	int			bc_refcount;
	struct list_head	bc_list;
};

/*
 * Sector aligned buffer routines for buffer create/read/write/access
 */

/*
 * Verify that the given count of basic blocks is a valid number of blocks
 * to specify for an operation involving the given XFS log buffer.
 * Returns nonzero if the count is valid, 0 otherwise.
 */

static inline int
xlog_buf_bbcount_valid(
	struct xlog	*log,
	int		bbcount)
{
	return bbcount > 0 && bbcount <= log->l_logBBsize;
}

/*
 * Allocate a buffer to hold log data.  The buffer needs to be able
 * to map to a range of nbblks basic blocks at any valid (basic
 * block) offset within the log.
 */
STATIC xfs_buf_t *
xlog_get_bp(
	struct xlog	*log,
	int		nbblks)
{
	struct xfs_buf	*bp;

	if (!xlog_buf_bbcount_valid(log, nbblks)) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
			nbblks);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
		return NULL;
	}

	/*
	 * We do log I/O in units of log sectors (a power-of-2
	 * multiple of the basic block size), so we round up the
	 * requested size to accommodate the basic blocks required
	 * for complete log sectors.
	 *
	 * In addition, the buffer may be used for a non-sector-
	 * aligned block offset, in which case an I/O of the
	 * requested size could extend beyond the end of the
	 * buffer.  If the requested size is only 1 basic block it
	 * will never straddle a sector boundary, so this won't be
	 * an issue.  Nor will this be a problem if the log I/O is
	 * done in basic blocks (sector size 1).  But otherwise we
	 * extend the buffer by one extra log sector to ensure
	 * there's space to accommodate this possibility.
	 */
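	/*
	 * Illustrative example (the numbers are assumptions for the sake of
	 * the example, not values taken from this code): with 4 KiB log
	 * sectors, l_sectBBsize is 8 basic blocks, so a request for
	 * nbblks == 3 is first padded to 3 + 8 = 11 and then rounded up to
	 * 16 basic blocks below.
	 */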
	if (nbblks > 1 && log->l_sectBBsize > 1)
		nbblks += log->l_sectBBsize;
	nbblks = round_up(nbblks, log->l_sectBBsize);

	bp = xfs_buf_get_uncached(log->l_mp->m_logdev_targp, nbblks, 0);
	if (bp)
		xfs_buf_unlock(bp);
	return bp;
}

STATIC void
xlog_put_bp(
	xfs_buf_t	*bp)
{
	xfs_buf_free(bp);
}

/*
 * Return the address of the start of the given block number's data
 * in a log buffer.  The buffer covers a log sector-aligned region.
 */
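/*
 * A sketch of the arithmetic below (example values are assumptions, not
 * taken from this file): with l_sectBBsize == 8, a request for blk_no == 13
 * lands at offset 13 & 7 == 5 basic blocks into the sector-aligned buffer,
 * i.e. at b_addr + BBTOB(5) bytes.
 */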
STATIC char *
xlog_align(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp)
{
	xfs_daddr_t	offset = blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1);

	ASSERT(offset + nbblks <= bp->b_length);
	return bp->b_addr + BBTOB(offset);
}


/*
 * nbblks should be uint, but oh well.  Just want to catch that 32-bit length.
 */
STATIC int
xlog_bread_noalign(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp)
{
	int		error;

	if (!xlog_buf_bbcount_valid(log, nbblks)) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
			nbblks);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
		return -EFSCORRUPTED;
	}

	blk_no = round_down(blk_no, log->l_sectBBsize);
	nbblks = round_up(nbblks, log->l_sectBBsize);

	ASSERT(nbblks > 0);
	ASSERT(nbblks <= bp->b_length);

	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
	XFS_BUF_READ(bp);
	bp->b_io_length = nbblks;
	bp->b_error = 0;

	error = xfs_buf_submit_wait(bp);
	if (error && !XFS_FORCED_SHUTDOWN(log->l_mp))
		xfs_buf_ioerror_alert(bp, __func__);
	return error;
}

STATIC int
xlog_bread(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp,
	char		**offset)
{
	int		error;

	error = xlog_bread_noalign(log, blk_no, nbblks, bp);
	if (error)
		return error;

	*offset = xlog_align(log, blk_no, nbblks, bp);
	return 0;
}

/*
 * Read at an offset into the buffer. Returns with the buffer in its original
 * state regardless of the result of the read.
 */
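/*
 * Note (an assumption about typical use, not stated here): this is handy
 * when a log record wraps around the end of the physical log, since the
 * caller can read the wrapped portion directly into the middle of an
 * already-populated buffer and have the original buffer mapping restored
 * afterwards.
 */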
STATIC int
xlog_bread_offset(
	struct xlog	*log,
	xfs_daddr_t	blk_no,		/* block to read from */
	int		nbblks,		/* blocks to read */
	struct xfs_buf	*bp,
	char		*offset)
{
	char		*orig_offset = bp->b_addr;
	int		orig_len = BBTOB(bp->b_length);
	int		error, error2;

	error = xfs_buf_associate_memory(bp, offset, BBTOB(nbblks));
	if (error)
		return error;

	error = xlog_bread_noalign(log, blk_no, nbblks, bp);

	/* must reset buffer pointer even on error */
	error2 = xfs_buf_associate_memory(bp, orig_offset, orig_len);
	if (error)
		return error;
	return error2;
}

/*
 * Write out the buffer at the given block for the given number of blocks.
 * The buffer is kept locked across the write and is returned locked.
 * This can only be used for synchronous log writes.
 */
STATIC int
xlog_bwrite(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp)
{
	int		error;

	if (!xlog_buf_bbcount_valid(log, nbblks)) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
			nbblks);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
		return -EFSCORRUPTED;
	}

	blk_no = round_down(blk_no, log->l_sectBBsize);
	nbblks = round_up(nbblks, log->l_sectBBsize);

	ASSERT(nbblks > 0);
	ASSERT(nbblks <= bp->b_length);

	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
	XFS_BUF_ZEROFLAGS(bp);
	xfs_buf_hold(bp);
	xfs_buf_lock(bp);
	bp->b_io_length = nbblks;
	bp->b_error = 0;

	error = xfs_bwrite(bp);
	if (error)
		xfs_buf_ioerror_alert(bp, __func__);
	xfs_buf_relse(bp);
	return error;
}

#ifdef DEBUG
/*
 * dump debug superblock and log record information
 */
STATIC void
xlog_header_check_dump(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	xfs_debug(mp, "%s:  SB : uuid = %pU, fmt = %d",
		__func__, &mp->m_sb.sb_uuid, XLOG_FMT);
	xfs_debug(mp, "    log : uuid = %pU, fmt = %d",
		&head->h_fs_uuid, be32_to_cpu(head->h_fmt));
}
#else
#define xlog_header_check_dump(mp, head)
#endif

/*
 * check log record header for recovery
 */
STATIC int
xlog_header_check_recover(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

	/*
	 * IRIX doesn't write the h_fmt field and leaves it zeroed
	 * (XLOG_FMT_UNKNOWN). This stops us from trying to recover
	 * a dirty log created in IRIX.
	 */
	if (unlikely(head->h_fmt != cpu_to_be32(XLOG_FMT))) {
		xfs_warn(mp,
	"dirty log written in incompatible format - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_recover(1)",
				 XFS_ERRLEVEL_HIGH, mp);
		return -EFSCORRUPTED;
	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
		xfs_warn(mp,
	"dirty log entry has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_recover(2)",
				 XFS_ERRLEVEL_HIGH, mp);
		return -EFSCORRUPTED;
	}
	return 0;
}

/*
 * read the head block of the log and check the header
 */
STATIC int
xlog_header_check_mount(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

	if (uuid_is_nil(&head->h_fs_uuid)) {
		/*
		 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
		 * h_fs_uuid is nil, we assume this log was last mounted
		 * by IRIX and continue.
		 */
		xfs_warn(mp, "nil uuid in log - IRIX style log");
	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
		xfs_warn(mp, "log has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_mount",
				 XFS_ERRLEVEL_HIGH, mp);
		return -EFSCORRUPTED;
	}
	return 0;
}

STATIC void
xlog_recover_iodone(
	struct xfs_buf	*bp)
{
	if (bp->b_error) {
		/*
		 * We're not going to bother about retrying
		 * this during recovery. One strike!
		 */
		if (!XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
			xfs_buf_ioerror_alert(bp, __func__);
			xfs_force_shutdown(bp->b_target->bt_mount,
						SHUTDOWN_META_IO_ERROR);
		}
	}
	bp->b_iodone = NULL;
	xfs_buf_ioend(bp);
}

/*
 * This routine finds (to an approximation) the first block in the physical
 * log which contains the given cycle.  It uses a binary search algorithm.
 * Note that the algorithm can not be perfect because the disk will not
 * necessarily be perfect.
 */
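/*
 * A worked example of the search below (the block cycle numbers are assumed
 * for illustration only): if blocks 0-6 carry cycles 9 9 9 9 8 8 8, then a
 * search for cycle == 8 with first_blk == 0 and *last_blk == 6 converges on
 * *last_blk == 4, the first block stamped with the wanted cycle.
 */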
STATIC int
xlog_find_cycle_start(
	struct xlog	*log,
	struct xfs_buf	*bp,
	xfs_daddr_t	first_blk,
	xfs_daddr_t	*last_blk,
	uint		cycle)
{
	char		*offset;
	xfs_daddr_t	mid_blk;
	xfs_daddr_t	end_blk;
	uint		mid_cycle;
	int		error;

	end_blk = *last_blk;
	mid_blk = BLK_AVG(first_blk, end_blk);
	while (mid_blk != first_blk && mid_blk != end_blk) {
		error = xlog_bread(log, mid_blk, 1, bp, &offset);
		if (error)
			return error;
		mid_cycle = xlog_get_cycle(offset);
		if (mid_cycle == cycle)
			end_blk = mid_blk;	/* last_half_cycle == mid_cycle */
		else
			first_blk = mid_blk;	/* first_half_cycle == mid_cycle */
		mid_blk = BLK_AVG(first_blk, end_blk);
	}
	ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
	       (mid_blk == end_blk && mid_blk-1 == first_blk));

	*last_blk = end_blk;

	return 0;
}

/*
 * Check that a range of blocks does not contain stop_on_cycle_no.
 * Fill in *new_blk with the block offset where such a block is
 * found, or with -1 (an invalid block number) if there is no such
 * block in the range.  The scan needs to occur from front to back
 * and the pointer into the region must be updated since a later
 * routine will need to perform another test.
 */
STATIC int
xlog_find_verify_cycle(
	struct xlog	*log,
	xfs_daddr_t	start_blk,
	int		nbblks,
	uint		stop_on_cycle_no,
	xfs_daddr_t	*new_blk)
{
	xfs_daddr_t	i, j;
	uint		cycle;
	xfs_buf_t	*bp;
	xfs_daddr_t	bufblks;
	char		*buf = NULL;
	int		error = 0;

	/*
	 * Greedily allocate a buffer big enough to handle the full
	 * range of basic blocks we'll be examining.  If that fails,
	 * try a smaller size.  We need to be able to read at least
	 * a log sector, or we're out of luck.
	 */
	bufblks = 1 << ffs(nbblks);
	while (bufblks > log->l_logBBsize)
		bufblks >>= 1;
	while (!(bp = xlog_get_bp(log, bufblks))) {
		bufblks >>= 1;
		if (bufblks < log->l_sectBBsize)
			return -ENOMEM;
	}

	for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
		int	bcount;

		bcount = min(bufblks, (start_blk + nbblks - i));

		error = xlog_bread(log, i, bcount, bp, &buf);
		if (error)
			goto out;

		for (j = 0; j < bcount; j++) {
			cycle = xlog_get_cycle(buf);
			if (cycle == stop_on_cycle_no) {
				*new_blk = i+j;
				goto out;
			}

			buf += BBSIZE;
		}
	}

	*new_blk = -1;

out:
	xlog_put_bp(bp);
	return error;
}

/*
 * Potentially backup over partial log record write.
 *
 * In the typical case, last_blk is the number of the block directly after
 * a good log record.  Therefore, we subtract one to get the block number
 * of the last block in the given buffer.  extra_bblks contains the number
 * of blocks we would have read on a previous read.  This happens when the
 * last log record is split over the end of the physical log.
 *
 * extra_bblks is the number of blocks potentially verified on a previous
 * call to this routine.
 */
STATIC int
xlog_find_verify_log_record(
	struct xlog		*log,
	xfs_daddr_t		start_blk,
	xfs_daddr_t		*last_blk,
	int			extra_bblks)
{
	xfs_daddr_t		i;
	xfs_buf_t		*bp;
	char			*offset = NULL;
	xlog_rec_header_t	*head = NULL;
	int			error = 0;
	int			smallmem = 0;
	int			num_blks = *last_blk - start_blk;
	int			xhdrs;

	ASSERT(start_blk != 0 || *last_blk != start_blk);

	if (!(bp = xlog_get_bp(log, num_blks))) {
		if (!(bp = xlog_get_bp(log, 1)))
			return -ENOMEM;
		smallmem = 1;
	} else {
		error = xlog_bread(log, start_blk, num_blks, bp, &offset);
		if (error)
			goto out;
		offset += ((num_blks - 1) << BBSHIFT);
	}

	for (i = (*last_blk) - 1; i >= 0; i--) {
		if (i < start_blk) {
			/* valid log record not found */
			xfs_warn(log->l_mp,
		"Log inconsistent (didn't find previous header)");
			ASSERT(0);
			error = -EIO;
			goto out;
		}

		if (smallmem) {
			error = xlog_bread(log, i, 1, bp, &offset);
			if (error)
				goto out;
		}

		head = (xlog_rec_header_t *)offset;

		if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
			break;

		if (!smallmem)
			offset -= BBSIZE;
	}

	/*
	 * We hit the beginning of the physical log & still no header.  Return
	 * to caller.  If caller can handle a return of -1, then this routine
	 * will be called again for the end of the physical log.
	 */
	if (i == -1) {
		error = 1;
		goto out;
	}

	/*
	 * We have the final block of the good log (the first block
	 * of the log record _before_ the head), so we check the uuid.
	 */
	if ((error = xlog_header_check_mount(log->l_mp, head)))
		goto out;

	/*
	 * We may have found a log record header before we expected one.
	 * last_blk will be the 1st block # with a given cycle #.  We may end
	 * up reading an entire log record.  In this case, we don't want to
	 * reset last_blk.  Only when last_blk points in the middle of a log
	 * record do we update last_blk.
	 */
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		uint	h_size = be32_to_cpu(head->h_size);

		xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE;
		if (h_size % XLOG_HEADER_CYCLE_SIZE)
			xhdrs++;
	} else {
		xhdrs = 1;
	}
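	/*
	 * Illustrative arithmetic (the sizes here are assumptions for the
	 * example, not values read from this code): a v2 log record with
	 * h_size of 64 KiB and a 32 KiB XLOG_HEADER_CYCLE_SIZE spans
	 * xhdrs == 2 header blocks, and the length check below accounts
	 * for both of them.
	 */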

	if (*last_blk - i + extra_bblks !=
	    BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
		*last_blk = i;

out:
	xlog_put_bp(bp);
	return error;
}

/*
 * Head is defined to be the point of the log where the next log write
 * could go.  This means that incomplete LR writes at the end are
 * eliminated when calculating the head.  We aren't guaranteed that previous
 * LR have complete transactions.  We only know that a cycle number of
 * current cycle number -1 won't be present in the log if we start writing
 * from our current block number.
 *
 * last_blk contains the block number of the first block with a given
 * cycle number.
 *
 * Return: zero if normal, non-zero if error.
 */
STATIC int
xlog_find_head(
	struct xlog	*log,
	xfs_daddr_t	*return_head_blk)
{
	xfs_buf_t	*bp;
	char		*offset;
	xfs_daddr_t	new_blk, first_blk, start_blk, last_blk, head_blk;
	int		num_scan_bblks;
	uint		first_half_cycle, last_half_cycle;
	uint		stop_on_cycle;
	int		error, log_bbnum = log->l_logBBsize;

	/* Is the end of the log device zeroed? */
	error = xlog_find_zeroed(log, &first_blk);
	if (error < 0) {
		xfs_warn(log->l_mp, "empty log check failed");
		return error;
	}
	if (error == 1) {
		*return_head_blk = first_blk;

		/* Is the whole lot zeroed? */
		if (!first_blk) {
			/* Linux XFS shouldn't generate totally zeroed logs -
			 * mkfs etc write a dummy unmount record to a fresh
			 * log so we can store the uuid in there
			 */
			xfs_warn(log->l_mp, "totally zeroed log");
		}

		return 0;
	}

	first_blk = 0;			/* get cycle # of 1st block */
	bp = xlog_get_bp(log, 1);
	if (!bp)
		return -ENOMEM;

	error = xlog_bread(log, 0, 1, bp, &offset);
	if (error)
		goto bp_err;

	first_half_cycle = xlog_get_cycle(offset);

	last_blk = head_blk = log_bbnum - 1;	/* get cycle # of last block */
	error = xlog_bread(log, last_blk, 1, bp, &offset);
	if (error)
		goto bp_err;

	last_half_cycle = xlog_get_cycle(offset);
	ASSERT(last_half_cycle != 0);

	/*
	 * If the 1st half cycle number is equal to the last half cycle number,
	 * then the entire log is stamped with the same cycle number.  In this
	 * case, head_blk can't be set to zero (which makes sense).  The below
	 * math doesn't work out properly with head_blk equal to zero.  Instead,
	 * we set it to log_bbnum which is an invalid block number, but this
	 * value makes the math correct.  If head_blk doesn't change through
| 673 | * all the tests below, *head_blk is set to zero at the very end rather |
| 674 | * than log_bbnum. In a sense, log_bbnum and zero are the same block |
| 675 | * in a circular file. |
| 676 | */ |
| 677 | if (first_half_cycle == last_half_cycle) { |
| 678 | /* |
| 679 | * In this case we believe that the entire log should have |
| 680 | * cycle number last_half_cycle. We need to scan backwards |
| 681 | * from the end verifying that there are no holes still |
| 682 | * containing last_half_cycle - 1. If we find such a hole, |
| 683 | * then the start of that hole will be the new head. The |
| 684 | * simple case looks like |
| 685 | * x | x ... | x - 1 | x |
| 686 | * Another case that fits this picture would be |
| 687 | * x | x + 1 | x ... | x |
Nathan Scott | c41564b | 2006-03-29 08:55:14 +1000 | [diff] [blame] | 688 | * In this case the head really is somewhere at the end of the |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 689 | * log, as one of the latest writes at the beginning was |
| 690 | * incomplete. |
| 691 | * One more case is |
| 692 | * x | x + 1 | x ... | x - 1 | x |
| 693 | * This is really the combination of the above two cases, and |
| 694 | * the head has to end up at the start of the x-1 hole at the |
| 695 | * end of the log. |
| 696 | * |
| 697 | * In the 256k log case, we will read from the beginning to the |
| 698 | * end of the log and search for cycle numbers equal to x-1. |
| 699 | * We don't worry about the x+1 blocks that we encounter, |
| 700 | * because we know that they cannot be the head since the log |
| 701 | * started with x. |
| 702 | */ |
| 703 | head_blk = log_bbnum; |
| 704 | stop_on_cycle = last_half_cycle - 1; |
| 705 | } else { |
| 706 | /* |
| 707 | * In this case we want to find the first block with cycle |
| 708 | * number matching last_half_cycle. We expect the log to be |
| 709 | * some variation on |
Alex Elder | 3f943d8 | 2010-04-15 18:17:34 +0000 | [diff] [blame] | 710 | * x + 1 ... | x ... | x |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 711 | * The first block with cycle number x (last_half_cycle) will |
| 712 | * be where the new head belongs. First we do a binary search |
| 713 | * for the first occurrence of last_half_cycle. The binary |
| 714 | * search may not be totally accurate, so then we scan back |
| 715 | * from there looking for occurrences of last_half_cycle before |
| 716 | * us. If that backwards scan wraps around the beginning of |
| 717 | * the log, then we look for occurrences of last_half_cycle - 1 |
| 718 | * at the end of the log. The cases we're looking for look |
| 719 | * like |
Alex Elder | 3f943d8 | 2010-04-15 18:17:34 +0000 | [diff] [blame] | 720 | * v binary search stopped here |
| 721 | * x + 1 ... | x | x + 1 | x ... | x |
| 722 | * ^ but we want to locate this spot |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 723 | * or |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 724 | * <---------> less than scan distance |
Alex Elder | 3f943d8 | 2010-04-15 18:17:34 +0000 | [diff] [blame] | 725 | * x + 1 ... | x ... | x - 1 | x |
| 726 | * ^ we want to locate this spot |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 727 | */ |
| 728 | stop_on_cycle = last_half_cycle; |
| 729 | if ((error = xlog_find_cycle_start(log, bp, first_blk, |
| 730 | &head_blk, last_half_cycle))) |
| 731 | goto bp_err; |
| 732 | } |
| 733 | |
| 734 | /* |
| 735 | * Now validate the answer. Scan back some number of maximum possible |
| 736 | * blocks and make sure each one has the expected cycle number. The |
| 737 | * maximum is determined by the total possible amount of buffering |
| 738 | * in the in-core log. The following number can be made tighter if |
| 739 | * we actually look at the block size of the filesystem. |
| 740 | */ |
| 741 | num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log); |
| 742 | if (head_blk >= num_scan_bblks) { |
| 743 | /* |
| 744 | * We are guaranteed that the entire check can be performed |
| 745 | * in one buffer. |
| 746 | */ |
| 747 | start_blk = head_blk - num_scan_bblks; |
| 748 | if ((error = xlog_find_verify_cycle(log, |
| 749 | start_blk, num_scan_bblks, |
| 750 | stop_on_cycle, &new_blk))) |
| 751 | goto bp_err; |
| 752 | if (new_blk != -1) |
| 753 | head_blk = new_blk; |
| 754 | } else { /* need to read 2 parts of log */ |
| 755 | /* |
| 756 | * We are going to scan backwards in the log in two parts. |
| 757 | * First we scan the physical end of the log. In this part |
| 758 | * of the log, we are looking for blocks with cycle number |
| 759 | * last_half_cycle - 1. |
| 760 | * If we find one, then we know that the log starts there, as |
| 761 | * we've found a hole that didn't get written in going around |
| 762 | * the end of the physical log. The simple case for this is |
| 763 | * x + 1 ... | x ... | x - 1 | x |
| 764 | * <---------> less than scan distance |
| 765 | * If all of the blocks at the end of the log have cycle number |
| 766 | * last_half_cycle, then we check the blocks at the start of |
| 767 | * the log looking for occurrences of last_half_cycle. If we |
| 768 | * find one, then our current estimate for the location of the |
| 769 | * first occurrence of last_half_cycle is wrong and we move |
| 770 | * back to the hole we've found. This case looks like |
| 771 | * x + 1 ... | x | x + 1 | x ... |
| 772 | * ^ binary search stopped here |
| 773 | * Another case we need to handle that only occurs in 256k |
| 774 | * logs is |
| 775 | * x + 1 ... | x ... | x+1 | x ... |
| 776 | * ^ binary search stops here |
| 777 | * In a 256k log, the scan at the end of the log will see the |
| 778 | * x + 1 blocks. We need to skip past those since that is |
| 779 | * certainly not the head of the log. By searching for |
| 780 | * last_half_cycle-1 we accomplish that. |
| 781 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 782 | ASSERT(head_blk <= INT_MAX && |
Alex Elder | 3f943d8 | 2010-04-15 18:17:34 +0000 | [diff] [blame] | 783 | (xfs_daddr_t) num_scan_bblks >= head_blk); |
| 784 | start_blk = log_bbnum - (num_scan_bblks - head_blk); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 785 | if ((error = xlog_find_verify_cycle(log, start_blk, |
| 786 | num_scan_bblks - (int)head_blk, |
| 787 | (stop_on_cycle - 1), &new_blk))) |
| 788 | goto bp_err; |
| 789 | if (new_blk != -1) { |
| 790 | head_blk = new_blk; |
Alex Elder | 9db127e | 2010-04-15 18:17:26 +0000 | [diff] [blame] | 791 | goto validate_head; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 792 | } |
| 793 | |
| 794 | /* |
| 795 | * Scan beginning of log now. The last part of the physical |
| 796 | * log is good. This scan needs to verify that it doesn't find |
| 797 | * the last_half_cycle. |
| 798 | */ |
| 799 | start_blk = 0; |
| 800 | ASSERT(head_blk <= INT_MAX); |
| 801 | if ((error = xlog_find_verify_cycle(log, |
| 802 | start_blk, (int)head_blk, |
| 803 | stop_on_cycle, &new_blk))) |
| 804 | goto bp_err; |
| 805 | if (new_blk != -1) |
| 806 | head_blk = new_blk; |
| 807 | } |
| 808 | |
Alex Elder | 9db127e | 2010-04-15 18:17:26 +0000 | [diff] [blame] | 809 | validate_head: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 810 | /* |
| 811 | * Now we need to make sure head_blk is not pointing to a block in |
| 812 | * the middle of a log record. |
| 813 | */ |
| 814 | num_scan_bblks = XLOG_REC_SHIFT(log); |
| 815 | if (head_blk >= num_scan_bblks) { |
| 816 | start_blk = head_blk - num_scan_bblks; /* don't read head_blk */ |
| 817 | |
| 818 | /* start ptr at last block ptr before head_blk */ |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 819 | error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0); |
| 820 | if (error == 1) |
| 821 | error = -EIO; |
| 822 | if (error) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 823 | goto bp_err; |
| 824 | } else { |
| 825 | start_blk = 0; |
| 826 | ASSERT(head_blk <= INT_MAX); |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 827 | error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0); |
| 828 | if (error < 0) |
| 829 | goto bp_err; |
| 830 | if (error == 1) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 831 | /* We hit the beginning of the log during our search */ |
Alex Elder | 3f943d8 | 2010-04-15 18:17:34 +0000 | [diff] [blame] | 832 | start_blk = log_bbnum - (num_scan_bblks - head_blk); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 833 | new_blk = log_bbnum; |
| 834 | ASSERT(start_blk <= INT_MAX && |
| 835 | (xfs_daddr_t) log_bbnum-start_blk >= 0); |
| 836 | ASSERT(head_blk <= INT_MAX); |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 837 | error = xlog_find_verify_log_record(log, start_blk, |
| 838 | &new_blk, (int)head_blk); |
| 839 | if (error == 1) |
| 840 | error = -EIO; |
| 841 | if (error) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 842 | goto bp_err; |
| 843 | if (new_blk != log_bbnum) |
| 844 | head_blk = new_blk; |
| 845 | } else if (error) |
| 846 | goto bp_err; |
| 847 | } |
| 848 | |
| 849 | xlog_put_bp(bp); |
| 850 | if (head_blk == log_bbnum) |
| 851 | *return_head_blk = 0; |
| 852 | else |
| 853 | *return_head_blk = head_blk; |
| 854 | /* |
| 855 | * When returning here, we have a good block number. Bad block |
| 856 | * means that during a previous crash, we didn't have a clean break |
| 857 | * from cycle number N to cycle number N-1. In this case, we need |
| 858 | * to find the first block with cycle number N-1. |
| 859 | */ |
| 860 | return 0; |
| 861 | |
| 862 | bp_err: |
| 863 | xlog_put_bp(bp); |
| 864 | |
| 865 | if (error) |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 866 | xfs_warn(log->l_mp, "failed to find log head"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 867 | return error; |
| 868 | } |
| 869 | |
| 870 | /* |
| 871 | * Find the sync block number or the tail of the log. |
| 872 | * |
| 873 | * This will be the block number of the last record to have its |
| 874 | * associated buffers synced to disk. Every log record header has |
| 875 | * a sync lsn embedded in it. LSNs hold block numbers, so it is easy |
| 876 | * to get a sync block number. The only concern is to figure out which |
| 877 | * log record header to believe. |
| 878 | * |
| 879 | * The following algorithm uses the log record header with the largest |
| 880 | * lsn. The entire log record does not need to be valid. We only care |
| 881 | * that the header is valid. |
| 882 | * |
| 883 | * We could speed up search by using current head_blk buffer, but it is not |
| 884 | * available. |
| 885 | */ |
Eric Sandeen | 5d77c0d | 2009-11-19 15:52:00 +0000 | [diff] [blame] | 886 | STATIC int |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 887 | xlog_find_tail( |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 888 | struct xlog *log, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 889 | xfs_daddr_t *head_blk, |
Eric Sandeen | 65be605 | 2006-01-11 15:34:19 +1100 | [diff] [blame] | 890 | xfs_daddr_t *tail_blk) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 891 | { |
| 892 | xlog_rec_header_t *rhead; |
| 893 | xlog_op_header_t *op_head; |
Christoph Hellwig | b2a922c | 2015-06-22 09:45:10 +1000 | [diff] [blame] | 894 | char *offset = NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 895 | xfs_buf_t *bp; |
| 896 | int error, i, found; |
| 897 | xfs_daddr_t umount_data_blk; |
| 898 | xfs_daddr_t after_umount_blk; |
| 899 | xfs_lsn_t tail_lsn; |
| 900 | int hblks; |
| 901 | |
| 902 | found = 0; |
| 903 | |
| 904 | /* |
| 905 | * Find previous log record |
| 906 | */ |
| 907 | if ((error = xlog_find_head(log, head_blk))) |
| 908 | return error; |
| 909 | |
| 910 | bp = xlog_get_bp(log, 1); |
| 911 | if (!bp) |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 912 | return -ENOMEM; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 913 | if (*head_blk == 0) { /* special case */ |
Christoph Hellwig | 076e6ac | 2009-03-16 08:24:13 +0100 | [diff] [blame] | 914 | error = xlog_bread(log, 0, 1, bp, &offset); |
| 915 | if (error) |
Alex Elder | 9db127e | 2010-04-15 18:17:26 +0000 | [diff] [blame] | 916 | goto done; |
Christoph Hellwig | 076e6ac | 2009-03-16 08:24:13 +0100 | [diff] [blame] | 917 | |
Christoph Hellwig | 03bea6f | 2007-10-12 10:58:05 +1000 | [diff] [blame] | 918 | if (xlog_get_cycle(offset) == 0) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 919 | *tail_blk = 0; |
| 920 | /* leave all other log inited values alone */ |
Alex Elder | 9db127e | 2010-04-15 18:17:26 +0000 | [diff] [blame] | 921 | goto done; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 922 | } |
| 923 | } |
| 924 | |
| 925 | /* |
| 926 | * Search backwards looking for log record header block |
| 927 | */ |
| 928 | ASSERT(*head_blk < INT_MAX); |
| 929 | for (i = (int)(*head_blk) - 1; i >= 0; i--) { |
Christoph Hellwig | 076e6ac | 2009-03-16 08:24:13 +0100 | [diff] [blame] | 930 | error = xlog_bread(log, i, 1, bp, &offset); |
| 931 | if (error) |
Alex Elder | 9db127e | 2010-04-15 18:17:26 +0000 | [diff] [blame] | 932 | goto done; |
Christoph Hellwig | 076e6ac | 2009-03-16 08:24:13 +0100 | [diff] [blame] | 933 | |
Christoph Hellwig | 69ef921 | 2011-07-08 14:36:05 +0200 | [diff] [blame] | 934 | if (*(__be32 *)offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 935 | found = 1; |
| 936 | break; |
| 937 | } |
| 938 | } |
| 939 | /* |
| 940 | * If we haven't found the log record header block, start looking |
| 941 | * again from the end of the physical log. XXXmiken: There should be |
| 942 | * a check here to make sure we didn't search more than N blocks in |
| 943 | * the previous code. |
| 944 | */ |
| 945 | if (!found) { |
| 946 | for (i = log->l_logBBsize - 1; i >= (int)(*head_blk); i--) { |
Christoph Hellwig | 076e6ac | 2009-03-16 08:24:13 +0100 | [diff] [blame] | 947 | error = xlog_bread(log, i, 1, bp, &offset); |
| 948 | if (error) |
Alex Elder | 9db127e | 2010-04-15 18:17:26 +0000 | [diff] [blame] | 949 | goto done; |
Christoph Hellwig | 076e6ac | 2009-03-16 08:24:13 +0100 | [diff] [blame] | 950 | |
Christoph Hellwig | 69ef921 | 2011-07-08 14:36:05 +0200 | [diff] [blame] | 951 | if (*(__be32 *)offset == |
| 952 | cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 953 | found = 2; |
| 954 | break; |
| 955 | } |
| 956 | } |
| 957 | } |
| 958 | if (!found) { |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 959 | xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__); |
Eric Sandeen | 050a195 | 2013-07-31 20:33:47 -0500 | [diff] [blame] | 960 | xlog_put_bp(bp); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 961 | ASSERT(0); |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 962 | return -EIO; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 963 | } |
| 964 | |
| 965 | /* find blk_no of tail of log */ |
| 966 | rhead = (xlog_rec_header_t *)offset; |
Christoph Hellwig | b53e675 | 2007-10-12 10:59:34 +1000 | [diff] [blame] | 967 | *tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 968 | |
| 969 | /* |
| 970 | * Reset log values according to the state of the log when we |
| 971 | * crashed. In the case where head_blk == 0, we bump curr_cycle |
 | 972 |  * by one because the next write starts a new cycle rather than |
| 973 | * continuing the cycle of the last good log record. At this |
| 974 | * point we have guaranteed that all partial log records have been |
| 975 | * accounted for. Therefore, we know that the last good log record |
| 976 | * written was complete and ended exactly on the end boundary |
| 977 | * of the physical log. |
| 978 | */ |
| 979 | log->l_prev_block = i; |
| 980 | log->l_curr_block = (int)*head_blk; |
Christoph Hellwig | b53e675 | 2007-10-12 10:59:34 +1000 | [diff] [blame] | 981 | log->l_curr_cycle = be32_to_cpu(rhead->h_cycle); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 982 | if (found == 2) |
| 983 | log->l_curr_cycle++; |
Dave Chinner | 1c3cb9e | 2010-12-21 12:28:39 +1100 | [diff] [blame] | 984 | atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn)); |
Dave Chinner | 84f3c68 | 2010-12-03 22:11:29 +1100 | [diff] [blame] | 985 | atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn)); |
Christoph Hellwig | 2849696 | 2012-02-20 02:31:25 +0000 | [diff] [blame] | 986 | xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle, |
Dave Chinner | a69ed03 | 2010-12-21 12:08:20 +1100 | [diff] [blame] | 987 | BBTOB(log->l_curr_block)); |
Christoph Hellwig | 2849696 | 2012-02-20 02:31:25 +0000 | [diff] [blame] | 988 | xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle, |
Dave Chinner | a69ed03 | 2010-12-21 12:08:20 +1100 | [diff] [blame] | 989 | BBTOB(log->l_curr_block)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 990 | |
| 991 | /* |
| 992 | * Look for unmount record. If we find it, then we know there |
| 993 | * was a clean unmount. Since 'i' could be the last block in |
| 994 | * the physical log, we convert to a log block before comparing |
| 995 | * to the head_blk. |
| 996 | * |
 | 997 |  * Save the current tail lsn to pass to |
| 998 | * xlog_clear_stale_blocks() below. We won't want to clear the |
| 999 | * unmount record if there is one, so we pass the lsn of the |
| 1000 | * unmount record rather than the block after it. |
| 1001 | */ |
Eric Sandeen | 6211870 | 2008-03-06 13:44:28 +1100 | [diff] [blame] | 1002 | if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) { |
Christoph Hellwig | b53e675 | 2007-10-12 10:59:34 +1000 | [diff] [blame] | 1003 | int h_size = be32_to_cpu(rhead->h_size); |
| 1004 | int h_version = be32_to_cpu(rhead->h_version); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1005 | |
| 1006 | if ((h_version & XLOG_VERSION_2) && |
| 1007 | (h_size > XLOG_HEADER_CYCLE_SIZE)) { |
| 1008 | hblks = h_size / XLOG_HEADER_CYCLE_SIZE; |
| 1009 | if (h_size % XLOG_HEADER_CYCLE_SIZE) |
| 1010 | hblks++; |
| 1011 | } else { |
| 1012 | hblks = 1; |
| 1013 | } |
| 1014 | } else { |
| 1015 | hblks = 1; |
| 1016 | } |
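 | |	/*
 | |	 * Illustrative example (made-up sizes, not read from this log): a
 | |	 * v2 log written with 256k iclog buffers carries h_size = 262144,
 | |	 * so hblks = 262144 / XLOG_HEADER_CYCLE_SIZE = 8 header blocks,
 | |	 * while a v1 log always uses a single 512 byte header block.
 | |	 */ |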
| 1017 | after_umount_blk = (i + hblks + (int) |
Christoph Hellwig | b53e675 | 2007-10-12 10:59:34 +1000 | [diff] [blame] | 1018 | BTOBB(be32_to_cpu(rhead->h_len))) % log->l_logBBsize; |
Dave Chinner | 1c3cb9e | 2010-12-21 12:28:39 +1100 | [diff] [blame] | 1019 | tail_lsn = atomic64_read(&log->l_tail_lsn); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1020 | if (*head_blk == after_umount_blk && |
Christoph Hellwig | b53e675 | 2007-10-12 10:59:34 +1000 | [diff] [blame] | 1021 | be32_to_cpu(rhead->h_num_logops) == 1) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1022 | umount_data_blk = (i + hblks) % log->l_logBBsize; |
Christoph Hellwig | 076e6ac | 2009-03-16 08:24:13 +0100 | [diff] [blame] | 1023 | error = xlog_bread(log, umount_data_blk, 1, bp, &offset); |
| 1024 | if (error) |
Alex Elder | 9db127e | 2010-04-15 18:17:26 +0000 | [diff] [blame] | 1025 | goto done; |
Christoph Hellwig | 076e6ac | 2009-03-16 08:24:13 +0100 | [diff] [blame] | 1026 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1027 | op_head = (xlog_op_header_t *)offset; |
| 1028 | if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) { |
| 1029 | /* |
| 1030 | * Set tail and last sync so that newly written |
| 1031 | * log records will point recovery to after the |
| 1032 | * current unmount record. |
| 1033 | */ |
Dave Chinner | 1c3cb9e | 2010-12-21 12:28:39 +1100 | [diff] [blame] | 1034 | xlog_assign_atomic_lsn(&log->l_tail_lsn, |
| 1035 | log->l_curr_cycle, after_umount_blk); |
| 1036 | xlog_assign_atomic_lsn(&log->l_last_sync_lsn, |
| 1037 | log->l_curr_cycle, after_umount_blk); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1038 | *tail_blk = after_umount_blk; |
David Chinner | 92821e2 | 2007-05-24 15:26:31 +1000 | [diff] [blame] | 1039 | |
| 1040 | /* |
| 1041 | * Note that the unmount was clean. If the unmount |
| 1042 | * was not clean, we need to know this to rebuild the |
| 1043 | * superblock counters from the perag headers if we |
| 1044 | * have a filesystem using non-persistent counters. |
| 1045 | */ |
| 1046 | log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1047 | } |
| 1048 | } |
| 1049 | |
| 1050 | /* |
| 1051 | * Make sure that there are no blocks in front of the head |
| 1052 | * with the same cycle number as the head. This can happen |
| 1053 | * because we allow multiple outstanding log writes concurrently, |
| 1054 | * and the later writes might make it out before earlier ones. |
| 1055 | * |
| 1056 | * We use the lsn from before modifying it so that we'll never |
| 1057 | * overwrite the unmount record after a clean unmount. |
| 1058 | * |
| 1059 | * Do this only if we are going to recover the filesystem |
| 1060 | * |
| 1061 | * NOTE: This used to say "if (!readonly)" |
| 1062 | * However on Linux, we can & do recover a read-only filesystem. |
| 1063 | * We only skip recovery if NORECOVERY is specified on mount, |
| 1064 | * in which case we would not be here. |
| 1065 | * |
| 1066 | * But... if the -device- itself is readonly, just skip this. |
| 1067 | * We can't recover this device anyway, so it won't matter. |
| 1068 | */ |
Alex Elder | 9db127e | 2010-04-15 18:17:26 +0000 | [diff] [blame] | 1069 | if (!xfs_readonly_buftarg(log->l_mp->m_logdev_targp)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1070 | error = xlog_clear_stale_blocks(log, tail_lsn); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1071 | |
Alex Elder | 9db127e | 2010-04-15 18:17:26 +0000 | [diff] [blame] | 1072 | done: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1073 | xlog_put_bp(bp); |
| 1074 | |
| 1075 | if (error) |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 1076 | xfs_warn(log->l_mp, "failed to locate log tail"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1077 | return error; |
| 1078 | } |
| 1079 | |
| 1080 | /* |
| 1081 | * Is the log zeroed at all? |
| 1082 | * |
| 1083 | * The last binary search should be changed to perform an X block read |
| 1084 | * once X becomes small enough. You can then search linearly through |
| 1085 | * the X blocks. This will cut down on the number of reads we need to do. |
| 1086 | * |
| 1087 | * If the log is partially zeroed, this routine will pass back the blkno |
| 1088 | * of the first block with cycle number 0. It won't have a complete LR |
| 1089 | * preceding it. |
| 1090 | * |
| 1091 | * Return: |
| 1092 | * 0 => the log is completely written to |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 1093 | * 1 => use *blk_no as the first block of the log |
| 1094 | * <0 => error has occurred |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1095 | */ |
David Chinner | a8272ce | 2007-11-23 16:28:09 +1100 | [diff] [blame] | 1096 | STATIC int |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1097 | xlog_find_zeroed( |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 1098 | struct xlog *log, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1099 | xfs_daddr_t *blk_no) |
| 1100 | { |
| 1101 | xfs_buf_t *bp; |
Christoph Hellwig | b2a922c | 2015-06-22 09:45:10 +1000 | [diff] [blame] | 1102 | char *offset; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1103 | uint first_cycle, last_cycle; |
| 1104 | xfs_daddr_t new_blk, last_blk, start_blk; |
| 1105 | xfs_daddr_t num_scan_bblks; |
| 1106 | int error, log_bbnum = log->l_logBBsize; |
| 1107 | |
Nathan Scott | 6fdf8cc | 2006-06-28 10:13:52 +1000 | [diff] [blame] | 1108 | *blk_no = 0; |
| 1109 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1110 | /* check totally zeroed log */ |
| 1111 | bp = xlog_get_bp(log, 1); |
| 1112 | if (!bp) |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 1113 | return -ENOMEM; |
Christoph Hellwig | 076e6ac | 2009-03-16 08:24:13 +0100 | [diff] [blame] | 1114 | error = xlog_bread(log, 0, 1, bp, &offset); |
| 1115 | if (error) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1116 | goto bp_err; |
Christoph Hellwig | 076e6ac | 2009-03-16 08:24:13 +0100 | [diff] [blame] | 1117 | |
Christoph Hellwig | 03bea6f | 2007-10-12 10:58:05 +1000 | [diff] [blame] | 1118 | first_cycle = xlog_get_cycle(offset); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1119 | if (first_cycle == 0) { /* completely zeroed log */ |
| 1120 | *blk_no = 0; |
| 1121 | xlog_put_bp(bp); |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 1122 | return 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1123 | } |
| 1124 | |
| 1125 | /* check partially zeroed log */ |
Christoph Hellwig | 076e6ac | 2009-03-16 08:24:13 +0100 | [diff] [blame] | 1126 | error = xlog_bread(log, log_bbnum-1, 1, bp, &offset); |
| 1127 | if (error) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1128 | goto bp_err; |
Christoph Hellwig | 076e6ac | 2009-03-16 08:24:13 +0100 | [diff] [blame] | 1129 | |
Christoph Hellwig | 03bea6f | 2007-10-12 10:58:05 +1000 | [diff] [blame] | 1130 | last_cycle = xlog_get_cycle(offset); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1131 | if (last_cycle != 0) { /* log completely written to */ |
| 1132 | xlog_put_bp(bp); |
| 1133 | return 0; |
| 1134 | } else if (first_cycle != 1) { |
| 1135 | /* |
| 1136 | * If the cycle of the last block is zero, the cycle of |
| 1137 | * the first block must be 1. If it's not, maybe we're |
| 1138 | * not looking at a log... Bail out. |
| 1139 | */ |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 1140 | xfs_warn(log->l_mp, |
| 1141 | "Log inconsistent or not a log (last==0, first!=1)"); |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 1142 | error = -EINVAL; |
Eric Sandeen | 5d0a654 | 2013-07-31 20:32:30 -0500 | [diff] [blame] | 1143 | goto bp_err; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1144 | } |
| 1145 | |
| 1146 | /* we have a partially zeroed log */ |
| 1147 | last_blk = log_bbnum-1; |
| 1148 | if ((error = xlog_find_cycle_start(log, bp, 0, &last_blk, 0))) |
| 1149 | goto bp_err; |
| 1150 | |
| 1151 | /* |
| 1152 | * Validate the answer. Because there is no way to guarantee that |
| 1153 | * the entire log is made up of log records which are the same size, |
| 1154 | * we scan over the defined maximum blocks. At this point, the maximum |
| 1155 | * is not chosen to mean anything special. XXXmiken |
| 1156 | */ |
| 1157 | num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log); |
| 1158 | ASSERT(num_scan_bblks <= INT_MAX); |
| 1159 | |
| 1160 | if (last_blk < num_scan_bblks) |
| 1161 | num_scan_bblks = last_blk; |
| 1162 | start_blk = last_blk - num_scan_bblks; |
| 1163 | |
| 1164 | /* |
| 1165 | * We search for any instances of cycle number 0 that occur before |
| 1166 | * our current estimate of the head. What we're trying to detect is |
| 1167 | * 1 ... | 0 | 1 | 0... |
| 1168 | * ^ binary search ends here |
| 1169 | */ |
| 1170 | if ((error = xlog_find_verify_cycle(log, start_blk, |
| 1171 | (int)num_scan_bblks, 0, &new_blk))) |
| 1172 | goto bp_err; |
| 1173 | if (new_blk != -1) |
| 1174 | last_blk = new_blk; |
| 1175 | |
| 1176 | /* |
 | 1177 |  * Potentially back up over a partial log record write. We don't need |
| 1178 | * to search the end of the log because we know it is zero. |
| 1179 | */ |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 1180 | error = xlog_find_verify_log_record(log, start_blk, &last_blk, 0); |
| 1181 | if (error == 1) |
| 1182 | error = -EIO; |
| 1183 | if (error) |
| 1184 | goto bp_err; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1185 | |
| 1186 | *blk_no = last_blk; |
| 1187 | bp_err: |
| 1188 | xlog_put_bp(bp); |
| 1189 | if (error) |
| 1190 | return error; |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 1191 | return 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1192 | } |
| 1193 | |
| 1194 | /* |
| 1195 | * These are simple subroutines used by xlog_clear_stale_blocks() below |
| 1196 | * to initialize a buffer full of empty log record headers and write |
| 1197 | * them into the log. |
| 1198 | */ |
| 1199 | STATIC void |
| 1200 | xlog_add_record( |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 1201 | struct xlog *log, |
Christoph Hellwig | b2a922c | 2015-06-22 09:45:10 +1000 | [diff] [blame] | 1202 | char *buf, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1203 | int cycle, |
| 1204 | int block, |
| 1205 | int tail_cycle, |
| 1206 | int tail_block) |
| 1207 | { |
| 1208 | xlog_rec_header_t *recp = (xlog_rec_header_t *)buf; |
| 1209 | |
| 1210 | memset(buf, 0, BBSIZE); |
Christoph Hellwig | b53e675 | 2007-10-12 10:59:34 +1000 | [diff] [blame] | 1211 | recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM); |
| 1212 | recp->h_cycle = cpu_to_be32(cycle); |
| 1213 | recp->h_version = cpu_to_be32( |
Eric Sandeen | 6211870 | 2008-03-06 13:44:28 +1100 | [diff] [blame] | 1214 | xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1); |
Christoph Hellwig | b53e675 | 2007-10-12 10:59:34 +1000 | [diff] [blame] | 1215 | recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block)); |
| 1216 | recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block)); |
| 1217 | recp->h_fmt = cpu_to_be32(XLOG_FMT); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1218 | memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t)); |
| 1219 | } |
| 1220 | |
| 1221 | STATIC int |
| 1222 | xlog_write_log_records( |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 1223 | struct xlog *log, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1224 | int cycle, |
| 1225 | int start_block, |
| 1226 | int blocks, |
| 1227 | int tail_cycle, |
| 1228 | int tail_block) |
| 1229 | { |
Christoph Hellwig | b2a922c | 2015-06-22 09:45:10 +1000 | [diff] [blame] | 1230 | char *offset; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1231 | xfs_buf_t *bp; |
| 1232 | int balign, ealign; |
Alex Elder | 69ce58f | 2010-04-20 17:09:59 +1000 | [diff] [blame] | 1233 | int sectbb = log->l_sectBBsize; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1234 | int end_block = start_block + blocks; |
| 1235 | int bufblks; |
| 1236 | int error = 0; |
| 1237 | int i, j = 0; |
| 1238 | |
Alex Elder | 6881a22 | 2010-04-13 15:22:29 +1000 | [diff] [blame] | 1239 | /* |
| 1240 | * Greedily allocate a buffer big enough to handle the full |
| 1241 | * range of basic blocks to be written. If that fails, try |
| 1242 | * a smaller size. We need to be able to write at least a |
| 1243 | * log sector, or we're out of luck. |
| 1244 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1245 | bufblks = 1 << ffs(blocks); |
Dave Chinner | 81158e0 | 2012-04-27 19:45:22 +1000 | [diff] [blame] | 1246 | while (bufblks > log->l_logBBsize) |
| 1247 | bufblks >>= 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1248 | while (!(bp = xlog_get_bp(log, bufblks))) { |
| 1249 | bufblks >>= 1; |
Alex Elder | 69ce58f | 2010-04-20 17:09:59 +1000 | [diff] [blame] | 1250 | if (bufblks < sectbb) |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 1251 | return -ENOMEM; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1252 | } |
| 1253 | |
| 1254 | /* We may need to do a read at the start to fill in part of |
| 1255 | * the buffer in the starting sector not covered by the first |
| 1256 | * write below. |
| 1257 | */ |
Alex Elder | 5c17f53 | 2010-04-13 15:22:48 +1000 | [diff] [blame] | 1258 | balign = round_down(start_block, sectbb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1259 | if (balign != start_block) { |
Christoph Hellwig | 076e6ac | 2009-03-16 08:24:13 +0100 | [diff] [blame] | 1260 | error = xlog_bread_noalign(log, start_block, 1, bp); |
| 1261 | if (error) |
| 1262 | goto out_put_bp; |
| 1263 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1264 | j = start_block - balign; |
| 1265 | } |
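 | |	/*
 | |	 * Illustrative example (made-up numbers): with 4k log sectors
 | |	 * (sectbb = 8) and start_block = 13, balign is 8 and j is 5, so
 | |	 * the first five basic blocks of the buffer keep their on-disk
 | |	 * contents and new record headers are only stamped from offset j
 | |	 * onwards.
 | |	 */ |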
| 1266 | |
| 1267 | for (i = start_block; i < end_block; i += bufblks) { |
| 1268 | int bcount, endcount; |
| 1269 | |
| 1270 | bcount = min(bufblks, end_block - start_block); |
| 1271 | endcount = bcount - j; |
| 1272 | |
| 1273 | /* We may need to do a read at the end to fill in part of |
| 1274 | * the buffer in the final sector not covered by the write. |
| 1275 | * If this is the same sector as the above read, skip it. |
| 1276 | */ |
Alex Elder | 5c17f53 | 2010-04-13 15:22:48 +1000 | [diff] [blame] | 1277 | ealign = round_down(end_block, sectbb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1278 | if (j == 0 && (start_block + endcount > ealign)) { |
Chandra Seetharaman | 6292604 | 2011-07-22 23:40:15 +0000 | [diff] [blame] | 1279 | offset = bp->b_addr + BBTOB(ealign - start_block); |
Dave Chinner | 4439647 | 2011-04-21 09:34:27 +0000 | [diff] [blame] | 1280 | error = xlog_bread_offset(log, ealign, sectbb, |
| 1281 | bp, offset); |
Christoph Hellwig | 076e6ac | 2009-03-16 08:24:13 +0100 | [diff] [blame] | 1282 | if (error) |
| 1283 | break; |
| 1284 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1285 | } |
| 1286 | |
| 1287 | offset = xlog_align(log, start_block, endcount, bp); |
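 | |		/*
 | |		 * Stamp one dummy record header per basic block. Illustrative
 | |		 * example (made-up values): clearing blocks 10..12 with cycle 4
 | |		 * and a tail of (3, 100) writes headers whose h_lsn is 4:10,
 | |		 * 4:11 and 4:12, each carrying h_tail_lsn 3:100.
 | |		 */ |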
| 1288 | for (; j < endcount; j++) { |
| 1289 | xlog_add_record(log, offset, cycle, i+j, |
| 1290 | tail_cycle, tail_block); |
| 1291 | offset += BBSIZE; |
| 1292 | } |
| 1293 | error = xlog_bwrite(log, start_block, endcount, bp); |
| 1294 | if (error) |
| 1295 | break; |
| 1296 | start_block += endcount; |
| 1297 | j = 0; |
| 1298 | } |
Christoph Hellwig | 076e6ac | 2009-03-16 08:24:13 +0100 | [diff] [blame] | 1299 | |
| 1300 | out_put_bp: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1301 | xlog_put_bp(bp); |
| 1302 | return error; |
| 1303 | } |
| 1304 | |
| 1305 | /* |
| 1306 | * This routine is called to blow away any incomplete log writes out |
| 1307 | * in front of the log head. We do this so that we won't become confused |
| 1308 | * if we come up, write only a little bit more, and then crash again. |
| 1309 | * If we leave the partial log records out there, this situation could |
| 1310 | * cause us to think those partial writes are valid blocks since they |
| 1311 | * have the current cycle number. We get rid of them by overwriting them |
| 1312 | * with empty log records with the old cycle number rather than the |
| 1313 | * current one. |
| 1314 | * |
| 1315 | * The tail lsn is passed in rather than taken from |
| 1316 | * the log so that we will not write over the unmount record after a |
| 1317 | * clean unmount in a 512 block log. Doing so would leave the log without |
| 1318 | * any valid log records in it until a new one was written. If we crashed |
| 1319 | * during that time we would not be able to recover. |
| 1320 | */ |
| 1321 | STATIC int |
| 1322 | xlog_clear_stale_blocks( |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 1323 | struct xlog *log, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1324 | xfs_lsn_t tail_lsn) |
| 1325 | { |
| 1326 | int tail_cycle, head_cycle; |
| 1327 | int tail_block, head_block; |
| 1328 | int tail_distance, max_distance; |
| 1329 | int distance; |
| 1330 | int error; |
| 1331 | |
| 1332 | tail_cycle = CYCLE_LSN(tail_lsn); |
| 1333 | tail_block = BLOCK_LSN(tail_lsn); |
| 1334 | head_cycle = log->l_curr_cycle; |
| 1335 | head_block = log->l_curr_block; |
| 1336 | |
| 1337 | /* |
| 1338 | * Figure out the distance between the new head of the log |
| 1339 | * and the tail. We want to write over any blocks beyond the |
| 1340 | * head that we may have written just before the crash, but |
| 1341 | * we don't want to overwrite the tail of the log. |
| 1342 | */ |
| 1343 | if (head_cycle == tail_cycle) { |
| 1344 | /* |
| 1345 | * The tail is behind the head in the physical log, |
| 1346 | * so the distance from the head to the tail is the |
| 1347 | * distance from the head to the end of the log plus |
| 1348 | * the distance from the beginning of the log to the |
| 1349 | * tail. |
| 1350 | */ |
| 1351 | if (unlikely(head_block < tail_block || head_block >= log->l_logBBsize)) { |
| 1352 | XFS_ERROR_REPORT("xlog_clear_stale_blocks(1)", |
| 1353 | XFS_ERRLEVEL_LOW, log->l_mp); |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 1354 | return -EFSCORRUPTED; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1355 | } |
| 1356 | tail_distance = tail_block + (log->l_logBBsize - head_block); |
| 1357 | } else { |
| 1358 | /* |
| 1359 | * The head is behind the tail in the physical log, |
| 1360 | * so the distance from the head to the tail is just |
| 1361 | * the tail block minus the head block. |
| 1362 | */ |
| 1363 | if (unlikely(head_block >= tail_block || head_cycle != (tail_cycle + 1))){ |
| 1364 | XFS_ERROR_REPORT("xlog_clear_stale_blocks(2)", |
| 1365 | XFS_ERRLEVEL_LOW, log->l_mp); |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 1366 | return -EFSCORRUPTED; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1367 | } |
| 1368 | tail_distance = tail_block - head_block; |
| 1369 | } |
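 | |	/*
 | |	 * Illustrative example (made-up geometry): on a 1000 block log
 | |	 * with head_block = 900 and tail_block = 100 in the same cycle,
 | |	 * tail_distance = 100 + (1000 - 900) = 200. If the head had
 | |	 * already wrapped onto the next cycle at block 50, it would be
 | |	 * 100 - 50 = 50.
 | |	 */ |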
| 1370 | |
| 1371 | /* |
| 1372 | * If the head is right up against the tail, we can't clear |
| 1373 | * anything. |
| 1374 | */ |
| 1375 | if (tail_distance <= 0) { |
| 1376 | ASSERT(tail_distance == 0); |
| 1377 | return 0; |
| 1378 | } |
| 1379 | |
| 1380 | max_distance = XLOG_TOTAL_REC_SHIFT(log); |
| 1381 | /* |
| 1382 | * Take the smaller of the maximum amount of outstanding I/O |
| 1383 | * we could have and the distance to the tail to clear out. |
| 1384 | * We take the smaller so that we don't overwrite the tail and |
| 1385 | * we don't waste all day writing from the head to the tail |
| 1386 | * for no reason. |
| 1387 | */ |
| 1388 | max_distance = MIN(max_distance, tail_distance); |
| 1389 | |
| 1390 | if ((head_block + max_distance) <= log->l_logBBsize) { |
| 1391 | /* |
| 1392 | * We can stomp all the blocks we need to without |
| 1393 | * wrapping around the end of the log. Just do it |
| 1394 | * in a single write. Use the cycle number of the |
| 1395 | * current cycle minus one so that the log will look like: |
| 1396 | * n ... | n - 1 ... |
| 1397 | */ |
| 1398 | error = xlog_write_log_records(log, (head_cycle - 1), |
| 1399 | head_block, max_distance, tail_cycle, |
| 1400 | tail_block); |
| 1401 | if (error) |
| 1402 | return error; |
| 1403 | } else { |
| 1404 | /* |
| 1405 | * We need to wrap around the end of the physical log in |
| 1406 | * order to clear all the blocks. Do it in two separate |
| 1407 | * I/Os. The first write should be from the head to the |
| 1408 | * end of the physical log, and it should use the current |
| 1409 | * cycle number minus one just like above. |
| 1410 | */ |
| 1411 | distance = log->l_logBBsize - head_block; |
| 1412 | error = xlog_write_log_records(log, (head_cycle - 1), |
| 1413 | head_block, distance, tail_cycle, |
| 1414 | tail_block); |
| 1415 | |
| 1416 | if (error) |
| 1417 | return error; |
| 1418 | |
| 1419 | /* |
| 1420 | * Now write the blocks at the start of the physical log. |
| 1421 | * This writes the remainder of the blocks we want to clear. |
| 1422 | * It uses the current cycle number since we're now on the |
| 1423 | * same cycle as the head so that we get: |
| 1424 | * n ... n ... | n - 1 ... |
| 1425 | * ^^^^^ blocks we're writing |
| 1426 | */ |
| 1427 | distance = max_distance - (log->l_logBBsize - head_block); |
| 1428 | error = xlog_write_log_records(log, head_cycle, 0, distance, |
| 1429 | tail_cycle, tail_block); |
| 1430 | if (error) |
| 1431 | return error; |
| 1432 | } |
| 1433 | |
| 1434 | return 0; |
| 1435 | } |
| 1436 | |
| 1437 | /****************************************************************************** |
| 1438 | * |
| 1439 | * Log recover routines |
| 1440 | * |
| 1441 | ****************************************************************************** |
| 1442 | */ |
| 1443 | |
Dave Chinner | f0a7695 | 2010-01-11 11:49:57 +0000 | [diff] [blame] | 1444 | /* |
Dave Chinner | a775ad7 | 2013-06-05 12:09:07 +1000 | [diff] [blame] | 1445 | * Sort the log items in the transaction. |
| 1446 | * |
| 1447 | * The ordering constraints are defined by the inode allocation and unlink |
| 1448 | * behaviour. The rules are: |
| 1449 | * |
| 1450 | * 1. Every item is only logged once in a given transaction. Hence it |
| 1451 | * represents the last logged state of the item. Hence ordering is |
| 1452 | * dependent on the order in which operations need to be performed so |
| 1453 | * required initial conditions are always met. |
| 1454 | * |
| 1455 | * 2. Cancelled buffers are recorded in pass 1 in a separate table and |
| 1456 | * there's nothing to replay from them so we can simply cull them |
| 1457 | * from the transaction. However, we can't do that until after we've |
| 1458 | * replayed all the other items because they may be dependent on the |
| 1459 | * cancelled buffer and replaying the cancelled buffer can remove it |
 | 1460 |  * from the cancelled buffer table. Hence they have to be done last. |
| 1461 | * |
| 1462 | * 3. Inode allocation buffers must be replayed before inode items that |
Dave Chinner | 28c8e41 | 2013-06-27 16:04:55 +1000 | [diff] [blame] | 1463 | * read the buffer and replay changes into it. For filesystems using the |
| 1464 | * ICREATE transactions, this means XFS_LI_ICREATE objects need to get |
| 1465 | * treated the same as inode allocation buffers as they create and |
| 1466 | * initialise the buffers directly. |
Dave Chinner | a775ad7 | 2013-06-05 12:09:07 +1000 | [diff] [blame] | 1467 | * |
| 1468 | * 4. Inode unlink buffers must be replayed after inode items are replayed. |
| 1469 | * This ensures that inodes are completely flushed to the inode buffer |
| 1470 | * in a "free" state before we remove the unlinked inode list pointer. |
| 1471 | * |
| 1472 | * Hence the ordering needs to be inode allocation buffers first, inode items |
| 1473 | * second, inode unlink buffers third and cancelled buffers last. |
| 1474 | * |
| 1475 | * But there's a problem with that - we can't tell an inode allocation buffer |
| 1476 | * apart from a regular buffer, so we can't separate them. We can, however, |
| 1477 | * tell an inode unlink buffer from the others, and so we can separate them out |
| 1478 | * from all the other buffers and move them to last. |
| 1479 | * |
| 1480 | * Hence, 4 lists, in order from head to tail: |
Dave Chinner | 28c8e41 | 2013-06-27 16:04:55 +1000 | [diff] [blame] | 1481 | * - buffer_list for all buffers except cancelled/inode unlink buffers |
| 1482 | * - item_list for all non-buffer items |
| 1483 | * - inode_buffer_list for inode unlink buffers |
| 1484 | * - cancel_list for the cancelled buffers |
| 1485 | * |
| 1486 | * Note that we add objects to the tail of the lists so that first-to-last |
| 1487 | * ordering is preserved within the lists. Adding objects to the head of the |
| 1488 | * list means when we traverse from the head we walk them in last-to-first |
| 1489 | * order. For cancelled buffers and inode unlink buffers this doesn't matter, |
| 1490 | * but for all other items there may be specific ordering that we need to |
| 1491 | * preserve. |
Dave Chinner | f0a7695 | 2010-01-11 11:49:57 +0000 | [diff] [blame] | 1492 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1493 | STATIC int |
| 1494 | xlog_recover_reorder_trans( |
Mark Tinguely | ad223e6 | 2012-06-14 09:22:15 -0500 | [diff] [blame] | 1495 | struct xlog *log, |
| 1496 | struct xlog_recover *trans, |
Dave Chinner | 9abbc53 | 2010-04-13 15:06:46 +1000 | [diff] [blame] | 1497 | int pass) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1498 | { |
Dave Chinner | f0a7695 | 2010-01-11 11:49:57 +0000 | [diff] [blame] | 1499 | xlog_recover_item_t *item, *n; |
Mark Tinguely | 2a84108 | 2013-10-02 07:51:12 -0500 | [diff] [blame] | 1500 | int error = 0; |
Dave Chinner | f0a7695 | 2010-01-11 11:49:57 +0000 | [diff] [blame] | 1501 | LIST_HEAD(sort_list); |
Dave Chinner | a775ad7 | 2013-06-05 12:09:07 +1000 | [diff] [blame] | 1502 | LIST_HEAD(cancel_list); |
| 1503 | LIST_HEAD(buffer_list); |
| 1504 | LIST_HEAD(inode_buffer_list); |
| 1505 | LIST_HEAD(inode_list); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1506 | |
Dave Chinner | f0a7695 | 2010-01-11 11:49:57 +0000 | [diff] [blame] | 1507 | list_splice_init(&trans->r_itemq, &sort_list); |
| 1508 | list_for_each_entry_safe(item, n, &sort_list, ri_list) { |
Christoph Hellwig | 4e0d5f9 | 2010-06-23 18:11:15 +1000 | [diff] [blame] | 1509 | xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr; |
Dave Chinner | f0a7695 | 2010-01-11 11:49:57 +0000 | [diff] [blame] | 1510 | |
| 1511 | switch (ITEM_TYPE(item)) { |
Dave Chinner | 28c8e41 | 2013-06-27 16:04:55 +1000 | [diff] [blame] | 1512 | case XFS_LI_ICREATE: |
| 1513 | list_move_tail(&item->ri_list, &buffer_list); |
| 1514 | break; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1515 | case XFS_LI_BUF: |
Dave Chinner | a775ad7 | 2013-06-05 12:09:07 +1000 | [diff] [blame] | 1516 | if (buf_f->blf_flags & XFS_BLF_CANCEL) { |
Dave Chinner | 9abbc53 | 2010-04-13 15:06:46 +1000 | [diff] [blame] | 1517 | trace_xfs_log_recover_item_reorder_head(log, |
| 1518 | trans, item, pass); |
Dave Chinner | a775ad7 | 2013-06-05 12:09:07 +1000 | [diff] [blame] | 1519 | list_move(&item->ri_list, &cancel_list); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1520 | break; |
| 1521 | } |
Dave Chinner | a775ad7 | 2013-06-05 12:09:07 +1000 | [diff] [blame] | 1522 | if (buf_f->blf_flags & XFS_BLF_INODE_BUF) { |
| 1523 | list_move(&item->ri_list, &inode_buffer_list); |
| 1524 | break; |
| 1525 | } |
| 1526 | list_move_tail(&item->ri_list, &buffer_list); |
| 1527 | break; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1528 | case XFS_LI_INODE: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1529 | case XFS_LI_DQUOT: |
| 1530 | case XFS_LI_QUOTAOFF: |
| 1531 | case XFS_LI_EFD: |
| 1532 | case XFS_LI_EFI: |
Dave Chinner | 9abbc53 | 2010-04-13 15:06:46 +1000 | [diff] [blame] | 1533 | trace_xfs_log_recover_item_reorder_tail(log, |
| 1534 | trans, item, pass); |
Dave Chinner | a775ad7 | 2013-06-05 12:09:07 +1000 | [diff] [blame] | 1535 | list_move_tail(&item->ri_list, &inode_list); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1536 | break; |
| 1537 | default: |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 1538 | xfs_warn(log->l_mp, |
| 1539 | "%s: unrecognized type of log operation", |
| 1540 | __func__); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1541 | ASSERT(0); |
Mark Tinguely | 2a84108 | 2013-10-02 07:51:12 -0500 | [diff] [blame] | 1542 | /* |
| 1543 | * return the remaining items back to the transaction |
 | 1544 |  * item list so they can be freed in the caller. |
| 1545 | */ |
| 1546 | if (!list_empty(&sort_list)) |
| 1547 | list_splice_init(&sort_list, &trans->r_itemq); |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 1548 | error = -EIO; |
Mark Tinguely | 2a84108 | 2013-10-02 07:51:12 -0500 | [diff] [blame] | 1549 | goto out; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1550 | } |
Dave Chinner | f0a7695 | 2010-01-11 11:49:57 +0000 | [diff] [blame] | 1551 | } |
Mark Tinguely | 2a84108 | 2013-10-02 07:51:12 -0500 | [diff] [blame] | 1552 | out: |
Dave Chinner | f0a7695 | 2010-01-11 11:49:57 +0000 | [diff] [blame] | 1553 | ASSERT(list_empty(&sort_list)); |
Dave Chinner | a775ad7 | 2013-06-05 12:09:07 +1000 | [diff] [blame] | 1554 | if (!list_empty(&buffer_list)) |
| 1555 | list_splice(&buffer_list, &trans->r_itemq); |
| 1556 | if (!list_empty(&inode_list)) |
| 1557 | list_splice_tail(&inode_list, &trans->r_itemq); |
| 1558 | if (!list_empty(&inode_buffer_list)) |
| 1559 | list_splice_tail(&inode_buffer_list, &trans->r_itemq); |
| 1560 | if (!list_empty(&cancel_list)) |
| 1561 | list_splice_tail(&cancel_list, &trans->r_itemq); |
Mark Tinguely | 2a84108 | 2013-10-02 07:51:12 -0500 | [diff] [blame] | 1562 | return error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1563 | } |
| 1564 | |
| 1565 | /* |
| 1566 | * Build up the table of buf cancel records so that we don't replay |
| 1567 | * cancelled data in the second pass. For buffer records that are |
| 1568 | * not cancel records, there is nothing to do here so we just return. |
| 1569 | * |
| 1570 | * If we get a cancel record which is already in the table, this indicates |
| 1571 | * that the buffer was cancelled multiple times. In order to ensure |
| 1572 | * that during pass 2 we keep the record in the table until we reach its |
| 1573 | * last occurrence in the log, we keep a reference count in the cancel |
| 1574 | * record in the table to tell us how many times we expect to see this |
| 1575 | * record during the second pass. |
| 1576 | */ |
Christoph Hellwig | c9f71f5 | 2010-12-01 22:06:24 +0000 | [diff] [blame] | 1577 | STATIC int |
| 1578 | xlog_recover_buffer_pass1( |
Mark Tinguely | ad223e6 | 2012-06-14 09:22:15 -0500 | [diff] [blame] | 1579 | struct xlog *log, |
| 1580 | struct xlog_recover_item *item) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1581 | { |
Christoph Hellwig | c9f71f5 | 2010-12-01 22:06:24 +0000 | [diff] [blame] | 1582 | xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr; |
Christoph Hellwig | d5689ea | 2010-12-01 22:06:22 +0000 | [diff] [blame] | 1583 | struct list_head *bucket; |
| 1584 | struct xfs_buf_cancel *bcp; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1585 | |
| 1586 | /* |
| 1587 | * If this isn't a cancel buffer item, then just return. |
| 1588 | */ |
Christoph Hellwig | e2714bf | 2010-12-01 22:06:21 +0000 | [diff] [blame] | 1589 | if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) { |
Dave Chinner | 9abbc53 | 2010-04-13 15:06:46 +1000 | [diff] [blame] | 1590 | trace_xfs_log_recover_buf_not_cancel(log, buf_f); |
Christoph Hellwig | c9f71f5 | 2010-12-01 22:06:24 +0000 | [diff] [blame] | 1591 | return 0; |
Dave Chinner | 9abbc53 | 2010-04-13 15:06:46 +1000 | [diff] [blame] | 1592 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1593 | |
| 1594 | /* |
Christoph Hellwig | d5689ea | 2010-12-01 22:06:22 +0000 | [diff] [blame] | 1595 |  * Insert an xfs_buf_cancel record into the hash table of cancelled buffers. |
| 1596 | * If there is already an identical record, bump its reference count. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1597 | */ |
Christoph Hellwig | d5689ea | 2010-12-01 22:06:22 +0000 | [diff] [blame] | 1598 | bucket = XLOG_BUF_CANCEL_BUCKET(log, buf_f->blf_blkno); |
| 1599 | list_for_each_entry(bcp, bucket, bc_list) { |
| 1600 | if (bcp->bc_blkno == buf_f->blf_blkno && |
| 1601 | bcp->bc_len == buf_f->blf_len) { |
| 1602 | bcp->bc_refcount++; |
Dave Chinner | 9abbc53 | 2010-04-13 15:06:46 +1000 | [diff] [blame] | 1603 | trace_xfs_log_recover_buf_cancel_ref_inc(log, buf_f); |
Christoph Hellwig | c9f71f5 | 2010-12-01 22:06:24 +0000 | [diff] [blame] | 1604 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1605 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1606 | } |
Christoph Hellwig | d5689ea | 2010-12-01 22:06:22 +0000 | [diff] [blame] | 1607 | |
| 1608 | bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), KM_SLEEP); |
| 1609 | bcp->bc_blkno = buf_f->blf_blkno; |
| 1610 | bcp->bc_len = buf_f->blf_len; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1611 | bcp->bc_refcount = 1; |
Christoph Hellwig | d5689ea | 2010-12-01 22:06:22 +0000 | [diff] [blame] | 1612 | list_add_tail(&bcp->bc_list, bucket); |
| 1613 | |
Dave Chinner | 9abbc53 | 2010-04-13 15:06:46 +1000 | [diff] [blame] | 1614 | trace_xfs_log_recover_buf_cancel_add(log, buf_f); |
Christoph Hellwig | c9f71f5 | 2010-12-01 22:06:24 +0000 | [diff] [blame] | 1615 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1616 | } |
| 1617 | |
| 1618 | /* |
| 1619 | * Check to see whether the buffer being recovered has a corresponding |
Dave Chinner | 84a5b73 | 2013-08-27 08:10:53 +1000 | [diff] [blame] | 1620 |  * entry in the buffer cancel record table. If it does, return the cancel |
| 1621 | * buffer structure to the caller. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1622 | */ |
Dave Chinner | 84a5b73 | 2013-08-27 08:10:53 +1000 | [diff] [blame] | 1623 | STATIC struct xfs_buf_cancel * |
| 1624 | xlog_peek_buffer_cancelled( |
Mark Tinguely | ad223e6 | 2012-06-14 09:22:15 -0500 | [diff] [blame] | 1625 | struct xlog *log, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1626 | xfs_daddr_t blkno, |
| 1627 | uint len, |
| 1628 | ushort flags) |
| 1629 | { |
Christoph Hellwig | d5689ea | 2010-12-01 22:06:22 +0000 | [diff] [blame] | 1630 | struct list_head *bucket; |
| 1631 | struct xfs_buf_cancel *bcp; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1632 | |
Dave Chinner | 84a5b73 | 2013-08-27 08:10:53 +1000 | [diff] [blame] | 1633 | if (!log->l_buf_cancel_table) { |
| 1634 | /* empty table means no cancelled buffers in the log */ |
Dave Chinner | c115541 | 2010-05-07 11:05:19 +1000 | [diff] [blame] | 1635 | ASSERT(!(flags & XFS_BLF_CANCEL)); |
Dave Chinner | 84a5b73 | 2013-08-27 08:10:53 +1000 | [diff] [blame] | 1636 | return NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1637 | } |
| 1638 | |
Christoph Hellwig | d5689ea | 2010-12-01 22:06:22 +0000 | [diff] [blame] | 1639 | bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno); |
| 1640 | list_for_each_entry(bcp, bucket, bc_list) { |
| 1641 | if (bcp->bc_blkno == blkno && bcp->bc_len == len) |
Dave Chinner | 84a5b73 | 2013-08-27 08:10:53 +1000 | [diff] [blame] | 1642 | return bcp; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1643 | } |
| 1644 | |
| 1645 | /* |
Christoph Hellwig | d5689ea | 2010-12-01 22:06:22 +0000 | [diff] [blame] | 1646 | * We didn't find a corresponding entry in the table, so return 0 so |
| 1647 | * that the buffer is NOT cancelled. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1648 | */ |
Dave Chinner | c115541 | 2010-05-07 11:05:19 +1000 | [diff] [blame] | 1649 | ASSERT(!(flags & XFS_BLF_CANCEL)); |
Dave Chinner | 84a5b73 | 2013-08-27 08:10:53 +1000 | [diff] [blame] | 1650 | return NULL; |
| 1651 | } |
Christoph Hellwig | d5689ea | 2010-12-01 22:06:22 +0000 | [diff] [blame] | 1652 | |
Dave Chinner | 84a5b73 | 2013-08-27 08:10:53 +1000 | [diff] [blame] | 1653 | /* |
| 1654 | * If the buffer is being cancelled then return 1 so that it will be cancelled, |
| 1655 | * otherwise return 0. If the buffer is actually a buffer cancel item |
| 1656 | * (XFS_BLF_CANCEL is set), then decrement the refcount on the entry in the |
| 1657 | * table and remove it from the table if this is the last reference. |
| 1658 | * |
| 1659 | * We remove the cancel record from the table when we encounter its last |
| 1660 | * occurrence in the log so that if the same buffer is re-used again after its |
| 1661 | * last cancellation we actually replay the changes made at that point. |
| 1662 | */ |
| 1663 | STATIC int |
| 1664 | xlog_check_buffer_cancelled( |
| 1665 | struct xlog *log, |
| 1666 | xfs_daddr_t blkno, |
| 1667 | uint len, |
| 1668 | ushort flags) |
| 1669 | { |
| 1670 | struct xfs_buf_cancel *bcp; |
| 1671 | |
| 1672 | bcp = xlog_peek_buffer_cancelled(log, blkno, len, flags); |
| 1673 | if (!bcp) |
| 1674 | return 0; |
| 1675 | |
Christoph Hellwig | d5689ea | 2010-12-01 22:06:22 +0000 | [diff] [blame] | 1676 | /* |
 | 1677 |  * We've got a match, so return 1 so that the recovery of this buffer |
| 1678 | * is cancelled. If this buffer is actually a buffer cancel log |
| 1679 | * item, then decrement the refcount on the one in the table and |
| 1680 | * remove it if this is the last reference. |
| 1681 | */ |
| 1682 | if (flags & XFS_BLF_CANCEL) { |
| 1683 | if (--bcp->bc_refcount == 0) { |
| 1684 | list_del(&bcp->bc_list); |
| 1685 | kmem_free(bcp); |
| 1686 | } |
| 1687 | } |
| 1688 | return 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1689 | } |
| 1690 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1691 | /* |
Christoph Hellwig | e2714bf | 2010-12-01 22:06:21 +0000 | [diff] [blame] | 1692 | * Perform recovery for a buffer full of inodes. In these buffers, the only |
| 1693 | * data which should be recovered is that which corresponds to the |
| 1694 | * di_next_unlinked pointers in the on disk inode structures. The rest of the |
| 1695 | * data for the inodes is always logged through the inodes themselves rather |
| 1696 | * than the inode buffer and is recovered in xlog_recover_inode_pass2(). |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1697 | * |
Christoph Hellwig | e2714bf | 2010-12-01 22:06:21 +0000 | [diff] [blame] | 1698 | * The only time when buffers full of inodes are fully recovered is when the |
| 1699 | * buffer is full of newly allocated inodes. In this case the buffer will |
| 1700 | * not be marked as an inode buffer and so will be sent to |
| 1701 | * xlog_recover_do_reg_buffer() below during recovery. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1702 | */ |
| 1703 | STATIC int |
| 1704 | xlog_recover_do_inode_buffer( |
Christoph Hellwig | e2714bf | 2010-12-01 22:06:21 +0000 | [diff] [blame] | 1705 | struct xfs_mount *mp, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1706 | xlog_recover_item_t *item, |
Christoph Hellwig | e2714bf | 2010-12-01 22:06:21 +0000 | [diff] [blame] | 1707 | struct xfs_buf *bp, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1708 | xfs_buf_log_format_t *buf_f) |
| 1709 | { |
| 1710 | int i; |
Christoph Hellwig | e2714bf | 2010-12-01 22:06:21 +0000 | [diff] [blame] | 1711 | int item_index = 0; |
| 1712 | int bit = 0; |
| 1713 | int nbits = 0; |
| 1714 | int reg_buf_offset = 0; |
| 1715 | int reg_buf_bytes = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1716 | int next_unlinked_offset; |
| 1717 | int inodes_per_buf; |
| 1718 | xfs_agino_t *logged_nextp; |
| 1719 | xfs_agino_t *buffer_nextp; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1720 | |
Dave Chinner | 9abbc53 | 2010-04-13 15:06:46 +1000 | [diff] [blame] | 1721 | trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f); |
Dave Chinner | 9222a9c | 2013-06-12 12:19:06 +1000 | [diff] [blame] | 1722 | |
| 1723 | /* |
| 1724 | * Post recovery validation only works properly on CRC enabled |
| 1725 | * filesystems. |
| 1726 | */ |
| 1727 | if (xfs_sb_version_hascrc(&mp->m_sb)) |
| 1728 | bp->b_ops = &xfs_inode_buf_ops; |
Dave Chinner | 9abbc53 | 2010-04-13 15:06:46 +1000 | [diff] [blame] | 1729 | |
Dave Chinner | aa0e883 | 2012-04-23 15:58:52 +1000 | [diff] [blame] | 1730 | inodes_per_buf = BBTOB(bp->b_io_length) >> mp->m_sb.sb_inodelog; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1731 | for (i = 0; i < inodes_per_buf; i++) { |
| 1732 | next_unlinked_offset = (i * mp->m_sb.sb_inodesize) + |
| 1733 | offsetof(xfs_dinode_t, di_next_unlinked); |
| 1734 | |
| 1735 | while (next_unlinked_offset >= |
| 1736 | (reg_buf_offset + reg_buf_bytes)) { |
| 1737 | /* |
| 1738 | * The next di_next_unlinked field is beyond |
| 1739 | * the current logged region. Find the next |
| 1740 | * logged region that contains or is beyond |
| 1741 | * the current di_next_unlinked field. |
| 1742 | */ |
| 1743 | bit += nbits; |
Christoph Hellwig | e2714bf | 2010-12-01 22:06:21 +0000 | [diff] [blame] | 1744 | bit = xfs_next_bit(buf_f->blf_data_map, |
| 1745 | buf_f->blf_map_size, bit); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1746 | |
| 1747 | /* |
| 1748 | * If there are no more logged regions in the |
| 1749 | * buffer, then we're done. |
| 1750 | */ |
Christoph Hellwig | e2714bf | 2010-12-01 22:06:21 +0000 | [diff] [blame] | 1751 | if (bit == -1) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1752 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1753 | |
Christoph Hellwig | e2714bf | 2010-12-01 22:06:21 +0000 | [diff] [blame] | 1754 | nbits = xfs_contig_bits(buf_f->blf_data_map, |
| 1755 | buf_f->blf_map_size, bit); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1756 | ASSERT(nbits > 0); |
Dave Chinner | c115541 | 2010-05-07 11:05:19 +1000 | [diff] [blame] | 1757 | reg_buf_offset = bit << XFS_BLF_SHIFT; |
| 1758 | reg_buf_bytes = nbits << XFS_BLF_SHIFT; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1759 | item_index++; |
| 1760 | } |
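 | |		/*
 | |		 * Illustrative example (made-up bitmap): if the next logged
 | |		 * region starts at bit 4 and spans 2 bits, then with 128 byte
 | |		 * XFS_BLF_CHUNK regions it covers buffer bytes 512..767.
 | |		 */ |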
| 1761 | |
| 1762 | /* |
| 1763 | * If the current logged region starts after the current |
| 1764 | * di_next_unlinked field, then move on to the next |
| 1765 | * di_next_unlinked field. |
| 1766 | */ |
Christoph Hellwig | e2714bf | 2010-12-01 22:06:21 +0000 | [diff] [blame] | 1767 | if (next_unlinked_offset < reg_buf_offset) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1768 | continue; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1769 | |
| 1770 | ASSERT(item->ri_buf[item_index].i_addr != NULL); |
Dave Chinner | c115541 | 2010-05-07 11:05:19 +1000 | [diff] [blame] | 1771 | ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0); |
Dave Chinner | aa0e883 | 2012-04-23 15:58:52 +1000 | [diff] [blame] | 1772 | ASSERT((reg_buf_offset + reg_buf_bytes) <= |
| 1773 | BBTOB(bp->b_io_length)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1774 | |
| 1775 | /* |
| 1776 | * The current logged region contains a copy of the |
| 1777 | * current di_next_unlinked field. Extract its value |
| 1778 | * and copy it to the buffer copy. |
| 1779 | */ |
Christoph Hellwig | 4e0d5f9 | 2010-06-23 18:11:15 +1000 | [diff] [blame] | 1780 | logged_nextp = item->ri_buf[item_index].i_addr + |
| 1781 | next_unlinked_offset - reg_buf_offset; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1782 | if (unlikely(*logged_nextp == 0)) { |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 1783 | xfs_alert(mp, |
| 1784 | "Bad inode buffer log record (ptr = 0x%p, bp = 0x%p). " |
| 1785 | "Trying to replay bad (0) inode di_next_unlinked field.", |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1786 | item, bp); |
| 1787 | XFS_ERROR_REPORT("xlog_recover_do_inode_buf", |
| 1788 | XFS_ERRLEVEL_LOW, mp); |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 1789 | return -EFSCORRUPTED; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1790 | } |
| 1791 | |
Christoph Hellwig | 88ee2df | 2015-06-22 09:44:29 +1000 | [diff] [blame] | 1792 | buffer_nextp = xfs_buf_offset(bp, next_unlinked_offset); |
Tim Shimmin | 87c199c | 2006-06-09 14:56:16 +1000 | [diff] [blame] | 1793 | *buffer_nextp = *logged_nextp; |
Dave Chinner | 0a32c26 | 2013-06-05 12:09:08 +1000 | [diff] [blame] | 1794 | |
| 1795 | /* |
| 1796 | * If necessary, recalculate the CRC in the on-disk inode. We |
| 1797 | * have to leave the inode in a consistent state for whoever |
| 1798 | * reads it next.... |
| 1799 | */ |
Christoph Hellwig | 88ee2df | 2015-06-22 09:44:29 +1000 | [diff] [blame] | 1800 | xfs_dinode_calc_crc(mp, |
Dave Chinner | 0a32c26 | 2013-06-05 12:09:08 +1000 | [diff] [blame] | 1801 | xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize)); |
| 1802 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1803 | } |
| 1804 | |
| 1805 | return 0; |
| 1806 | } |
| 1807 | |
| 1808 | /* |
Dave Chinner | 50d5c8d | 2013-08-28 21:22:47 +1000 | [diff] [blame] | 1809 | * V5 filesystems know the age of the buffer on disk being recovered. We can |
| 1810 | * have newer objects on disk than we are replaying, and so for these cases we |
| 1811 | * don't want to replay the current change as that will make the buffer contents |
| 1812 | * temporarily invalid on disk. |
| 1813 | * |
| 1814 | * The magic number might not match the buffer type we are going to recover |
| 1815 | * (e.g. reallocated blocks), so we ignore the xfs_buf_log_format flags. Hence |
 | 1816 |  * extract the LSN of the existing object in the buffer based on its current |
| 1817 | * magic number. If we don't recognise the magic number in the buffer, then |
| 1818 | * return a LSN of -1 so that the caller knows it was an unrecognised block and |
| 1819 | * so can recover the buffer. |
Dave Chinner | 566055d | 2013-09-24 16:01:16 +1000 | [diff] [blame] | 1820 | * |
| 1821 | * Note: we cannot rely solely on magic number matches to determine that the |
| 1822 | * buffer has a valid LSN - we also need to verify that it belongs to this |
| 1823 | * filesystem, so we need to extract the object's LSN and compare it to that |
| 1824 | * which we read from the superblock. If the UUIDs don't match, then we've got a |
| 1825 | * stale metadata block from an old filesystem instance that we need to recover |
| 1826 | * over the top of. |
Dave Chinner | 50d5c8d | 2013-08-28 21:22:47 +1000 | [diff] [blame] | 1827 | */ |
| 1828 | static xfs_lsn_t |
| 1829 | xlog_recover_get_buf_lsn( |
| 1830 | struct xfs_mount *mp, |
| 1831 | struct xfs_buf *bp) |
| 1832 | { |
| 1833 | __uint32_t magic32; |
| 1834 | __uint16_t magic16; |
| 1835 | __uint16_t magicda; |
| 1836 | void *blk = bp->b_addr; |
Dave Chinner | 566055d | 2013-09-24 16:01:16 +1000 | [diff] [blame] | 1837 | uuid_t *uuid; |
| 1838 | xfs_lsn_t lsn = -1; |
Dave Chinner | 50d5c8d | 2013-08-28 21:22:47 +1000 | [diff] [blame] | 1839 | |
| 1840 | /* v4 filesystems always recover immediately */ |
| 1841 | if (!xfs_sb_version_hascrc(&mp->m_sb)) |
| 1842 | goto recover_immediately; |
| 1843 | |
| 1844 | magic32 = be32_to_cpu(*(__be32 *)blk); |
| 1845 | switch (magic32) { |
| 1846 | case XFS_ABTB_CRC_MAGIC: |
| 1847 | case XFS_ABTC_CRC_MAGIC: |
| 1848 | case XFS_ABTB_MAGIC: |
| 1849 | case XFS_ABTC_MAGIC: |
| 1850 | case XFS_IBT_CRC_MAGIC: |
Dave Chinner | 566055d | 2013-09-24 16:01:16 +1000 | [diff] [blame] | 1851 | case XFS_IBT_MAGIC: { |
| 1852 | struct xfs_btree_block *btb = blk; |
| 1853 | |
| 1854 | lsn = be64_to_cpu(btb->bb_u.s.bb_lsn); |
| 1855 | uuid = &btb->bb_u.s.bb_uuid; |
| 1856 | break; |
| 1857 | } |
Dave Chinner | 50d5c8d | 2013-08-28 21:22:47 +1000 | [diff] [blame] | 1858 | case XFS_BMAP_CRC_MAGIC: |
Dave Chinner | 566055d | 2013-09-24 16:01:16 +1000 | [diff] [blame] | 1859 | case XFS_BMAP_MAGIC: { |
| 1860 | struct xfs_btree_block *btb = blk; |
| 1861 | |
| 1862 | lsn = be64_to_cpu(btb->bb_u.l.bb_lsn); |
| 1863 | uuid = &btb->bb_u.l.bb_uuid; |
| 1864 | break; |
| 1865 | } |
Dave Chinner | 50d5c8d | 2013-08-28 21:22:47 +1000 | [diff] [blame] | 1866 | case XFS_AGF_MAGIC: |
Dave Chinner | 566055d | 2013-09-24 16:01:16 +1000 | [diff] [blame] | 1867 | lsn = be64_to_cpu(((struct xfs_agf *)blk)->agf_lsn); |
| 1868 | uuid = &((struct xfs_agf *)blk)->agf_uuid; |
| 1869 | break; |
Dave Chinner | 50d5c8d | 2013-08-28 21:22:47 +1000 | [diff] [blame] | 1870 | case XFS_AGFL_MAGIC: |
Dave Chinner | 566055d | 2013-09-24 16:01:16 +1000 | [diff] [blame] | 1871 | lsn = be64_to_cpu(((struct xfs_agfl *)blk)->agfl_lsn); |
| 1872 | uuid = &((struct xfs_agfl *)blk)->agfl_uuid; |
| 1873 | break; |
Dave Chinner | 50d5c8d | 2013-08-28 21:22:47 +1000 | [diff] [blame] | 1874 | case XFS_AGI_MAGIC: |
Dave Chinner | 566055d | 2013-09-24 16:01:16 +1000 | [diff] [blame] | 1875 | lsn = be64_to_cpu(((struct xfs_agi *)blk)->agi_lsn); |
| 1876 | uuid = &((struct xfs_agi *)blk)->agi_uuid; |
| 1877 | break; |
Dave Chinner | 50d5c8d | 2013-08-28 21:22:47 +1000 | [diff] [blame] | 1878 | case XFS_SYMLINK_MAGIC: |
Dave Chinner | 566055d | 2013-09-24 16:01:16 +1000 | [diff] [blame] | 1879 | lsn = be64_to_cpu(((struct xfs_dsymlink_hdr *)blk)->sl_lsn); |
| 1880 | uuid = &((struct xfs_dsymlink_hdr *)blk)->sl_uuid; |
| 1881 | break; |
Dave Chinner | 50d5c8d | 2013-08-28 21:22:47 +1000 | [diff] [blame] | 1882 | case XFS_DIR3_BLOCK_MAGIC: |
| 1883 | case XFS_DIR3_DATA_MAGIC: |
| 1884 | case XFS_DIR3_FREE_MAGIC: |
Dave Chinner | 566055d | 2013-09-24 16:01:16 +1000 | [diff] [blame] | 1885 | lsn = be64_to_cpu(((struct xfs_dir3_blk_hdr *)blk)->lsn); |
| 1886 | uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid; |
| 1887 | break; |
Dave Chinner | 50d5c8d | 2013-08-28 21:22:47 +1000 | [diff] [blame] | 1888 | case XFS_ATTR3_RMT_MAGIC: |
Dave Chinner | 566055d | 2013-09-24 16:01:16 +1000 | [diff] [blame] | 1889 | lsn = be64_to_cpu(((struct xfs_attr3_rmt_hdr *)blk)->rm_lsn); |
| 1890 | uuid = &((struct xfs_attr3_rmt_hdr *)blk)->rm_uuid; |
| 1891 | break; |
Dave Chinner | 50d5c8d | 2013-08-28 21:22:47 +1000 | [diff] [blame] | 1892 | case XFS_SB_MAGIC: |
Dave Chinner | 566055d | 2013-09-24 16:01:16 +1000 | [diff] [blame] | 1893 | lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn); |
| 1894 | uuid = &((struct xfs_dsb *)blk)->sb_uuid; |
| 1895 | break; |
Dave Chinner | 50d5c8d | 2013-08-28 21:22:47 +1000 | [diff] [blame] | 1896 | default: |
| 1897 | break; |
| 1898 | } |
| 1899 | |
Dave Chinner | 566055d | 2013-09-24 16:01:16 +1000 | [diff] [blame] | 1900 | if (lsn != (xfs_lsn_t)-1) { |
| 1901 | if (!uuid_equal(&mp->m_sb.sb_uuid, uuid)) |
| 1902 | goto recover_immediately; |
| 1903 | return lsn; |
| 1904 | } |
| 1905 | |
Dave Chinner | 50d5c8d | 2013-08-28 21:22:47 +1000 | [diff] [blame] | 1906 | magicda = be16_to_cpu(((struct xfs_da_blkinfo *)blk)->magic); |
| 1907 | switch (magicda) { |
| 1908 | case XFS_DIR3_LEAF1_MAGIC: |
| 1909 | case XFS_DIR3_LEAFN_MAGIC: |
| 1910 | case XFS_DA3_NODE_MAGIC: |
Dave Chinner | 566055d | 2013-09-24 16:01:16 +1000 | [diff] [blame] | 1911 | lsn = be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn); |
| 1912 | uuid = &((struct xfs_da3_blkinfo *)blk)->uuid; |
| 1913 | break; |
Dave Chinner | 50d5c8d | 2013-08-28 21:22:47 +1000 | [diff] [blame] | 1914 | default: |
| 1915 | break; |
| 1916 | } |
| 1917 | |
Dave Chinner | 566055d | 2013-09-24 16:01:16 +1000 | [diff] [blame] | 1918 | if (lsn != (xfs_lsn_t)-1) { |
| 1919 | if (!uuid_equal(&mp->m_sb.sb_uuid, uuid)) |
| 1920 | goto recover_immediately; |
| 1921 | return lsn; |
| 1922 | } |
| 1923 | |
Dave Chinner | 50d5c8d | 2013-08-28 21:22:47 +1000 | [diff] [blame] | 1924 | /* |
| 1925 | * We do individual object checks on dquot and inode buffers as they |
| 1926 | * have their own individual LSN records. Also, we could have a stale |
| 1927 | * buffer here, so we have to at least recognise these buffer types. |
| 1928 | * |
 | 1929 |  * A noted complexity here is inode unlinked list processing - it logs |
| 1930 | * the inode directly in the buffer, but we don't know which inodes have |
| 1931 | * been modified, and there is no global buffer LSN. Hence we need to |
| 1932 | * recover all inode buffer types immediately. This problem will be |
| 1933 | * fixed by logical logging of the unlinked list modifications. |
| 1934 | */ |
| 1935 | magic16 = be16_to_cpu(*(__be16 *)blk); |
| 1936 | switch (magic16) { |
| 1937 | case XFS_DQUOT_MAGIC: |
| 1938 | case XFS_DINODE_MAGIC: |
| 1939 | goto recover_immediately; |
| 1940 | default: |
| 1941 | break; |
| 1942 | } |
| 1943 | |
| 1944 | /* unknown buffer contents, recover immediately */ |
| 1945 | |
| 1946 | recover_immediately: |
| 1947 | return (xfs_lsn_t)-1; |
| 1948 | |
| 1949 | } |
| 1950 | |
| 1951 | /* |
Dave Chinner | d75afeb | 2013-04-03 16:11:29 +1100 | [diff] [blame] | 1952 | * Validate that the recovered buffer is of the correct type and attach the |
| 1953 | * appropriate buffer operations to it for writeback. Magic numbers are in a |
| 1954 | * few places: |
| 1955 | * the first 16 bits of the buffer (inode buffer, dquot buffer), |
| 1956 | * the first 32 bits of the buffer (most blocks), |
| 1957 | * inside a struct xfs_da_blkinfo at the start of the buffer. |
| 1958 | */ |
| 1959 | static void |
Dave Chinner | 50d5c8d | 2013-08-28 21:22:47 +1000 | [diff] [blame] | 1960 | xlog_recover_validate_buf_type( |
Dave Chinner | d75afeb | 2013-04-03 16:11:29 +1100 | [diff] [blame] | 1961 | struct xfs_mount *mp, |
| 1962 | struct xfs_buf *bp, |
| 1963 | xfs_buf_log_format_t *buf_f) |
| 1964 | { |
| 1965 | struct xfs_da_blkinfo *info = bp->b_addr; |
| 1966 | __uint32_t magic32; |
| 1967 | __uint16_t magic16; |
| 1968 | __uint16_t magicda; |
| 1969 | |
Dave Chinner | 67dc288 | 2014-08-04 12:43:06 +1000 | [diff] [blame] | 1970 | /* |
| 1971 | * We can only do post recovery validation on items on CRC enabled |
| 1972 | * filesystems as we need to know when the buffer was written to be able |
| 1973 | * to determine if we should have replayed the item. If we replay old |
| 1974 | * metadata over a newer buffer, then it will enter a temporarily |
| 1975 | * inconsistent state resulting in verification failures. Hence for now |
| 1976 | * just avoid the verification stage for non-CRC filesystems. |
| 1977 | */ |
| 1978 | if (!xfs_sb_version_hascrc(&mp->m_sb)) |
| 1979 | return; |
| 1980 | |
Dave Chinner | d75afeb | 2013-04-03 16:11:29 +1100 | [diff] [blame] | 1981 | magic32 = be32_to_cpu(*(__be32 *)bp->b_addr); |
| 1982 | magic16 = be16_to_cpu(*(__be16*)bp->b_addr); |
| 1983 | magicda = be16_to_cpu(info->magic); |
Dave Chinner | 61fe135 | 2013-04-03 16:11:30 +1100 | [diff] [blame] | 1984 | switch (xfs_blft_from_flags(buf_f)) { |
| 1985 | case XFS_BLFT_BTREE_BUF: |
Dave Chinner | d75afeb | 2013-04-03 16:11:29 +1100 | [diff] [blame] | 1986 | switch (magic32) { |
| 1987 | case XFS_ABTB_CRC_MAGIC: |
| 1988 | case XFS_ABTC_CRC_MAGIC: |
| 1989 | case XFS_ABTB_MAGIC: |
| 1990 | case XFS_ABTC_MAGIC: |
| 1991 | bp->b_ops = &xfs_allocbt_buf_ops; |
| 1992 | break; |
| 1993 | case XFS_IBT_CRC_MAGIC: |
Brian Foster | aafc3c2 | 2014-04-24 16:00:52 +1000 | [diff] [blame] | 1994 | case XFS_FIBT_CRC_MAGIC: |
Dave Chinner | d75afeb | 2013-04-03 16:11:29 +1100 | [diff] [blame] | 1995 | case XFS_IBT_MAGIC: |
Brian Foster | aafc3c2 | 2014-04-24 16:00:52 +1000 | [diff] [blame] | 1996 | case XFS_FIBT_MAGIC: |
Dave Chinner | d75afeb | 2013-04-03 16:11:29 +1100 | [diff] [blame] | 1997 | bp->b_ops = &xfs_inobt_buf_ops; |
| 1998 | break; |
| 1999 | case XFS_BMAP_CRC_MAGIC: |
| 2000 | case XFS_BMAP_MAGIC: |
| 2001 | bp->b_ops = &xfs_bmbt_buf_ops; |
| 2002 | break; |
| 2003 | default: |
| 2004 | xfs_warn(mp, "Bad btree block magic!"); |
| 2005 | ASSERT(0); |
| 2006 | break; |
| 2007 | } |
| 2008 | break; |
Dave Chinner | 61fe135 | 2013-04-03 16:11:30 +1100 | [diff] [blame] | 2009 | case XFS_BLFT_AGF_BUF: |
Dave Chinner | d75afeb | 2013-04-03 16:11:29 +1100 | [diff] [blame] | 2010 | if (magic32 != XFS_AGF_MAGIC) { |
| 2011 | xfs_warn(mp, "Bad AGF block magic!"); |
| 2012 | ASSERT(0); |
| 2013 | break; |
| 2014 | } |
| 2015 | bp->b_ops = &xfs_agf_buf_ops; |
| 2016 | break; |
Dave Chinner | 61fe135 | 2013-04-03 16:11:30 +1100 | [diff] [blame] | 2017 | case XFS_BLFT_AGFL_BUF: |
Dave Chinner | d75afeb | 2013-04-03 16:11:29 +1100 | [diff] [blame] | 2018 | if (magic32 != XFS_AGFL_MAGIC) { |
| 2019 | xfs_warn(mp, "Bad AGFL block magic!"); |
| 2020 | ASSERT(0); |
| 2021 | break; |
| 2022 | } |
| 2023 | bp->b_ops = &xfs_agfl_buf_ops; |
| 2024 | break; |
Dave Chinner | 61fe135 | 2013-04-03 16:11:30 +1100 | [diff] [blame] | 2025 | case XFS_BLFT_AGI_BUF: |
Dave Chinner | d75afeb | 2013-04-03 16:11:29 +1100 | [diff] [blame] | 2026 | if (magic32 != XFS_AGI_MAGIC) { |
| 2027 | xfs_warn(mp, "Bad AGI block magic!"); |
| 2028 | ASSERT(0); |
| 2029 | break; |
| 2030 | } |
| 2031 | bp->b_ops = &xfs_agi_buf_ops; |
| 2032 | break; |
Dave Chinner | 61fe135 | 2013-04-03 16:11:30 +1100 | [diff] [blame] | 2033 | case XFS_BLFT_UDQUOT_BUF: |
| 2034 | case XFS_BLFT_PDQUOT_BUF: |
| 2035 | case XFS_BLFT_GDQUOT_BUF: |
Dave Chinner | 123887e | 2013-04-30 21:39:33 +1000 | [diff] [blame] | 2036 | #ifdef CONFIG_XFS_QUOTA |
Dave Chinner | d75afeb | 2013-04-03 16:11:29 +1100 | [diff] [blame] | 2037 | if (magic16 != XFS_DQUOT_MAGIC) { |
| 2038 | xfs_warn(mp, "Bad DQUOT block magic!"); |
| 2039 | ASSERT(0); |
| 2040 | break; |
| 2041 | } |
| 2042 | bp->b_ops = &xfs_dquot_buf_ops; |
Dave Chinner | 123887e | 2013-04-30 21:39:33 +1000 | [diff] [blame] | 2043 | #else |
| 2044 | xfs_alert(mp, |
| 2045 | "Trying to recover dquots without QUOTA support built in!"); |
| 2046 | ASSERT(0); |
| 2047 | #endif |
Dave Chinner | d75afeb | 2013-04-03 16:11:29 +1100 | [diff] [blame] | 2048 | break; |
Dave Chinner | 61fe135 | 2013-04-03 16:11:30 +1100 | [diff] [blame] | 2049 | case XFS_BLFT_DINO_BUF: |
Dave Chinner | d75afeb | 2013-04-03 16:11:29 +1100 | [diff] [blame] | 2050 | if (magic16 != XFS_DINODE_MAGIC) { |
| 2051 | xfs_warn(mp, "Bad INODE block magic!"); |
| 2052 | ASSERT(0); |
| 2053 | break; |
| 2054 | } |
| 2055 | bp->b_ops = &xfs_inode_buf_ops; |
| 2056 | break; |
Dave Chinner | 61fe135 | 2013-04-03 16:11:30 +1100 | [diff] [blame] | 2057 | case XFS_BLFT_SYMLINK_BUF: |
Dave Chinner | d75afeb | 2013-04-03 16:11:29 +1100 | [diff] [blame] | 2058 | if (magic32 != XFS_SYMLINK_MAGIC) { |
| 2059 | xfs_warn(mp, "Bad symlink block magic!"); |
| 2060 | ASSERT(0); |
| 2061 | break; |
| 2062 | } |
| 2063 | bp->b_ops = &xfs_symlink_buf_ops; |
| 2064 | break; |
Dave Chinner | 61fe135 | 2013-04-03 16:11:30 +1100 | [diff] [blame] | 2065 | case XFS_BLFT_DIR_BLOCK_BUF: |
Dave Chinner | d75afeb | 2013-04-03 16:11:29 +1100 | [diff] [blame] | 2066 | if (magic32 != XFS_DIR2_BLOCK_MAGIC && |
| 2067 | magic32 != XFS_DIR3_BLOCK_MAGIC) { |
| 2068 | xfs_warn(mp, "Bad dir block magic!"); |
| 2069 | ASSERT(0); |
| 2070 | break; |
| 2071 | } |
| 2072 | bp->b_ops = &xfs_dir3_block_buf_ops; |
| 2073 | break; |
Dave Chinner | 61fe135 | 2013-04-03 16:11:30 +1100 | [diff] [blame] | 2074 | case XFS_BLFT_DIR_DATA_BUF: |
Dave Chinner | d75afeb | 2013-04-03 16:11:29 +1100 | [diff] [blame] | 2075 | if (magic32 != XFS_DIR2_DATA_MAGIC && |
| 2076 | magic32 != XFS_DIR3_DATA_MAGIC) { |
| 2077 | xfs_warn(mp, "Bad dir data magic!"); |
| 2078 | ASSERT(0); |
| 2079 | break; |
| 2080 | } |
| 2081 | bp->b_ops = &xfs_dir3_data_buf_ops; |
| 2082 | break; |
Dave Chinner | 61fe135 | 2013-04-03 16:11:30 +1100 | [diff] [blame] | 2083 | case XFS_BLFT_DIR_FREE_BUF: |
Dave Chinner | d75afeb | 2013-04-03 16:11:29 +1100 | [diff] [blame] | 2084 | if (magic32 != XFS_DIR2_FREE_MAGIC && |
| 2085 | magic32 != XFS_DIR3_FREE_MAGIC) { |
| 2086 | xfs_warn(mp, "Bad dir3 free magic!"); |
| 2087 | ASSERT(0); |
| 2088 | break; |
| 2089 | } |
| 2090 | bp->b_ops = &xfs_dir3_free_buf_ops; |
| 2091 | break; |
Dave Chinner | 61fe135 | 2013-04-03 16:11:30 +1100 | [diff] [blame] | 2092 | case XFS_BLFT_DIR_LEAF1_BUF: |
Dave Chinner | d75afeb | 2013-04-03 16:11:29 +1100 | [diff] [blame] | 2093 | if (magicda != XFS_DIR2_LEAF1_MAGIC && |
| 2094 | magicda != XFS_DIR3_LEAF1_MAGIC) { |
| 2095 | xfs_warn(mp, "Bad dir leaf1 magic!"); |
| 2096 | ASSERT(0); |
| 2097 | break; |
| 2098 | } |
| 2099 | bp->b_ops = &xfs_dir3_leaf1_buf_ops; |
| 2100 | break; |
Dave Chinner | 61fe135 | 2013-04-03 16:11:30 +1100 | [diff] [blame] | 2101 | case XFS_BLFT_DIR_LEAFN_BUF: |
Dave Chinner | d75afeb | 2013-04-03 16:11:29 +1100 | [diff] [blame] | 2102 | if (magicda != XFS_DIR2_LEAFN_MAGIC && |
| 2103 | magicda != XFS_DIR3_LEAFN_MAGIC) { |
| 2104 | xfs_warn(mp, "Bad dir leafn magic!"); |
| 2105 | ASSERT(0); |
| 2106 | break; |
| 2107 | } |
| 2108 | bp->b_ops = &xfs_dir3_leafn_buf_ops; |
| 2109 | break; |
Dave Chinner | 61fe135 | 2013-04-03 16:11:30 +1100 | [diff] [blame] | 2110 | case XFS_BLFT_DA_NODE_BUF: |
Dave Chinner | d75afeb | 2013-04-03 16:11:29 +1100 | [diff] [blame] | 2111 | if (magicda != XFS_DA_NODE_MAGIC && |
| 2112 | magicda != XFS_DA3_NODE_MAGIC) { |
| 2113 | xfs_warn(mp, "Bad da node magic!"); |
| 2114 | ASSERT(0); |
| 2115 | break; |
| 2116 | } |
| 2117 | bp->b_ops = &xfs_da3_node_buf_ops; |
| 2118 | break; |
Dave Chinner | 61fe135 | 2013-04-03 16:11:30 +1100 | [diff] [blame] | 2119 | case XFS_BLFT_ATTR_LEAF_BUF: |
Dave Chinner | d75afeb | 2013-04-03 16:11:29 +1100 | [diff] [blame] | 2120 | if (magicda != XFS_ATTR_LEAF_MAGIC && |
| 2121 | magicda != XFS_ATTR3_LEAF_MAGIC) { |
| 2122 | xfs_warn(mp, "Bad attr leaf magic!"); |
| 2123 | ASSERT(0); |
| 2124 | break; |
| 2125 | } |
| 2126 | bp->b_ops = &xfs_attr3_leaf_buf_ops; |
| 2127 | break; |
Dave Chinner | 61fe135 | 2013-04-03 16:11:30 +1100 | [diff] [blame] | 2128 | case XFS_BLFT_ATTR_RMT_BUF: |
Dave Chinner | cab09a8 | 2013-04-30 21:39:36 +1000 | [diff] [blame] | 2129 | if (magic32 != XFS_ATTR3_RMT_MAGIC) { |
Dave Chinner | d75afeb | 2013-04-03 16:11:29 +1100 | [diff] [blame] | 2130 | xfs_warn(mp, "Bad attr remote magic!"); |
| 2131 | ASSERT(0); |
| 2132 | break; |
| 2133 | } |
| 2134 | bp->b_ops = &xfs_attr3_rmt_buf_ops; |
| 2135 | break; |
Dave Chinner | 04a1e6c | 2013-04-03 16:11:31 +1100 | [diff] [blame] | 2136 | case XFS_BLFT_SB_BUF: |
| 2137 | if (magic32 != XFS_SB_MAGIC) { |
| 2138 | xfs_warn(mp, "Bad SB block magic!"); |
| 2139 | ASSERT(0); |
| 2140 | break; |
| 2141 | } |
| 2142 | bp->b_ops = &xfs_sb_buf_ops; |
| 2143 | break; |
Dave Chinner | d75afeb | 2013-04-03 16:11:29 +1100 | [diff] [blame] | 2144 | default: |
Dave Chinner | 61fe135 | 2013-04-03 16:11:30 +1100 | [diff] [blame] | 2145 | xfs_warn(mp, "Unknown buffer type %d!", |
| 2146 | xfs_blft_from_flags(buf_f)); |
Dave Chinner | d75afeb | 2013-04-03 16:11:29 +1100 | [diff] [blame] | 2147 | break; |
| 2148 | } |
| 2149 | } |
| 2150 | |
| 2151 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2152 | * Perform a 'normal' buffer recovery. Each logged region of the |
| 2153 | * buffer should be copied over the corresponding region in the |
| 2154 | * given buffer. The bitmap in the buf log format structure indicates |
| 2155 | * where to place the logged data. |
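| | * |
| | * Each bit in blf_data_map covers one XFS_BLF_CHUNK (128 byte) chunk of |
| | * the buffer, so a run of nbits dirty bits starting at 'bit' is copied |
| | * to byte offset (bit << XFS_BLF_SHIFT) in the buffer being recovered. |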
| 2156 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2157 | STATIC void |
| 2158 | xlog_recover_do_reg_buffer( |
Dave Chinner | 9abbc53 | 2010-04-13 15:06:46 +1000 | [diff] [blame] | 2159 | struct xfs_mount *mp, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2160 | xlog_recover_item_t *item, |
Christoph Hellwig | e2714bf | 2010-12-01 22:06:21 +0000 | [diff] [blame] | 2161 | struct xfs_buf *bp, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2162 | xfs_buf_log_format_t *buf_f) |
| 2163 | { |
| 2164 | int i; |
| 2165 | int bit; |
| 2166 | int nbits; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2167 | int error; |
| 2168 | |
Dave Chinner | 9abbc53 | 2010-04-13 15:06:46 +1000 | [diff] [blame] | 2169 | trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f); |
| 2170 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2171 | bit = 0; |
| 2172 | i = 1; /* 0 is the buf format structure */ |
| 2173 | while (1) { |
Christoph Hellwig | e2714bf | 2010-12-01 22:06:21 +0000 | [diff] [blame] | 2174 | bit = xfs_next_bit(buf_f->blf_data_map, |
| 2175 | buf_f->blf_map_size, bit); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2176 | if (bit == -1) |
| 2177 | break; |
Christoph Hellwig | e2714bf | 2010-12-01 22:06:21 +0000 | [diff] [blame] | 2178 | nbits = xfs_contig_bits(buf_f->blf_data_map, |
| 2179 | buf_f->blf_map_size, bit); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2180 | ASSERT(nbits > 0); |
Christoph Hellwig | 4b80916 | 2007-08-16 15:37:36 +1000 | [diff] [blame] | 2181 | ASSERT(item->ri_buf[i].i_addr != NULL); |
Dave Chinner | c115541 | 2010-05-07 11:05:19 +1000 | [diff] [blame] | 2182 | ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0); |
Dave Chinner | aa0e883 | 2012-04-23 15:58:52 +1000 | [diff] [blame] | 2183 | ASSERT(BBTOB(bp->b_io_length) >= |
| 2184 | ((uint)bit << XFS_BLF_SHIFT) + (nbits << XFS_BLF_SHIFT)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2185 | |
| 2186 | /* |
Dave Chinner | 709da6a | 2013-05-27 16:38:23 +1000 | [diff] [blame] | 2187 | * The dirty regions logged in the buffer, even though |
| 2188 | * contiguous, may span multiple chunks. This is because the |
| 2189 | * dirty region may span a physical page boundary in a buffer |
| 2190 | * and hence be split into two separate vectors for writing into |
| 2191 | * the log. Hence we need to trim nbits back to the length of |
| 2192 | * the current region being copied out of the log. |
| 2193 | */ |
| 2194 | if (item->ri_buf[i].i_len < (nbits << XFS_BLF_SHIFT)) |
| 2195 | nbits = item->ri_buf[i].i_len >> XFS_BLF_SHIFT; |
| 2196 | |
| 2197 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2198 | * Do a sanity check if this is a dquot buffer. Just checking |
| 2199 | * the first dquot in the buffer should do. XXX This is |
| 2200 | * probably a good thing to do for other buf types also. |
| 2201 | */ |
| 2202 | error = 0; |
Nathan Scott | c8ad20f | 2005-06-21 15:38:48 +1000 | [diff] [blame] | 2203 | if (buf_f->blf_flags & |
Dave Chinner | c115541 | 2010-05-07 11:05:19 +1000 | [diff] [blame] | 2204 | (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) { |
Christoph Hellwig | 0c5e1ce | 2009-06-08 15:33:21 +0200 | [diff] [blame] | 2205 | if (item->ri_buf[i].i_addr == NULL) { |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 2206 | xfs_alert(mp, |
Christoph Hellwig | 0c5e1ce | 2009-06-08 15:33:21 +0200 | [diff] [blame] | 2207 | "XFS: NULL dquot in %s.", __func__); |
| 2208 | goto next; |
| 2209 | } |
Jan Rekorajski | 8ec6dba | 2009-11-16 11:57:02 +0000 | [diff] [blame] | 2210 | if (item->ri_buf[i].i_len < sizeof(xfs_disk_dquot_t)) { |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 2211 | xfs_alert(mp, |
Christoph Hellwig | 0c5e1ce | 2009-06-08 15:33:21 +0200 | [diff] [blame] | 2212 | "XFS: dquot too small (%d) in %s.", |
| 2213 | item->ri_buf[i].i_len, __func__); |
| 2214 | goto next; |
| 2215 | } |
Dave Chinner | 9aede1d | 2013-10-15 09:17:52 +1100 | [diff] [blame] | 2216 | error = xfs_dqcheck(mp, item->ri_buf[i].i_addr, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2217 | -1, 0, XFS_QMOPT_DOWARN, |
| 2218 | "dquot_buf_recover"); |
Christoph Hellwig | 0c5e1ce | 2009-06-08 15:33:21 +0200 | [diff] [blame] | 2219 | if (error) |
| 2220 | goto next; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2221 | } |
Christoph Hellwig | 0c5e1ce | 2009-06-08 15:33:21 +0200 | [diff] [blame] | 2222 | |
| 2223 | memcpy(xfs_buf_offset(bp, |
Dave Chinner | c115541 | 2010-05-07 11:05:19 +1000 | [diff] [blame] | 2224 | (uint)bit << XFS_BLF_SHIFT), /* dest */ |
Christoph Hellwig | 0c5e1ce | 2009-06-08 15:33:21 +0200 | [diff] [blame] | 2225 | item->ri_buf[i].i_addr, /* source */ |
Dave Chinner | c115541 | 2010-05-07 11:05:19 +1000 | [diff] [blame] | 2226 | nbits<<XFS_BLF_SHIFT); /* length */ |
Christoph Hellwig | 0c5e1ce | 2009-06-08 15:33:21 +0200 | [diff] [blame] | 2227 | next: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2228 | i++; |
| 2229 | bit += nbits; |
| 2230 | } |
| 2231 | |
| 2232 | /* Shouldn't be any more regions */ |
| 2233 | ASSERT(i == item->ri_total); |
Christoph Hellwig | ee1a47a | 2013-04-21 14:53:46 -0500 | [diff] [blame] | 2234 | |
Dave Chinner | 67dc288 | 2014-08-04 12:43:06 +1000 | [diff] [blame] | 2235 | xlog_recover_validate_buf_type(mp, bp, buf_f); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2236 | } |
| 2237 | |
| 2238 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2239 | * Perform a dquot buffer recovery. |
Zhi Yong Wu | 8ba701e | 2013-08-12 03:15:01 +0000 | [diff] [blame] | 2240 | * Simple algorithm: if we have found a QUOTAOFF log item of the same type |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2241 | * (i.e. USR or GRP), then just toss this buffer away; don't recover it. |
| 2242 | * Else, treat it as a regular buffer and do recovery. |
Dave Chinner | ad3714b | 2014-08-04 12:59:31 +1000 | [diff] [blame] | 2243 | * |
| 2244 | * Return false if the buffer was tossed and true if we recovered the buffer to |
| 2245 | * indicate to the caller if the buffer needs writing. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2246 | */ |
Dave Chinner | ad3714b | 2014-08-04 12:59:31 +1000 | [diff] [blame] | 2247 | STATIC bool |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2248 | xlog_recover_do_dquot_buffer( |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 2249 | struct xfs_mount *mp, |
| 2250 | struct xlog *log, |
| 2251 | struct xlog_recover_item *item, |
| 2252 | struct xfs_buf *bp, |
| 2253 | struct xfs_buf_log_format *buf_f) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2254 | { |
| 2255 | uint type; |
| 2256 | |
Dave Chinner | 9abbc53 | 2010-04-13 15:06:46 +1000 | [diff] [blame] | 2257 | trace_xfs_log_recover_buf_dquot_buf(log, buf_f); |
| 2258 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2259 | /* |
| 2260 | * Filesystems are required to send in quota flags at mount time. |
| 2261 | */ |
Dave Chinner | ad3714b | 2014-08-04 12:59:31 +1000 | [diff] [blame] | 2262 | if (!mp->m_qflags) |
| 2263 | return false; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2264 | |
| 2265 | type = 0; |
Dave Chinner | c115541 | 2010-05-07 11:05:19 +1000 | [diff] [blame] | 2266 | if (buf_f->blf_flags & XFS_BLF_UDQUOT_BUF) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2267 | type |= XFS_DQ_USER; |
Dave Chinner | c115541 | 2010-05-07 11:05:19 +1000 | [diff] [blame] | 2268 | if (buf_f->blf_flags & XFS_BLF_PDQUOT_BUF) |
Nathan Scott | c8ad20f | 2005-06-21 15:38:48 +1000 | [diff] [blame] | 2269 | type |= XFS_DQ_PROJ; |
Dave Chinner | c115541 | 2010-05-07 11:05:19 +1000 | [diff] [blame] | 2270 | if (buf_f->blf_flags & XFS_BLF_GDQUOT_BUF) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2271 | type |= XFS_DQ_GROUP; |
| 2272 | /* |
| 2273 | * This type of quotas was turned off, so ignore this buffer |
| 2274 | */ |
| 2275 | if (log->l_quotaoffs_flag & type) |
Dave Chinner | ad3714b | 2014-08-04 12:59:31 +1000 | [diff] [blame] | 2276 | return false; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2277 | |
Dave Chinner | 9abbc53 | 2010-04-13 15:06:46 +1000 | [diff] [blame] | 2278 | xlog_recover_do_reg_buffer(mp, item, bp, buf_f); |
Dave Chinner | ad3714b | 2014-08-04 12:59:31 +1000 | [diff] [blame] | 2279 | return true; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2280 | } |
| 2281 | |
| 2282 | /* |
| 2283 | * This routine replays a modification made to a buffer at runtime. |
| 2284 | * There are actually two types of buffer, regular and inode, which |
| 2285 | * are handled differently. From inode buffers we only recover a |
| 2286 | * specific set of data, namely |
| 2287 | * the inode di_next_unlinked fields. This is because all other inode |
| 2288 | * data is actually logged via inode records and any data we replay |
| 2289 | * here which overlaps that may be stale. |
| 2290 | * |
| 2291 | * When meta-data buffers are freed at run time we log a buffer item |
Dave Chinner | c115541 | 2010-05-07 11:05:19 +1000 | [diff] [blame] | 2292 | * with the XFS_BLF_CANCEL bit set to indicate that previous copies |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2293 | * of the buffer in the log should not be replayed at recovery time. |
| 2294 | * This is so that if the blocks covered by the buffer are reused for |
| 2295 | * file data before we crash we don't end up replaying old, freed |
| 2296 | * meta-data into a user's file. |
| 2297 | * |
| 2298 | * To handle the cancellation of buffer log items, we make two passes |
| 2299 | * over the log during recovery. During the first we build a table of |
| 2300 | * those buffers which have been cancelled, and during the second we |
| 2301 | * only replay those buffers which do not have corresponding cancel |
Zhi Yong Wu | 34be5ff | 2013-08-07 10:11:07 +0000 | [diff] [blame] | 2302 | * records in the table. See xlog_recover_buffer_pass[1,2] above |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2303 | * for more details on the implementation of the table of cancel records. |
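| | * |
| | * As a rough, illustrative sketch of the two passes (not the exact call |
| | * chain): |
| | * |
| | *	pass 1:	if (buf_f->blf_flags & XFS_BLF_CANCEL) |
| | *			record (blf_blkno, blf_len) in the cancel table; |
| | *	pass 2:	if (xlog_check_buffer_cancelled(log, blkno, len, flags)) |
| | *			skip the buffer; |
| | *		else |
| | *			replay the logged regions and queue the buffer for |
| | *			writeback; |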
| 2304 | */ |
| 2305 | STATIC int |
Christoph Hellwig | c9f71f5 | 2010-12-01 22:06:24 +0000 | [diff] [blame] | 2306 | xlog_recover_buffer_pass2( |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 2307 | struct xlog *log, |
| 2308 | struct list_head *buffer_list, |
Dave Chinner | 50d5c8d | 2013-08-28 21:22:47 +1000 | [diff] [blame] | 2309 | struct xlog_recover_item *item, |
| 2310 | xfs_lsn_t current_lsn) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2311 | { |
Christoph Hellwig | 4e0d5f9 | 2010-06-23 18:11:15 +1000 | [diff] [blame] | 2312 | xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr; |
Christoph Hellwig | e2714bf | 2010-12-01 22:06:21 +0000 | [diff] [blame] | 2313 | xfs_mount_t *mp = log->l_mp; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2314 | xfs_buf_t *bp; |
| 2315 | int error; |
Christoph Hellwig | 6ad112b | 2009-11-24 18:02:23 +0000 | [diff] [blame] | 2316 | uint buf_flags; |
Dave Chinner | 50d5c8d | 2013-08-28 21:22:47 +1000 | [diff] [blame] | 2317 | xfs_lsn_t lsn; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2318 | |
Christoph Hellwig | c9f71f5 | 2010-12-01 22:06:24 +0000 | [diff] [blame] | 2319 | /* |
| 2320 | * In this pass we only want to recover all the buffers which have |
| 2321 | * not been cancelled and are not cancellation buffers themselves. |
| 2322 | */ |
| 2323 | if (xlog_check_buffer_cancelled(log, buf_f->blf_blkno, |
| 2324 | buf_f->blf_len, buf_f->blf_flags)) { |
| 2325 | trace_xfs_log_recover_buf_cancel(log, buf_f); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2326 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2327 | } |
Christoph Hellwig | c9f71f5 | 2010-12-01 22:06:24 +0000 | [diff] [blame] | 2328 | |
Dave Chinner | 9abbc53 | 2010-04-13 15:06:46 +1000 | [diff] [blame] | 2329 | trace_xfs_log_recover_buf_recover(log, buf_f); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2330 | |
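| | /* |
| | * Inode buffers can be read unmapped: replaying them only patches the |
| | * unlinked pointers through xfs_buf_offset(), so the buffer does not |
| | * need to be mapped into contiguous kernel address space. |
| | */ |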
Dave Chinner | a8acad7 | 2012-04-23 15:58:54 +1000 | [diff] [blame] | 2331 | buf_flags = 0; |
Dave Chinner | 611c994 | 2012-04-23 15:59:07 +1000 | [diff] [blame] | 2332 | if (buf_f->blf_flags & XFS_BLF_INODE_BUF) |
| 2333 | buf_flags |= XBF_UNMAPPED; |
Christoph Hellwig | 6ad112b | 2009-11-24 18:02:23 +0000 | [diff] [blame] | 2334 | |
Christoph Hellwig | e2714bf | 2010-12-01 22:06:21 +0000 | [diff] [blame] | 2335 | bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len, |
Dave Chinner | c3f8fc7 | 2012-11-12 22:54:01 +1100 | [diff] [blame] | 2336 | buf_flags, NULL); |
Chandra Seetharaman | ac4d688 | 2011-08-03 02:18:29 +0000 | [diff] [blame] | 2337 | if (!bp) |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 2338 | return -ENOMEM; |
Chandra Seetharaman | e570280 | 2011-08-03 02:18:34 +0000 | [diff] [blame] | 2339 | error = bp->b_error; |
Chandra Seetharaman | 5a52c2a58 | 2011-07-22 23:39:51 +0000 | [diff] [blame] | 2340 | if (error) { |
Christoph Hellwig | 901796a | 2011-10-10 16:52:49 +0000 | [diff] [blame] | 2341 | xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#1)"); |
Dave Chinner | 50d5c8d | 2013-08-28 21:22:47 +1000 | [diff] [blame] | 2342 | goto out_release; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2343 | } |
| 2344 | |
Dave Chinner | 50d5c8d | 2013-08-28 21:22:47 +1000 | [diff] [blame] | 2345 | /* |
Dave Chinner | 67dc288 | 2014-08-04 12:43:06 +1000 | [diff] [blame] | 2346 | * Recover the buffer only if we get an LSN from it and it's less than |
Dave Chinner | 50d5c8d | 2013-08-28 21:22:47 +1000 | [diff] [blame] | 2347 | * the lsn of the transaction we are replaying. |
Dave Chinner | 67dc288 | 2014-08-04 12:43:06 +1000 | [diff] [blame] | 2348 | * |
| 2349 | * Note that we have to be extremely careful of readahead here. |
| 2350 | * Readahead does not attach verifiers to the buffers, so if we do not |
| 2351 | * actually replay anything after readahead because the LSN we found in |
| 2352 | * the buffer is more recent than the current transaction, then we need |
| 2353 | * to attach the verifier directly. Failing to do so means that future |
| 2354 | * recovery actions (e.g. EFI and unlinked list recovery) may operate on |
| 2355 | * the buffers without a verifier attached. This can leave blocks on |
| 2356 | * disk with the correct content but a stale |
| 2357 | * CRC. |
| 2358 | * |
| 2359 | * It is safe to assume these clean buffers are currently up to date. |
| 2360 | * If the buffer is dirtied by a later transaction being replayed, then |
| 2361 | * the verifier will be reset to match whatever recover turns that |
| 2362 | * buffer into. |
Dave Chinner | 50d5c8d | 2013-08-28 21:22:47 +1000 | [diff] [blame] | 2363 | */ |
| 2364 | lsn = xlog_recover_get_buf_lsn(mp, bp); |
Dave Chinner | 67dc288 | 2014-08-04 12:43:06 +1000 | [diff] [blame] | 2365 | if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) { |
| 2366 | xlog_recover_validate_buf_type(mp, bp, buf_f); |
Dave Chinner | 50d5c8d | 2013-08-28 21:22:47 +1000 | [diff] [blame] | 2367 | goto out_release; |
Dave Chinner | 67dc288 | 2014-08-04 12:43:06 +1000 | [diff] [blame] | 2368 | } |
Dave Chinner | 50d5c8d | 2013-08-28 21:22:47 +1000 | [diff] [blame] | 2369 | |
Christoph Hellwig | e2714bf | 2010-12-01 22:06:21 +0000 | [diff] [blame] | 2370 | if (buf_f->blf_flags & XFS_BLF_INODE_BUF) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2371 | error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f); |
Dave Chinner | ad3714b | 2014-08-04 12:59:31 +1000 | [diff] [blame] | 2372 | if (error) |
| 2373 | goto out_release; |
Christoph Hellwig | e2714bf | 2010-12-01 22:06:21 +0000 | [diff] [blame] | 2374 | } else if (buf_f->blf_flags & |
Dave Chinner | c115541 | 2010-05-07 11:05:19 +1000 | [diff] [blame] | 2375 | (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) { |
Dave Chinner | ad3714b | 2014-08-04 12:59:31 +1000 | [diff] [blame] | 2376 | bool dirty; |
| 2377 | |
| 2378 | dirty = xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f); |
| 2379 | if (!dirty) |
| 2380 | goto out_release; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2381 | } else { |
Dave Chinner | 9abbc53 | 2010-04-13 15:06:46 +1000 | [diff] [blame] | 2382 | xlog_recover_do_reg_buffer(mp, item, bp, buf_f); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2383 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2384 | |
| 2385 | /* |
| 2386 | * Perform delayed write on the buffer. Asynchronous writes will be |
| 2387 | * slower when taking into account all the buffers to be flushed. |
| 2388 | * |
| 2389 | * Also make sure that only inode buffers with good sizes stay in |
| 2390 | * the buffer cache. The kernel moves inodes in buffers of 1 block |
Jie Liu | 0f49efd | 2013-12-13 15:51:48 +1100 | [diff] [blame] | 2391 | * or mp->m_inode_cluster_size bytes, whichever is bigger. The inode |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2392 | * buffers in the log can be a different size if the log was generated |
| 2393 | * by an older kernel using unclustered inode buffers or a newer kernel |
| 2394 | * running with a different inode cluster size. Regardless, if |
Jie Liu | 0f49efd | 2013-12-13 15:51:48 +1100 | [diff] [blame] | 2395 | * the inode buffer size isn't MAX(blocksize, mp->m_inode_cluster_size) |
| 2396 | * for *our* value of mp->m_inode_cluster_size, then we need to keep |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2397 | * the buffer out of the buffer cache so that the buffer won't |
| 2398 | * overlap with future reads of those inodes. |
| 2399 | */ |
| 2400 | if (XFS_DINODE_MAGIC == |
Christoph Hellwig | b53e675 | 2007-10-12 10:59:34 +1000 | [diff] [blame] | 2401 | be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) && |
Dave Chinner | aa0e883 | 2012-04-23 15:58:52 +1000 | [diff] [blame] | 2402 | (BBTOB(bp->b_io_length) != MAX(log->l_mp->m_sb.sb_blocksize, |
Jie Liu | 0f49efd | 2013-12-13 15:51:48 +1100 | [diff] [blame] | 2403 | (__uint32_t)log->l_mp->m_inode_cluster_size))) { |
Christoph Hellwig | c867cb6 | 2011-10-10 16:52:46 +0000 | [diff] [blame] | 2404 | xfs_buf_stale(bp); |
Christoph Hellwig | c2b006c | 2011-08-23 08:28:07 +0000 | [diff] [blame] | 2405 | error = xfs_bwrite(bp); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2406 | } else { |
Dave Chinner | ebad861 | 2010-09-22 10:47:20 +1000 | [diff] [blame] | 2407 | ASSERT(bp->b_target->bt_mount == mp); |
Christoph Hellwig | cb669ca | 2011-07-13 13:43:49 +0200 | [diff] [blame] | 2408 | bp->b_iodone = xlog_recover_iodone; |
Christoph Hellwig | 43ff212 | 2012-04-23 15:58:39 +1000 | [diff] [blame] | 2409 | xfs_buf_delwri_queue(bp, buffer_list); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2410 | } |
| 2411 | |
Dave Chinner | 50d5c8d | 2013-08-28 21:22:47 +1000 | [diff] [blame] | 2412 | out_release: |
Christoph Hellwig | c2b006c | 2011-08-23 08:28:07 +0000 | [diff] [blame] | 2413 | xfs_buf_relse(bp); |
| 2414 | return error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2415 | } |
| 2416 | |
Dave Chinner | 638f4416 | 2013-08-30 10:23:45 +1000 | [diff] [blame] | 2417 | /* |
| 2418 | * Inode fork owner changes |
| 2419 | * |
| 2420 | * If we have been told that we have to reparent the inode fork, it's because an |
| 2421 | * extent swap operation on a CRC enabled filesystem has been done and we are |
| 2422 | * replaying it. We need to walk the BMBT of the appropriate fork and change the |
| 2423 | * owner recorded in each of its blocks. |
| 2424 | * |
| 2425 | * The complexity here is that we don't have an inode context to work with, so |
| 2426 | * after we've replayed the inode we need to instantiate one. This is where the |
| 2427 | * fun begins. |
| 2428 | * |
| 2429 | * We are in the middle of log recovery, so we can't run transactions. That |
| 2430 | * means we cannot use cache coherent inode instantiation via xfs_iget(), as |
| 2431 | * that will result in the corresponding iput() running the inode through |
| 2432 | * xfs_inactive(). If we've just replayed an inode core that changes the link |
| 2433 | * count to zero (i.e. it's been unlinked), then xfs_inactive() will run |
| 2434 | * transactions (bad!). |
| 2435 | * |
| 2436 | * So, to avoid this, we instantiate an inode directly from the inode core we've |
| 2437 | * just recovered. We have the buffer still locked, and all we really need to |
| 2438 | * instantiate is the inode core and the forks being modified. We can do this |
| 2439 | * manually, then run the inode btree owner change, and then tear down the |
| 2440 | * xfs_inode without having to run any transactions at all. |
| 2441 | * |
| 2442 | * Also, because we don't have a transaction context available here but still |
| 2443 | * need to gather all the buffers we modify for writeback, we pass the |
| 2444 | * buffer_list in for the operation to use instead. |
| 2445 | */ |
| 2446 | |
| 2447 | STATIC int |
| 2448 | xfs_recover_inode_owner_change( |
| 2449 | struct xfs_mount *mp, |
| 2450 | struct xfs_dinode *dip, |
| 2451 | struct xfs_inode_log_format *in_f, |
| 2452 | struct list_head *buffer_list) |
| 2453 | { |
| 2454 | struct xfs_inode *ip; |
| 2455 | int error; |
| 2456 | |
| 2457 | ASSERT(in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER)); |
| 2458 | |
| 2459 | ip = xfs_inode_alloc(mp, in_f->ilf_ino); |
| 2460 | if (!ip) |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 2461 | return -ENOMEM; |
Dave Chinner | 638f4416 | 2013-08-30 10:23:45 +1000 | [diff] [blame] | 2462 | |
| 2463 | /* instantiate the inode */ |
| 2464 | xfs_dinode_from_disk(&ip->i_d, dip); |
| 2465 | ASSERT(ip->i_d.di_version >= 3); |
| 2466 | |
| 2467 | error = xfs_iformat_fork(ip, dip); |
| 2468 | if (error) |
| 2469 | goto out_free_ip; |
| 2470 | |
| 2471 | |
| 2472 | if (in_f->ilf_fields & XFS_ILOG_DOWNER) { |
| 2473 | ASSERT(in_f->ilf_fields & XFS_ILOG_DBROOT); |
| 2474 | error = xfs_bmbt_change_owner(NULL, ip, XFS_DATA_FORK, |
| 2475 | ip->i_ino, buffer_list); |
| 2476 | if (error) |
| 2477 | goto out_free_ip; |
| 2478 | } |
| 2479 | |
| 2480 | if (in_f->ilf_fields & XFS_ILOG_AOWNER) { |
| 2481 | ASSERT(in_f->ilf_fields & XFS_ILOG_ABROOT); |
| 2482 | error = xfs_bmbt_change_owner(NULL, ip, XFS_ATTR_FORK, |
| 2483 | ip->i_ino, buffer_list); |
| 2484 | if (error) |
| 2485 | goto out_free_ip; |
| 2486 | } |
| 2487 | |
| 2488 | out_free_ip: |
| 2489 | xfs_inode_free(ip); |
| 2490 | return error; |
| 2491 | } |
| 2492 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2493 | STATIC int |
Christoph Hellwig | c9f71f5 | 2010-12-01 22:06:24 +0000 | [diff] [blame] | 2494 | xlog_recover_inode_pass2( |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 2495 | struct xlog *log, |
| 2496 | struct list_head *buffer_list, |
Dave Chinner | 50d5c8d | 2013-08-28 21:22:47 +1000 | [diff] [blame] | 2497 | struct xlog_recover_item *item, |
| 2498 | xfs_lsn_t current_lsn) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2499 | { |
| 2500 | xfs_inode_log_format_t *in_f; |
Christoph Hellwig | c9f71f5 | 2010-12-01 22:06:24 +0000 | [diff] [blame] | 2501 | xfs_mount_t *mp = log->l_mp; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2502 | xfs_buf_t *bp; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2503 | xfs_dinode_t *dip; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2504 | int len; |
Christoph Hellwig | b2a922c | 2015-06-22 09:45:10 +1000 | [diff] [blame] | 2505 | char *src; |
| 2506 | char *dest; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2507 | int error; |
| 2508 | int attr_index; |
| 2509 | uint fields; |
Christoph Hellwig | 347d1c0 | 2007-08-28 13:57:51 +1000 | [diff] [blame] | 2510 | xfs_icdinode_t *dicp; |
Christoph Hellwig | 93848a9 | 2013-04-03 16:11:17 +1100 | [diff] [blame] | 2511 | uint isize; |
Tim Shimmin | 6d192a9 | 2006-06-09 14:55:38 +1000 | [diff] [blame] | 2512 | int need_free = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2513 | |
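| | /* |
| | * The logged inode format structure has differed in size across kernels |
| | * (32 bit vs 64 bit alignment), so if the region is not the size of the |
| | * current structure, convert it into a native xfs_inode_log_format_t |
| | * before using it. |
| | */ |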
Tim Shimmin | 6d192a9 | 2006-06-09 14:55:38 +1000 | [diff] [blame] | 2514 | if (item->ri_buf[0].i_len == sizeof(xfs_inode_log_format_t)) { |
Christoph Hellwig | 4e0d5f9 | 2010-06-23 18:11:15 +1000 | [diff] [blame] | 2515 | in_f = item->ri_buf[0].i_addr; |
Tim Shimmin | 6d192a9 | 2006-06-09 14:55:38 +1000 | [diff] [blame] | 2516 | } else { |
Christoph Hellwig | 4e0d5f9 | 2010-06-23 18:11:15 +1000 | [diff] [blame] | 2517 | in_f = kmem_alloc(sizeof(xfs_inode_log_format_t), KM_SLEEP); |
Tim Shimmin | 6d192a9 | 2006-06-09 14:55:38 +1000 | [diff] [blame] | 2518 | need_free = 1; |
| 2519 | error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f); |
| 2520 | if (error) |
| 2521 | goto error; |
| 2522 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2523 | |
| 2524 | /* |
| 2525 | * Inode buffers can be freed, look out for it, |
| 2526 | * and do not replay the inode. |
| 2527 | */ |
Christoph Hellwig | a194189 | 2008-11-28 14:23:40 +1100 | [diff] [blame] | 2528 | if (xlog_check_buffer_cancelled(log, in_f->ilf_blkno, |
| 2529 | in_f->ilf_len, 0)) { |
Tim Shimmin | 6d192a9 | 2006-06-09 14:55:38 +1000 | [diff] [blame] | 2530 | error = 0; |
Dave Chinner | 9abbc53 | 2010-04-13 15:06:46 +1000 | [diff] [blame] | 2531 | trace_xfs_log_recover_inode_cancel(log, in_f); |
Tim Shimmin | 6d192a9 | 2006-06-09 14:55:38 +1000 | [diff] [blame] | 2532 | goto error; |
| 2533 | } |
Dave Chinner | 9abbc53 | 2010-04-13 15:06:46 +1000 | [diff] [blame] | 2534 | trace_xfs_log_recover_inode_recover(log, in_f); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2535 | |
Dave Chinner | c3f8fc7 | 2012-11-12 22:54:01 +1100 | [diff] [blame] | 2536 | bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len, 0, |
Christoph Hellwig | 93848a9 | 2013-04-03 16:11:17 +1100 | [diff] [blame] | 2537 | &xfs_inode_buf_ops); |
Chandra Seetharaman | ac4d688 | 2011-08-03 02:18:29 +0000 | [diff] [blame] | 2538 | if (!bp) { |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 2539 | error = -ENOMEM; |
Chandra Seetharaman | ac4d688 | 2011-08-03 02:18:29 +0000 | [diff] [blame] | 2540 | goto error; |
| 2541 | } |
Chandra Seetharaman | e570280 | 2011-08-03 02:18:34 +0000 | [diff] [blame] | 2542 | error = bp->b_error; |
Chandra Seetharaman | 5a52c2a58 | 2011-07-22 23:39:51 +0000 | [diff] [blame] | 2543 | if (error) { |
Christoph Hellwig | 901796a | 2011-10-10 16:52:49 +0000 | [diff] [blame] | 2544 | xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#2)"); |
Dave Chinner | 638f4416 | 2013-08-30 10:23:45 +1000 | [diff] [blame] | 2545 | goto out_release; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2546 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2547 | ASSERT(in_f->ilf_fields & XFS_ILOG_CORE); |
Christoph Hellwig | 88ee2df | 2015-06-22 09:44:29 +1000 | [diff] [blame] | 2548 | dip = xfs_buf_offset(bp, in_f->ilf_boffset); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2549 | |
| 2550 | /* |
| 2551 | * Make sure the place we're flushing out to really looks |
| 2552 | * like an inode! |
| 2553 | */ |
Christoph Hellwig | 69ef921 | 2011-07-08 14:36:05 +0200 | [diff] [blame] | 2554 | if (unlikely(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))) { |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 2555 | xfs_alert(mp, |
| 2556 | "%s: Bad inode magic number, dip = 0x%p, dino bp = 0x%p, ino = %Ld", |
| 2557 | __func__, dip, bp, in_f->ilf_ino); |
Christoph Hellwig | c9f71f5 | 2010-12-01 22:06:24 +0000 | [diff] [blame] | 2558 | XFS_ERROR_REPORT("xlog_recover_inode_pass2(1)", |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2559 | XFS_ERRLEVEL_LOW, mp); |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 2560 | error = -EFSCORRUPTED; |
Dave Chinner | 638f4416 | 2013-08-30 10:23:45 +1000 | [diff] [blame] | 2561 | goto out_release; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2562 | } |
Christoph Hellwig | 4e0d5f9 | 2010-06-23 18:11:15 +1000 | [diff] [blame] | 2563 | dicp = item->ri_buf[1].i_addr; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2564 | if (unlikely(dicp->di_magic != XFS_DINODE_MAGIC)) { |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 2565 | xfs_alert(mp, |
| 2566 | "%s: Bad inode log record, rec ptr 0x%p, ino %Ld", |
| 2567 | __func__, item, in_f->ilf_ino); |
Christoph Hellwig | c9f71f5 | 2010-12-01 22:06:24 +0000 | [diff] [blame] | 2568 | XFS_ERROR_REPORT("xlog_recover_inode_pass2(2)", |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2569 | XFS_ERRLEVEL_LOW, mp); |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 2570 | error = -EFSCORRUPTED; |
Dave Chinner | 638f4416 | 2013-08-30 10:23:45 +1000 | [diff] [blame] | 2571 | goto out_release; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2572 | } |
| 2573 | |
Dave Chinner | e60896d | 2013-07-24 15:47:30 +1000 | [diff] [blame] | 2574 | /* |
Dave Chinner | 50d5c8d | 2013-08-28 21:22:47 +1000 | [diff] [blame] | 2575 | * If the inode has an LSN in it, recover the inode only if it's less |
Dave Chinner | 638f4416 | 2013-08-30 10:23:45 +1000 | [diff] [blame] | 2576 | * than the lsn of the transaction we are replaying. Note: we still |
| 2577 | * need to replay an owner change even if the inode is more recent |
| 2578 | * than the transaction, as there is no guarantee that all the btree |
| 2579 | * blocks are also more recent than this transaction. |
Dave Chinner | 50d5c8d | 2013-08-28 21:22:47 +1000 | [diff] [blame] | 2580 | */ |
| 2581 | if (dip->di_version >= 3) { |
| 2582 | xfs_lsn_t lsn = be64_to_cpu(dip->di_lsn); |
| 2583 | |
| 2584 | if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) { |
| 2585 | trace_xfs_log_recover_inode_skip(log, in_f); |
| 2586 | error = 0; |
Dave Chinner | 638f4416 | 2013-08-30 10:23:45 +1000 | [diff] [blame] | 2587 | goto out_owner_change; |
Dave Chinner | 50d5c8d | 2013-08-28 21:22:47 +1000 | [diff] [blame] | 2588 | } |
| 2589 | } |
| 2590 | |
| 2591 | /* |
Dave Chinner | e60896d | 2013-07-24 15:47:30 +1000 | [diff] [blame] | 2592 | * di_flushiter is only valid for v1/2 inodes. All changes for v3 inodes |
| 2593 | * are transactional and if ordering is necessary we can determine that |
| 2594 | * more accurately by the LSN field in the V3 inode core. Don't trust |
| 2595 | * the inode versions as we might be changing them here - use the |
| 2596 | * superblock flag to determine whether we need to look at di_flushiter |
| 2597 | * to skip replay when the on-disk inode is newer than the log one. |
| 2598 | */ |
| 2599 | if (!xfs_sb_version_hascrc(&mp->m_sb) && |
| 2600 | dicp->di_flushiter < be16_to_cpu(dip->di_flushiter)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2601 | /* |
| 2602 | * Deal with the wrap case, DI_MAX_FLUSH is less |
| 2603 | * than smaller numbers |
| 2604 | */ |
Christoph Hellwig | 81591fe | 2008-11-28 14:23:39 +1100 | [diff] [blame] | 2605 | if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH && |
Christoph Hellwig | 347d1c0 | 2007-08-28 13:57:51 +1000 | [diff] [blame] | 2606 | dicp->di_flushiter < (DI_MAX_FLUSH >> 1)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2607 | /* do nothing */ |
| 2608 | } else { |
Dave Chinner | 9abbc53 | 2010-04-13 15:06:46 +1000 | [diff] [blame] | 2609 | trace_xfs_log_recover_inode_skip(log, in_f); |
Tim Shimmin | 6d192a9 | 2006-06-09 14:55:38 +1000 | [diff] [blame] | 2610 | error = 0; |
Dave Chinner | 638f4416 | 2013-08-30 10:23:45 +1000 | [diff] [blame] | 2611 | goto out_release; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2612 | } |
| 2613 | } |
Dave Chinner | e60896d | 2013-07-24 15:47:30 +1000 | [diff] [blame] | 2614 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2615 | /* Take the opportunity to reset the flush iteration count */ |
| 2616 | dicp->di_flushiter = 0; |
| 2617 | |
Al Viro | abbede1 | 2011-07-26 02:31:30 -0400 | [diff] [blame] | 2618 | if (unlikely(S_ISREG(dicp->di_mode))) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2619 | if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) && |
| 2620 | (dicp->di_format != XFS_DINODE_FMT_BTREE)) { |
Christoph Hellwig | c9f71f5 | 2010-12-01 22:06:24 +0000 | [diff] [blame] | 2621 | XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)", |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2622 | XFS_ERRLEVEL_LOW, mp, dicp); |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 2623 | xfs_alert(mp, |
| 2624 | "%s: Bad regular inode log record, rec ptr 0x%p, " |
| 2625 | "ino ptr = 0x%p, ino bp = 0x%p, ino %Ld", |
| 2626 | __func__, item, dip, bp, in_f->ilf_ino); |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 2627 | error = -EFSCORRUPTED; |
Dave Chinner | 638f4416 | 2013-08-30 10:23:45 +1000 | [diff] [blame] | 2628 | goto out_release; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2629 | } |
Al Viro | abbede1 | 2011-07-26 02:31:30 -0400 | [diff] [blame] | 2630 | } else if (unlikely(S_ISDIR(dicp->di_mode))) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2631 | if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) && |
| 2632 | (dicp->di_format != XFS_DINODE_FMT_BTREE) && |
| 2633 | (dicp->di_format != XFS_DINODE_FMT_LOCAL)) { |
Christoph Hellwig | c9f71f5 | 2010-12-01 22:06:24 +0000 | [diff] [blame] | 2634 | XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(4)", |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2635 | XFS_ERRLEVEL_LOW, mp, dicp); |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 2636 | xfs_alert(mp, |
| 2637 | "%s: Bad dir inode log record, rec ptr 0x%p, " |
| 2638 | "ino ptr = 0x%p, ino bp = 0x%p, ino %Ld", |
| 2639 | __func__, item, dip, bp, in_f->ilf_ino); |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 2640 | error = -EFSCORRUPTED; |
Dave Chinner | 638f4416 | 2013-08-30 10:23:45 +1000 | [diff] [blame] | 2641 | goto out_release; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2642 | } |
| 2643 | } |
| 2644 | if (unlikely(dicp->di_nextents + dicp->di_anextents > dicp->di_nblocks)){ |
Christoph Hellwig | c9f71f5 | 2010-12-01 22:06:24 +0000 | [diff] [blame] | 2645 | XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(5)", |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2646 | XFS_ERRLEVEL_LOW, mp, dicp); |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 2647 | xfs_alert(mp, |
| 2648 | "%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, " |
| 2649 | "dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld", |
| 2650 | __func__, item, dip, bp, in_f->ilf_ino, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2651 | dicp->di_nextents + dicp->di_anextents, |
| 2652 | dicp->di_nblocks); |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 2653 | error = -EFSCORRUPTED; |
Dave Chinner | 638f4416 | 2013-08-30 10:23:45 +1000 | [diff] [blame] | 2654 | goto out_release; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2655 | } |
| 2656 | if (unlikely(dicp->di_forkoff > mp->m_sb.sb_inodesize)) { |
Christoph Hellwig | c9f71f5 | 2010-12-01 22:06:24 +0000 | [diff] [blame] | 2657 | XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(6)", |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2658 | XFS_ERRLEVEL_LOW, mp, dicp); |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 2659 | xfs_alert(mp, |
| 2660 | "%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, " |
| 2661 | "dino bp 0x%p, ino %Ld, forkoff 0x%x", __func__, |
Christoph Hellwig | c9f71f5 | 2010-12-01 22:06:24 +0000 | [diff] [blame] | 2662 | item, dip, bp, in_f->ilf_ino, dicp->di_forkoff); |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 2663 | error = -EFSCORRUPTED; |
Dave Chinner | 638f4416 | 2013-08-30 10:23:45 +1000 | [diff] [blame] | 2664 | goto out_release; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2665 | } |
Christoph Hellwig | 93848a9 | 2013-04-03 16:11:17 +1100 | [diff] [blame] | 2666 | isize = xfs_icdinode_size(dicp->di_version); |
| 2667 | if (unlikely(item->ri_buf[1].i_len > isize)) { |
Christoph Hellwig | c9f71f5 | 2010-12-01 22:06:24 +0000 | [diff] [blame] | 2668 | XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)", |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2669 | XFS_ERRLEVEL_LOW, mp, dicp); |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 2670 | xfs_alert(mp, |
| 2671 | "%s: Bad inode log record length %d, rec ptr 0x%p", |
| 2672 | __func__, item->ri_buf[1].i_len, item); |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 2673 | error = -EFSCORRUPTED; |
Dave Chinner | 638f4416 | 2013-08-30 10:23:45 +1000 | [diff] [blame] | 2674 | goto out_release; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2675 | } |
| 2676 | |
| 2677 | /* The core is in in-core format */ |
Christoph Hellwig | 93848a9 | 2013-04-03 16:11:17 +1100 | [diff] [blame] | 2678 | xfs_dinode_to_disk(dip, dicp); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2679 | |
| 2680 | /* the rest is in on-disk format */ |
Christoph Hellwig | 93848a9 | 2013-04-03 16:11:17 +1100 | [diff] [blame] | 2681 | if (item->ri_buf[1].i_len > isize) { |
| 2682 | memcpy((char *)dip + isize, |
| 2683 | item->ri_buf[1].i_addr + isize, |
| 2684 | item->ri_buf[1].i_len - isize); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2685 | } |
| 2686 | |
| 2687 | fields = in_f->ilf_fields; |
| 2688 | switch (fields & (XFS_ILOG_DEV | XFS_ILOG_UUID)) { |
| 2689 | case XFS_ILOG_DEV: |
Christoph Hellwig | 81591fe | 2008-11-28 14:23:39 +1100 | [diff] [blame] | 2690 | xfs_dinode_put_rdev(dip, in_f->ilf_u.ilfu_rdev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2691 | break; |
| 2692 | case XFS_ILOG_UUID: |
Christoph Hellwig | 81591fe | 2008-11-28 14:23:39 +1100 | [diff] [blame] | 2693 | memcpy(XFS_DFORK_DPTR(dip), |
| 2694 | &in_f->ilf_u.ilfu_uuid, |
| 2695 | sizeof(uuid_t)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2696 | break; |
| 2697 | } |
| 2698 | |
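| | /* |
| | * ilf_size counts the logged regions for this inode item: the format |
| | * structure, the inode core, and optionally the data and/or attr fork. |
| | * A size of 2 means only the core was logged, so there are no fork |
| | * regions left to copy. |
| | */ |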
| 2699 | if (in_f->ilf_size == 2) |
Dave Chinner | 638f4416 | 2013-08-30 10:23:45 +1000 | [diff] [blame] | 2700 | goto out_owner_change; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2701 | len = item->ri_buf[2].i_len; |
| 2702 | src = item->ri_buf[2].i_addr; |
| 2703 | ASSERT(in_f->ilf_size <= 4); |
| 2704 | ASSERT((in_f->ilf_size == 3) || (fields & XFS_ILOG_AFORK)); |
| 2705 | ASSERT(!(fields & XFS_ILOG_DFORK) || |
| 2706 | (len == in_f->ilf_dsize)); |
| 2707 | |
| 2708 | switch (fields & XFS_ILOG_DFORK) { |
| 2709 | case XFS_ILOG_DDATA: |
| 2710 | case XFS_ILOG_DEXT: |
Christoph Hellwig | 81591fe | 2008-11-28 14:23:39 +1100 | [diff] [blame] | 2711 | memcpy(XFS_DFORK_DPTR(dip), src, len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2712 | break; |
| 2713 | |
| 2714 | case XFS_ILOG_DBROOT: |
Christoph Hellwig | 7cc95a8 | 2008-10-30 17:14:34 +1100 | [diff] [blame] | 2715 | xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, len, |
Christoph Hellwig | 81591fe | 2008-11-28 14:23:39 +1100 | [diff] [blame] | 2716 | (xfs_bmdr_block_t *)XFS_DFORK_DPTR(dip), |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2717 | XFS_DFORK_DSIZE(dip, mp)); |
| 2718 | break; |
| 2719 | |
| 2720 | default: |
| 2721 | /* |
| 2722 | * There are no data fork flags set. |
| 2723 | */ |
| 2724 | ASSERT((fields & XFS_ILOG_DFORK) == 0); |
| 2725 | break; |
| 2726 | } |
| 2727 | |
| 2728 | /* |
| 2729 | * If we logged any attribute data, recover it. There may or |
| 2730 | * may not have been any other non-core data logged in this |
| 2731 | * transaction. |
| 2732 | */ |
| 2733 | if (in_f->ilf_fields & XFS_ILOG_AFORK) { |
| 2734 | if (in_f->ilf_fields & XFS_ILOG_DFORK) { |
| 2735 | attr_index = 3; |
| 2736 | } else { |
| 2737 | attr_index = 2; |
| 2738 | } |
| 2739 | len = item->ri_buf[attr_index].i_len; |
| 2740 | src = item->ri_buf[attr_index].i_addr; |
| 2741 | ASSERT(len == in_f->ilf_asize); |
| 2742 | |
| 2743 | switch (in_f->ilf_fields & XFS_ILOG_AFORK) { |
| 2744 | case XFS_ILOG_ADATA: |
| 2745 | case XFS_ILOG_AEXT: |
| 2746 | dest = XFS_DFORK_APTR(dip); |
| 2747 | ASSERT(len <= XFS_DFORK_ASIZE(dip, mp)); |
| 2748 | memcpy(dest, src, len); |
| 2749 | break; |
| 2750 | |
| 2751 | case XFS_ILOG_ABROOT: |
| 2752 | dest = XFS_DFORK_APTR(dip); |
Christoph Hellwig | 7cc95a8 | 2008-10-30 17:14:34 +1100 | [diff] [blame] | 2753 | xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, |
| 2754 | len, (xfs_bmdr_block_t*)dest, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2755 | XFS_DFORK_ASIZE(dip, mp)); |
| 2756 | break; |
| 2757 | |
| 2758 | default: |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 2759 | xfs_warn(log->l_mp, "%s: Invalid flag", __func__); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2760 | ASSERT(0); |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 2761 | error = -EIO; |
Dave Chinner | 638f4416 | 2013-08-30 10:23:45 +1000 | [diff] [blame] | 2762 | goto out_release; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2763 | } |
| 2764 | } |
| 2765 | |
Dave Chinner | 638f4416 | 2013-08-30 10:23:45 +1000 | [diff] [blame] | 2766 | out_owner_change: |
| 2767 | if (in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER)) |
| 2768 | error = xfs_recover_inode_owner_change(mp, dip, in_f, |
| 2769 | buffer_list); |
Christoph Hellwig | 93848a9 | 2013-04-03 16:11:17 +1100 | [diff] [blame] | 2770 | /* re-generate the checksum. */ |
| 2771 | xfs_dinode_calc_crc(log->l_mp, dip); |
| 2772 | |
Dave Chinner | ebad861 | 2010-09-22 10:47:20 +1000 | [diff] [blame] | 2773 | ASSERT(bp->b_target->bt_mount == mp); |
Christoph Hellwig | cb669ca | 2011-07-13 13:43:49 +0200 | [diff] [blame] | 2774 | bp->b_iodone = xlog_recover_iodone; |
Christoph Hellwig | 43ff212 | 2012-04-23 15:58:39 +1000 | [diff] [blame] | 2775 | xfs_buf_delwri_queue(bp, buffer_list); |
Dave Chinner | 50d5c8d | 2013-08-28 21:22:47 +1000 | [diff] [blame] | 2776 | |
| 2777 | out_release: |
Christoph Hellwig | 61551f1 | 2011-08-23 08:28:06 +0000 | [diff] [blame] | 2778 | xfs_buf_relse(bp); |
Tim Shimmin | 6d192a9 | 2006-06-09 14:55:38 +1000 | [diff] [blame] | 2779 | error: |
| 2780 | if (need_free) |
Denys Vlasenko | f0e2d93 | 2008-05-19 16:31:57 +1000 | [diff] [blame] | 2781 | kmem_free(in_f); |
Eric Sandeen | b474c7a | 2014-06-22 15:04:54 +1000 | [diff] [blame] | 2782 | return error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2783 | } |
| 2784 | |
| 2785 | /* |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 2786 | * Recover QUOTAOFF records. We simply make a note of them in the xlog |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2787 | * structure, so that we know not to do any dquot item or dquot buffer recovery |
| 2788 | * of that type. |
| 2789 | */ |
| 2790 | STATIC int |
Christoph Hellwig | c9f71f5 | 2010-12-01 22:06:24 +0000 | [diff] [blame] | 2791 | xlog_recover_quotaoff_pass1( |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 2792 | struct xlog *log, |
| 2793 | struct xlog_recover_item *item) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2794 | { |
Christoph Hellwig | c9f71f5 | 2010-12-01 22:06:24 +0000 | [diff] [blame] | 2795 | xfs_qoff_logformat_t *qoff_f = item->ri_buf[0].i_addr; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2796 | ASSERT(qoff_f); |
| 2797 | |
| 2798 | /* |
| 2799 | * The logitem format's flag tells us if this was user quotaoff, |
Nathan Scott | 77a7cce | 2006-01-11 15:35:57 +1100 | [diff] [blame] | 2800 | * group/project quotaoff or both. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2801 | */ |
| 2802 | if (qoff_f->qf_flags & XFS_UQUOTA_ACCT) |
| 2803 | log->l_quotaoffs_flag |= XFS_DQ_USER; |
Nathan Scott | 77a7cce | 2006-01-11 15:35:57 +1100 | [diff] [blame] | 2804 | if (qoff_f->qf_flags & XFS_PQUOTA_ACCT) |
| 2805 | log->l_quotaoffs_flag |= XFS_DQ_PROJ; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2806 | if (qoff_f->qf_flags & XFS_GQUOTA_ACCT) |
| 2807 | log->l_quotaoffs_flag |= XFS_DQ_GROUP; |
| 2808 | |
Eric Sandeen | d99831f | 2014-06-22 15:03:54 +1000 | [diff] [blame] | 2809 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2810 | } |
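|  |  |
|  | /* |
|  |  * For example, if pass 1 sees a QUOTAOFF record with XFS_UQUOTA_ACCT set, |
|  |  * XFS_DQ_USER is noted in l_quotaoffs_flag, and any user dquot items seen |
|  |  * later in pass 2 are skipped by the l_quotaoffs_flag checks in |
|  |  * xlog_recover_dquot_pass2() and xlog_recover_dquot_ra_pass2(). |
|  |  */ |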
| 2811 | |
| 2812 | /* |
| 2813 | * Recover a dquot record |
| 2814 | */ |
| 2815 | STATIC int |
Christoph Hellwig | c9f71f5 | 2010-12-01 22:06:24 +0000 | [diff] [blame] | 2816 | xlog_recover_dquot_pass2( |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 2817 | struct xlog *log, |
| 2818 | struct list_head *buffer_list, |
Dave Chinner | 50d5c8d | 2013-08-28 21:22:47 +1000 | [diff] [blame] | 2819 | struct xlog_recover_item *item, |
| 2820 | xfs_lsn_t current_lsn) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2821 | { |
Christoph Hellwig | c9f71f5 | 2010-12-01 22:06:24 +0000 | [diff] [blame] | 2822 | xfs_mount_t *mp = log->l_mp; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2823 | xfs_buf_t *bp; |
| 2824 | struct xfs_disk_dquot *ddq, *recddq; |
| 2825 | int error; |
| 2826 | xfs_dq_logformat_t *dq_f; |
| 2827 | uint type; |
| 2828 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2829 | |
| 2830 | /* |
| 2831 | * Filesystems are required to send in quota flags at mount time. |
| 2832 | */ |
| 2833 | if (mp->m_qflags == 0) |
Eric Sandeen | d99831f | 2014-06-22 15:03:54 +1000 | [diff] [blame] | 2834 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2835 | |
Christoph Hellwig | 4e0d5f9 | 2010-06-23 18:11:15 +1000 | [diff] [blame] | 2836 | recddq = item->ri_buf[1].i_addr; |
| 2837 | if (recddq == NULL) { |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 2838 | xfs_alert(log->l_mp, "NULL dquot in %s.", __func__); |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 2839 | return -EIO; |
Christoph Hellwig | 0c5e1ce | 2009-06-08 15:33:21 +0200 | [diff] [blame] | 2840 | } |
Jan Rekorajski | 8ec6dba | 2009-11-16 11:57:02 +0000 | [diff] [blame] | 2841 | if (item->ri_buf[1].i_len < sizeof(xfs_disk_dquot_t)) { |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 2842 | xfs_alert(log->l_mp, "dquot too small (%d) in %s.", |
Christoph Hellwig | 0c5e1ce | 2009-06-08 15:33:21 +0200 | [diff] [blame] | 2843 | item->ri_buf[1].i_len, __func__); |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 2844 | return -EIO; |
Christoph Hellwig | 0c5e1ce | 2009-06-08 15:33:21 +0200 | [diff] [blame] | 2845 | } |
| 2846 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2847 | /* |
| 2848 | * This type of quotas was turned off, so ignore this record. |
| 2849 | */ |
Christoph Hellwig | b53e675 | 2007-10-12 10:59:34 +1000 | [diff] [blame] | 2850 | type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2851 | ASSERT(type); |
| 2852 | if (log->l_quotaoffs_flag & type) |
Eric Sandeen | d99831f | 2014-06-22 15:03:54 +1000 | [diff] [blame] | 2853 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2854 | |
| 2855 | /* |
| 2856 | * At this point we know that quota was _not_ turned off. |
| 2857 | 	 * Since the mount flags do not indicate otherwise, this |
| 2858 | * must mean that quota is on, and the dquot needs to be replayed. |
| 2859 | * Remember that we may not have fully recovered the superblock yet, |
| 2860 | * so we can't do the usual trick of looking at the SB quota bits. |
| 2861 | * |
| 2862 | * The other possibility, of course, is that the quota subsystem was |
| 2863 | * removed since the last mount - ENOSYS. |
| 2864 | */ |
Christoph Hellwig | 4e0d5f9 | 2010-06-23 18:11:15 +1000 | [diff] [blame] | 2865 | dq_f = item->ri_buf[0].i_addr; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2866 | ASSERT(dq_f); |
Dave Chinner | 9aede1d | 2013-10-15 09:17:52 +1100 | [diff] [blame] | 2867 | error = xfs_dqcheck(mp, recddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN, |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 2868 | "xlog_recover_dquot_pass2 (log copy)"); |
| 2869 | if (error) |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 2870 | return -EIO; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2871 | ASSERT(dq_f->qlf_len == 1); |
| 2872 | |
Dave Chinner | ad3714b | 2014-08-04 12:59:31 +1000 | [diff] [blame] | 2873 | /* |
| 2874 | * At this point we are assuming that the dquots have been allocated |
| 2875 | * and hence the buffer has valid dquots stamped in it. It should, |
| 2876 | 	 * therefore, pass verifier validation. If the dquot is bad, then |
| 2877 | * we'll return an error here, so we don't need to specifically check |
| 2878 | * the dquot in the buffer after the verifier has run. |
| 2879 | */ |
Dave Chinner | 7ca790a | 2012-04-23 15:58:55 +1000 | [diff] [blame] | 2880 | error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dq_f->qlf_blkno, |
Dave Chinner | c3f8fc7 | 2012-11-12 22:54:01 +1100 | [diff] [blame] | 2881 | XFS_FSB_TO_BB(mp, dq_f->qlf_len), 0, &bp, |
Dave Chinner | ad3714b | 2014-08-04 12:59:31 +1000 | [diff] [blame] | 2882 | &xfs_dquot_buf_ops); |
Dave Chinner | 7ca790a | 2012-04-23 15:58:55 +1000 | [diff] [blame] | 2883 | if (error) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2884 | return error; |
Dave Chinner | 7ca790a | 2012-04-23 15:58:55 +1000 | [diff] [blame] | 2885 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2886 | ASSERT(bp); |
Christoph Hellwig | 88ee2df | 2015-06-22 09:44:29 +1000 | [diff] [blame] | 2887 | ddq = xfs_buf_offset(bp, dq_f->qlf_boffset); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2888 | |
| 2889 | /* |
Dave Chinner | 50d5c8d | 2013-08-28 21:22:47 +1000 | [diff] [blame] | 2890 | * If the dquot has an LSN in it, recover the dquot only if it's less |
| 2891 | * than the lsn of the transaction we are replaying. |
| 2892 | */ |
| 2893 | if (xfs_sb_version_hascrc(&mp->m_sb)) { |
| 2894 | struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddq; |
| 2895 | xfs_lsn_t lsn = be64_to_cpu(dqb->dd_lsn); |
| 2896 | |
| 2897 | if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) { |
| 2898 | goto out_release; |
| 2899 | } |
| 2900 | } |
| 2901 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2902 | memcpy(ddq, recddq, item->ri_buf[1].i_len); |
Dave Chinner | 6fcdc59 | 2013-06-03 15:28:46 +1000 | [diff] [blame] | 2903 | if (xfs_sb_version_hascrc(&mp->m_sb)) { |
| 2904 | xfs_update_cksum((char *)ddq, sizeof(struct xfs_dqblk), |
| 2905 | XFS_DQUOT_CRC_OFF); |
| 2906 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2907 | |
| 2908 | ASSERT(dq_f->qlf_size == 2); |
Dave Chinner | ebad861 | 2010-09-22 10:47:20 +1000 | [diff] [blame] | 2909 | ASSERT(bp->b_target->bt_mount == mp); |
Christoph Hellwig | cb669ca | 2011-07-13 13:43:49 +0200 | [diff] [blame] | 2910 | bp->b_iodone = xlog_recover_iodone; |
Christoph Hellwig | 43ff212 | 2012-04-23 15:58:39 +1000 | [diff] [blame] | 2911 | xfs_buf_delwri_queue(bp, buffer_list); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2912 | |
Dave Chinner | 50d5c8d | 2013-08-28 21:22:47 +1000 | [diff] [blame] | 2913 | out_release: |
| 2914 | xfs_buf_relse(bp); |
| 2915 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2916 | } |
| 2917 | |
| 2918 | /* |
| 2919 | * This routine is called to create an in-core extent free intent |
| 2920 | * item from the efi format structure which was logged on disk. |
| 2921 | * It allocates an in-core efi, copies the extents from the format |
| 2922 | * structure into it, and adds the efi to the AIL with the given |
| 2923 | * LSN. |
| 2924 | */ |
Tim Shimmin | 6d192a9 | 2006-06-09 14:55:38 +1000 | [diff] [blame] | 2925 | STATIC int |
Christoph Hellwig | c9f71f5 | 2010-12-01 22:06:24 +0000 | [diff] [blame] | 2926 | xlog_recover_efi_pass2( |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 2927 | struct xlog *log, |
| 2928 | struct xlog_recover_item *item, |
| 2929 | xfs_lsn_t lsn) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2930 | { |
Tim Shimmin | 6d192a9 | 2006-06-09 14:55:38 +1000 | [diff] [blame] | 2931 | int error; |
Christoph Hellwig | c9f71f5 | 2010-12-01 22:06:24 +0000 | [diff] [blame] | 2932 | xfs_mount_t *mp = log->l_mp; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2933 | xfs_efi_log_item_t *efip; |
| 2934 | xfs_efi_log_format_t *efi_formatp; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2935 | |
Christoph Hellwig | 4e0d5f9 | 2010-06-23 18:11:15 +1000 | [diff] [blame] | 2936 | efi_formatp = item->ri_buf[0].i_addr; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2937 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2938 | efip = xfs_efi_init(mp, efi_formatp->efi_nextents); |
Tim Shimmin | 6d192a9 | 2006-06-09 14:55:38 +1000 | [diff] [blame] | 2939 | if ((error = xfs_efi_copy_format(&(item->ri_buf[0]), |
| 2940 | &(efip->efi_format)))) { |
| 2941 | xfs_efi_item_free(efip); |
| 2942 | return error; |
| 2943 | } |
Dave Chinner | b199c8a | 2010-12-20 11:59:49 +1100 | [diff] [blame] | 2944 | atomic_set(&efip->efi_next_extent, efi_formatp->efi_nextents); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2945 | |
David Chinner | a9c21c1 | 2008-10-30 17:39:35 +1100 | [diff] [blame] | 2946 | spin_lock(&log->l_ailp->xa_lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2947 | /* |
David Chinner | 783a2f6 | 2008-10-30 17:39:58 +1100 | [diff] [blame] | 2948 | * xfs_trans_ail_update() drops the AIL lock. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2949 | */ |
Dave Chinner | e605994 | 2010-12-20 12:34:26 +1100 | [diff] [blame] | 2950 | xfs_trans_ail_update(log->l_ailp, &efip->efi_item, lsn); |
Tim Shimmin | 6d192a9 | 2006-06-09 14:55:38 +1000 | [diff] [blame] | 2951 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2952 | } |
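|  |  |
|  | /* |
|  |  * An EFI added to the AIL here is either cancelled by a matching EFD found |
|  |  * later in the log (see xlog_recover_efd_pass2()) or, if no EFD was ever |
|  |  * logged, processed at the end of recovery by xlog_recover_process_efis(). |
|  |  */ |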
| 2953 | |
| 2954 | |
| 2955 | /* |
| 2956 | * This routine is called when an efd format structure is found in |
| 2957 |  * a committed transaction in the log. Its purpose is to cancel |
| 2958 | * the corresponding efi if it was still in the log. To do this |
| 2959 | * it searches the AIL for the efi with an id equal to that in the |
| 2960 | * efd format structure. If we find it, we remove the efi from the |
| 2961 | * AIL and free it. |
| 2962 | */ |
Christoph Hellwig | c9f71f5 | 2010-12-01 22:06:24 +0000 | [diff] [blame] | 2963 | STATIC int |
| 2964 | xlog_recover_efd_pass2( |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 2965 | struct xlog *log, |
| 2966 | struct xlog_recover_item *item) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2967 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2968 | xfs_efd_log_format_t *efd_formatp; |
| 2969 | xfs_efi_log_item_t *efip = NULL; |
| 2970 | xfs_log_item_t *lip; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2971 | __uint64_t efi_id; |
David Chinner | 27d8d5f | 2008-10-30 17:38:39 +1100 | [diff] [blame] | 2972 | struct xfs_ail_cursor cur; |
David Chinner | 783a2f6 | 2008-10-30 17:39:58 +1100 | [diff] [blame] | 2973 | struct xfs_ail *ailp = log->l_ailp; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2974 | |
Christoph Hellwig | 4e0d5f9 | 2010-06-23 18:11:15 +1000 | [diff] [blame] | 2975 | efd_formatp = item->ri_buf[0].i_addr; |
Tim Shimmin | 6d192a9 | 2006-06-09 14:55:38 +1000 | [diff] [blame] | 2976 | ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) + |
| 2977 | ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) || |
| 2978 | (item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) + |
| 2979 | ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_64_t))))); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2980 | efi_id = efd_formatp->efd_efi_id; |
| 2981 | |
| 2982 | /* |
| 2983 | * Search for the efi with the id in the efd format structure |
| 2984 | * in the AIL. |
| 2985 | */ |
David Chinner | a9c21c1 | 2008-10-30 17:39:35 +1100 | [diff] [blame] | 2986 | spin_lock(&ailp->xa_lock); |
| 2987 | lip = xfs_trans_ail_cursor_first(ailp, &cur, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2988 | while (lip != NULL) { |
| 2989 | if (lip->li_type == XFS_LI_EFI) { |
| 2990 | efip = (xfs_efi_log_item_t *)lip; |
| 2991 | if (efip->efi_format.efi_id == efi_id) { |
| 2992 | /* |
David Chinner | 783a2f6 | 2008-10-30 17:39:58 +1100 | [diff] [blame] | 2993 | * xfs_trans_ail_delete() drops the |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2994 | * AIL lock. |
| 2995 | */ |
Dave Chinner | 04913fd | 2012-04-23 15:58:41 +1000 | [diff] [blame] | 2996 | xfs_trans_ail_delete(ailp, lip, |
| 2997 | SHUTDOWN_CORRUPT_INCORE); |
David Chinner | 8ae2c0f | 2007-11-23 16:28:17 +1100 | [diff] [blame] | 2998 | xfs_efi_item_free(efip); |
David Chinner | a9c21c1 | 2008-10-30 17:39:35 +1100 | [diff] [blame] | 2999 | spin_lock(&ailp->xa_lock); |
David Chinner | 27d8d5f | 2008-10-30 17:38:39 +1100 | [diff] [blame] | 3000 | break; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3001 | } |
| 3002 | } |
David Chinner | a9c21c1 | 2008-10-30 17:39:35 +1100 | [diff] [blame] | 3003 | lip = xfs_trans_ail_cursor_next(ailp, &cur); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3004 | } |
Eric Sandeen | e4a1e29 | 2014-04-14 19:06:05 +1000 | [diff] [blame] | 3005 | xfs_trans_ail_cursor_done(&cur); |
David Chinner | a9c21c1 | 2008-10-30 17:39:35 +1100 | [diff] [blame] | 3006 | spin_unlock(&ailp->xa_lock); |
Christoph Hellwig | c9f71f5 | 2010-12-01 22:06:24 +0000 | [diff] [blame] | 3007 | |
| 3008 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3009 | } |
| 3010 | |
| 3011 | /* |
Dave Chinner | 28c8e41 | 2013-06-27 16:04:55 +1000 | [diff] [blame] | 3012 | * This routine is called when an inode create format structure is found in a |
| 3013 |  * committed transaction in the log. Its purpose is to initialise the inodes |
| 3014 | * being allocated on disk. This requires us to get inode cluster buffers that |
| 3015 |  * match the range to be initialised, stamped with inode templates and written |
| 3016 | * by delayed write so that subsequent modifications will hit the cached buffer |
| 3017 | * and only need writing out at the end of recovery. |
| 3018 | */ |
| 3019 | STATIC int |
| 3020 | xlog_recover_do_icreate_pass2( |
| 3021 | struct xlog *log, |
| 3022 | struct list_head *buffer_list, |
| 3023 | xlog_recover_item_t *item) |
| 3024 | { |
| 3025 | struct xfs_mount *mp = log->l_mp; |
| 3026 | struct xfs_icreate_log *icl; |
| 3027 | xfs_agnumber_t agno; |
| 3028 | xfs_agblock_t agbno; |
| 3029 | unsigned int count; |
| 3030 | unsigned int isize; |
| 3031 | xfs_agblock_t length; |
| 3032 | |
| 3033 | icl = (struct xfs_icreate_log *)item->ri_buf[0].i_addr; |
| 3034 | if (icl->icl_type != XFS_LI_ICREATE) { |
| 3035 | xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad type"); |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 3036 | return -EINVAL; |
Dave Chinner | 28c8e41 | 2013-06-27 16:04:55 +1000 | [diff] [blame] | 3037 | } |
| 3038 | |
| 3039 | if (icl->icl_size != 1) { |
| 3040 | xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad icl size"); |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 3041 | return -EINVAL; |
Dave Chinner | 28c8e41 | 2013-06-27 16:04:55 +1000 | [diff] [blame] | 3042 | } |
| 3043 | |
| 3044 | agno = be32_to_cpu(icl->icl_ag); |
| 3045 | if (agno >= mp->m_sb.sb_agcount) { |
| 3046 | xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agno"); |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 3047 | return -EINVAL; |
Dave Chinner | 28c8e41 | 2013-06-27 16:04:55 +1000 | [diff] [blame] | 3048 | } |
| 3049 | agbno = be32_to_cpu(icl->icl_agbno); |
| 3050 | if (!agbno || agbno == NULLAGBLOCK || agbno >= mp->m_sb.sb_agblocks) { |
| 3051 | xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agbno"); |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 3052 | return -EINVAL; |
Dave Chinner | 28c8e41 | 2013-06-27 16:04:55 +1000 | [diff] [blame] | 3053 | } |
| 3054 | isize = be32_to_cpu(icl->icl_isize); |
| 3055 | if (isize != mp->m_sb.sb_inodesize) { |
| 3056 | xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad isize"); |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 3057 | return -EINVAL; |
Dave Chinner | 28c8e41 | 2013-06-27 16:04:55 +1000 | [diff] [blame] | 3058 | } |
| 3059 | count = be32_to_cpu(icl->icl_count); |
| 3060 | if (!count) { |
| 3061 | xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count"); |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 3062 | return -EINVAL; |
Dave Chinner | 28c8e41 | 2013-06-27 16:04:55 +1000 | [diff] [blame] | 3063 | } |
| 3064 | length = be32_to_cpu(icl->icl_length); |
| 3065 | if (!length || length >= mp->m_sb.sb_agblocks) { |
| 3066 | xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad length"); |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 3067 | return -EINVAL; |
Dave Chinner | 28c8e41 | 2013-06-27 16:04:55 +1000 | [diff] [blame] | 3068 | } |
| 3069 | |
Brian Foster | 7f43c90 | 2015-05-29 09:06:30 +1000 | [diff] [blame] | 3070 | /* |
| 3071 | * The inode chunk is either full or sparse and we only support |
| 3072 | * m_ialloc_min_blks sized sparse allocations at this time. |
| 3073 | */ |
| 3074 | if (length != mp->m_ialloc_blks && |
| 3075 | length != mp->m_ialloc_min_blks) { |
| 3076 | xfs_warn(log->l_mp, |
| 3077 | "%s: unsupported chunk length", __func__); |
| 3078 | return -EINVAL; |
| 3079 | } |
| 3080 | |
| 3081 | /* verify inode count is consistent with extent length */ |
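|  | 	/* |
|  | 	 * e.g. with 4k filesystem blocks and 256 byte inodes there are 16 |
|  | 	 * inodes per block (sb_inopblog == 4), so a 64 inode chunk must have |
|  | 	 * a length of 64 >> 4 == 4 blocks. |
|  | 	 */ |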
| 3082 | if ((count >> mp->m_sb.sb_inopblog) != length) { |
| 3083 | xfs_warn(log->l_mp, |
| 3084 | "%s: inconsistent inode count and chunk length", |
| 3085 | __func__); |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 3086 | return -EINVAL; |
Dave Chinner | 28c8e41 | 2013-06-27 16:04:55 +1000 | [diff] [blame] | 3087 | } |
| 3088 | |
| 3089 | /* |
| 3090 | * Inode buffers can be freed. Do not replay the inode initialisation as |
| 3091 | * we could be overwriting something written after this inode buffer was |
| 3092 | * cancelled. |
| 3093 | * |
| 3094 | * XXX: we need to iterate all buffers and only init those that are not |
| 3095 | * cancelled. I think that a more fine grained factoring of |
| 3096 | * xfs_ialloc_inode_init may be appropriate here to enable this to be |
| 3097 | * done easily. |
| 3098 | */ |
| 3099 | if (xlog_check_buffer_cancelled(log, |
| 3100 | XFS_AGB_TO_DADDR(mp, agno, agbno), length, 0)) |
| 3101 | return 0; |
| 3102 | |
Brian Foster | 463958a | 2015-05-29 09:05:49 +1000 | [diff] [blame] | 3103 | xfs_ialloc_inode_init(mp, NULL, buffer_list, count, agno, agbno, length, |
| 3104 | be32_to_cpu(icl->icl_gen)); |
Dave Chinner | 28c8e41 | 2013-06-27 16:04:55 +1000 | [diff] [blame] | 3105 | return 0; |
| 3106 | } |
| 3107 | |
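|  | /* |
|  |  * The *_ra_pass2() helpers below issue readahead for the buffers that the |
|  |  * corresponding pass 2 handlers will need. Readahead is purely an |
|  |  * optimisation, so errors are ignored here; any real I/O or verifier |
|  |  * failure is reported when the buffer is actually read during replay. |
|  |  */ |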
Zhi Yong Wu | 00574da | 2013-08-14 15:16:03 +0800 | [diff] [blame] | 3108 | STATIC void |
| 3109 | xlog_recover_buffer_ra_pass2( |
| 3110 | struct xlog *log, |
| 3111 | struct xlog_recover_item *item) |
| 3112 | { |
| 3113 | struct xfs_buf_log_format *buf_f = item->ri_buf[0].i_addr; |
| 3114 | struct xfs_mount *mp = log->l_mp; |
| 3115 | |
Dave Chinner | 84a5b73 | 2013-08-27 08:10:53 +1000 | [diff] [blame] | 3116 | if (xlog_peek_buffer_cancelled(log, buf_f->blf_blkno, |
Zhi Yong Wu | 00574da | 2013-08-14 15:16:03 +0800 | [diff] [blame] | 3117 | buf_f->blf_len, buf_f->blf_flags)) { |
| 3118 | return; |
| 3119 | } |
| 3120 | |
| 3121 | xfs_buf_readahead(mp->m_ddev_targp, buf_f->blf_blkno, |
| 3122 | buf_f->blf_len, NULL); |
| 3123 | } |
| 3124 | |
| 3125 | STATIC void |
| 3126 | xlog_recover_inode_ra_pass2( |
| 3127 | struct xlog *log, |
| 3128 | struct xlog_recover_item *item) |
| 3129 | { |
| 3130 | struct xfs_inode_log_format ilf_buf; |
| 3131 | struct xfs_inode_log_format *ilfp; |
| 3132 | struct xfs_mount *mp = log->l_mp; |
| 3133 | int error; |
| 3134 | |
| 3135 | if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) { |
| 3136 | ilfp = item->ri_buf[0].i_addr; |
| 3137 | } else { |
| 3138 | ilfp = &ilf_buf; |
| 3139 | memset(ilfp, 0, sizeof(*ilfp)); |
| 3140 | error = xfs_inode_item_format_convert(&item->ri_buf[0], ilfp); |
| 3141 | if (error) |
| 3142 | return; |
| 3143 | } |
| 3144 | |
Dave Chinner | 84a5b73 | 2013-08-27 08:10:53 +1000 | [diff] [blame] | 3145 | if (xlog_peek_buffer_cancelled(log, ilfp->ilf_blkno, ilfp->ilf_len, 0)) |
Zhi Yong Wu | 00574da | 2013-08-14 15:16:03 +0800 | [diff] [blame] | 3146 | return; |
| 3147 | |
| 3148 | xfs_buf_readahead(mp->m_ddev_targp, ilfp->ilf_blkno, |
Dave Chinner | d891400 | 2013-08-27 11:39:37 +1000 | [diff] [blame] | 3149 | ilfp->ilf_len, &xfs_inode_buf_ra_ops); |
Zhi Yong Wu | 00574da | 2013-08-14 15:16:03 +0800 | [diff] [blame] | 3150 | } |
| 3151 | |
| 3152 | STATIC void |
| 3153 | xlog_recover_dquot_ra_pass2( |
| 3154 | struct xlog *log, |
| 3155 | struct xlog_recover_item *item) |
| 3156 | { |
| 3157 | struct xfs_mount *mp = log->l_mp; |
| 3158 | struct xfs_disk_dquot *recddq; |
| 3159 | struct xfs_dq_logformat *dq_f; |
| 3160 | uint type; |
| 3161 | |
| 3162 | |
| 3163 | if (mp->m_qflags == 0) |
| 3164 | return; |
| 3165 | |
| 3166 | recddq = item->ri_buf[1].i_addr; |
| 3167 | if (recddq == NULL) |
| 3168 | return; |
| 3169 | if (item->ri_buf[1].i_len < sizeof(struct xfs_disk_dquot)) |
| 3170 | return; |
| 3171 | |
| 3172 | type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP); |
| 3173 | ASSERT(type); |
| 3174 | if (log->l_quotaoffs_flag & type) |
| 3175 | return; |
| 3176 | |
| 3177 | dq_f = item->ri_buf[0].i_addr; |
| 3178 | ASSERT(dq_f); |
| 3179 | ASSERT(dq_f->qlf_len == 1); |
| 3180 | |
| 3181 | xfs_buf_readahead(mp->m_ddev_targp, dq_f->qlf_blkno, |
Dave Chinner | 0f0d334 | 2013-08-27 13:25:43 +1000 | [diff] [blame] | 3182 | XFS_FSB_TO_BB(mp, dq_f->qlf_len), NULL); |
Zhi Yong Wu | 00574da | 2013-08-14 15:16:03 +0800 | [diff] [blame] | 3183 | } |
| 3184 | |
| 3185 | STATIC void |
| 3186 | xlog_recover_ra_pass2( |
| 3187 | struct xlog *log, |
| 3188 | struct xlog_recover_item *item) |
| 3189 | { |
| 3190 | switch (ITEM_TYPE(item)) { |
| 3191 | case XFS_LI_BUF: |
| 3192 | xlog_recover_buffer_ra_pass2(log, item); |
| 3193 | break; |
| 3194 | case XFS_LI_INODE: |
| 3195 | xlog_recover_inode_ra_pass2(log, item); |
| 3196 | break; |
| 3197 | case XFS_LI_DQUOT: |
| 3198 | xlog_recover_dquot_ra_pass2(log, item); |
| 3199 | break; |
| 3200 | case XFS_LI_EFI: |
| 3201 | case XFS_LI_EFD: |
| 3202 | case XFS_LI_QUOTAOFF: |
| 3203 | default: |
| 3204 | break; |
| 3205 | } |
| 3206 | } |
| 3207 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3208 | STATIC int |
Christoph Hellwig | c9f71f5 | 2010-12-01 22:06:24 +0000 | [diff] [blame] | 3209 | xlog_recover_commit_pass1( |
Mark Tinguely | ad223e6 | 2012-06-14 09:22:15 -0500 | [diff] [blame] | 3210 | struct xlog *log, |
| 3211 | struct xlog_recover *trans, |
| 3212 | struct xlog_recover_item *item) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3213 | { |
Christoph Hellwig | c9f71f5 | 2010-12-01 22:06:24 +0000 | [diff] [blame] | 3214 | trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1); |
Christoph Hellwig | d045094 | 2010-12-01 22:06:23 +0000 | [diff] [blame] | 3215 | |
| 3216 | switch (ITEM_TYPE(item)) { |
| 3217 | case XFS_LI_BUF: |
Christoph Hellwig | c9f71f5 | 2010-12-01 22:06:24 +0000 | [diff] [blame] | 3218 | return xlog_recover_buffer_pass1(log, item); |
Christoph Hellwig | d045094 | 2010-12-01 22:06:23 +0000 | [diff] [blame] | 3219 | case XFS_LI_QUOTAOFF: |
Christoph Hellwig | c9f71f5 | 2010-12-01 22:06:24 +0000 | [diff] [blame] | 3220 | return xlog_recover_quotaoff_pass1(log, item); |
| 3221 | case XFS_LI_INODE: |
| 3222 | case XFS_LI_EFI: |
| 3223 | case XFS_LI_EFD: |
| 3224 | case XFS_LI_DQUOT: |
Dave Chinner | 28c8e41 | 2013-06-27 16:04:55 +1000 | [diff] [blame] | 3225 | case XFS_LI_ICREATE: |
Christoph Hellwig | c9f71f5 | 2010-12-01 22:06:24 +0000 | [diff] [blame] | 3226 | /* nothing to do in pass 1 */ |
| 3227 | return 0; |
Christoph Hellwig | d045094 | 2010-12-01 22:06:23 +0000 | [diff] [blame] | 3228 | default: |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 3229 | xfs_warn(log->l_mp, "%s: invalid item type (%d)", |
| 3230 | __func__, ITEM_TYPE(item)); |
Christoph Hellwig | c9f71f5 | 2010-12-01 22:06:24 +0000 | [diff] [blame] | 3231 | ASSERT(0); |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 3232 | return -EIO; |
Christoph Hellwig | c9f71f5 | 2010-12-01 22:06:24 +0000 | [diff] [blame] | 3233 | } |
| 3234 | } |
| 3235 | |
| 3236 | STATIC int |
| 3237 | xlog_recover_commit_pass2( |
Mark Tinguely | ad223e6 | 2012-06-14 09:22:15 -0500 | [diff] [blame] | 3238 | struct xlog *log, |
| 3239 | struct xlog_recover *trans, |
| 3240 | struct list_head *buffer_list, |
| 3241 | struct xlog_recover_item *item) |
Christoph Hellwig | c9f71f5 | 2010-12-01 22:06:24 +0000 | [diff] [blame] | 3242 | { |
| 3243 | trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2); |
| 3244 | |
| 3245 | switch (ITEM_TYPE(item)) { |
| 3246 | case XFS_LI_BUF: |
Dave Chinner | 50d5c8d | 2013-08-28 21:22:47 +1000 | [diff] [blame] | 3247 | return xlog_recover_buffer_pass2(log, buffer_list, item, |
| 3248 | trans->r_lsn); |
Christoph Hellwig | c9f71f5 | 2010-12-01 22:06:24 +0000 | [diff] [blame] | 3249 | case XFS_LI_INODE: |
Dave Chinner | 50d5c8d | 2013-08-28 21:22:47 +1000 | [diff] [blame] | 3250 | return xlog_recover_inode_pass2(log, buffer_list, item, |
| 3251 | trans->r_lsn); |
Christoph Hellwig | c9f71f5 | 2010-12-01 22:06:24 +0000 | [diff] [blame] | 3252 | case XFS_LI_EFI: |
| 3253 | return xlog_recover_efi_pass2(log, item, trans->r_lsn); |
| 3254 | case XFS_LI_EFD: |
| 3255 | return xlog_recover_efd_pass2(log, item); |
| 3256 | case XFS_LI_DQUOT: |
Dave Chinner | 50d5c8d | 2013-08-28 21:22:47 +1000 | [diff] [blame] | 3257 | return xlog_recover_dquot_pass2(log, buffer_list, item, |
| 3258 | trans->r_lsn); |
Dave Chinner | 28c8e41 | 2013-06-27 16:04:55 +1000 | [diff] [blame] | 3259 | case XFS_LI_ICREATE: |
| 3260 | return xlog_recover_do_icreate_pass2(log, buffer_list, item); |
Christoph Hellwig | c9f71f5 | 2010-12-01 22:06:24 +0000 | [diff] [blame] | 3261 | case XFS_LI_QUOTAOFF: |
| 3262 | /* nothing to do in pass2 */ |
| 3263 | return 0; |
| 3264 | default: |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 3265 | xfs_warn(log->l_mp, "%s: invalid item type (%d)", |
| 3266 | __func__, ITEM_TYPE(item)); |
Christoph Hellwig | d045094 | 2010-12-01 22:06:23 +0000 | [diff] [blame] | 3267 | ASSERT(0); |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 3268 | return -EIO; |
Christoph Hellwig | d045094 | 2010-12-01 22:06:23 +0000 | [diff] [blame] | 3269 | } |
| 3270 | } |
| 3271 | |
Zhi Yong Wu | 00574da | 2013-08-14 15:16:03 +0800 | [diff] [blame] | 3272 | STATIC int |
| 3273 | xlog_recover_items_pass2( |
| 3274 | struct xlog *log, |
| 3275 | struct xlog_recover *trans, |
| 3276 | struct list_head *buffer_list, |
| 3277 | struct list_head *item_list) |
| 3278 | { |
| 3279 | struct xlog_recover_item *item; |
| 3280 | int error = 0; |
| 3281 | |
| 3282 | list_for_each_entry(item, item_list, ri_list) { |
| 3283 | error = xlog_recover_commit_pass2(log, trans, |
| 3284 | buffer_list, item); |
| 3285 | if (error) |
| 3286 | return error; |
| 3287 | } |
| 3288 | |
| 3289 | return error; |
| 3290 | } |
| 3291 | |
Christoph Hellwig | d045094 | 2010-12-01 22:06:23 +0000 | [diff] [blame] | 3292 | /* |
| 3293 | * Perform the transaction. |
| 3294 | * |
| 3295 | * If the transaction modifies a buffer or inode, do it now. Otherwise, |
| 3296 | * EFIs and EFDs get queued up by adding entries into the AIL for them. |
| 3297 | */ |
| 3298 | STATIC int |
| 3299 | xlog_recover_commit_trans( |
Mark Tinguely | ad223e6 | 2012-06-14 09:22:15 -0500 | [diff] [blame] | 3300 | struct xlog *log, |
Christoph Hellwig | d045094 | 2010-12-01 22:06:23 +0000 | [diff] [blame] | 3301 | struct xlog_recover *trans, |
| 3302 | int pass) |
| 3303 | { |
Zhi Yong Wu | 00574da | 2013-08-14 15:16:03 +0800 | [diff] [blame] | 3304 | int error = 0; |
| 3305 | int error2; |
| 3306 | int items_queued = 0; |
| 3307 | struct xlog_recover_item *item; |
| 3308 | struct xlog_recover_item *next; |
| 3309 | LIST_HEAD (buffer_list); |
| 3310 | LIST_HEAD (ra_list); |
| 3311 | LIST_HEAD (done_list); |
| 3312 | |
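|  | 	/* |
|  | 	 * Pass 2 items are queued in batches so that buffer readahead can be |
|  | 	 * issued for a window of items before xlog_recover_items_pass2() |
|  | 	 * replays them; by then the buffers should already be in, or on their |
|  | 	 * way into, the cache. |
|  | 	 */ |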
| 3313 | #define XLOG_RECOVER_COMMIT_QUEUE_MAX 100 |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3314 | |
Dave Chinner | f0a7695 | 2010-01-11 11:49:57 +0000 | [diff] [blame] | 3315 | hlist_del(&trans->r_list); |
Christoph Hellwig | d045094 | 2010-12-01 22:06:23 +0000 | [diff] [blame] | 3316 | |
| 3317 | error = xlog_recover_reorder_trans(log, trans, pass); |
| 3318 | if (error) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3319 | return error; |
Christoph Hellwig | d045094 | 2010-12-01 22:06:23 +0000 | [diff] [blame] | 3320 | |
Zhi Yong Wu | 00574da | 2013-08-14 15:16:03 +0800 | [diff] [blame] | 3321 | list_for_each_entry_safe(item, next, &trans->r_itemq, ri_list) { |
Christoph Hellwig | 43ff212 | 2012-04-23 15:58:39 +1000 | [diff] [blame] | 3322 | switch (pass) { |
| 3323 | case XLOG_RECOVER_PASS1: |
Christoph Hellwig | c9f71f5 | 2010-12-01 22:06:24 +0000 | [diff] [blame] | 3324 | error = xlog_recover_commit_pass1(log, trans, item); |
Christoph Hellwig | 43ff212 | 2012-04-23 15:58:39 +1000 | [diff] [blame] | 3325 | break; |
| 3326 | case XLOG_RECOVER_PASS2: |
Zhi Yong Wu | 00574da | 2013-08-14 15:16:03 +0800 | [diff] [blame] | 3327 | xlog_recover_ra_pass2(log, item); |
| 3328 | list_move_tail(&item->ri_list, &ra_list); |
| 3329 | items_queued++; |
| 3330 | if (items_queued >= XLOG_RECOVER_COMMIT_QUEUE_MAX) { |
| 3331 | error = xlog_recover_items_pass2(log, trans, |
| 3332 | &buffer_list, &ra_list); |
| 3333 | list_splice_tail_init(&ra_list, &done_list); |
| 3334 | items_queued = 0; |
| 3335 | } |
| 3336 | |
Christoph Hellwig | 43ff212 | 2012-04-23 15:58:39 +1000 | [diff] [blame] | 3337 | break; |
| 3338 | default: |
| 3339 | ASSERT(0); |
| 3340 | } |
| 3341 | |
Christoph Hellwig | d045094 | 2010-12-01 22:06:23 +0000 | [diff] [blame] | 3342 | if (error) |
Christoph Hellwig | 43ff212 | 2012-04-23 15:58:39 +1000 | [diff] [blame] | 3343 | goto out; |
Christoph Hellwig | d045094 | 2010-12-01 22:06:23 +0000 | [diff] [blame] | 3344 | } |
| 3345 | |
Zhi Yong Wu | 00574da | 2013-08-14 15:16:03 +0800 | [diff] [blame] | 3346 | out: |
| 3347 | if (!list_empty(&ra_list)) { |
| 3348 | if (!error) |
| 3349 | error = xlog_recover_items_pass2(log, trans, |
| 3350 | &buffer_list, &ra_list); |
| 3351 | list_splice_tail_init(&ra_list, &done_list); |
| 3352 | } |
| 3353 | |
| 3354 | if (!list_empty(&done_list)) |
| 3355 | list_splice_init(&done_list, &trans->r_itemq); |
| 3356 | |
Christoph Hellwig | 43ff212 | 2012-04-23 15:58:39 +1000 | [diff] [blame] | 3357 | error2 = xfs_buf_delwri_submit(&buffer_list); |
| 3358 | return error ? error : error2; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3359 | } |
| 3360 | |
Dave Chinner | 7656066 | 2014-09-29 09:45:42 +1000 | [diff] [blame] | 3361 | STATIC void |
| 3362 | xlog_recover_add_item( |
| 3363 | struct list_head *head) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3364 | { |
Dave Chinner | 7656066 | 2014-09-29 09:45:42 +1000 | [diff] [blame] | 3365 | xlog_recover_item_t *item; |
| 3366 | |
| 3367 | item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP); |
| 3368 | INIT_LIST_HEAD(&item->ri_list); |
| 3369 | list_add_tail(&item->ri_list, head); |
| 3370 | } |
| 3371 | |
| 3372 | STATIC int |
| 3373 | xlog_recover_add_to_cont_trans( |
| 3374 | struct xlog *log, |
| 3375 | struct xlog_recover *trans, |
Christoph Hellwig | b2a922c | 2015-06-22 09:45:10 +1000 | [diff] [blame] | 3376 | char *dp, |
Dave Chinner | 7656066 | 2014-09-29 09:45:42 +1000 | [diff] [blame] | 3377 | int len) |
| 3378 | { |
| 3379 | xlog_recover_item_t *item; |
Christoph Hellwig | b2a922c | 2015-06-22 09:45:10 +1000 | [diff] [blame] | 3380 | char *ptr, *old_ptr; |
Dave Chinner | 7656066 | 2014-09-29 09:45:42 +1000 | [diff] [blame] | 3381 | int old_len; |
| 3382 | |
| 3383 | if (list_empty(&trans->r_itemq)) { |
| 3384 | /* finish copying rest of trans header */ |
| 3385 | xlog_recover_add_item(&trans->r_itemq); |
Christoph Hellwig | b2a922c | 2015-06-22 09:45:10 +1000 | [diff] [blame] | 3386 | ptr = (char *)&trans->r_theader + |
Dave Chinner | 7656066 | 2014-09-29 09:45:42 +1000 | [diff] [blame] | 3387 | sizeof(xfs_trans_header_t) - len; |
| 3388 | memcpy(ptr, dp, len); |
| 3389 | return 0; |
| 3390 | } |
| 3391 | /* take the tail entry */ |
| 3392 | item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list); |
| 3393 | |
| 3394 | old_ptr = item->ri_buf[item->ri_cnt-1].i_addr; |
| 3395 | old_len = item->ri_buf[item->ri_cnt-1].i_len; |
| 3396 | |
| 3397 | ptr = kmem_realloc(old_ptr, len+old_len, old_len, KM_SLEEP); |
| 3398 | memcpy(&ptr[old_len], dp, len); |
| 3399 | item->ri_buf[item->ri_cnt-1].i_len += len; |
| 3400 | item->ri_buf[item->ri_cnt-1].i_addr = ptr; |
| 3401 | trace_xfs_log_recover_item_add_cont(log, trans, item, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3402 | return 0; |
| 3403 | } |
| 3404 | |
| 3405 | /* |
Dave Chinner | 7656066 | 2014-09-29 09:45:42 +1000 | [diff] [blame] | 3406 | * The next region to add is the start of a new region. It could be |
| 3407 | * a whole region or it could be the first part of a new region. Because |
| 3408 | * of this, the assumption here is that the type and size fields of all |
| 3409 | * format structures fit into the first 32 bits of the structure. |
| 3410 | * |
| 3411 | * This works because all regions must be 32 bit aligned. Therefore, we |
| 3412 | * either have both fields or we have neither field. In the case we have |
| 3413 | * neither field, the data part of the region is zero length. We only have |
| 3414 | * a log_op_header and can throw away the header since a new one will appear |
| 3415 | * later. If we have at least 4 bytes, then we can determine how many regions |
| 3416 | * will appear in the current log item. |
| 3417 | */ |
| 3418 | STATIC int |
| 3419 | xlog_recover_add_to_trans( |
| 3420 | struct xlog *log, |
| 3421 | struct xlog_recover *trans, |
Christoph Hellwig | b2a922c | 2015-06-22 09:45:10 +1000 | [diff] [blame] | 3422 | char *dp, |
Dave Chinner | 7656066 | 2014-09-29 09:45:42 +1000 | [diff] [blame] | 3423 | int len) |
| 3424 | { |
| 3425 | xfs_inode_log_format_t *in_f; /* any will do */ |
| 3426 | xlog_recover_item_t *item; |
Christoph Hellwig | b2a922c | 2015-06-22 09:45:10 +1000 | [diff] [blame] | 3427 | char *ptr; |
Dave Chinner | 7656066 | 2014-09-29 09:45:42 +1000 | [diff] [blame] | 3428 | |
| 3429 | if (!len) |
| 3430 | return 0; |
| 3431 | if (list_empty(&trans->r_itemq)) { |
| 3432 | /* we need to catch log corruptions here */ |
| 3433 | if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) { |
| 3434 | xfs_warn(log->l_mp, "%s: bad header magic number", |
| 3435 | __func__); |
| 3436 | ASSERT(0); |
| 3437 | return -EIO; |
| 3438 | } |
| 3439 | if (len == sizeof(xfs_trans_header_t)) |
| 3440 | xlog_recover_add_item(&trans->r_itemq); |
| 3441 | memcpy(&trans->r_theader, dp, len); |
| 3442 | return 0; |
| 3443 | } |
| 3444 | |
| 3445 | ptr = kmem_alloc(len, KM_SLEEP); |
| 3446 | memcpy(ptr, dp, len); |
| 3447 | in_f = (xfs_inode_log_format_t *)ptr; |
| 3448 | |
| 3449 | /* take the tail entry */ |
| 3450 | item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list); |
| 3451 | if (item->ri_total != 0 && |
| 3452 | item->ri_total == item->ri_cnt) { |
| 3453 | /* tail item is in use, get a new one */ |
| 3454 | xlog_recover_add_item(&trans->r_itemq); |
| 3455 | item = list_entry(trans->r_itemq.prev, |
| 3456 | xlog_recover_item_t, ri_list); |
| 3457 | } |
| 3458 | |
| 3459 | if (item->ri_total == 0) { /* first region to be added */ |
| 3460 | if (in_f->ilf_size == 0 || |
| 3461 | in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) { |
| 3462 | xfs_warn(log->l_mp, |
| 3463 | "bad number of regions (%d) in inode log format", |
| 3464 | in_f->ilf_size); |
| 3465 | ASSERT(0); |
| 3466 | kmem_free(ptr); |
| 3467 | return -EIO; |
| 3468 | } |
| 3469 | |
| 3470 | item->ri_total = in_f->ilf_size; |
| 3471 | item->ri_buf = |
| 3472 | kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t), |
| 3473 | KM_SLEEP); |
| 3474 | } |
| 3475 | ASSERT(item->ri_total > item->ri_cnt); |
| 3476 | /* Description region is ri_buf[0] */ |
| 3477 | item->ri_buf[item->ri_cnt].i_addr = ptr; |
| 3478 | item->ri_buf[item->ri_cnt].i_len = len; |
| 3479 | item->ri_cnt++; |
| 3480 | trace_xfs_log_recover_item_add(log, trans, item, 0); |
| 3481 | return 0; |
| 3482 | } |
Dave Chinner | b818cca | 2014-09-29 09:45:54 +1000 | [diff] [blame] | 3483 | |
Dave Chinner | 7656066 | 2014-09-29 09:45:42 +1000 | [diff] [blame] | 3484 | /* |
| 3485 | * Free up any resources allocated by the transaction |
| 3486 | * |
| 3487 | * Remember that EFIs, EFDs, and IUNLINKs are handled later. |
| 3488 | */ |
| 3489 | STATIC void |
| 3490 | xlog_recover_free_trans( |
| 3491 | struct xlog_recover *trans) |
| 3492 | { |
| 3493 | xlog_recover_item_t *item, *n; |
| 3494 | int i; |
| 3495 | |
| 3496 | list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) { |
| 3497 | /* Free the regions in the item. */ |
| 3498 | list_del(&item->ri_list); |
| 3499 | for (i = 0; i < item->ri_cnt; i++) |
| 3500 | kmem_free(item->ri_buf[i].i_addr); |
| 3501 | /* Free the item itself */ |
| 3502 | kmem_free(item->ri_buf); |
| 3503 | kmem_free(item); |
| 3504 | } |
| 3505 | /* Free the transaction recover structure */ |
| 3506 | kmem_free(trans); |
| 3507 | } |
| 3508 | |
Dave Chinner | e9131e5 | 2014-09-29 09:45:18 +1000 | [diff] [blame] | 3509 | /* |
| 3510 | * On error or completion, trans is freed. |
| 3511 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3512 | STATIC int |
Dave Chinner | eeb1168 | 2014-09-29 09:45:03 +1000 | [diff] [blame] | 3513 | xlog_recovery_process_trans( |
| 3514 | struct xlog *log, |
| 3515 | struct xlog_recover *trans, |
Christoph Hellwig | b2a922c | 2015-06-22 09:45:10 +1000 | [diff] [blame] | 3516 | char *dp, |
Dave Chinner | eeb1168 | 2014-09-29 09:45:03 +1000 | [diff] [blame] | 3517 | unsigned int len, |
| 3518 | unsigned int flags, |
| 3519 | int pass) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3520 | { |
Dave Chinner | e9131e5 | 2014-09-29 09:45:18 +1000 | [diff] [blame] | 3521 | int error = 0; |
| 3522 | bool freeit = false; |
Dave Chinner | eeb1168 | 2014-09-29 09:45:03 +1000 | [diff] [blame] | 3523 | |
| 3524 | /* mask off ophdr transaction container flags */ |
| 3525 | flags &= ~XLOG_END_TRANS; |
| 3526 | if (flags & XLOG_WAS_CONT_TRANS) |
| 3527 | flags &= ~XLOG_CONTINUE_TRANS; |
| 3528 | |
Dave Chinner | 88b863d | 2014-09-29 09:45:32 +1000 | [diff] [blame] | 3529 | /* |
| 3530 | * Callees must not free the trans structure. We'll decide if we need to |
| 3531 | 	 * free it or not based on the operation being done and its result. |
| 3532 | */ |
Dave Chinner | eeb1168 | 2014-09-29 09:45:03 +1000 | [diff] [blame] | 3533 | switch (flags) { |
| 3534 | /* expected flag values */ |
| 3535 | case 0: |
| 3536 | case XLOG_CONTINUE_TRANS: |
| 3537 | error = xlog_recover_add_to_trans(log, trans, dp, len); |
| 3538 | break; |
| 3539 | case XLOG_WAS_CONT_TRANS: |
| 3540 | error = xlog_recover_add_to_cont_trans(log, trans, dp, len); |
| 3541 | break; |
| 3542 | case XLOG_COMMIT_TRANS: |
| 3543 | error = xlog_recover_commit_trans(log, trans, pass); |
Dave Chinner | 88b863d | 2014-09-29 09:45:32 +1000 | [diff] [blame] | 3544 | /* success or fail, we are now done with this transaction. */ |
| 3545 | freeit = true; |
Dave Chinner | eeb1168 | 2014-09-29 09:45:03 +1000 | [diff] [blame] | 3546 | break; |
| 3547 | |
| 3548 | /* unexpected flag values */ |
| 3549 | case XLOG_UNMOUNT_TRANS: |
Dave Chinner | e9131e5 | 2014-09-29 09:45:18 +1000 | [diff] [blame] | 3550 | /* just skip trans */ |
Dave Chinner | eeb1168 | 2014-09-29 09:45:03 +1000 | [diff] [blame] | 3551 | xfs_warn(log->l_mp, "%s: Unmount LR", __func__); |
Dave Chinner | e9131e5 | 2014-09-29 09:45:18 +1000 | [diff] [blame] | 3552 | freeit = true; |
Dave Chinner | eeb1168 | 2014-09-29 09:45:03 +1000 | [diff] [blame] | 3553 | break; |
| 3554 | case XLOG_START_TRANS: |
Dave Chinner | eeb1168 | 2014-09-29 09:45:03 +1000 | [diff] [blame] | 3555 | default: |
| 3556 | xfs_warn(log->l_mp, "%s: bad flag 0x%x", __func__, flags); |
| 3557 | ASSERT(0); |
Dave Chinner | e9131e5 | 2014-09-29 09:45:18 +1000 | [diff] [blame] | 3558 | error = -EIO; |
Dave Chinner | eeb1168 | 2014-09-29 09:45:03 +1000 | [diff] [blame] | 3559 | break; |
| 3560 | } |
Dave Chinner | e9131e5 | 2014-09-29 09:45:18 +1000 | [diff] [blame] | 3561 | if (error || freeit) |
| 3562 | xlog_recover_free_trans(trans); |
Dave Chinner | eeb1168 | 2014-09-29 09:45:03 +1000 | [diff] [blame] | 3563 | return error; |
| 3564 | } |
| 3565 | |
Dave Chinner | b818cca | 2014-09-29 09:45:54 +1000 | [diff] [blame] | 3566 | /* |
| 3567 | * Lookup the transaction recovery structure associated with the ID in the |
| 3568 | * current ophdr. If the transaction doesn't exist and the start flag is set in |
| 3569 | * the ophdr, then allocate a new transaction for future ID matches to find. |
| 3570 | * Either way, return what we found during the lookup - an existing transaction |
| 3571 | * or nothing. |
| 3572 | */ |
Dave Chinner | eeb1168 | 2014-09-29 09:45:03 +1000 | [diff] [blame] | 3573 | STATIC struct xlog_recover * |
| 3574 | xlog_recover_ophdr_to_trans( |
| 3575 | struct hlist_head rhash[], |
| 3576 | struct xlog_rec_header *rhead, |
| 3577 | struct xlog_op_header *ohead) |
| 3578 | { |
| 3579 | struct xlog_recover *trans; |
| 3580 | xlog_tid_t tid; |
| 3581 | struct hlist_head *rhp; |
| 3582 | |
| 3583 | tid = be32_to_cpu(ohead->oh_tid); |
| 3584 | rhp = &rhash[XLOG_RHASH(tid)]; |
Dave Chinner | b818cca | 2014-09-29 09:45:54 +1000 | [diff] [blame] | 3585 | hlist_for_each_entry(trans, rhp, r_list) { |
| 3586 | if (trans->r_log_tid == tid) |
| 3587 | return trans; |
| 3588 | } |
Dave Chinner | eeb1168 | 2014-09-29 09:45:03 +1000 | [diff] [blame] | 3589 | |
| 3590 | /* |
Dave Chinner | b818cca | 2014-09-29 09:45:54 +1000 | [diff] [blame] | 3591 | * skip over non-start transaction headers - we could be |
| 3592 | * processing slack space before the next transaction starts |
Dave Chinner | eeb1168 | 2014-09-29 09:45:03 +1000 | [diff] [blame] | 3593 | */ |
Dave Chinner | b818cca | 2014-09-29 09:45:54 +1000 | [diff] [blame] | 3594 | if (!(ohead->oh_flags & XLOG_START_TRANS)) |
| 3595 | return NULL; |
| 3596 | |
| 3597 | ASSERT(be32_to_cpu(ohead->oh_len) == 0); |
| 3598 | |
| 3599 | /* |
| 3600 | * This is a new transaction so allocate a new recovery container to |
| 3601 | * hold the recovery ops that will follow. |
| 3602 | */ |
| 3603 | trans = kmem_zalloc(sizeof(struct xlog_recover), KM_SLEEP); |
| 3604 | trans->r_log_tid = tid; |
| 3605 | trans->r_lsn = be64_to_cpu(rhead->h_lsn); |
| 3606 | INIT_LIST_HEAD(&trans->r_itemq); |
| 3607 | INIT_HLIST_NODE(&trans->r_list); |
| 3608 | hlist_add_head(&trans->r_list, rhp); |
| 3609 | |
| 3610 | /* |
| 3611 | * Nothing more to do for this ophdr. Items to be added to this new |
| 3612 | * transaction will be in subsequent ophdr containers. |
| 3613 | */ |
Dave Chinner | eeb1168 | 2014-09-29 09:45:03 +1000 | [diff] [blame] | 3614 | return NULL; |
| 3615 | } |
| 3616 | |
| 3617 | STATIC int |
| 3618 | xlog_recover_process_ophdr( |
| 3619 | struct xlog *log, |
| 3620 | struct hlist_head rhash[], |
| 3621 | struct xlog_rec_header *rhead, |
| 3622 | struct xlog_op_header *ohead, |
Christoph Hellwig | b2a922c | 2015-06-22 09:45:10 +1000 | [diff] [blame] | 3623 | char *dp, |
| 3624 | char *end, |
Dave Chinner | eeb1168 | 2014-09-29 09:45:03 +1000 | [diff] [blame] | 3625 | int pass) |
| 3626 | { |
| 3627 | struct xlog_recover *trans; |
Dave Chinner | eeb1168 | 2014-09-29 09:45:03 +1000 | [diff] [blame] | 3628 | unsigned int len; |
| 3629 | |
| 3630 | /* Do we understand who wrote this op? */ |
| 3631 | if (ohead->oh_clientid != XFS_TRANSACTION && |
| 3632 | ohead->oh_clientid != XFS_LOG) { |
| 3633 | xfs_warn(log->l_mp, "%s: bad clientid 0x%x", |
| 3634 | __func__, ohead->oh_clientid); |
| 3635 | ASSERT(0); |
| 3636 | return -EIO; |
| 3637 | } |
| 3638 | |
| 3639 | /* |
| 3640 | 	 * Check that the ophdr contains all the data it is supposed to contain. |
| 3641 | */ |
| 3642 | len = be32_to_cpu(ohead->oh_len); |
| 3643 | if (dp + len > end) { |
| 3644 | xfs_warn(log->l_mp, "%s: bad length 0x%x", __func__, len); |
| 3645 | WARN_ON(1); |
| 3646 | return -EIO; |
| 3647 | } |
| 3648 | |
| 3649 | trans = xlog_recover_ophdr_to_trans(rhash, rhead, ohead); |
| 3650 | if (!trans) { |
| 3651 | /* nothing to do, so skip over this ophdr */ |
| 3652 | return 0; |
| 3653 | } |
| 3654 | |
Dave Chinner | e9131e5 | 2014-09-29 09:45:18 +1000 | [diff] [blame] | 3655 | return xlog_recovery_process_trans(log, trans, dp, len, |
| 3656 | ohead->oh_flags, pass); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3657 | } |
| 3658 | |
| 3659 | /* |
| 3660 | * There are two valid states of the r_state field. 0 indicates that the |
| 3661 | * transaction structure is in a normal state. We have either seen the |
| 3662 | * start of the transaction or the last operation we added was not a partial |
| 3663 | * operation. If the last operation we added to the transaction was a |
| 3664 | * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS. |
| 3665 | * |
| 3666 | * NOTE: skip LRs with 0 data length. |
| 3667 | */ |
| 3668 | STATIC int |
| 3669 | xlog_recover_process_data( |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 3670 | struct xlog *log, |
Dave Chinner | f0a7695 | 2010-01-11 11:49:57 +0000 | [diff] [blame] | 3671 | struct hlist_head rhash[], |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 3672 | struct xlog_rec_header *rhead, |
Christoph Hellwig | b2a922c | 2015-06-22 09:45:10 +1000 | [diff] [blame] | 3673 | char *dp, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3674 | int pass) |
| 3675 | { |
Dave Chinner | eeb1168 | 2014-09-29 09:45:03 +1000 | [diff] [blame] | 3676 | struct xlog_op_header *ohead; |
Christoph Hellwig | b2a922c | 2015-06-22 09:45:10 +1000 | [diff] [blame] | 3677 | char *end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3678 | int num_logops; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3679 | int error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3680 | |
Dave Chinner | eeb1168 | 2014-09-29 09:45:03 +1000 | [diff] [blame] | 3681 | end = dp + be32_to_cpu(rhead->h_len); |
Christoph Hellwig | b53e675 | 2007-10-12 10:59:34 +1000 | [diff] [blame] | 3682 | num_logops = be32_to_cpu(rhead->h_num_logops); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3683 | |
| 3684 | /* check the log format matches our own - else we can't recover */ |
| 3685 | if (xlog_header_check_recover(log->l_mp, rhead)) |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 3686 | return -EIO; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3687 | |
Dave Chinner | eeb1168 | 2014-09-29 09:45:03 +1000 | [diff] [blame] | 3688 | while ((dp < end) && num_logops) { |
| 3689 | |
| 3690 | ohead = (struct xlog_op_header *)dp; |
| 3691 | dp += sizeof(*ohead); |
| 3692 | ASSERT(dp <= end); |
| 3693 | |
| 3694 | /* errors will abort recovery */ |
| 3695 | error = xlog_recover_process_ophdr(log, rhash, rhead, ohead, |
| 3696 | dp, end, pass); |
| 3697 | if (error) |
| 3698 | return error; |
| 3699 | |
Christoph Hellwig | 67fcb7b | 2007-10-12 10:58:59 +1000 | [diff] [blame] | 3700 | dp += be32_to_cpu(ohead->oh_len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3701 | num_logops--; |
| 3702 | } |
| 3703 | return 0; |
| 3704 | } |
| 3705 | |
| 3706 | /* |
| 3707 | * Process an extent free intent item that was recovered from |
| 3708 | * the log. We need to free the extents that it describes. |
| 3709 | */ |
David Chinner | 3c1e2bb | 2008-04-10 12:21:11 +1000 | [diff] [blame] | 3710 | STATIC int |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3711 | xlog_recover_process_efi( |
| 3712 | xfs_mount_t *mp, |
| 3713 | xfs_efi_log_item_t *efip) |
| 3714 | { |
| 3715 | xfs_efd_log_item_t *efdp; |
| 3716 | xfs_trans_t *tp; |
| 3717 | int i; |
David Chinner | 3c1e2bb | 2008-04-10 12:21:11 +1000 | [diff] [blame] | 3718 | int error = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3719 | xfs_extent_t *extp; |
| 3720 | xfs_fsblock_t startblock_fsb; |
| 3721 | |
Dave Chinner | b199c8a | 2010-12-20 11:59:49 +1100 | [diff] [blame] | 3722 | ASSERT(!test_bit(XFS_EFI_RECOVERED, &efip->efi_flags)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3723 | |
| 3724 | /* |
| 3725 | * First check the validity of the extents described by the |
| 3726 | * EFI. If any are bad, then assume that all are bad and |
| 3727 | * just toss the EFI. |
| 3728 | */ |
| 3729 | for (i = 0; i < efip->efi_format.efi_nextents; i++) { |
| 3730 | extp = &(efip->efi_format.efi_extents[i]); |
| 3731 | startblock_fsb = XFS_BB_TO_FSB(mp, |
| 3732 | XFS_FSB_TO_DADDR(mp, extp->ext_start)); |
| 3733 | if ((startblock_fsb == 0) || |
| 3734 | (extp->ext_len == 0) || |
| 3735 | (startblock_fsb >= mp->m_sb.sb_dblocks) || |
| 3736 | (extp->ext_len >= mp->m_sb.sb_agblocks)) { |
| 3737 | /* |
| 3738 | * This will pull the EFI from the AIL and |
| 3739 | * free the memory associated with it. |
| 3740 | */ |
Dave Chinner | 666d644 | 2013-04-03 14:09:21 +1100 | [diff] [blame] | 3741 | set_bit(XFS_EFI_RECOVERED, &efip->efi_flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3742 | xfs_efi_release(efip, efip->efi_format.efi_nextents); |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 3743 | return -EIO; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3744 | } |
| 3745 | } |
| 3746 | |
| 3747 | tp = xfs_trans_alloc(mp, 0); |
Jie Liu | 3d3c8b5 | 2013-08-12 20:49:59 +1000 | [diff] [blame] | 3748 | error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0); |
David Chinner | fc6149d | 2008-04-10 12:21:53 +1000 | [diff] [blame] | 3749 | if (error) |
| 3750 | goto abort_error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3751 | efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents); |
| 3752 | |
| 3753 | for (i = 0; i < efip->efi_format.efi_nextents; i++) { |
| 3754 | extp = &(efip->efi_format.efi_extents[i]); |
David Chinner | fc6149d | 2008-04-10 12:21:53 +1000 | [diff] [blame] | 3755 | error = xfs_free_extent(tp, extp->ext_start, extp->ext_len); |
| 3756 | if (error) |
| 3757 | goto abort_error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3758 | xfs_trans_log_efd_extent(tp, efdp, extp->ext_start, |
| 3759 | extp->ext_len); |
| 3760 | } |
| 3761 | |
Dave Chinner | b199c8a | 2010-12-20 11:59:49 +1100 | [diff] [blame] | 3762 | set_bit(XFS_EFI_RECOVERED, &efip->efi_flags); |
Christoph Hellwig | 7039331 | 2015-06-04 13:48:08 +1000 | [diff] [blame] | 3763 | error = xfs_trans_commit(tp); |
David Chinner | 3c1e2bb | 2008-04-10 12:21:11 +1000 | [diff] [blame] | 3764 | return error; |
David Chinner | fc6149d | 2008-04-10 12:21:53 +1000 | [diff] [blame] | 3765 | |
| 3766 | abort_error: |
Christoph Hellwig | 4906e21 | 2015-06-04 13:47:56 +1000 | [diff] [blame] | 3767 | xfs_trans_cancel(tp); |
David Chinner | fc6149d | 2008-04-10 12:21:53 +1000 | [diff] [blame] | 3768 | return error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3769 | } |
| 3770 | |
| 3771 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3772 | * When this is called, all of the EFIs which did not have |
| 3773 | * corresponding EFDs should be in the AIL. What we do now |
| 3774 | * is free the extents associated with each one. |
| 3775 | * |
| 3776 | * Since we process the EFIs in normal transactions, they |
| 3777 | * will be removed at some point after the commit. This prevents |
| 3778 | * us from just walking down the list processing each one. |
| 3779 | * We'll use a flag in the EFI to skip those that we've already |
| 3780 | * processed and use the AIL iteration mechanism's generation |
| 3781 | * count to try to speed this up at least a bit. |
| 3782 | * |
| 3783 | * When we start, we know that the EFIs are the only things in |
| 3784 | * the AIL. As we process them, however, other items are added |
| 3785 | * to the AIL. Since everything added to the AIL must come after |
| 3786 | * everything already in the AIL, we stop processing as soon as |
| 3787 | * we see something other than an EFI in the AIL. |
| 3788 | */ |
David Chinner | 3c1e2bb | 2008-04-10 12:21:11 +1000 | [diff] [blame] | 3789 | STATIC int |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3790 | xlog_recover_process_efis( |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 3791 | struct xlog *log) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3792 | { |
| 3793 | xfs_log_item_t *lip; |
| 3794 | xfs_efi_log_item_t *efip; |
David Chinner | 3c1e2bb | 2008-04-10 12:21:11 +1000 | [diff] [blame] | 3795 | int error = 0; |
David Chinner | 27d8d5f | 2008-10-30 17:38:39 +1100 | [diff] [blame] | 3796 | struct xfs_ail_cursor cur; |
David Chinner | a9c21c1 | 2008-10-30 17:39:35 +1100 | [diff] [blame] | 3797 | struct xfs_ail *ailp; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3798 | |
David Chinner | a9c21c1 | 2008-10-30 17:39:35 +1100 | [diff] [blame] | 3799 | ailp = log->l_ailp; |
| 3800 | spin_lock(&ailp->xa_lock); |
| 3801 | lip = xfs_trans_ail_cursor_first(ailp, &cur, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3802 | while (lip != NULL) { |
| 3803 | /* |
| 3804 | * We're done when we see something other than an EFI. |
David Chinner | 27d8d5f | 2008-10-30 17:38:39 +1100 | [diff] [blame] | 3805 | * There should be no EFIs left in the AIL now. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3806 | */ |
| 3807 | if (lip->li_type != XFS_LI_EFI) { |
David Chinner | 27d8d5f | 2008-10-30 17:38:39 +1100 | [diff] [blame] | 3808 | #ifdef DEBUG |
David Chinner | a9c21c1 | 2008-10-30 17:39:35 +1100 | [diff] [blame] | 3809 | for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur)) |
David Chinner | 27d8d5f | 2008-10-30 17:38:39 +1100 | [diff] [blame] | 3810 | ASSERT(lip->li_type != XFS_LI_EFI); |
| 3811 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3812 | break; |
| 3813 | } |
| 3814 | |
| 3815 | /* |
| 3816 | * Skip EFIs that we've already processed. |
| 3817 | */ |
| 3818 | efip = (xfs_efi_log_item_t *)lip; |
Dave Chinner | b199c8a | 2010-12-20 11:59:49 +1100 | [diff] [blame] | 3819 | if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags)) { |
David Chinner | a9c21c1 | 2008-10-30 17:39:35 +1100 | [diff] [blame] | 3820 | lip = xfs_trans_ail_cursor_next(ailp, &cur); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3821 | continue; |
| 3822 | } |
| 3823 | |
David Chinner | a9c21c1 | 2008-10-30 17:39:35 +1100 | [diff] [blame] | 3824 | spin_unlock(&ailp->xa_lock); |
| 3825 | error = xlog_recover_process_efi(log->l_mp, efip); |
| 3826 | spin_lock(&ailp->xa_lock); |
David Chinner | 27d8d5f | 2008-10-30 17:38:39 +1100 | [diff] [blame] | 3827 | if (error) |
| 3828 | goto out; |
David Chinner | a9c21c1 | 2008-10-30 17:39:35 +1100 | [diff] [blame] | 3829 | lip = xfs_trans_ail_cursor_next(ailp, &cur); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3830 | } |
David Chinner | 27d8d5f | 2008-10-30 17:38:39 +1100 | [diff] [blame] | 3831 | out: |
Eric Sandeen | e4a1e29 | 2014-04-14 19:06:05 +1000 | [diff] [blame] | 3832 | xfs_trans_ail_cursor_done(&cur); |
David Chinner | a9c21c1 | 2008-10-30 17:39:35 +1100 | [diff] [blame] | 3833 | spin_unlock(&ailp->xa_lock); |
David Chinner | 3c1e2bb | 2008-04-10 12:21:11 +1000 | [diff] [blame] | 3834 | return error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3835 | } |
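/*
 * Illustrative note, not part of the original source: the function above is
 * an instance of the usual AIL cursor walk. A minimal sketch of the pattern,
 * using only calls that already appear in it:
 *
 *	spin_lock(&ailp->xa_lock);
 *	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
 *	while (lip != NULL) {
 *		(process lip, dropping xa_lock around anything that sleeps)
 *		lip = xfs_trans_ail_cursor_next(ailp, &cur);
 *	}
 *	xfs_trans_ail_cursor_done(&cur);
 *	spin_unlock(&ailp->xa_lock);
 *
 * xa_lock is dropped around xlog_recover_process_efi() because that runs a
 * transaction and may sleep, which is not allowed under a spinlock.
 */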
| 3836 | |
| 3837 | /* |
| 3838 | * This routine performs a transaction to null out a bad inode pointer |
| 3839 | * in an agi unlinked inode hash bucket. |
| 3840 | */ |
| 3841 | STATIC void |
| 3842 | xlog_recover_clear_agi_bucket( |
| 3843 | xfs_mount_t *mp, |
| 3844 | xfs_agnumber_t agno, |
| 3845 | int bucket) |
| 3846 | { |
| 3847 | xfs_trans_t *tp; |
| 3848 | xfs_agi_t *agi; |
| 3849 | xfs_buf_t *agibp; |
| 3850 | int offset; |
| 3851 | int error; |
| 3852 | |
| 3853 | tp = xfs_trans_alloc(mp, XFS_TRANS_CLEAR_AGI_BUCKET); |
Jie Liu | 3d3c8b5 | 2013-08-12 20:49:59 +1000 | [diff] [blame] | 3854 | error = xfs_trans_reserve(tp, &M_RES(mp)->tr_clearagi, 0, 0); |
David Chinner | e5720ee | 2008-04-10 12:21:18 +1000 | [diff] [blame] | 3855 | if (error) |
| 3856 | goto out_abort; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3857 | |
Christoph Hellwig | 5e1be0f | 2008-11-28 14:23:37 +1100 | [diff] [blame] | 3858 | error = xfs_read_agi(mp, tp, agno, &agibp); |
| 3859 | if (error) |
David Chinner | e5720ee | 2008-04-10 12:21:18 +1000 | [diff] [blame] | 3860 | goto out_abort; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3861 | |
Christoph Hellwig | 5e1be0f | 2008-11-28 14:23:37 +1100 | [diff] [blame] | 3862 | agi = XFS_BUF_TO_AGI(agibp); |
Christoph Hellwig | 16259e7 | 2005-11-02 15:11:25 +1100 | [diff] [blame] | 3863 | agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3864 | offset = offsetof(xfs_agi_t, agi_unlinked) + |
| 3865 | (sizeof(xfs_agino_t) * bucket); |
| 3866 | xfs_trans_log_buf(tp, agibp, offset, |
| 3867 | (offset + sizeof(xfs_agino_t) - 1)); |
| 3868 | |
Christoph Hellwig | 7039331 | 2015-06-04 13:48:08 +1000 | [diff] [blame] | 3869 | error = xfs_trans_commit(tp); |
David Chinner | e5720ee | 2008-04-10 12:21:18 +1000 | [diff] [blame] | 3870 | if (error) |
| 3871 | goto out_error; |
| 3872 | return; |
| 3873 | |
| 3874 | out_abort: |
Christoph Hellwig | 4906e21 | 2015-06-04 13:47:56 +1000 | [diff] [blame] | 3875 | xfs_trans_cancel(tp); |
David Chinner | e5720ee | 2008-04-10 12:21:18 +1000 | [diff] [blame] | 3876 | out_error: |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 3877 | xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__, agno); |
David Chinner | e5720ee | 2008-04-10 12:21:18 +1000 | [diff] [blame] | 3878 | return; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3879 | } |
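/*
 * Worked example, illustrative rather than from the original source: the
 * range logged above covers exactly one bucket entry. agi_unlinked[] holds
 * 32-bit xfs_agino_t values, so for, say, bucket 5:
 *
 *	offset = offsetof(xfs_agi_t, agi_unlinked) + 5 * sizeof(xfs_agino_t);
 *	xfs_trans_log_buf(tp, agibp, offset, offset + sizeof(xfs_agino_t) - 1);
 *
 * only the four bytes holding that bucket's head pointer are marked dirty
 * in the transaction, not the whole AGI buffer.
 */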
| 3880 | |
Christoph Hellwig | 23fac50 | 2008-11-28 14:23:40 +1100 | [diff] [blame] | 3881 | STATIC xfs_agino_t |
| 3882 | xlog_recover_process_one_iunlink( |
| 3883 | struct xfs_mount *mp, |
| 3884 | xfs_agnumber_t agno, |
| 3885 | xfs_agino_t agino, |
| 3886 | int bucket) |
| 3887 | { |
| 3888 | struct xfs_buf *ibp; |
| 3889 | struct xfs_dinode *dip; |
| 3890 | struct xfs_inode *ip; |
| 3891 | xfs_ino_t ino; |
| 3892 | int error; |
| 3893 | |
| 3894 | ino = XFS_AGINO_TO_INO(mp, agno, agino); |
Dave Chinner | 7b6259e | 2010-06-24 11:35:17 +1000 | [diff] [blame] | 3895 | error = xfs_iget(mp, NULL, ino, 0, 0, &ip); |
Christoph Hellwig | 23fac50 | 2008-11-28 14:23:40 +1100 | [diff] [blame] | 3896 | if (error) |
| 3897 | goto fail; |
| 3898 | |
| 3899 | /* |
| 3900 | * Get the on disk inode to find the next inode in the bucket. |
| 3901 | */ |
Christoph Hellwig | 475ee41 | 2012-07-03 12:21:22 -0400 | [diff] [blame] | 3902 | error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &ibp, 0, 0); |
Christoph Hellwig | 23fac50 | 2008-11-28 14:23:40 +1100 | [diff] [blame] | 3903 | if (error) |
Christoph Hellwig | 0e44667 | 2008-11-28 14:23:42 +1100 | [diff] [blame] | 3904 | goto fail_iput; |
Christoph Hellwig | 23fac50 | 2008-11-28 14:23:40 +1100 | [diff] [blame] | 3905 | |
Christoph Hellwig | 23fac50 | 2008-11-28 14:23:40 +1100 | [diff] [blame] | 3906 | ASSERT(ip->i_d.di_nlink == 0); |
Christoph Hellwig | 0e44667 | 2008-11-28 14:23:42 +1100 | [diff] [blame] | 3907 | ASSERT(ip->i_d.di_mode != 0); |
Christoph Hellwig | 23fac50 | 2008-11-28 14:23:40 +1100 | [diff] [blame] | 3908 | |
| 3909 | /* setup for the next pass */ |
| 3910 | agino = be32_to_cpu(dip->di_next_unlinked); |
| 3911 | xfs_buf_relse(ibp); |
| 3912 | |
| 3913 | /* |
| 3914 | * Prevent any DMAPI event from being sent when the reference on |
| 3915 | * the inode is dropped. |
| 3916 | */ |
| 3917 | ip->i_d.di_dmevmask = 0; |
| 3918 | |
Christoph Hellwig | 0e44667 | 2008-11-28 14:23:42 +1100 | [diff] [blame] | 3919 | IRELE(ip); |
Christoph Hellwig | 23fac50 | 2008-11-28 14:23:40 +1100 | [diff] [blame] | 3920 | return agino; |
| 3921 | |
Christoph Hellwig | 0e44667 | 2008-11-28 14:23:42 +1100 | [diff] [blame] | 3922 | fail_iput: |
| 3923 | IRELE(ip); |
Christoph Hellwig | 23fac50 | 2008-11-28 14:23:40 +1100 | [diff] [blame] | 3924 | fail: |
| 3925 | /* |
| 3926 | * We can't read in the inode this bucket points to, or this inode |
| 3927 | * is messed up. Just ditch this bucket of inodes. We will lose |
| 3928 | * some inodes and space, but at least we won't hang. |
| 3929 | * |
| 3930 | * Call xlog_recover_clear_agi_bucket() to perform a transaction to |
| 3931 | * clear the inode pointer in the bucket. |
| 3932 | */ |
| 3933 | xlog_recover_clear_agi_bucket(mp, agno, bucket); |
| 3934 | return NULLAGINO; |
| 3935 | } |
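/*
 * Illustrative note, not part of the original source: the agino returned
 * above drives the caller's walk of one AGI unlinked bucket. On-disk inodes
 * are chained through di_next_unlinked and NULLAGINO terminates the chain,
 * so the walk in xlog_recover_process_iunlinks() below reduces to:
 *
 *	agino = be32_to_cpu(agi->agi_unlinked[bucket]);
 *	while (agino != NULLAGINO)
 *		agino = xlog_recover_process_one_iunlink(mp, agno,
 *							 agino, bucket);
 *
 * Returning NULLAGINO from the failure path (after clearing the bucket)
 * therefore also ends the walk for that bucket.
 */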
| 3936 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3937 | /* |
| 3938 | * xlog_recover_process_iunlinks |
| 3939 | * |
| 3940 | * This is called during recovery to process any inodes which |
| 3941 | * we unlinked but not freed when the system crashed. These |
| 3942 | * inodes will be on the lists in the AGI blocks. What we do |
| 3943 | * here is scan all the AGIs and fully truncate and free any |
| 3944 | * inodes found on the lists. Each inode is removed from the |
| 3945 | * lists when it has been fully truncated and is freed. The |
| 3946 | * freeing of the inode and its removal from the list must be |
| 3947 | * atomic. |
| 3948 | */ |
Eric Sandeen | d96f8f8 | 2009-07-02 00:09:33 -0500 | [diff] [blame] | 3949 | STATIC void |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3950 | xlog_recover_process_iunlinks( |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 3951 | struct xlog *log) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3952 | { |
| 3953 | xfs_mount_t *mp; |
| 3954 | xfs_agnumber_t agno; |
| 3955 | xfs_agi_t *agi; |
| 3956 | xfs_buf_t *agibp; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3957 | xfs_agino_t agino; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3958 | int bucket; |
| 3959 | int error; |
| 3960 | uint mp_dmevmask; |
| 3961 | |
| 3962 | mp = log->l_mp; |
| 3963 | |
| 3964 | /* |
| 3965 | * Prevent any DMAPI event from being sent while in this function. |
| 3966 | */ |
| 3967 | mp_dmevmask = mp->m_dmevmask; |
| 3968 | mp->m_dmevmask = 0; |
| 3969 | |
| 3970 | for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) { |
| 3971 | /* |
| 3972 | * Find the agi for this ag. |
| 3973 | */ |
Christoph Hellwig | 5e1be0f | 2008-11-28 14:23:37 +1100 | [diff] [blame] | 3974 | error = xfs_read_agi(mp, NULL, agno, &agibp); |
| 3975 | if (error) { |
| 3976 | /* |
| 3977 | * AGI is b0rked. Don't process it. |
| 3978 | * |
| 3979 | * We should probably mark the filesystem as corrupt |
| 3980 | * after we've recovered all the AGs we can.... |
| 3981 | */ |
| 3982 | continue; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3983 | } |
Jan Kara | d97d32e | 2012-03-15 09:34:02 +0000 | [diff] [blame] | 3984 | /* |
| 3985 | * Unlock the buffer so that it can be acquired in the normal |
| 3986 | * course of the transaction to truncate and free each inode. |
| 3987 | * Because we are not racing with anyone else here for the AGI |
| 3988 | * buffer, we don't even need to hold it locked to read the |
| 3989 | * initial unlinked bucket entries out of the buffer. We keep |
| 3990 | * a buffer reference, though, so that it stays pinned in memory |
| 3991 | * while we need the buffer. |
| 3992 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3993 | agi = XFS_BUF_TO_AGI(agibp); |
Jan Kara | d97d32e | 2012-03-15 09:34:02 +0000 | [diff] [blame] | 3994 | xfs_buf_unlock(agibp); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3995 | |
| 3996 | for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) { |
Christoph Hellwig | 16259e7 | 2005-11-02 15:11:25 +1100 | [diff] [blame] | 3997 | agino = be32_to_cpu(agi->agi_unlinked[bucket]); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3998 | while (agino != NULLAGINO) { |
Christoph Hellwig | 23fac50 | 2008-11-28 14:23:40 +1100 | [diff] [blame] | 3999 | agino = xlog_recover_process_one_iunlink(mp, |
| 4000 | agno, agino, bucket); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4001 | } |
| 4002 | } |
Jan Kara | d97d32e | 2012-03-15 09:34:02 +0000 | [diff] [blame] | 4003 | xfs_buf_rele(agibp); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4004 | } |
| 4005 | |
| 4006 | mp->m_dmevmask = mp_dmevmask; |
| 4007 | } |
| 4008 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4009 | /* |
Christoph Hellwig | 0e446be | 2012-11-12 22:54:24 +1100 | [diff] [blame] | 4010 | * Unpack the log buffer data and CRC check it. If the check fails, issue a |
| 4011 | * warning if and only if the CRC in the header is non-zero. This makes the |
| 4012 | * check an advisory warning, and the zero CRC check will prevent failure |
| 4013 | * warnings from being emitted when upgrading the kernel from one that does not |
| 4014 | * add CRCs by default. |
| 4015 | * |
| 4016 | * When filesystems are CRC enabled, this CRC mismatch becomes a fatal log |
| 4017 | * corruption failure. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4018 | */ |
Christoph Hellwig | 0e446be | 2012-11-12 22:54:24 +1100 | [diff] [blame] | 4019 | STATIC int |
| 4020 | xlog_unpack_data_crc( |
| 4021 | struct xlog_rec_header *rhead, |
Christoph Hellwig | b2a922c | 2015-06-22 09:45:10 +1000 | [diff] [blame] | 4022 | char *dp, |
Christoph Hellwig | 0e446be | 2012-11-12 22:54:24 +1100 | [diff] [blame] | 4023 | struct xlog *log) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4024 | { |
Dave Chinner | f9668a0 | 2012-11-28 13:01:03 +1100 | [diff] [blame] | 4025 | __le32 crc; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4026 | |
Christoph Hellwig | 0e446be | 2012-11-12 22:54:24 +1100 | [diff] [blame] | 4027 | crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len)); |
| 4028 | if (crc != rhead->h_crc) { |
| 4029 | if (rhead->h_crc || xfs_sb_version_hascrc(&log->l_mp->m_sb)) { |
| 4030 | xfs_alert(log->l_mp, |
Eric Sandeen | 08e96e1 | 2013-10-11 20:59:05 -0500 | [diff] [blame] | 4031 | "log record CRC mismatch: found 0x%x, expected 0x%x.", |
Dave Chinner | f9668a0 | 2012-11-28 13:01:03 +1100 | [diff] [blame] | 4032 | le32_to_cpu(rhead->h_crc), |
| 4033 | le32_to_cpu(crc)); |
Christoph Hellwig | 0e446be | 2012-11-12 22:54:24 +1100 | [diff] [blame] | 4034 | xfs_hex_dump(dp, 32); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4035 | } |
| 4036 | |
Christoph Hellwig | 0e446be | 2012-11-12 22:54:24 +1100 | [diff] [blame] | 4037 | /* |
| 4038 | * If we've detected a log record corruption, then we can't |
| 4039 | * recover past this point. Abort recovery if we are enforcing |
| 4040 | * CRC protection by punting an error back up the stack. |
| 4041 | */ |
| 4042 | if (xfs_sb_version_hascrc(&log->l_mp->m_sb)) |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 4043 | return -EFSCORRUPTED; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4044 | } |
Christoph Hellwig | 0e446be | 2012-11-12 22:54:24 +1100 | [diff] [blame] | 4045 | |
| 4046 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4047 | } |
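/*
 * Illustrative summary, not part of the original source, of the policy the
 * function above implements:
 *
 *	CRC matches                              -> return 0, no output
 *	mismatch, h_crc == 0, non-CRC filesystem -> return 0, silent
 *	mismatch, h_crc != 0, non-CRC filesystem -> return 0, xfs_alert() only
 *	mismatch, CRC-enabled filesystem         -> xfs_alert(), -EFSCORRUPTED
 */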
| 4048 | |
Christoph Hellwig | 0e446be | 2012-11-12 22:54:24 +1100 | [diff] [blame] | 4049 | STATIC int |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4050 | xlog_unpack_data( |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 4051 | struct xlog_rec_header *rhead, |
Christoph Hellwig | b2a922c | 2015-06-22 09:45:10 +1000 | [diff] [blame] | 4052 | char *dp, |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 4053 | struct xlog *log) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4054 | { |
| 4055 | int i, j, k; |
Christoph Hellwig | 0e446be | 2012-11-12 22:54:24 +1100 | [diff] [blame] | 4056 | int error; |
| 4057 | |
| 4058 | error = xlog_unpack_data_crc(rhead, dp, log); |
| 4059 | if (error) |
| 4060 | return error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4061 | |
Christoph Hellwig | b53e675 | 2007-10-12 10:59:34 +1000 | [diff] [blame] | 4062 | for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) && |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4063 | i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) { |
Christoph Hellwig | b53e675 | 2007-10-12 10:59:34 +1000 | [diff] [blame] | 4064 | *(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i]; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4065 | dp += BBSIZE; |
| 4066 | } |
| 4067 | |
Eric Sandeen | 6211870 | 2008-03-06 13:44:28 +1100 | [diff] [blame] | 4068 | if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) { |
Christoph Hellwig | b28708d | 2008-11-28 14:23:38 +1100 | [diff] [blame] | 4069 | xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead; |
Christoph Hellwig | b53e675 | 2007-10-12 10:59:34 +1000 | [diff] [blame] | 4070 | for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4071 | j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE); |
| 4072 | k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE); |
Christoph Hellwig | b53e675 | 2007-10-12 10:59:34 +1000 | [diff] [blame] | 4073 | *(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k]; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4074 | dp += BBSIZE; |
| 4075 | } |
| 4076 | } |
Christoph Hellwig | 0e446be | 2012-11-12 22:54:24 +1100 | [diff] [blame] | 4077 | |
| 4078 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4079 | } |
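/*
 * Illustrative note, not part of the original source: the loops above undo
 * the packing done at log write time, where the first __be32 of every
 * 512-byte basic block of the record body is overwritten with the record's
 * cycle number (so torn writes can be detected) and the displaced words are
 * stashed in h_cycle_data[] - or, for v2 logs with records larger than
 * XLOG_HEADER_CYCLE_SIZE, in the extended headers' xh_cycle_data[].
 * Unpacking copies each saved word back over the start of its block:
 *
 *	*(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
 *	dp += BBSIZE;
 */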
| 4080 | |
| 4081 | STATIC int |
| 4082 | xlog_valid_rec_header( |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 4083 | struct xlog *log, |
| 4084 | struct xlog_rec_header *rhead, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4085 | xfs_daddr_t blkno) |
| 4086 | { |
| 4087 | int hlen; |
| 4088 | |
Christoph Hellwig | 69ef921 | 2011-07-08 14:36:05 +0200 | [diff] [blame] | 4089 | if (unlikely(rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4090 | XFS_ERROR_REPORT("xlog_valid_rec_header(1)", |
| 4091 | XFS_ERRLEVEL_LOW, log->l_mp); |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 4092 | return -EFSCORRUPTED; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4093 | } |
| 4094 | if (unlikely( |
| 4095 | (!rhead->h_version || |
Christoph Hellwig | b53e675 | 2007-10-12 10:59:34 +1000 | [diff] [blame] | 4096 | (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) { |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 4097 | xfs_warn(log->l_mp, "%s: unrecognised log version (%d).", |
Harvey Harrison | 34a622b | 2008-04-10 12:19:21 +1000 | [diff] [blame] | 4098 | __func__, be32_to_cpu(rhead->h_version)); |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 4099 | return -EIO; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4100 | } |
| 4101 | |
| 4102 | /* LR body must have data or it wouldn't have been written */ |
Christoph Hellwig | b53e675 | 2007-10-12 10:59:34 +1000 | [diff] [blame] | 4103 | hlen = be32_to_cpu(rhead->h_len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4104 | if (unlikely( hlen <= 0 || hlen > INT_MAX )) { |
| 4105 | XFS_ERROR_REPORT("xlog_valid_rec_header(2)", |
| 4106 | XFS_ERRLEVEL_LOW, log->l_mp); |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 4107 | return -EFSCORRUPTED; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4108 | } |
| 4109 | if (unlikely( blkno > log->l_logBBsize || blkno > INT_MAX )) { |
| 4110 | XFS_ERROR_REPORT("xlog_valid_rec_header(3)", |
| 4111 | XFS_ERRLEVEL_LOW, log->l_mp); |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 4112 | return -EFSCORRUPTED; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4113 | } |
| 4114 | return 0; |
| 4115 | } |
| 4116 | |
| 4117 | /* |
| 4118 | * Read the log from tail to head and process the log records found. |
| 4119 | * Handle the two cases where the tail and head are in the same cycle |
| 4120 | * and where the active portion of the log wraps around the end of |
| 4121 | * the physical log separately. The pass parameter is passed through |
| 4122 | * to the routines called to process the data and is not looked at |
| 4123 | * here. |
| 4124 | */ |
| 4125 | STATIC int |
| 4126 | xlog_do_recovery_pass( |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 4127 | struct xlog *log, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4128 | xfs_daddr_t head_blk, |
| 4129 | xfs_daddr_t tail_blk, |
| 4130 | int pass) |
| 4131 | { |
| 4132 | xlog_rec_header_t *rhead; |
| 4133 | xfs_daddr_t blk_no; |
Christoph Hellwig | b2a922c | 2015-06-22 09:45:10 +1000 | [diff] [blame] | 4134 | char *offset; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4135 | xfs_buf_t *hbp, *dbp; |
| 4136 | int error = 0, h_size; |
| 4137 | int bblks, split_bblks; |
| 4138 | int hblks, split_hblks, wrapped_hblks; |
Dave Chinner | f0a7695 | 2010-01-11 11:49:57 +0000 | [diff] [blame] | 4139 | struct hlist_head rhash[XLOG_RHASH_SIZE]; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4140 | |
| 4141 | ASSERT(head_blk != tail_blk); |
| 4142 | |
| 4143 | /* |
| 4144 | * Read the header of the tail block and get the iclog buffer size from |
| 4145 | * h_size. Use this to tell how many sectors make up the log header. |
| 4146 | */ |
Eric Sandeen | 6211870 | 2008-03-06 13:44:28 +1100 | [diff] [blame] | 4147 | if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4148 | /* |
| 4149 | * When using variable length iclogs, read first sector of |
| 4150 | * iclog header and extract the header size from it. Get a |
| 4151 | * new hbp that is the correct size. |
| 4152 | */ |
| 4153 | hbp = xlog_get_bp(log, 1); |
| 4154 | if (!hbp) |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 4155 | return -ENOMEM; |
Christoph Hellwig | 076e6ac | 2009-03-16 08:24:13 +0100 | [diff] [blame] | 4156 | |
| 4157 | error = xlog_bread(log, tail_blk, 1, hbp, &offset); |
| 4158 | if (error) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4159 | goto bread_err1; |
Christoph Hellwig | 076e6ac | 2009-03-16 08:24:13 +0100 | [diff] [blame] | 4160 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4161 | rhead = (xlog_rec_header_t *)offset; |
| 4162 | error = xlog_valid_rec_header(log, rhead, tail_blk); |
| 4163 | if (error) |
| 4164 | goto bread_err1; |
Christoph Hellwig | b53e675 | 2007-10-12 10:59:34 +1000 | [diff] [blame] | 4165 | h_size = be32_to_cpu(rhead->h_size); |
| 4166 | if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) && |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4167 | (h_size > XLOG_HEADER_CYCLE_SIZE)) { |
| 4168 | hblks = h_size / XLOG_HEADER_CYCLE_SIZE; |
| 4169 | if (h_size % XLOG_HEADER_CYCLE_SIZE) |
| 4170 | hblks++; |
| 4171 | xlog_put_bp(hbp); |
| 4172 | hbp = xlog_get_bp(log, hblks); |
| 4173 | } else { |
| 4174 | hblks = 1; |
| 4175 | } |
| 4176 | } else { |
Alex Elder | 69ce58f | 2010-04-20 17:09:59 +1000 | [diff] [blame] | 4177 | ASSERT(log->l_sectBBsize == 1); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4178 | hblks = 1; |
| 4179 | hbp = xlog_get_bp(log, 1); |
| 4180 | h_size = XLOG_BIG_RECORD_BSIZE; |
| 4181 | } |
| 4182 | |
| 4183 | if (!hbp) |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 4184 | return -ENOMEM; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4185 | dbp = xlog_get_bp(log, BTOBB(h_size)); |
| 4186 | if (!dbp) { |
| 4187 | xlog_put_bp(hbp); |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 4188 | return -ENOMEM; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4189 | } |
| 4190 | |
| 4191 | memset(rhash, 0, sizeof(rhash)); |
Eric Sandeen | 970fd3f | 2014-09-09 11:57:29 +1000 | [diff] [blame] | 4192 | blk_no = tail_blk; |
| 4193 | if (tail_blk > head_blk) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4194 | /* |
| 4195 | * Perform recovery around the end of the physical log. |
| 4196 | * When the head is not on the same cycle number as the tail, |
Eric Sandeen | 970fd3f | 2014-09-09 11:57:29 +1000 | [diff] [blame] | 4197 | * we can't do a sequential recovery. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4198 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4199 | while (blk_no < log->l_logBBsize) { |
| 4200 | /* |
| 4201 | * Check for header wrapping around physical end-of-log |
| 4202 | */ |
Chandra Seetharaman | 6292604 | 2011-07-22 23:40:15 +0000 | [diff] [blame] | 4203 | offset = hbp->b_addr; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4204 | split_hblks = 0; |
| 4205 | wrapped_hblks = 0; |
| 4206 | if (blk_no + hblks <= log->l_logBBsize) { |
| 4207 | /* Read header in one read */ |
Christoph Hellwig | 076e6ac | 2009-03-16 08:24:13 +0100 | [diff] [blame] | 4208 | error = xlog_bread(log, blk_no, hblks, hbp, |
| 4209 | &offset); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4210 | if (error) |
| 4211 | goto bread_err2; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4212 | } else { |
| 4213 | /* This LR is split across physical log end */ |
| 4214 | if (blk_no != log->l_logBBsize) { |
| 4215 | /* some data before physical log end */ |
| 4216 | ASSERT(blk_no <= INT_MAX); |
| 4217 | split_hblks = log->l_logBBsize - (int)blk_no; |
| 4218 | ASSERT(split_hblks > 0); |
Christoph Hellwig | 076e6ac | 2009-03-16 08:24:13 +0100 | [diff] [blame] | 4219 | error = xlog_bread(log, blk_no, |
| 4220 | split_hblks, hbp, |
| 4221 | &offset); |
| 4222 | if (error) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4223 | goto bread_err2; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4224 | } |
Christoph Hellwig | 076e6ac | 2009-03-16 08:24:13 +0100 | [diff] [blame] | 4225 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4226 | /* |
| 4227 | * Note: this black magic still works with |
| 4228 | * large sector sizes (non-512) only because: |
| 4229 | * - we increased the buffer size originally |
| 4230 | * by 1 sector giving us enough extra space |
| 4231 | * for the second read; |
| 4232 | * - the log start is guaranteed to be sector |
| 4233 | * aligned; |
| 4234 | * - we read the log end (LR header start) |
| 4235 | * _first_, then the log start (LR header end) |
| 4236 | * - order is important. |
| 4237 | */ |
David Chinner | 234f56a | 2008-04-10 12:24:24 +1000 | [diff] [blame] | 4238 | wrapped_hblks = hblks - split_hblks; |
Dave Chinner | 4439647 | 2011-04-21 09:34:27 +0000 | [diff] [blame] | 4239 | error = xlog_bread_offset(log, 0, |
| 4240 | wrapped_hblks, hbp, |
| 4241 | offset + BBTOB(split_hblks)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4242 | if (error) |
| 4243 | goto bread_err2; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4244 | } |
| 4245 | rhead = (xlog_rec_header_t *)offset; |
| 4246 | error = xlog_valid_rec_header(log, rhead, |
| 4247 | split_hblks ? blk_no : 0); |
| 4248 | if (error) |
| 4249 | goto bread_err2; |
| 4250 | |
Christoph Hellwig | b53e675 | 2007-10-12 10:59:34 +1000 | [diff] [blame] | 4251 | bblks = (int)BTOBB(be32_to_cpu(rhead->h_len)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4252 | blk_no += hblks; |
| 4253 | |
| 4254 | /* Read in data for log record */ |
| 4255 | if (blk_no + bblks <= log->l_logBBsize) { |
Christoph Hellwig | 076e6ac | 2009-03-16 08:24:13 +0100 | [diff] [blame] | 4256 | error = xlog_bread(log, blk_no, bblks, dbp, |
| 4257 | &offset); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4258 | if (error) |
| 4259 | goto bread_err2; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4260 | } else { |
| 4261 | /* This log record is split across the |
| 4262 | * physical end of log */ |
Chandra Seetharaman | 6292604 | 2011-07-22 23:40:15 +0000 | [diff] [blame] | 4263 | offset = dbp->b_addr; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4264 | split_bblks = 0; |
| 4265 | if (blk_no != log->l_logBBsize) { |
| 4266 | /* some data is before the physical |
| 4267 | * end of log */ |
| 4268 | ASSERT(!wrapped_hblks); |
| 4269 | ASSERT(blk_no <= INT_MAX); |
| 4270 | split_bblks = |
| 4271 | log->l_logBBsize - (int)blk_no; |
| 4272 | ASSERT(split_bblks > 0); |
Christoph Hellwig | 076e6ac | 2009-03-16 08:24:13 +0100 | [diff] [blame] | 4273 | error = xlog_bread(log, blk_no, |
| 4274 | split_bblks, dbp, |
| 4275 | &offset); |
| 4276 | if (error) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4277 | goto bread_err2; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4278 | } |
Christoph Hellwig | 076e6ac | 2009-03-16 08:24:13 +0100 | [diff] [blame] | 4279 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4280 | /* |
| 4281 | * Note: this black magic still works with |
| 4282 | * large sector sizes (non-512) only because: |
| 4283 | * - we increased the buffer size originally |
| 4284 | * by 1 sector giving us enough extra space |
| 4285 | * for the second read; |
| 4286 | * - the log start is guaranteed to be sector |
| 4287 | * aligned; |
| 4288 | * - we read the log end (LR header start) |
| 4289 | * _first_, then the log start (LR header end) |
| 4290 | * - order is important. |
| 4291 | */ |
Dave Chinner | 4439647 | 2011-04-21 09:34:27 +0000 | [diff] [blame] | 4292 | error = xlog_bread_offset(log, 0, |
Dave Chinner | 009507b | 2012-11-02 11:38:44 +1100 | [diff] [blame] | 4293 | bblks - split_bblks, dbp, |
Dave Chinner | 4439647 | 2011-04-21 09:34:27 +0000 | [diff] [blame] | 4294 | offset + BBTOB(split_bblks)); |
Christoph Hellwig | 076e6ac | 2009-03-16 08:24:13 +0100 | [diff] [blame] | 4295 | if (error) |
| 4296 | goto bread_err2; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4297 | } |
Christoph Hellwig | 0e446be | 2012-11-12 22:54:24 +1100 | [diff] [blame] | 4298 | |
| 4299 | error = xlog_unpack_data(rhead, offset, log); |
| 4300 | if (error) |
| 4301 | goto bread_err2; |
| 4302 | |
| 4303 | error = xlog_recover_process_data(log, rhash, |
| 4304 | rhead, offset, pass); |
| 4305 | if (error) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4306 | goto bread_err2; |
| 4307 | blk_no += bblks; |
| 4308 | } |
| 4309 | |
| 4310 | ASSERT(blk_no >= log->l_logBBsize); |
| 4311 | blk_no -= log->l_logBBsize; |
Eric Sandeen | 970fd3f | 2014-09-09 11:57:29 +1000 | [diff] [blame] | 4312 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4313 | |
Eric Sandeen | 970fd3f | 2014-09-09 11:57:29 +1000 | [diff] [blame] | 4314 | /* read first part of physical log */ |
| 4315 | while (blk_no < head_blk) { |
| 4316 | error = xlog_bread(log, blk_no, hblks, hbp, &offset); |
| 4317 | if (error) |
| 4318 | goto bread_err2; |
Christoph Hellwig | 076e6ac | 2009-03-16 08:24:13 +0100 | [diff] [blame] | 4319 | |
Eric Sandeen | 970fd3f | 2014-09-09 11:57:29 +1000 | [diff] [blame] | 4320 | rhead = (xlog_rec_header_t *)offset; |
| 4321 | error = xlog_valid_rec_header(log, rhead, blk_no); |
| 4322 | if (error) |
| 4323 | goto bread_err2; |
Christoph Hellwig | 076e6ac | 2009-03-16 08:24:13 +0100 | [diff] [blame] | 4324 | |
Eric Sandeen | 970fd3f | 2014-09-09 11:57:29 +1000 | [diff] [blame] | 4325 | /* blocks in data section */ |
| 4326 | bblks = (int)BTOBB(be32_to_cpu(rhead->h_len)); |
| 4327 | error = xlog_bread(log, blk_no+hblks, bblks, dbp, |
| 4328 | &offset); |
| 4329 | if (error) |
| 4330 | goto bread_err2; |
Christoph Hellwig | 076e6ac | 2009-03-16 08:24:13 +0100 | [diff] [blame] | 4331 | |
Eric Sandeen | 970fd3f | 2014-09-09 11:57:29 +1000 | [diff] [blame] | 4332 | error = xlog_unpack_data(rhead, offset, log); |
| 4333 | if (error) |
| 4334 | goto bread_err2; |
Christoph Hellwig | 0e446be | 2012-11-12 22:54:24 +1100 | [diff] [blame] | 4335 | |
Eric Sandeen | 970fd3f | 2014-09-09 11:57:29 +1000 | [diff] [blame] | 4336 | error = xlog_recover_process_data(log, rhash, |
| 4337 | rhead, offset, pass); |
| 4338 | if (error) |
| 4339 | goto bread_err2; |
| 4340 | blk_no += bblks + hblks; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4341 | } |
| 4342 | |
| 4343 | bread_err2: |
| 4344 | xlog_put_bp(dbp); |
| 4345 | bread_err1: |
| 4346 | xlog_put_bp(hbp); |
| 4347 | return error; |
| 4348 | } |
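/*
 * Illustrative note, not part of the original source: when the active log
 * wraps, a single log record header or body can straddle the physical end
 * of the log. The routine above then reads it in two pieces into the same
 * buffer: the piece before the end with xlog_bread() (split_hblks or
 * split_bblks blocks starting at blk_no), and the remainder from block 0
 * with xlog_bread_offset(), placed at offset + BBTOB(split_hblks) (or
 * BBTOB(split_bblks)) so the record is contiguous in memory before
 * xlog_unpack_data() and xlog_recover_process_data() see it.
 */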
| 4349 | |
| 4350 | /* |
| 4351 | * Do the recovery of the log. We actually do this in two phases. |
| 4352 | * The two passes are necessary in order to implement the function |
| 4353 | * of cancelling a record written into the log. The first pass |
| 4354 | * determines those things which have been cancelled, and the |
| 4355 | * second pass replays log items normally except for those which |
| 4356 | * have been cancelled. The handling of the replay and cancellations |
| 4357 | * takes place in the log item type specific routines. |
| 4358 | * |
| 4359 | * The table of items which have cancel records in the log is allocated |
| 4360 | * and freed at this level, since only here do we know when all of |
| 4361 | * the log recovery has been completed. |
| 4362 | */ |
| 4363 | STATIC int |
| 4364 | xlog_do_log_recovery( |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 4365 | struct xlog *log, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4366 | xfs_daddr_t head_blk, |
| 4367 | xfs_daddr_t tail_blk) |
| 4368 | { |
Christoph Hellwig | d5689ea | 2010-12-01 22:06:22 +0000 | [diff] [blame] | 4369 | int error, i; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4370 | |
| 4371 | ASSERT(head_blk != tail_blk); |
| 4372 | |
| 4373 | /* |
| 4374 | * First do a pass to find all of the cancelled buf log items. |
| 4375 | * Store them in the buf_cancel_table for use in the second pass. |
| 4376 | */ |
Christoph Hellwig | d5689ea | 2010-12-01 22:06:22 +0000 | [diff] [blame] | 4377 | log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE * |
| 4378 | sizeof(struct list_head), |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4379 | KM_SLEEP); |
Christoph Hellwig | d5689ea | 2010-12-01 22:06:22 +0000 | [diff] [blame] | 4380 | for (i = 0; i < XLOG_BC_TABLE_SIZE; i++) |
| 4381 | INIT_LIST_HEAD(&log->l_buf_cancel_table[i]); |
| 4382 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4383 | error = xlog_do_recovery_pass(log, head_blk, tail_blk, |
| 4384 | XLOG_RECOVER_PASS1); |
| 4385 | if (error != 0) { |
Denys Vlasenko | f0e2d93 | 2008-05-19 16:31:57 +1000 | [diff] [blame] | 4386 | kmem_free(log->l_buf_cancel_table); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4387 | log->l_buf_cancel_table = NULL; |
| 4388 | return error; |
| 4389 | } |
| 4390 | /* |
| 4391 | * Then do a second pass to actually recover the items in the log. |
| 4392 | * When it is complete free the table of buf cancel items. |
| 4393 | */ |
| 4394 | error = xlog_do_recovery_pass(log, head_blk, tail_blk, |
| 4395 | XLOG_RECOVER_PASS2); |
| 4396 | #ifdef DEBUG |
Tim Shimmin | 6d192a9 | 2006-06-09 14:55:38 +1000 | [diff] [blame] | 4397 | if (!error) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4398 | int i; |
| 4399 | |
| 4400 | for (i = 0; i < XLOG_BC_TABLE_SIZE; i++) |
Christoph Hellwig | d5689ea | 2010-12-01 22:06:22 +0000 | [diff] [blame] | 4401 | ASSERT(list_empty(&log->l_buf_cancel_table[i])); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4402 | } |
| 4403 | #endif /* DEBUG */ |
| 4404 | |
Denys Vlasenko | f0e2d93 | 2008-05-19 16:31:57 +1000 | [diff] [blame] | 4405 | kmem_free(log->l_buf_cancel_table); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4406 | log->l_buf_cancel_table = NULL; |
| 4407 | |
| 4408 | return error; |
| 4409 | } |
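/*
 * Illustrative note, not part of the original source: the buffer cancel
 * table set up above is a hash table of list heads,
 *
 *	log->l_buf_cancel_table =
 *		kmem_zalloc(XLOG_BC_TABLE_SIZE * sizeof(struct list_head),
 *			    KM_SLEEP);
 *
 * Pass 1 (XLOG_RECOVER_PASS1) only records which buffers have cancel
 * records in the log; pass 2 (XLOG_RECOVER_PASS2) replays items normally
 * but skips buffers found in the table. The DEBUG block above checks that
 * every hash chain is empty once pass 2 has consumed the cancel entries.
 */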
| 4410 | |
| 4411 | /* |
| 4412 | * Do the actual recovery |
| 4413 | */ |
| 4414 | STATIC int |
| 4415 | xlog_do_recover( |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 4416 | struct xlog *log, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4417 | xfs_daddr_t head_blk, |
| 4418 | xfs_daddr_t tail_blk) |
| 4419 | { |
| 4420 | int error; |
| 4421 | xfs_buf_t *bp; |
| 4422 | xfs_sb_t *sbp; |
| 4423 | |
| 4424 | /* |
| 4425 | * First replay the images in the log. |
| 4426 | */ |
| 4427 | error = xlog_do_log_recovery(log, head_blk, tail_blk); |
Christoph Hellwig | 43ff212 | 2012-04-23 15:58:39 +1000 | [diff] [blame] | 4428 | if (error) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4429 | return error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4430 | |
| 4431 | /* |
| 4432 | * If IO errors happened during recovery, bail out. |
| 4433 | */ |
| 4434 | if (XFS_FORCED_SHUTDOWN(log->l_mp)) { |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 4435 | return -EIO; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4436 | } |
| 4437 | |
| 4438 | /* |
| 4439 | * We now update the tail_lsn since much of the recovery has completed |
| 4440 | * and there may be space available to use. If there were no extent frees |
| 4441 | * or iunlinks, we can free up the entire log and set the tail_lsn to |
| 4442 | * be the last_sync_lsn. This was set in xlog_find_tail to be the |
| 4443 | * lsn of the last known good LR on disk. If there are extent frees |
| 4444 | * or iunlinks they will have some entries in the AIL; so we look at |
| 4445 | * the AIL to determine how to set the tail_lsn. |
| 4446 | */ |
| 4447 | xlog_assign_tail_lsn(log->l_mp); |
| 4448 | |
| 4449 | /* |
| 4450 | * Now that we've finished replaying all buffer and inode |
Dave Chinner | 9802182 | 2012-11-12 22:54:03 +1100 | [diff] [blame] | 4451 | * updates, re-read in the superblock and reverify it. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4452 | */ |
| 4453 | bp = xfs_getsb(log->l_mp, 0); |
| 4454 | XFS_BUF_UNDONE(bp); |
Lachlan McIlroy | bebf963 | 2007-10-15 13:18:02 +1000 | [diff] [blame] | 4455 | ASSERT(!(XFS_BUF_ISWRITE(bp))); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4456 | XFS_BUF_READ(bp); |
Lachlan McIlroy | bebf963 | 2007-10-15 13:18:02 +1000 | [diff] [blame] | 4457 | XFS_BUF_UNASYNC(bp); |
Dave Chinner | 1813dd6 | 2012-11-14 17:54:40 +1100 | [diff] [blame] | 4458 | bp->b_ops = &xfs_sb_buf_ops; |
Christoph Hellwig | 83a0adc | 2013-12-17 00:03:52 -0800 | [diff] [blame] | 4459 | |
Dave Chinner | 595bff7 | 2014-10-02 09:05:14 +1000 | [diff] [blame] | 4460 | error = xfs_buf_submit_wait(bp); |
David Chinner | d64e31a | 2008-04-10 12:22:17 +1000 | [diff] [blame] | 4461 | if (error) { |
Dave Chinner | 595bff7 | 2014-10-02 09:05:14 +1000 | [diff] [blame] | 4462 | if (!XFS_FORCED_SHUTDOWN(log->l_mp)) { |
| 4463 | xfs_buf_ioerror_alert(bp, __func__); |
| 4464 | ASSERT(0); |
| 4465 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4466 | xfs_buf_relse(bp); |
| 4467 | return error; |
| 4468 | } |
| 4469 | |
| 4470 | /* Convert superblock from on-disk format */ |
| 4471 | sbp = &log->l_mp->m_sb; |
Dave Chinner | 9802182 | 2012-11-12 22:54:03 +1100 | [diff] [blame] | 4472 | xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4473 | ASSERT(sbp->sb_magicnum == XFS_SB_MAGIC); |
Eric Sandeen | 6211870 | 2008-03-06 13:44:28 +1100 | [diff] [blame] | 4474 | ASSERT(xfs_sb_good_version(sbp)); |
Dave Chinner | 5681ca4 | 2015-02-23 21:22:31 +1100 | [diff] [blame] | 4475 | xfs_reinit_percpu_counters(log->l_mp); |
| 4476 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4477 | xfs_buf_relse(bp); |
| 4478 | |
Lachlan McIlroy | 5478eea | 2007-02-10 18:36:29 +1100 | [diff] [blame] | 4479 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4480 | xlog_recover_check_summary(log); |
| 4481 | |
| 4482 | /* Normal transactions can now occur */ |
| 4483 | log->l_flags &= ~XLOG_ACTIVE_RECOVERY; |
| 4484 | return 0; |
| 4485 | } |
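/*
 * Illustrative note, not part of the original source: the superblock is
 * re-read synchronously here (xfs_buf_submit_wait()) with b_ops set to
 * xfs_sb_buf_ops, so the read goes through the normal superblock verifier.
 * Replay may have changed the on-disk superblock, so the in-memory copy and
 * the per-cpu counters are rebuilt from it via xfs_sb_from_disk() and
 * xfs_reinit_percpu_counters() before normal transactions are allowed again.
 */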
| 4486 | |
| 4487 | /* |
| 4488 | * Perform recovery and re-initialize some log variables in xlog_find_tail. |
| 4489 | * |
| 4490 | * Return error or zero. |
| 4491 | */ |
| 4492 | int |
| 4493 | xlog_recover( |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 4494 | struct xlog *log) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4495 | { |
| 4496 | xfs_daddr_t head_blk, tail_blk; |
| 4497 | int error; |
| 4498 | |
| 4499 | /* find the tail of the log */ |
Eric Sandeen | 65be605 | 2006-01-11 15:34:19 +1100 | [diff] [blame] | 4500 | if ((error = xlog_find_tail(log, &head_blk, &tail_blk))) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4501 | return error; |
| 4502 | |
| 4503 | if (tail_blk != head_blk) { |
| 4504 | /* There used to be a comment here: |
| 4505 | * |
| 4506 | * disallow recovery on read-only mounts. note -- mount |
| 4507 | * checks for ENOSPC and turns it into an intelligent |
| 4508 | * error message. |
| 4509 | * ...but this is no longer true. Now, unless you specify |
| 4510 | * NORECOVERY (in which case this function would never be |
| 4511 | * called), we just go ahead and recover. We do this all |
| 4512 | * under the vfs layer, so we can get away with it unless |
| 4513 | * the device itself is read-only, in which case we fail. |
| 4514 | */ |
Utako Kusaka | 3a02ee1 | 2007-05-08 13:50:06 +1000 | [diff] [blame] | 4515 | if ((error = xfs_dev_is_read_only(log->l_mp, "recovery"))) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4516 | return error; |
| 4517 | } |
| 4518 | |
Dave Chinner | e721f50 | 2013-04-03 16:11:32 +1100 | [diff] [blame] | 4519 | /* |
| 4520 | * Version 5 superblock log feature mask validation. We know the |
| 4521 | * log is dirty so check if there are any unknown log features |
| 4522 | * in what we need to recover. If there are unknown features |
| 4523 | * (e.g. unsupported transactions), then simply reject the |
| 4524 | * attempt at recovery before touching anything. |
| 4525 | */ |
| 4526 | if (XFS_SB_VERSION_NUM(&log->l_mp->m_sb) == XFS_SB_VERSION_5 && |
| 4527 | xfs_sb_has_incompat_log_feature(&log->l_mp->m_sb, |
| 4528 | XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN)) { |
| 4529 | xfs_warn(log->l_mp, |
| 4530 | "Superblock has unknown incompatible log features (0x%x) enabled.\n" |
| 4531 | "The log can not be fully and/or safely recovered by this kernel.\n" |
| 4532 | "Please recover the log on a kernel that supports the unknown features.", |
| 4533 | (log->l_mp->m_sb.sb_features_log_incompat & |
| 4534 | XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN)); |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 4535 | return -EINVAL; |
Dave Chinner | e721f50 | 2013-04-03 16:11:32 +1100 | [diff] [blame] | 4536 | } |
| 4537 | |
Brian Foster | 2e22717 | 2014-09-09 11:56:13 +1000 | [diff] [blame] | 4538 | /* |
| 4539 | * Delay log recovery if the debug hook is set. This is debug |
| 4540 | * instrumentation to coordinate simulation of I/O failures with |
| 4541 | * log recovery. |
| 4542 | */ |
| 4543 | if (xfs_globals.log_recovery_delay) { |
| 4544 | xfs_notice(log->l_mp, |
| 4545 | "Delaying log recovery for %d seconds.", |
| 4546 | xfs_globals.log_recovery_delay); |
| 4547 | msleep(xfs_globals.log_recovery_delay * 1000); |
| 4548 | } |
| 4549 | |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 4550 | xfs_notice(log->l_mp, "Starting recovery (logdev: %s)", |
| 4551 | log->l_mp->m_logname ? log->l_mp->m_logname |
| 4552 | : "internal"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4553 | |
| 4554 | error = xlog_do_recover(log, head_blk, tail_blk); |
| 4555 | log->l_flags |= XLOG_RECOVERY_NEEDED; |
| 4556 | } |
| 4557 | return error; |
| 4558 | } |
| 4559 | |
| 4560 | /* |
| 4561 | * In the first part of recovery we replay inodes and buffers and build |
| 4562 | * up the list of extent free items which need to be processed. Here |
| 4563 | * we process the extent free items and clean up the on disk unlinked |
| 4564 | * inode lists. This is separated from the first part of recovery so |
| 4565 | * that the root and real-time bitmap inodes can be read in from disk in |
| 4566 | * between the two stages. This is necessary so that we can free space |
| 4567 | * in the real-time portion of the file system. |
| 4568 | */ |
| 4569 | int |
| 4570 | xlog_recover_finish( |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 4571 | struct xlog *log) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4572 | { |
| 4573 | /* |
| 4574 | * Now we're ready to do the transactions needed for the |
| 4575 | * rest of recovery. Start with completing all the extent |
| 4576 | * free intent records and then process the unlinked inode |
| 4577 | * lists. At this point, we essentially run in normal mode |
| 4578 | * except that we're still performing recovery actions |
| 4579 | * rather than accepting new requests. |
| 4580 | */ |
| 4581 | if (log->l_flags & XLOG_RECOVERY_NEEDED) { |
David Chinner | 3c1e2bb | 2008-04-10 12:21:11 +1000 | [diff] [blame] | 4582 | int error; |
| 4583 | error = xlog_recover_process_efis(log); |
| 4584 | if (error) { |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 4585 | xfs_alert(log->l_mp, "Failed to recover EFIs"); |
David Chinner | 3c1e2bb | 2008-04-10 12:21:11 +1000 | [diff] [blame] | 4586 | return error; |
| 4587 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4588 | /* |
| 4589 | * Sync the log to get all the EFIs out of the AIL. |
| 4590 | * This isn't absolutely necessary, but it helps in |
| 4591 | * case the unlink transactions would have problems |
| 4592 | * pushing the EFIs out of the way. |
| 4593 | */ |
Christoph Hellwig | a14a348 | 2010-01-19 09:56:46 +0000 | [diff] [blame] | 4594 | xfs_log_force(log->l_mp, XFS_LOG_SYNC); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4595 | |
Christoph Hellwig | 4249023 | 2008-08-13 16:49:32 +1000 | [diff] [blame] | 4596 | xlog_recover_process_iunlinks(log); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4597 | |
| 4598 | xlog_recover_check_summary(log); |
| 4599 | |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 4600 | xfs_notice(log->l_mp, "Ending recovery (logdev: %s)", |
| 4601 | log->l_mp->m_logname ? log->l_mp->m_logname |
| 4602 | : "internal"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4603 | log->l_flags &= ~XLOG_RECOVERY_NEEDED; |
| 4604 | } else { |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 4605 | xfs_info(log->l_mp, "Ending clean mount"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4606 | } |
| 4607 | return 0; |
| 4608 | } |
| 4609 | |
| 4610 | |
| 4611 | #if defined(DEBUG) |
| 4612 | /* |
| 4613 | * Read all of the agf and agi counters and check that they |
| 4614 | * are consistent with the superblock counters. |
| 4615 | */ |
| 4616 | void |
| 4617 | xlog_recover_check_summary( |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 4618 | struct xlog *log) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4619 | { |
| 4620 | xfs_mount_t *mp; |
| 4621 | xfs_agf_t *agfp; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4622 | xfs_buf_t *agfbp; |
| 4623 | xfs_buf_t *agibp; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4624 | xfs_agnumber_t agno; |
| 4625 | __uint64_t freeblks; |
| 4626 | __uint64_t itotal; |
| 4627 | __uint64_t ifree; |
Christoph Hellwig | 5e1be0f | 2008-11-28 14:23:37 +1100 | [diff] [blame] | 4628 | int error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4629 | |
| 4630 | mp = log->l_mp; |
| 4631 | |
| 4632 | freeblks = 0LL; |
| 4633 | itotal = 0LL; |
| 4634 | ifree = 0LL; |
| 4635 | for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) { |
From: Christoph Hellwig | 4805621 | 2008-11-28 14:23:38 +1100 | [diff] [blame] | 4636 | error = xfs_read_agf(mp, NULL, agno, 0, &agfbp); |
| 4637 | if (error) { |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 4638 | xfs_alert(mp, "%s agf read failed agno %d error %d", |
| 4639 | __func__, agno, error); |
From: Christoph Hellwig | 4805621 | 2008-11-28 14:23:38 +1100 | [diff] [blame] | 4640 | } else { |
| 4641 | agfp = XFS_BUF_TO_AGF(agfbp); |
| 4642 | freeblks += be32_to_cpu(agfp->agf_freeblks) + |
| 4643 | be32_to_cpu(agfp->agf_flcount); |
| 4644 | xfs_buf_relse(agfbp); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4645 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4646 | |
Christoph Hellwig | 5e1be0f | 2008-11-28 14:23:37 +1100 | [diff] [blame] | 4647 | error = xfs_read_agi(mp, NULL, agno, &agibp); |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 4648 | if (error) { |
| 4649 | xfs_alert(mp, "%s agi read failed agno %d error %d", |
| 4650 | __func__, agno, error); |
| 4651 | } else { |
Christoph Hellwig | 5e1be0f | 2008-11-28 14:23:37 +1100 | [diff] [blame] | 4652 | struct xfs_agi *agi = XFS_BUF_TO_AGI(agibp); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4653 | |
Christoph Hellwig | 5e1be0f | 2008-11-28 14:23:37 +1100 | [diff] [blame] | 4654 | itotal += be32_to_cpu(agi->agi_count); |
| 4655 | ifree += be32_to_cpu(agi->agi_freecount); |
| 4656 | xfs_buf_relse(agibp); |
| 4657 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4658 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4659 | } |
| 4660 | #endif /* DEBUG */ |