/*
 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
#include "xfs_trans.h"
#include "xfs_rmap.h"

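/*
 * Btree operation callbacks shared by the inode btree (inobt) and the
 * free inode btree (finobt). The two trees store the same record format;
 * they differ only in which AGI root/level fields they maintain and in
 * where lookups start.
 */
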
STATIC int
xfs_inobt_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_inobt_mnr[level != 0];
}

STATIC struct xfs_btree_cur *
xfs_inobt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	return xfs_inobt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_private.a.agbp, cur->bc_private.a.agno,
			cur->bc_btnum);
}

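/*
 * Record a new root block in the AGI and log it. nptr->s is the short-form
 * (32-bit, AG-relative) block pointer used by per-AG btrees; inc is the
 * change in tree height that goes with the new root.
 */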
STATIC void
xfs_inobt_set_root(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*nptr,
	int			inc)	/* level change */
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agi		*agi = XFS_BUF_TO_AGI(agbp);

	agi->agi_root = nptr->s;
	be32_add_cpu(&agi->agi_level, inc);
	xfs_ialloc_log_agi(cur->bc_tp, agbp, XFS_AGI_ROOT | XFS_AGI_LEVEL);
}

STATIC void
xfs_finobt_set_root(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*nptr,
	int			inc)	/* level change */
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agi		*agi = XFS_BUF_TO_AGI(agbp);

	agi->agi_free_root = nptr->s;
	be32_add_cpu(&agi->agi_free_level, inc);
	xfs_ialloc_log_agi(cur->bc_tp, agbp,
			   XFS_AGI_FREE_ROOT | XFS_AGI_FREE_LEVEL);
}

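/*
 * Allocate one filesystem block for a new btree block, placed near the
 * hint block sbno and tagged with the XFS_RMAP_OWN_INOBT owner so the
 * reverse-mapping btree can account for it.
 */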
STATIC int
xfs_inobt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			*stat)
{
	xfs_alloc_arg_t		args;		/* block allocation args */
	int			error;		/* error return value */
	xfs_agblock_t		sbno = be32_to_cpu(start->s);

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);

	memset(&args, 0, sizeof(args));
	args.tp = cur->bc_tp;
	args.mp = cur->bc_mp;
	xfs_rmap_ag_owner(&args.oinfo, XFS_RMAP_OWN_INOBT);
	args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_private.a.agno, sbno);
	args.minlen = 1;
	args.maxlen = 1;
	args.prod = 1;
	args.type = XFS_ALLOCTYPE_NEAR_BNO;

	error = xfs_alloc_vextent(&args);
	if (error) {
		XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
		return error;
	}
	if (args.fsbno == NULLFSBLOCK) {
		XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
		*stat = 0;
		return 0;
	}
	ASSERT(args.len == 1);
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);

	new->s = cpu_to_be32(XFS_FSB_TO_AGBNO(args.mp, args.fsbno));
	*stat = 1;
	return 0;
}

STATIC int
xfs_inobt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	struct xfs_owner_info	oinfo;

	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INOBT);
	return xfs_free_extent(cur->bc_tp,
			XFS_DADDR_TO_FSB(cur->bc_mp, XFS_BUF_ADDR(bp)), 1,
			&oinfo);
}

STATIC int
xfs_inobt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_inobt_mxr[level != 0];
}

STATIC void
xfs_inobt_init_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	key->inobt.ir_startino = rec->inobt.ir_startino;
}

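/*
 * On-disk records come in two flavours: with sparse inode support the
 * record carries a 16-bit holemask plus 8-bit count/freecount fields;
 * otherwise only a 32-bit freecount is stored and every record covers a
 * fully allocated chunk.
 */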
STATIC void
xfs_inobt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	rec->inobt.ir_startino = cpu_to_be32(cur->bc_rec.i.ir_startino);
	if (xfs_sb_version_hassparseinodes(&cur->bc_mp->m_sb)) {
		rec->inobt.ir_u.sp.ir_holemask =
					cpu_to_be16(cur->bc_rec.i.ir_holemask);
		rec->inobt.ir_u.sp.ir_count = cur->bc_rec.i.ir_count;
		rec->inobt.ir_u.sp.ir_freecount = cur->bc_rec.i.ir_freecount;
	} else {
		/* ir_holemask/ir_count not supported on-disk */
		rec->inobt.ir_u.f.ir_freecount =
					cpu_to_be32(cur->bc_rec.i.ir_freecount);
	}
	rec->inobt.ir_free = cpu_to_be64(cur->bc_rec.i.ir_free);
}

/*
 * Initial value of ptr for lookup.
 */
STATIC void
xfs_inobt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agi		*agi = XFS_BUF_TO_AGI(cur->bc_private.a.agbp);

	ASSERT(cur->bc_private.a.agno == be32_to_cpu(agi->agi_seqno));

	ptr->s = agi->agi_root;
}

STATIC void
xfs_finobt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agi		*agi = XFS_BUF_TO_AGI(cur->bc_private.a.agbp);

	ASSERT(cur->bc_private.a.agno == be32_to_cpu(agi->agi_seqno));
	ptr->s = agi->agi_free_root;
}

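/*
 * Key comparison follows memcmp() conventions: the result is negative,
 * zero or positive as the given key sorts before, equal to or after the
 * record currently held in the cursor.
 */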
STATIC __int64_t
xfs_inobt_key_diff(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key)
{
	return (__int64_t)be32_to_cpu(key->inobt.ir_startino) -
			  cur->bc_rec.i.ir_startino;
}

static bool
xfs_inobt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	unsigned int		level;

	/*
	 * During growfs operations, we can't verify the exact owner as the
	 * perag is not fully initialised and hence not attached to the buffer.
	 *
	 * Similarly, during log recovery we will have a perag structure
	 * attached, but the agi information will not yet have been initialised
	 * from the on disk AGI. We don't currently use any of this information,
	 * but beware of the landmine (i.e. need to check pag->pagi_init) if we
	 * ever do.
	 */
	switch (block->bb_magic) {
	case cpu_to_be32(XFS_IBT_CRC_MAGIC):
	case cpu_to_be32(XFS_FIBT_CRC_MAGIC):
		if (!xfs_btree_sblock_v5hdr_verify(bp))
			return false;
		/* fall through */
	case cpu_to_be32(XFS_IBT_MAGIC):
	case cpu_to_be32(XFS_FIBT_MAGIC):
		break;
	default:
		return false;
	}

	/* level verification */
	level = be16_to_cpu(block->bb_level);
	if (level >= mp->m_in_maxlevels)
		return false;

	return xfs_btree_sblock_verify(bp, mp->m_inobt_mxr[level != 0]);
}

static void
xfs_inobt_read_verify(
	struct xfs_buf	*bp)
{
	if (!xfs_btree_sblock_verify_crc(bp))
		xfs_buf_ioerror(bp, -EFSBADCRC);
	else if (!xfs_inobt_verify(bp))
		xfs_buf_ioerror(bp, -EFSCORRUPTED);

	if (bp->b_error) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_verifier_error(bp);
	}
}

static void
xfs_inobt_write_verify(
	struct xfs_buf	*bp)
{
	if (!xfs_inobt_verify(bp)) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_buf_ioerror(bp, -EFSCORRUPTED);
		xfs_verifier_error(bp);
		return;
	}
	xfs_btree_sblock_calc_crc(bp);
}

const struct xfs_buf_ops xfs_inobt_buf_ops = {
	.name = "xfs_inobt",
	.verify_read = xfs_inobt_read_verify,
	.verify_write = xfs_inobt_write_verify,
};

#if defined(DEBUG) || defined(XFS_WARN)
STATIC int
xfs_inobt_keys_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	return be32_to_cpu(k1->inobt.ir_startino) <
		be32_to_cpu(k2->inobt.ir_startino);
}

STATIC int
xfs_inobt_recs_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*r1,
	union xfs_btree_rec	*r2)
{
	return be32_to_cpu(r1->inobt.ir_startino) + XFS_INODES_PER_CHUNK <=
		be32_to_cpu(r2->inobt.ir_startino);
}
#endif	/* DEBUG */

static const struct xfs_btree_ops xfs_inobt_ops = {
	.rec_len		= sizeof(xfs_inobt_rec_t),
	.key_len		= sizeof(xfs_inobt_key_t),

	.dup_cursor		= xfs_inobt_dup_cursor,
	.set_root		= xfs_inobt_set_root,
	.alloc_block		= xfs_inobt_alloc_block,
	.free_block		= xfs_inobt_free_block,
	.get_minrecs		= xfs_inobt_get_minrecs,
	.get_maxrecs		= xfs_inobt_get_maxrecs,
	.init_key_from_rec	= xfs_inobt_init_key_from_rec,
	.init_rec_from_cur	= xfs_inobt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_inobt_init_ptr_from_cur,
	.key_diff		= xfs_inobt_key_diff,
	.buf_ops		= &xfs_inobt_buf_ops,
#if defined(DEBUG) || defined(XFS_WARN)
	.keys_inorder		= xfs_inobt_keys_inorder,
	.recs_inorder		= xfs_inobt_recs_inorder,
#endif
};

static const struct xfs_btree_ops xfs_finobt_ops = {
	.rec_len		= sizeof(xfs_inobt_rec_t),
	.key_len		= sizeof(xfs_inobt_key_t),

	.dup_cursor		= xfs_inobt_dup_cursor,
	.set_root		= xfs_finobt_set_root,
	.alloc_block		= xfs_inobt_alloc_block,
	.free_block		= xfs_inobt_free_block,
	.get_minrecs		= xfs_inobt_get_minrecs,
	.get_maxrecs		= xfs_inobt_get_maxrecs,
	.init_key_from_rec	= xfs_inobt_init_key_from_rec,
	.init_rec_from_cur	= xfs_inobt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_finobt_init_ptr_from_cur,
	.key_diff		= xfs_inobt_key_diff,
	.buf_ops		= &xfs_inobt_buf_ops,
#if defined(DEBUG) || defined(XFS_WARN)
	.keys_inorder		= xfs_inobt_keys_inorder,
	.recs_inorder		= xfs_inobt_recs_inorder,
#endif
};

/*
 * Allocate a new inode btree cursor.
 */
struct xfs_btree_cur *				/* new inode btree cursor */
xfs_inobt_init_cursor(
	struct xfs_mount	*mp,		/* file system mount point */
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_buf		*agbp,		/* buffer for agi structure */
	xfs_agnumber_t		agno,		/* allocation group number */
	xfs_btnum_t		btnum)		/* ialloc or free ino btree */
{
	struct xfs_agi		*agi = XFS_BUF_TO_AGI(agbp);
	struct xfs_btree_cur	*cur;

	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_SLEEP);

	cur->bc_tp = tp;
	cur->bc_mp = mp;
	cur->bc_btnum = btnum;
	if (btnum == XFS_BTNUM_INO) {
		cur->bc_nlevels = be32_to_cpu(agi->agi_level);
		cur->bc_ops = &xfs_inobt_ops;
	} else {
		cur->bc_nlevels = be32_to_cpu(agi->agi_free_level);
		cur->bc_ops = &xfs_finobt_ops;
	}

	cur->bc_blocklog = mp->m_sb.sb_blocklog;

	if (xfs_sb_version_hascrc(&mp->m_sb))
		cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

	cur->bc_private.a.agbp = agbp;
	cur->bc_private.a.agno = agno;

	return cur;
}
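
/*
 * A sketch of typical usage (caller-side names are illustrative, not taken
 * from this file):
 *
 *	cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO);
 *	error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_EQ, &stat);
 *	...
 *	xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
 *
 * Passing XFS_BTNUM_FINO instead walks the free inode btree.
 */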

/*
 * Calculate number of records in an inobt btree block.
 */
int
xfs_inobt_maxrecs(
	struct xfs_mount	*mp,
	int			blocklen,
	int			leaf)
{
	blocklen -= XFS_INOBT_BLOCK_LEN(mp);

	if (leaf)
		return blocklen / sizeof(xfs_inobt_rec_t);
	return blocklen / (sizeof(xfs_inobt_key_t) + sizeof(xfs_inobt_ptr_t));
}
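
/*
 * Worked example (illustrative, assuming the usual on-disk sizes): with
 * 4096-byte blocks and the 16-byte short-form header, a leaf holds
 * (4096 - 16) / 16 = 255 records and a node holds (4096 - 16) / (4 + 4) =
 * 510 key/ptr pairs; the 56-byte v5 CRC header reduces these to 252 and
 * 505 respectively.
 */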

/*
 * Convert the inode record holemask to an inode allocation bitmap. The inode
 * allocation bitmap is inode granularity and specifies whether an inode is
 * physically allocated on disk (not whether the inode is considered allocated
 * or free by the fs).
 *
 * A bit value of 1 means the inode is allocated, a value of 0 means it is
 * free.
 */
uint64_t
xfs_inobt_irec_to_allocmask(
	struct xfs_inobt_rec_incore	*rec)
{
	uint64_t			bitmap = 0;
	uint64_t			inodespbit;
	int				nextbit;
	uint				allocbitmap;

	/*
	 * The holemask has 16 bits for a 64 inode record. Therefore each
	 * holemask bit represents multiple inodes. Create a mask of bits to
	 * set in the allocmask for each holemask bit.
	 */
	inodespbit = (1 << XFS_INODES_PER_HOLEMASK_BIT) - 1;

	/*
	 * Allocated inodes are represented by 0 bits in holemask. Invert the 0
	 * bits to 1 and convert to a uint so we can use xfs_next_bit(). Mask
	 * anything beyond the 16 holemask bits since this casts to a larger
	 * type.
	 */
	allocbitmap = ~rec->ir_holemask & ((1 << XFS_INOBT_HOLEMASK_BITS) - 1);

	/*
	 * allocbitmap is the inverted holemask so every set bit represents
	 * allocated inodes. To expand from 16-bit holemask granularity to
	 * 64-bit (e.g., bit-per-inode), set inodespbit bits in the target
	 * bitmap for every holemask bit.
	 */
	nextbit = xfs_next_bit(&allocbitmap, 1, 0);
	while (nextbit != -1) {
		ASSERT(nextbit < (sizeof(rec->ir_holemask) * NBBY));

		bitmap |= (inodespbit <<
			   (nextbit * XFS_INODES_PER_HOLEMASK_BIT));

		nextbit = xfs_next_bit(&allocbitmap, 1, nextbit + 1);
	}

	return bitmap;
}
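
/*
 * Worked example (illustrative): with 64 inodes per record,
 * XFS_INODES_PER_HOLEMASK_BIT is 4 and inodespbit is 0xf. A holemask of
 * 0xff00 (inodes 32-63 sparse) inverts to an allocbitmap of 0x00ff;
 * expanding each set bit to a nibble yields 0x00000000ffffffff, i.e.
 * inodes 0-31 physically allocated.
 */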

#if defined(DEBUG) || defined(XFS_WARN)
/*
 * Verify that an in-core inode record has a valid inode count.
 */
int
xfs_inobt_rec_check_count(
	struct xfs_mount		*mp,
	struct xfs_inobt_rec_incore	*rec)
{
	int				inocount = 0;
	int				nextbit = 0;
	uint64_t			allocbmap;
	int				wordsz;

	wordsz = sizeof(allocbmap) / sizeof(unsigned int);
	allocbmap = xfs_inobt_irec_to_allocmask(rec);

	nextbit = xfs_next_bit((uint *) &allocbmap, wordsz, nextbit);
	while (nextbit != -1) {
		inocount++;
		nextbit = xfs_next_bit((uint *) &allocbmap, wordsz,
				       nextbit + 1);
	}

	if (inocount != rec->ir_count)
		return -EFSCORRUPTED;

	return 0;
}
#endif	/* DEBUG */