// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
#include "xfs_trans.h"
#include "xfs_rmap.h"

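/*
 * The per-level record limits are precomputed at mount time and cached in
 * m_inobt_mnr/m_inobt_mxr.  Index 0 holds the limit for leaf (record)
 * blocks, index 1 the limit for node (key/pointer) blocks, so [level != 0]
 * selects the right element.  minrecs is half of maxrecs.
 */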
STATIC int
xfs_inobt_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_inobt_mnr[level != 0];
}

STATIC struct xfs_btree_cur *
xfs_inobt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	return xfs_inobt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_private.a.agbp, cur->bc_private.a.agno,
			cur->bc_btnum);
}

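/*
 * Point the AGI at a new btree root.  The cached root pointer and level in
 * the AGI buffer are updated and both fields are logged so that the change
 * is replayed correctly by log recovery.
 */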
STATIC void
xfs_inobt_set_root(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*nptr,
	int			inc)	/* level change */
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agi		*agi = XFS_BUF_TO_AGI(agbp);

	agi->agi_root = nptr->s;
	be32_add_cpu(&agi->agi_level, inc);
	xfs_ialloc_log_agi(cur->bc_tp, agbp, XFS_AGI_ROOT | XFS_AGI_LEVEL);
}

STATIC void
xfs_finobt_set_root(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*nptr,
	int			inc)	/* level change */
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agi		*agi = XFS_BUF_TO_AGI(agbp);

	agi->agi_free_root = nptr->s;
	be32_add_cpu(&agi->agi_free_level, inc);
	xfs_ialloc_log_agi(cur->bc_tp, agbp,
			   XFS_AGI_FREE_ROOT | XFS_AGI_FREE_LEVEL);
}

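/*
 * Allocate a single new block for the btree.  The allocation is exactly
 * one block (minlen == maxlen == 1) placed near the hint block @start,
 * tagged with the inobt rmap owner, and charged against the given per-AG
 * reservation type.  *stat is set to zero if no space could be found.
 */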
STATIC int
__xfs_inobt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			*stat,
	enum xfs_ag_resv_type	resv)
{
	xfs_alloc_arg_t		args;	/* block allocation args */
	int			error;	/* error return value */
	xfs_agblock_t		sbno = be32_to_cpu(start->s);

	memset(&args, 0, sizeof(args));
	args.tp = cur->bc_tp;
	args.mp = cur->bc_mp;
	xfs_rmap_ag_owner(&args.oinfo, XFS_RMAP_OWN_INOBT);
	args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_private.a.agno, sbno);
	args.minlen = 1;
	args.maxlen = 1;
	args.prod = 1;
	args.type = XFS_ALLOCTYPE_NEAR_BNO;
	args.resv = resv;

	error = xfs_alloc_vextent(&args);
	if (error)
		return error;

	if (args.fsbno == NULLFSBLOCK) {
		*stat = 0;
		return 0;
	}
	ASSERT(args.len == 1);

	new->s = cpu_to_be32(XFS_FSB_TO_AGBNO(args.mp, args.fsbno));
	*stat = 1;
	return 0;
}

STATIC int
xfs_inobt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			*stat)
{
	return __xfs_inobt_alloc_block(cur, start, new, stat, XFS_AG_RESV_NONE);
}

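/*
 * Blocks for the free inode btree are normally charged to the per-AG
 * metadata reservation.  If that reservation is unavailable (m_inotbt_nores
 * is set, typically because it could not be established at mount time),
 * fall back to an ordinary unreserved allocation.
 */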
STATIC int
xfs_finobt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			*stat)
{
	if (cur->bc_mp->m_inotbt_nores)
		return xfs_inobt_alloc_block(cur, start, new, stat);
	return __xfs_inobt_alloc_block(cur, start, new, stat,
			XFS_AG_RESV_METADATA);
}

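/*
 * Free a single btree block, crediting it back to the given per-AG
 * reservation.  XFS_DADDR_TO_FSB converts the buffer's disk address back
 * to the filesystem block number being freed.
 */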
STATIC int
__xfs_inobt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp,
	enum xfs_ag_resv_type	resv)
{
	struct xfs_owner_info	oinfo;

	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INOBT);
	return xfs_free_extent(cur->bc_tp,
			XFS_DADDR_TO_FSB(cur->bc_mp, XFS_BUF_ADDR(bp)), 1,
			&oinfo, resv);
}

STATIC int
xfs_inobt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	return __xfs_inobt_free_block(cur, bp, XFS_AG_RESV_NONE);
}

STATIC int
xfs_finobt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	if (cur->bc_mp->m_inotbt_nores)
		return xfs_inobt_free_block(cur, bp);
	return __xfs_inobt_free_block(cur, bp, XFS_AG_RESV_METADATA);
}

STATIC int
xfs_inobt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_inobt_mxr[level != 0];
}

STATIC void
xfs_inobt_init_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	key->inobt.ir_startino = rec->inobt.ir_startino;
}

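/*
 * The high key of a record is the last inode number it covers.  For
 * example, with XFS_INODES_PER_CHUNK == 64, a record with ir_startino 96
 * covers inodes 96..159, so its high key is 159.
 */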
STATIC void
xfs_inobt_init_high_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	__u32			x;

	x = be32_to_cpu(rec->inobt.ir_startino);
	x += XFS_INODES_PER_CHUNK - 1;
	key->inobt.ir_startino = cpu_to_be32(x);
}

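/*
 * Write the in-core record out in on-disk format.  Filesystems with sparse
 * inode support store a 16-bit holemask plus 8-bit inode and free counts;
 * older filesystems store only a 32-bit free count.
 */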
STATIC void
xfs_inobt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	rec->inobt.ir_startino = cpu_to_be32(cur->bc_rec.i.ir_startino);
	if (xfs_sb_version_hassparseinodes(&cur->bc_mp->m_sb)) {
		rec->inobt.ir_u.sp.ir_holemask =
			cpu_to_be16(cur->bc_rec.i.ir_holemask);
		rec->inobt.ir_u.sp.ir_count = cur->bc_rec.i.ir_count;
		rec->inobt.ir_u.sp.ir_freecount = cur->bc_rec.i.ir_freecount;
	} else {
		/* ir_holemask/ir_count not supported on-disk */
		rec->inobt.ir_u.f.ir_freecount =
			cpu_to_be32(cur->bc_rec.i.ir_freecount);
	}
	rec->inobt.ir_free = cpu_to_be64(cur->bc_rec.i.ir_free);
}

/*
 * Initial value of ptr for lookup.
 */
STATIC void
xfs_inobt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agi		*agi = XFS_BUF_TO_AGI(cur->bc_private.a.agbp);

	ASSERT(cur->bc_private.a.agno == be32_to_cpu(agi->agi_seqno));

	ptr->s = agi->agi_root;
}

STATIC void
xfs_finobt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agi		*agi = XFS_BUF_TO_AGI(cur->bc_private.a.agbp);

	ASSERT(cur->bc_private.a.agno == be32_to_cpu(agi->agi_seqno));
	ptr->s = agi->agi_free_root;
}

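/*
 * Compare a lookup key against the record the cursor currently points at.
 * The result is negative, zero or positive as the key's start inode sorts
 * before, equal to, or after the cursor's search value.
 */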
STATIC int64_t
xfs_inobt_key_diff(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key)
{
	return (int64_t)be32_to_cpu(key->inobt.ir_startino) -
			cur->bc_rec.i.ir_startino;
}

STATIC int64_t
xfs_inobt_diff_two_keys(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	return (int64_t)be32_to_cpu(k1->inobt.ir_startino) -
			be32_to_cpu(k2->inobt.ir_startino);
}

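/*
 * Sanity check an inobt/finobt block: the magic number must match one of
 * the four (fi)nobt variants, v5 blocks must pass the extended short-form
 * header checks, the level must be within bounds, and the generic
 * short-form checks (record counts, sibling pointers) must pass.
 */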
static xfs_failaddr_t
xfs_inobt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	xfs_failaddr_t		fa;
	unsigned int		level;

	/*
	 * During growfs operations, we can't verify the exact owner as the
	 * perag is not fully initialised and hence not attached to the buffer.
	 *
	 * Similarly, during log recovery we will have a perag structure
	 * attached, but the agi information will not yet have been initialised
	 * from the on disk AGI. We don't currently use any of this information,
	 * but beware of the landmine (i.e. need to check pag->pagi_init) if we
	 * ever do.
	 */
	switch (block->bb_magic) {
	case cpu_to_be32(XFS_IBT_CRC_MAGIC):
	case cpu_to_be32(XFS_FIBT_CRC_MAGIC):
		fa = xfs_btree_sblock_v5hdr_verify(bp);
		if (fa)
			return fa;
		/* fall through */
	case cpu_to_be32(XFS_IBT_MAGIC):
	case cpu_to_be32(XFS_FIBT_MAGIC):
		break;
	default:
		return __this_address;
	}

	/* level verification */
	level = be16_to_cpu(block->bb_level);
	if (level >= mp->m_in_maxlevels)
		return __this_address;

	return xfs_btree_sblock_verify(bp, mp->m_inobt_mxr[level != 0]);
}

static void
xfs_inobt_read_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	if (!xfs_btree_sblock_verify_crc(bp)) {
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	} else {
		fa = xfs_inobt_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}

	if (bp->b_error)
		trace_xfs_btree_corrupt(bp, _RET_IP_);
}

static void
xfs_inobt_write_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	fa = xfs_inobt_verify(bp);
	if (fa) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}
	xfs_btree_sblock_calc_crc(bp);
}

const struct xfs_buf_ops xfs_inobt_buf_ops = {
	.name = "xfs_inobt",
	.verify_read = xfs_inobt_read_verify,
	.verify_write = xfs_inobt_write_verify,
	.verify_struct = xfs_inobt_verify,
};

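/*
 * Ordering checks used by the generic btree code: keys need only be in
 * increasing order, but records must also not overlap, hence the
 * XFS_INODES_PER_CHUNK gap required between consecutive start inodes.
 */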
STATIC int
xfs_inobt_keys_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	return be32_to_cpu(k1->inobt.ir_startino) <
		be32_to_cpu(k2->inobt.ir_startino);
}

STATIC int
xfs_inobt_recs_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*r1,
	union xfs_btree_rec	*r2)
{
	return be32_to_cpu(r1->inobt.ir_startino) + XFS_INODES_PER_CHUNK <=
		be32_to_cpu(r2->inobt.ir_startino);
}

static const struct xfs_btree_ops xfs_inobt_ops = {
	.rec_len		= sizeof(xfs_inobt_rec_t),
	.key_len		= sizeof(xfs_inobt_key_t),

	.dup_cursor		= xfs_inobt_dup_cursor,
	.set_root		= xfs_inobt_set_root,
	.alloc_block		= xfs_inobt_alloc_block,
	.free_block		= xfs_inobt_free_block,
	.get_minrecs		= xfs_inobt_get_minrecs,
	.get_maxrecs		= xfs_inobt_get_maxrecs,
	.init_key_from_rec	= xfs_inobt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_inobt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_inobt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_inobt_init_ptr_from_cur,
	.key_diff		= xfs_inobt_key_diff,
	.buf_ops		= &xfs_inobt_buf_ops,
	.diff_two_keys		= xfs_inobt_diff_two_keys,
	.keys_inorder		= xfs_inobt_keys_inorder,
	.recs_inorder		= xfs_inobt_recs_inorder,
};

static const struct xfs_btree_ops xfs_finobt_ops = {
	.rec_len		= sizeof(xfs_inobt_rec_t),
	.key_len		= sizeof(xfs_inobt_key_t),

	.dup_cursor		= xfs_inobt_dup_cursor,
	.set_root		= xfs_finobt_set_root,
	.alloc_block		= xfs_finobt_alloc_block,
	.free_block		= xfs_finobt_free_block,
	.get_minrecs		= xfs_inobt_get_minrecs,
	.get_maxrecs		= xfs_inobt_get_maxrecs,
	.init_key_from_rec	= xfs_inobt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_inobt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_inobt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_finobt_init_ptr_from_cur,
	.key_diff		= xfs_inobt_key_diff,
	.buf_ops		= &xfs_inobt_buf_ops,
	.diff_two_keys		= xfs_inobt_diff_two_keys,
	.keys_inorder		= xfs_inobt_keys_inorder,
	.recs_inorder		= xfs_inobt_recs_inorder,
};

/*
 * Allocate a new inode btree cursor.
 */
struct xfs_btree_cur *				/* new inode btree cursor */
xfs_inobt_init_cursor(
	struct xfs_mount	*mp,		/* file system mount point */
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_buf		*agbp,		/* buffer for agi structure */
	xfs_agnumber_t		agno,		/* allocation group number */
	xfs_btnum_t		btnum)		/* ialloc or free ino btree */
{
	struct xfs_agi		*agi = XFS_BUF_TO_AGI(agbp);
	struct xfs_btree_cur	*cur;

	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS);

	cur->bc_tp = tp;
	cur->bc_mp = mp;
	cur->bc_btnum = btnum;
	if (btnum == XFS_BTNUM_INO) {
		cur->bc_nlevels = be32_to_cpu(agi->agi_level);
		cur->bc_ops = &xfs_inobt_ops;
		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_ibt_2);
	} else {
		cur->bc_nlevels = be32_to_cpu(agi->agi_free_level);
		cur->bc_ops = &xfs_finobt_ops;
		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_fibt_2);
	}

	cur->bc_blocklog = mp->m_sb.sb_blocklog;

	if (xfs_sb_version_hascrc(&mp->m_sb))
		cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

	cur->bc_private.a.agbp = agbp;
	cur->bc_private.a.agno = agno;

	return cur;
}

/*
 * Calculate number of records in an inobt btree block.
 */
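/*
 * A worked example (a sketch assuming 4096-byte blocks and the 16-byte
 * non-CRC short-form block header): a leaf holds (4096 - 16) / 16 = 255
 * records, while a node holds (4096 - 16) / (4 + 4) = 510 key/pointer
 * pairs.  CRC-enabled blocks have a larger header and hold slightly fewer.
 */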
int
xfs_inobt_maxrecs(
	struct xfs_mount	*mp,
	int			blocklen,
	int			leaf)
{
	blocklen -= XFS_INOBT_BLOCK_LEN(mp);

	if (leaf)
		return blocklen / sizeof(xfs_inobt_rec_t);
	return blocklen / (sizeof(xfs_inobt_key_t) + sizeof(xfs_inobt_ptr_t));
}

/*
 * Convert the inode record holemask to an inode allocation bitmap. The inode
 * allocation bitmap is inode granularity and specifies whether an inode is
 * physically allocated on disk (not whether the inode is considered allocated
 * or free by the fs).
 *
 * A bit value of 1 means the inode is allocated, a value of 0 means it is free.
 */
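/*
 * Worked example: with XFS_INODES_PER_HOLEMASK_BIT == 4, a holemask of
 * 0xff00 marks inodes 32..63 as a hole.  The inverted mask is 0x00ff, and
 * expanding each of those bits to four inode bits yields the allocation
 * bitmap 0x00000000ffffffff: the low 32 inodes are physically allocated.
 */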
uint64_t
xfs_inobt_irec_to_allocmask(
	struct xfs_inobt_rec_incore	*rec)
{
	uint64_t			bitmap = 0;
	uint64_t			inodespbit;
	int				nextbit;
	uint				allocbitmap;

	/*
	 * The holemask has 16-bits for a 64 inode record. Therefore each
	 * holemask bit represents multiple inodes. Create a mask of bits to set
	 * in the allocmask for each holemask bit.
	 */
	inodespbit = (1 << XFS_INODES_PER_HOLEMASK_BIT) - 1;

	/*
	 * Allocated inodes are represented by 0 bits in holemask. Invert the 0
	 * bits to 1 and convert to a uint so we can use xfs_next_bit(). Mask
	 * anything beyond the 16 holemask bits since this casts to a larger
	 * type.
	 */
	allocbitmap = ~rec->ir_holemask & ((1 << XFS_INOBT_HOLEMASK_BITS) - 1);

	/*
	 * allocbitmap is the inverted holemask so every set bit represents
	 * allocated inodes. To expand from 16-bit holemask granularity to
	 * 64-bit (e.g., bit-per-inode), set inodespbit bits in the target
	 * bitmap for every holemask bit.
	 */
	nextbit = xfs_next_bit(&allocbitmap, 1, 0);
	while (nextbit != -1) {
		ASSERT(nextbit < (sizeof(rec->ir_holemask) * NBBY));

		bitmap |= (inodespbit <<
			   (nextbit * XFS_INODES_PER_HOLEMASK_BIT));

		nextbit = xfs_next_bit(&allocbitmap, 1, nextbit + 1);
	}

	return bitmap;
}

#if defined(DEBUG) || defined(XFS_WARN)
/*
 * Verify that an in-core inode record has a valid inode count.
 */
int
xfs_inobt_rec_check_count(
	struct xfs_mount		*mp,
	struct xfs_inobt_rec_incore	*rec)
{
	int				inocount = 0;
	int				nextbit = 0;
	uint64_t			allocbmap;
	int				wordsz;

	wordsz = sizeof(allocbmap) / sizeof(unsigned int);
	allocbmap = xfs_inobt_irec_to_allocmask(rec);

	nextbit = xfs_next_bit((uint *) &allocbmap, wordsz, nextbit);
	while (nextbit != -1) {
		inocount++;
		nextbit = xfs_next_bit((uint *) &allocbmap, wordsz,
				       nextbit + 1);
	}

	if (inocount != rec->ir_count)
		return -EFSCORRUPTED;

	return 0;
}
#endif	/* DEBUG || XFS_WARN */

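/*
 * Upper bound on the number of blocks an inode btree can occupy: one
 * record for every possible inode chunk in the AG, sized with minrecs so
 * that worst-case (half-full) tree blocks are assumed.
 */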
static xfs_extlen_t
xfs_inobt_max_size(
	struct xfs_mount	*mp)
{
	/* Bail out if we're uninitialized, which can happen in mkfs. */
	if (mp->m_inobt_mxr[0] == 0)
		return 0;

	return xfs_btree_calc_size(mp->m_inobt_mnr,
		(uint64_t)mp->m_sb.sb_agblocks * mp->m_sb.sb_inopblock /
				XFS_INODES_PER_CHUNK);
}

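/*
 * Count the blocks currently used by one of the AG's inode btrees by
 * reading the AGI and walking the whole tree.
 */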
static int
xfs_inobt_count_blocks(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_btnum_t		btnum,
	xfs_extlen_t		*tree_blocks)
{
	struct xfs_buf		*agbp;
	struct xfs_btree_cur	*cur;
	int			error;

	error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
	if (error)
		return error;

	cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno, btnum);
	error = xfs_btree_count_blocks(cur, tree_blocks);
	xfs_btree_del_cursor(cur, error);
	xfs_buf_relse(agbp);

	return error;
}

/*
 * Figure out how many blocks to reserve and how many are used by this btree.
 */
int
xfs_finobt_calc_reserves(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_extlen_t		*ask,
	xfs_extlen_t		*used)
{
	xfs_extlen_t		tree_len = 0;
	int			error;

	if (!xfs_sb_version_hasfinobt(&mp->m_sb))
		return 0;

	error = xfs_inobt_count_blocks(mp, agno, XFS_BTNUM_FINO, &tree_len);
	if (error)
		return error;

	*ask += xfs_inobt_max_size(mp);
	*used += tree_len;
	return 0;
}

/* Calculate the inobt btree size for some records. */
xfs_extlen_t
xfs_iallocbt_calc_size(
	struct xfs_mount	*mp,
	unsigned long long	len)
{
	return xfs_btree_calc_size(mp->m_inobt_mnr, len);
}