// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/hfsplus/btree.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handle opening/closing btree
 */

#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/log2.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

/*
 * The initial version of the clump size calculation was taken from
 * http://opensource.apple.com/tarballs/diskdev_cmds/
 */
#define CLUMP_ENTRIES 15

static short clumptbl[CLUMP_ENTRIES * 3] = {
/*
 *          Volume      Attributes       Catalog        Extents
 *           Size       Clump (MB)      Clump (MB)     Clump (MB)
 */
        /*   1GB */       4,              4,             4,
        /*   2GB */       6,              6,             4,
        /*   4GB */       8,              8,             4,
        /*   8GB */      11,             11,             5,
        /*
         * For volumes 16GB and larger, we want to make sure that a full OS
         * install won't require fragmentation of the Catalog or Attributes
         * B-trees.  We do this by making the clump sizes sufficiently large,
         * and by leaving a gap after the B-trees for them to grow into.
         *
         * For SnowLeopard 10A298, a FullNetInstall with all packages selected
         * results in:
         * Catalog B-tree Header
         *      nodeSize:          8192
         *      totalNodes:       31616
         *      freeNodes:         1978
         * (used = 231.55 MB)
         * Attributes B-tree Header
         *      nodeSize:          8192
         *      totalNodes:       63232
         *      freeNodes:          958
         * (used = 486.52 MB)
         *
         * We also want Time Machine backup volumes to have a sufficiently
         * large clump size to reduce fragmentation.
         *
         * The series of numbers for Catalog and Attributes form a geometric
         * series.  For Catalog (16GB to 512GB), each term is 8**(1/5) times
         * the previous term.  For Attributes (16GB to 512GB), each term is
         * 4**(1/5) times the previous term.  For 1TB to 16TB, each term is
         * 2**(1/5) times the previous term.
         */
        /*  16GB */      64,             32,             5,
        /*  32GB */      84,             49,             6,
        /*  64GB */     111,             74,             7,
        /* 128GB */     147,            111,             8,
        /* 256GB */     194,            169,             9,
        /* 512GB */     256,            256,            11,
        /*   1TB */     294,            294,            14,
        /*   2TB */     338,            338,            16,
        /*   4TB */     388,            388,            20,
        /*   8TB */     446,            446,            25,
        /*  16TB */     512,            512,            32
};

u32 hfsplus_calc_btree_clump_size(u32 block_size, u32 node_size,
                                  u64 sectors, int file_id)
{
        u32 mod = max(node_size, block_size);
        u32 clump_size;
        int column;
        int i;

        /* Figure out which column of the above table to use for this file. */
        switch (file_id) {
        case HFSPLUS_ATTR_CNID:
                column = 0;
                break;
        case HFSPLUS_CAT_CNID:
                column = 1;
                break;
        default:
                column = 2;
                break;
        }

        /*
         * The default clump size is 0.8% of the volume size, and it must
         * also be a multiple of the node and block size.
         */
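        /*
         * (Assuming 512-byte sectors, "sectors << 2" below is 4 bytes per
         * sector, i.e. volume_size / 128, which is roughly 0.8%.)
         */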
        if (sectors < 0x200000) {
                clump_size = sectors << 2;      /*  0.8 %  */
                if (clump_size < (8 * node_size))
                        clump_size = 8 * node_size;
        } else {
                /* turn exponent into table index... */
                for (i = 0, sectors = sectors >> 22;
                     sectors && (i < CLUMP_ENTRIES - 1);
                     ++i, sectors = sectors >> 1) {
                        /* empty body */
                }
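                /*
                 * Worked example (again assuming 512-byte sectors): a 16GB
                 * volume has 0x2000000 sectors, so sectors >> 22 is 8; the
                 * loop then halves it to 0 in four steps, leaving i == 4,
                 * which selects the 16GB row of clumptbl above.
                 */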

                clump_size = clumptbl[column + (i) * 3] * 1024 * 1024;
        }

        /*
         * Round the clump size to a multiple of node and block size.
         * NOTE: This rounds down.
         */
        clump_size /= mod;
        clump_size *= mod;

        /*
         * Rounding down could have rounded down to 0 if the block size was
         * greater than the clump size.  If so, just use one block or node.
         */
        if (clump_size == 0)
                clump_size = mod;

        return clump_size;
}

/* Get a reference to a B*Tree and do some initial checks */
struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
{
        struct hfs_btree *tree;
        struct hfs_btree_header_rec *head;
        struct address_space *mapping;
        struct inode *inode;
        struct page *page;
        unsigned int size;

        tree = kzalloc(sizeof(*tree), GFP_KERNEL);
        if (!tree)
                return NULL;

        mutex_init(&tree->tree_lock);
        spin_lock_init(&tree->hash_lock);
        tree->sb = sb;
        tree->cnid = id;
        inode = hfsplus_iget(sb, id);
        if (IS_ERR(inode))
                goto free_tree;
        tree->inode = inode;

        if (!HFSPLUS_I(tree->inode)->first_blocks) {
                pr_err("invalid btree extent records (0 size)\n");
                goto free_inode;
        }

        mapping = tree->inode->i_mapping;
        page = read_mapping_page(mapping, 0, NULL);
        if (IS_ERR(page))
                goto free_inode;

        /* Load the header */
        head = (struct hfs_btree_header_rec *)(kmap(page) +
                sizeof(struct hfs_bnode_desc));
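        /*
         * The header record sits directly after the node descriptor at the
         * start of node 0; its fields are big-endian on disk, hence the
         * be??_to_cpu conversions below.
         */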
        tree->root = be32_to_cpu(head->root);
        tree->leaf_count = be32_to_cpu(head->leaf_count);
        tree->leaf_head = be32_to_cpu(head->leaf_head);
        tree->leaf_tail = be32_to_cpu(head->leaf_tail);
        tree->node_count = be32_to_cpu(head->node_count);
        tree->free_nodes = be32_to_cpu(head->free_nodes);
        tree->attributes = be32_to_cpu(head->attributes);
        tree->node_size = be16_to_cpu(head->node_size);
        tree->max_key_len = be16_to_cpu(head->max_key_len);
        tree->depth = be16_to_cpu(head->depth);

        /* Verify the tree and set the correct compare function */
        switch (id) {
        case HFSPLUS_EXT_CNID:
                if (tree->max_key_len != HFSPLUS_EXT_KEYLEN - sizeof(u16)) {
                        pr_err("invalid extent max_key_len %d\n",
                                tree->max_key_len);
                        goto fail_page;
                }
                if (tree->attributes & HFS_TREE_VARIDXKEYS) {
                        pr_err("invalid extent btree flag\n");
                        goto fail_page;
                }

                tree->keycmp = hfsplus_ext_cmp_key;
                break;
        case HFSPLUS_CAT_CNID:
                if (tree->max_key_len != HFSPLUS_CAT_KEYLEN - sizeof(u16)) {
                        pr_err("invalid catalog max_key_len %d\n",
                                tree->max_key_len);
                        goto fail_page;
                }
                if (!(tree->attributes & HFS_TREE_VARIDXKEYS)) {
                        pr_err("invalid catalog btree flag\n");
                        goto fail_page;
                }

                if (test_bit(HFSPLUS_SB_HFSX, &HFSPLUS_SB(sb)->flags) &&
                    (head->key_type == HFSPLUS_KEY_BINARY))
                        tree->keycmp = hfsplus_cat_bin_cmp_key;
                else {
                        tree->keycmp = hfsplus_cat_case_cmp_key;
                        set_bit(HFSPLUS_SB_CASEFOLD, &HFSPLUS_SB(sb)->flags);
                }
                break;
        case HFSPLUS_ATTR_CNID:
                if (tree->max_key_len != HFSPLUS_ATTR_KEYLEN - sizeof(u16)) {
                        pr_err("invalid attributes max_key_len %d\n",
                                tree->max_key_len);
                        goto fail_page;
                }
                tree->keycmp = hfsplus_attr_bin_cmp_key;
                break;
        default:
                pr_err("unknown B*Tree requested\n");
                goto fail_page;
        }

        if (!(tree->attributes & HFS_TREE_BIGKEYS)) {
                pr_err("invalid btree flag\n");
                goto fail_page;
        }

        size = tree->node_size;
        if (!is_power_of_2(size))
                goto fail_page;
        if (!tree->node_count)
                goto fail_page;

        tree->node_size_shift = ffs(size) - 1;

        tree->pages_per_bnode =
                (tree->node_size + PAGE_SIZE - 1) >> PAGE_SHIFT;

        kunmap(page);
        put_page(page);
        return tree;

fail_page:
        put_page(page);
free_inode:
        tree->inode->i_mapping->a_ops = &hfsplus_aops;
        iput(tree->inode);
free_tree:
        kfree(tree);
        return NULL;
}

/* Release resources used by a btree */
void hfs_btree_close(struct hfs_btree *tree)
{
        struct hfs_bnode *node;
        int i;

        if (!tree)
                return;

        for (i = 0; i < NODE_HASH_SIZE; i++) {
                while ((node = tree->node_hash[i])) {
                        tree->node_hash[i] = node->next_hash;
                        if (atomic_read(&node->refcnt))
                                pr_crit("node %d:%d still has %d user(s)!\n",
                                        node->tree->cnid, node->this,
                                        atomic_read(&node->refcnt));
                        hfs_bnode_free(node);
                        tree->node_hash_cnt--;
                }
        }
        iput(tree->inode);
        kfree(tree);
}

int hfs_btree_write(struct hfs_btree *tree)
{
        struct hfs_btree_header_rec *head;
        struct hfs_bnode *node;
        struct page *page;

        node = hfs_bnode_find(tree, 0);
        if (IS_ERR(node))
                /* panic? */
                return -EIO;
        /* Load the header */
        page = node->page[0];
        head = (struct hfs_btree_header_rec *)(kmap(page) +
                sizeof(struct hfs_bnode_desc));

        head->root = cpu_to_be32(tree->root);
        head->leaf_count = cpu_to_be32(tree->leaf_count);
        head->leaf_head = cpu_to_be32(tree->leaf_head);
        head->leaf_tail = cpu_to_be32(tree->leaf_tail);
        head->node_count = cpu_to_be32(tree->node_count);
        head->free_nodes = cpu_to_be32(tree->free_nodes);
        head->attributes = cpu_to_be32(tree->attributes);
        head->depth = cpu_to_be16(tree->depth);

        kunmap(page);
        set_page_dirty(page);
        hfs_bnode_put(node);
        return 0;
}

static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)
{
        struct hfs_btree *tree = prev->tree;
        struct hfs_bnode *node;
        struct hfs_bnode_desc desc;
        __be32 cnid;

        node = hfs_bnode_create(tree, idx);
        if (IS_ERR(node))
                return node;

        tree->free_nodes--;
        prev->next = idx;
        cnid = cpu_to_be32(idx);
        hfs_bnode_write(prev, &cnid, offsetof(struct hfs_bnode_desc, next), 4);

        node->type = HFS_NODE_MAP;
        node->num_recs = 1;
        hfs_bnode_clear(node, 0, tree->node_size);
        desc.next = 0;
        desc.prev = 0;
        desc.type = HFS_NODE_MAP;
        desc.height = 0;
        desc.num_recs = cpu_to_be16(1);
        desc.reserved = 0;
        hfs_bnode_write(node, &desc, 0, sizeof(desc));
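        /*
         * The map record begins right after the node descriptor at offset
         * 14.  Setting its first bit (0x8000) marks this new map node
         * itself as allocated.  The two u16s at the end of the node form
         * the record offset table: record 0 starts at offset 14 and the
         * free space starts at node_size - 6.
         */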
        hfs_bnode_write_u16(node, 14, 0x8000);
        hfs_bnode_write_u16(node, tree->node_size - 2, 14);
        hfs_bnode_write_u16(node, tree->node_size - 4, tree->node_size - 6);

        return node;
}

/* Make sure @tree has enough space for the @rsvd_nodes */
int hfs_bmap_reserve(struct hfs_btree *tree, int rsvd_nodes)
{
        struct inode *inode = tree->inode;
        struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
        u32 count;
        int res;

        if (rsvd_nodes <= 0)
                return 0;

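        /*
         * Grow the B-tree file until it has at least @rsvd_nodes free
         * nodes, recomputing the node and free-node counts from the new
         * file size after each extension.
         */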
        while (tree->free_nodes < rsvd_nodes) {
                res = hfsplus_file_extend(inode, hfs_bnode_need_zeroout(tree));
                if (res)
                        return res;
                hip->phys_size = inode->i_size =
                        (loff_t)hip->alloc_blocks <<
                                HFSPLUS_SB(tree->sb)->alloc_blksz_shift;
                hip->fs_blocks =
                        hip->alloc_blocks << HFSPLUS_SB(tree->sb)->fs_shift;
                inode_set_bytes(inode, inode->i_size);
                count = inode->i_size >> tree->node_size_shift;
                tree->free_nodes += count - tree->node_count;
                tree->node_count = count;
        }
        return 0;
}

struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
{
        struct hfs_bnode *node, *next_node;
        struct page **pagep;
        u32 nidx, idx;
        unsigned off;
        u16 off16;
        u16 len;
        u8 *data, byte, m;
        int i, res;

        res = hfs_bmap_reserve(tree, 1);
        if (res)
                return ERR_PTR(res);

        nidx = 0;
        node = hfs_bnode_find(tree, nidx);
        if (IS_ERR(node))
                return node;
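        /*
         * In the header node (node 0), record 2 is the allocation bitmap;
         * record 0 is the header record and record 1 the user data record.
         */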
        len = hfs_brec_lenoff(node, 2, &off16);
        off = off16;

        off += node->page_offset;
        pagep = node->page + (off >> PAGE_SHIFT);
        data = kmap(*pagep);
        off &= ~PAGE_MASK;
        idx = 0;

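        /*
         * Scan the map records for a clear bit; its position is the index
         * of a free node.  When one map record is exhausted, follow the
         * node's "next" link to the following map node, creating a new one
         * if necessary.
         */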
        for (;;) {
                while (len) {
                        byte = data[off];
                        if (byte != 0xff) {
                                for (m = 0x80, i = 0; i < 8; m >>= 1, i++) {
                                        if (!(byte & m)) {
                                                idx += i;
                                                data[off] |= m;
                                                set_page_dirty(*pagep);
                                                kunmap(*pagep);
                                                tree->free_nodes--;
                                                mark_inode_dirty(tree->inode);
                                                hfs_bnode_put(node);
                                                return hfs_bnode_create(tree,
                                                        idx);
                                        }
                                }
                        }
                        if (++off >= PAGE_SIZE) {
                                kunmap(*pagep);
                                data = kmap(*++pagep);
                                off = 0;
                        }
                        idx += 8;
                        len--;
                }
                kunmap(*pagep);
                nidx = node->next;
                if (!nidx) {
                        hfs_dbg(BNODE_MOD, "create new bmap node\n");
                        next_node = hfs_bmap_new_bmap(node, idx);
                } else
                        next_node = hfs_bnode_find(tree, nidx);
                hfs_bnode_put(node);
                if (IS_ERR(next_node))
                        return next_node;
                node = next_node;

                len = hfs_brec_lenoff(node, 0, &off16);
                off = off16;
                off += node->page_offset;
                pagep = node->page + (off >> PAGE_SHIFT);
                data = kmap(*pagep);
                off &= ~PAGE_MASK;
        }
}

void hfs_bmap_free(struct hfs_bnode *node)
{
        struct hfs_btree *tree;
        struct page *page;
        u16 off, len;
        u32 nidx;
        u8 *data, byte, m;

        hfs_dbg(BNODE_MOD, "btree_free_node: %u\n", node->this);
        BUG_ON(!node->this);
        tree = node->tree;
        nidx = node->this;
        node = hfs_bnode_find(tree, 0);
        if (IS_ERR(node))
                return;
        len = hfs_brec_lenoff(node, 2, &off);
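        /*
         * Each map record tracks len * 8 nodes; walk the chain of map
         * nodes until we reach the record that covers bit nidx.
         */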
        while (nidx >= len * 8) {
                u32 i;

                nidx -= len * 8;
                i = node->next;
                if (!i) {
                        /* panic */;
                        pr_crit("unable to free bnode %u. bmap not found!\n",
                                node->this);
                        hfs_bnode_put(node);
                        return;
                }
                hfs_bnode_put(node);
                node = hfs_bnode_find(tree, i);
                if (IS_ERR(node))
                        return;
                if (node->type != HFS_NODE_MAP) {
                        /* panic */;
                        pr_crit("invalid bmap found! (%u,%d)\n",
                                node->this, node->type);
                        hfs_bnode_put(node);
                        return;
                }
                len = hfs_brec_lenoff(node, 0, &off);
        }
        off += node->page_offset + nidx / 8;
        page = node->page[off >> PAGE_SHIFT];
        data = kmap(page);
        off &= ~PAGE_MASK;
        m = 1 << (~nidx & 7);
        byte = data[off];
        if (!(byte & m)) {
                pr_crit("trying to free free bnode %u(%d)\n",
                        node->this, node->type);
                kunmap(page);
                hfs_bnode_put(node);
                return;
        }
        data[off] = byte & ~m;
        set_page_dirty(page);
        kunmap(page);
        hfs_bnode_put(node);
        tree->free_nodes++;
        mark_inode_dirty(tree->inode);
}