/*
 *  linux/fs/hfsplus/bitmap.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handling of allocation file
 */

#include <linux/pagemap.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

#define PAGE_CACHE_BITS	(PAGE_CACHE_SIZE * 8)

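/*
 * hfsplus_block_allocate():
 *
 * Search the allocation file for a run of free blocks.  The allocation
 * file is a big-endian bitmap with one bit per allocation block, and
 * each page cache page covers PAGE_CACHE_BITS of those bits.
 *
 * The scan starts at bit @offset; bits at or beyond @size are never
 * allocated.  On entry *@max holds the maximum run length wanted; on
 * success it is updated to the number of blocks actually reserved and
 * the first block of the run is returned.  When nothing can be
 * allocated (bitmap full, or a bitmap page cannot be read) a value no
 * smaller than @size is returned.
 */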
int hfsplus_block_allocate(struct super_block *sb, u32 size,
		u32 offset, u32 *max)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct page *page;
	struct address_space *mapping;
	__be32 *pptr, *curr, *end;
	u32 mask, start, len, n;
	__be32 val;
	int i;

	len = *max;
	if (!len)
		return size;

	dprint(DBG_BITMAP, "block_allocate: %u,%u,%u\n", size, offset, len);
	mutex_lock(&sbi->alloc_mutex);
	mapping = sbi->alloc_file->i_mapping;
	page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL);
	if (IS_ERR(page)) {
		start = size;
		goto out;
	}
	pptr = kmap(page);
	curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
	i = offset % 32;
	offset &= ~(PAGE_CACHE_BITS - 1);
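	/*
	 * Limit the scan to valid bits: if the bitmap extends beyond this
	 * page, search the whole page; otherwise stop with the 32-bit word
	 * that holds the last valid bit (bit size - 1).
	 */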
	if ((size ^ offset) / PAGE_CACHE_BITS)
		end = pptr + PAGE_CACHE_BITS / 32;
	else
		end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32;

	/* scan the first partial u32 for zero bits */
	val = *curr;
	if (~val) {
		n = be32_to_cpu(val);
		mask = (1U << 31) >> i;
		for (; i < 32; mask >>= 1, i++) {
			if (!(n & mask))
				goto found;
		}
	}
	curr++;

	/* scan complete u32s for the first zero bit */
	while (1) {
		while (curr < end) {
			val = *curr;
			if (~val) {
				n = be32_to_cpu(val);
				mask = 1U << 31;
				for (i = 0; i < 32; mask >>= 1, i++) {
					if (!(n & mask))
						goto found;
				}
			}
			curr++;
		}
		kunmap(page);
		offset += PAGE_CACHE_BITS;
		if (offset >= size)
			break;
		page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
					 NULL);
		if (IS_ERR(page)) {
			start = size;
			goto out;
		}
		curr = pptr = kmap(page);
		if ((size ^ offset) / PAGE_CACHE_BITS)
			end = pptr + PAGE_CACHE_BITS / 32;
		else
			end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32;
	}
	dprint(DBG_BITMAP, "bitmap full\n");
	start = size;
	goto out;

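	/*
	 * A free bit was found at the position given by curr, mask and i.
	 * Mark blocks allocated from there on, stopping once len blocks
	 * have been claimed or an already-allocated block is reached.
	 */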
found:
	start = offset + (curr - pptr) * 32 + i;
	if (start >= size) {
		dprint(DBG_BITMAP, "bitmap full\n");
		goto out;
	}
	/* do any partial u32 at the start */
	len = min(size - start, len);
	while (1) {
		n |= mask;
		if (++i >= 32)
			break;
		mask >>= 1;
		if (!--len || n & mask)
			goto done;
	}
	if (!--len)
		goto done;
	*curr++ = cpu_to_be32(n);
	/* do full u32s */
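	/*
	 * Claim whole 32-bit words while at least 32 blocks remain.  A word
	 * that already has a bit set ends the run: len is clamped to 32 so
	 * that the partial-word code at "last:" only extends the run up to
	 * the first in-use bit.
	 */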
	while (1) {
		while (curr < end) {
			n = be32_to_cpu(*curr);
			if (len < 32)
				goto last;
			if (n) {
				len = 32;
				goto last;
			}
			*curr++ = cpu_to_be32(0xffffffff);
			len -= 32;
		}
		set_page_dirty(page);
		kunmap(page);
		offset += PAGE_CACHE_BITS;
		page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
					 NULL);
		if (IS_ERR(page)) {
			start = size;
			goto out;
		}
		pptr = kmap(page);
		curr = pptr;
		end = pptr + PAGE_CACHE_BITS / 32;
	}
last:
	/* do any partial u32 at end */
	mask = 1U << 31;
	for (i = 0; i < len; i++) {
		if (n & mask)
			break;
		n |= mask;
		mask >>= 1;
	}
done:
	*curr = cpu_to_be32(n);
	set_page_dirty(page);
	kunmap(page);
	*max = offset + (curr - pptr) * 32 + i - start;
	sbi->free_blocks -= *max;
	hfsplus_mark_mdb_dirty(sb);
	dprint(DBG_BITMAP, "-> %u,%u\n", start, *max);
out:
	mutex_unlock(&sbi->alloc_mutex);
	return start;
}

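/*
 * hfsplus_block_free():
 *
 * Clear count bits starting at bit offset in the allocation file,
 * returning those blocks to the free pool and crediting them back to
 * the superblock's free block count.  Returns 0 on success and a
 * negative value when the range lies outside the volume or a bitmap
 * page cannot be read.
 */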
int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct page *page;
	struct address_space *mapping;
	__be32 *pptr, *curr, *end;
	u32 mask, len, pnr;
	int i;

	/* is there any actual work to be done? */
	if (!count)
		return 0;

	dprint(DBG_BITMAP, "block_free: %u,%u\n", offset, count);
	/* are all of the bits in range? */
	if ((offset + count) > sbi->total_blocks)
		return -2;

	mutex_lock(&sbi->alloc_mutex);
	mapping = sbi->alloc_file->i_mapping;
	pnr = offset / PAGE_CACHE_BITS;
	page = read_mapping_page(mapping, pnr, NULL);
	if (IS_ERR(page))
		goto kaboom;
	pptr = kmap(page);
	curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
	end = pptr + PAGE_CACHE_BITS / 32;
	len = count;

	/* do any partial u32 at the start */
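	/*
	 * If offset is not 32-bit aligned, build a mask that keeps the i
	 * bits in front of the range (and, when the whole range fits in
	 * this word, the bits behind it as well) and AND it into the word,
	 * so that only the bits belonging to the range are cleared.
	 */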
	i = offset % 32;
	if (i) {
		int j = 32 - i;
		mask = 0xffffffffU << j;
		if (j > count) {
			mask |= 0xffffffffU >> (i + count);
			*curr++ &= cpu_to_be32(mask);
			goto out;
		}
		*curr++ &= cpu_to_be32(mask);
		count -= j;
	}

	/* do full u32s */
	while (1) {
		while (curr < end) {
			if (count < 32)
				goto done;
			*curr++ = 0;
			count -= 32;
		}
		if (!count)
			break;
		set_page_dirty(page);
		kunmap(page);
		page = read_mapping_page(mapping, ++pnr, NULL);
		if (IS_ERR(page))
			goto kaboom;
		pptr = kmap(page);
		curr = pptr;
		end = pptr + PAGE_CACHE_BITS / 32;
	}
done:
	/* do any partial u32 at end */
	if (count) {
		mask = 0xffffffffU >> count;
		*curr &= cpu_to_be32(mask);
	}
out:
	set_page_dirty(page);
	kunmap(page);
	sbi->free_blocks += len;
	hfsplus_mark_mdb_dirty(sb);
	mutex_unlock(&sbi->alloc_mutex);

	return 0;

kaboom:
	/* the bitmap page could not be read; the blocks stay marked in use */
	printk(KERN_CRIT "hfsplus: unable to mark blocks free: error %ld\n",
	       PTR_ERR(page));
	mutex_unlock(&sbi->alloc_mutex);

	return -EIO;
}