blob: ad57f5991eb1f14e3ac24dafa207e440d6d74be3 [file] [log] [blame]
/*
 *  linux/fs/hfsplus/bitmap.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handling of allocation file
 */
10
#include <linux/pagemap.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

/* Number of allocation bits held by one page of the allocation file. */
#define PAGE_CACHE_BITS	(PAGE_CACHE_SIZE * 8)
17
18int hfsplus_block_allocate(struct super_block *sb, u32 size, u32 offset, u32 *max)
19{
Christoph Hellwigdd73a012010-10-01 05:42:59 +020020 struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070021 struct page *page;
22 struct address_space *mapping;
23 __be32 *pptr, *curr, *end;
24 u32 mask, start, len, n;
25 __be32 val;
26 int i;
27
28 len = *max;
29 if (!len)
30 return size;
31
32 dprint(DBG_BITMAP, "block_allocate: %u,%u,%u\n", size, offset, len);
Christoph Hellwigdd73a012010-10-01 05:42:59 +020033 mutex_lock(&sbi->alloc_mutex);
34 mapping = sbi->alloc_file->i_mapping;
Pekka Enberg090d2b12006-06-23 02:05:08 -070035 page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL);
Eric Sesterhenn649f1ee2008-10-15 22:04:10 -070036 if (IS_ERR(page)) {
37 start = size;
38 goto out;
39 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070040 pptr = kmap(page);
41 curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
42 i = offset % 32;
43 offset &= ~(PAGE_CACHE_BITS - 1);
44 if ((size ^ offset) / PAGE_CACHE_BITS)
45 end = pptr + PAGE_CACHE_BITS / 32;
46 else
47 end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32;
48
49 /* scan the first partial u32 for zero bits */
50 val = *curr;
51 if (~val) {
52 n = be32_to_cpu(val);
53 mask = (1U << 31) >> i;
54 for (; i < 32; mask >>= 1, i++) {
55 if (!(n & mask))
56 goto found;
57 }
58 }
59 curr++;
60
61 /* scan complete u32s for the first zero bit */
62 while (1) {
63 while (curr < end) {
64 val = *curr;
65 if (~val) {
66 n = be32_to_cpu(val);
67 mask = 1 << 31;
68 for (i = 0; i < 32; mask >>= 1, i++) {
69 if (!(n & mask))
70 goto found;
71 }
72 }
73 curr++;
74 }
75 kunmap(page);
76 offset += PAGE_CACHE_BITS;
77 if (offset >= size)
78 break;
Pekka Enberg090d2b12006-06-23 02:05:08 -070079 page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
80 NULL);
Eric Sesterhenn649f1ee2008-10-15 22:04:10 -070081 if (IS_ERR(page)) {
82 start = size;
83 goto out;
84 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070085 curr = pptr = kmap(page);
86 if ((size ^ offset) / PAGE_CACHE_BITS)
87 end = pptr + PAGE_CACHE_BITS / 32;
88 else
89 end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32;
90 }
91 dprint(DBG_BITMAP, "bitmap full\n");
92 start = size;
93 goto out;
94
95found:
96 start = offset + (curr - pptr) * 32 + i;
97 if (start >= size) {
98 dprint(DBG_BITMAP, "bitmap full\n");
99 goto out;
100 }
101 /* do any partial u32 at the start */
102 len = min(size - start, len);
103 while (1) {
104 n |= mask;
105 if (++i >= 32)
106 break;
107 mask >>= 1;
108 if (!--len || n & mask)
109 goto done;
110 }
111 if (!--len)
112 goto done;
113 *curr++ = cpu_to_be32(n);
114 /* do full u32s */
115 while (1) {
116 while (curr < end) {
117 n = be32_to_cpu(*curr);
118 if (len < 32)
119 goto last;
120 if (n) {
121 len = 32;
122 goto last;
123 }
124 *curr++ = cpu_to_be32(0xffffffff);
125 len -= 32;
126 }
127 set_page_dirty(page);
128 kunmap(page);
129 offset += PAGE_CACHE_BITS;
Pekka Enberg090d2b12006-06-23 02:05:08 -0700130 page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
131 NULL);
Eric Sesterhenn649f1ee2008-10-15 22:04:10 -0700132 if (IS_ERR(page)) {
133 start = size;
134 goto out;
135 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700136 pptr = kmap(page);
137 curr = pptr;
138 end = pptr + PAGE_CACHE_BITS / 32;
139 }
140last:
141 /* do any partial u32 at end */
142 mask = 1U << 31;
143 for (i = 0; i < len; i++) {
144 if (n & mask)
145 break;
146 n |= mask;
147 mask >>= 1;
148 }
149done:
150 *curr = cpu_to_be32(n);
151 set_page_dirty(page);
152 kunmap(page);
153 *max = offset + (curr - pptr) * 32 + i - start;
Christoph Hellwigdd73a012010-10-01 05:42:59 +0200154 sbi->free_blocks -= *max;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700155 sb->s_dirt = 1;
156 dprint(DBG_BITMAP, "-> %u,%u\n", start, *max);
157out:
Christoph Hellwigdd73a012010-10-01 05:42:59 +0200158 mutex_unlock(&sbi->alloc_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700159 return start;
160}
161
162int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count)
163{
Christoph Hellwigdd73a012010-10-01 05:42:59 +0200164 struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700165 struct page *page;
166 struct address_space *mapping;
167 __be32 *pptr, *curr, *end;
168 u32 mask, len, pnr;
169 int i;
170
171 /* is there any actual work to be done? */
172 if (!count)
173 return 0;
174
175 dprint(DBG_BITMAP, "block_free: %u,%u\n", offset, count);
176 /* are all of the bits in range? */
Christoph Hellwigdd73a012010-10-01 05:42:59 +0200177 if ((offset + count) > sbi->total_blocks)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700178 return -2;
179
Christoph Hellwigdd73a012010-10-01 05:42:59 +0200180 mutex_lock(&sbi->alloc_mutex);
181 mapping = sbi->alloc_file->i_mapping;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700182 pnr = offset / PAGE_CACHE_BITS;
Pekka Enberg090d2b12006-06-23 02:05:08 -0700183 page = read_mapping_page(mapping, pnr, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700184 pptr = kmap(page);
185 curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
186 end = pptr + PAGE_CACHE_BITS / 32;
187 len = count;
188
189 /* do any partial u32 at the start */
190 i = offset % 32;
191 if (i) {
192 int j = 32 - i;
193 mask = 0xffffffffU << j;
194 if (j > count) {
195 mask |= 0xffffffffU >> (i + count);
196 *curr++ &= cpu_to_be32(mask);
197 goto out;
198 }
199 *curr++ &= cpu_to_be32(mask);
200 count -= j;
201 }
202
203 /* do full u32s */
204 while (1) {
205 while (curr < end) {
206 if (count < 32)
207 goto done;
208 *curr++ = 0;
209 count -= 32;
210 }
211 if (!count)
212 break;
213 set_page_dirty(page);
214 kunmap(page);
Pekka Enberg090d2b12006-06-23 02:05:08 -0700215 page = read_mapping_page(mapping, ++pnr, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700216 pptr = kmap(page);
217 curr = pptr;
218 end = pptr + PAGE_CACHE_BITS / 32;
219 }
220done:
221 /* do any partial u32 at end */
222 if (count) {
223 mask = 0xffffffffU >> count;
224 *curr &= cpu_to_be32(mask);
225 }
226out:
227 set_page_dirty(page);
228 kunmap(page);
Christoph Hellwigdd73a012010-10-01 05:42:59 +0200229 sbi->free_blocks += len;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700230 sb->s_dirt = 1;
Christoph Hellwigdd73a012010-10-01 05:42:59 +0200231 mutex_unlock(&sbi->alloc_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700232
233 return 0;
234}