/*
 *  linux/fs/hfsplus/bitmap.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handling of allocation file
 */

#include <linux/pagemap.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

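/*
 * Number of allocation-bitmap bits held by one page-cache page: each byte
 * of the allocation file covers eight allocation blocks.
 */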
#define PAGE_CACHE_BITS	(PAGE_CACHE_SIZE * 8)

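/*
 * hfsplus_block_allocate - allocate a run of blocks from the allocation file
 *
 * Search the allocation bitmap for up to *max free blocks, starting at block
 * @offset and stopping at block @size (the total number of blocks covered by
 * the bitmap).  The blocks found are marked as used, *max is updated to the
 * length of the run actually allocated (the run ends early at the first block
 * already in use), and the number of its first block is returned.  A return
 * value at or past @size means nothing could be allocated.
 */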
int hfsplus_block_allocate(struct super_block *sb, u32 size, u32 offset, u32 *max)
{
	struct page *page;
	struct address_space *mapping;
	__be32 *pptr, *curr, *end;
	u32 mask, start, len, n;
	__be32 val;
	int i;

	len = *max;
	if (!len)
		return size;

	dprint(DBG_BITMAP, "block_allocate: %u,%u,%u\n", size, offset, len);
	mutex_lock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
	mapping = HFSPLUS_SB(sb).alloc_file->i_mapping;
	page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL);
	if (IS_ERR(page)) {
		start = size;
		goto out;
	}
	pptr = kmap(page);
	curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
	i = offset % 32;
	offset &= ~(PAGE_CACHE_BITS - 1);
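	/*
	 * If the bitmap extends beyond this page, scan the whole page;
	 * otherwise stop at the last word that still covers valid bits.
	 */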
	if ((size ^ offset) / PAGE_CACHE_BITS)
		end = pptr + PAGE_CACHE_BITS / 32;
	else
		end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32;

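	/*
	 * The allocation file is an array of big-endian 32-bit words in which
	 * the lowest-numbered block maps to the most significant bit, so the
	 * scan masks start at 1U << 31 and shift right.
	 */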
	/* scan the first partial u32 for zero bits */
	val = *curr;
	if (~val) {
		n = be32_to_cpu(val);
		mask = (1U << 31) >> i;
		for (; i < 32; mask >>= 1, i++) {
			if (!(n & mask))
				goto found;
		}
	}
	curr++;

	/* scan complete u32s for the first zero bit */
	while (1) {
		while (curr < end) {
			val = *curr;
			if (~val) {
				n = be32_to_cpu(val);
				mask = 1U << 31;
				for (i = 0; i < 32; mask >>= 1, i++) {
					if (!(n & mask))
						goto found;
				}
			}
			curr++;
		}
		kunmap(page);
		offset += PAGE_CACHE_BITS;
		if (offset >= size)
			break;
		page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
					 NULL);
		if (IS_ERR(page)) {
			start = size;
			goto out;
		}
		curr = pptr = kmap(page);
		if ((size ^ offset) / PAGE_CACHE_BITS)
			end = pptr + PAGE_CACHE_BITS / 32;
		else
			end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32;
	}
	dprint(DBG_BITMAP, "bitmap full\n");
	start = size;
	goto out;

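	/*
	 * A clear bit was found: it is bit 'i' of the word at 'curr', with
	 * 'n' and 'mask' holding the CPU-order value and the bit mask for it.
	 */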
found:
	start = offset + (curr - pptr) * 32 + i;
	if (start >= size) {
		dprint(DBG_BITMAP, "bitmap full\n");
		goto out;
	}
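	/*
	 * Mark bits starting at 'start'.  The run is clamped so it never
	 * crosses the end of the bitmap, and it stops early at the first
	 * block that is already allocated.
	 */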
	/* do any partial u32 at the start */
	len = min(size - start, len);
	while (1) {
		n |= mask;
		if (++i >= 32)
			break;
		mask >>= 1;
		if (!--len || n & mask)
			goto done;
	}
	if (!--len)
		goto done;
	*curr++ = cpu_to_be32(n);
	/* do full u32s */
	while (1) {
		while (curr < end) {
			n = be32_to_cpu(*curr);
			if (len < 32)
				goto last;
			if (n) {
				len = 32;
				goto last;
			}
			*curr++ = cpu_to_be32(0xffffffff);
			len -= 32;
		}
		set_page_dirty(page);
		kunmap(page);
		offset += PAGE_CACHE_BITS;
		page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
					 NULL);
		if (IS_ERR(page)) {
			start = size;
			goto out;
		}
		pptr = kmap(page);
		curr = pptr;
		end = pptr + PAGE_CACHE_BITS / 32;
	}
last:
	/* do any partial u32 at end */
	mask = 1U << 31;
	for (i = 0; i < len; i++) {
		if (n & mask)
			break;
		n |= mask;
		mask >>= 1;
	}
done:
	*curr = cpu_to_be32(n);
	set_page_dirty(page);
	kunmap(page);
	*max = offset + (curr - pptr) * 32 + i - start;
	HFSPLUS_SB(sb).free_blocks -= *max;
	sb->s_dirt = 1;
	dprint(DBG_BITMAP, "-> %u,%u\n", start, *max);
out:
	mutex_unlock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
	return start;
}

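/*
 * hfsplus_block_free - mark a run of allocation blocks as free
 *
 * Clear @count bits in the allocation bitmap, starting at block @offset,
 * and credit them back to the superblock's free block count.  Returns 0 on
 * success, -2 if the requested range reaches past the end of the volume,
 * and -EIO if a bitmap page cannot be read.
 */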
int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count)
{
	struct page *page;
	struct address_space *mapping;
	__be32 *pptr, *curr, *end;
	u32 mask, len, pnr;
	int i;

	/* is there any actual work to be done? */
	if (!count)
		return 0;

	dprint(DBG_BITMAP, "block_free: %u,%u\n", offset, count);
	/* are all of the bits in range? */
	if ((offset + count) > HFSPLUS_SB(sb).total_blocks)
		return -2;

	mutex_lock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
	mapping = HFSPLUS_SB(sb).alloc_file->i_mapping;
	pnr = offset / PAGE_CACHE_BITS;
	page = read_mapping_page(mapping, pnr, NULL);
	if (IS_ERR(page))
		goto kaboom;
	pptr = kmap(page);
	curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
	end = pptr + PAGE_CACHE_BITS / 32;
	len = count;

	/* do any partial u32 at the start */
	i = offset % 32;
	if (i) {
		int j = 32 - i;
		mask = 0xffffffffU << j;
		if (j > count) {
			mask |= 0xffffffffU >> (i + count);
			*curr++ &= cpu_to_be32(mask);
			goto out;
		}
		*curr++ &= cpu_to_be32(mask);
		count -= j;
	}

	/* do full u32s */
	while (1) {
		while (curr < end) {
			if (count < 32)
				goto done;
			*curr++ = 0;
			count -= 32;
		}
		if (!count)
			break;
		set_page_dirty(page);
		kunmap(page);
		page = read_mapping_page(mapping, ++pnr, NULL);
		if (IS_ERR(page))
			goto kaboom;
		pptr = kmap(page);
		curr = pptr;
		end = pptr + PAGE_CACHE_BITS / 32;
	}
done:
	/* do any partial u32 at end */
	if (count) {
		mask = 0xffffffffU >> count;
		*curr &= cpu_to_be32(mask);
	}
out:
	set_page_dirty(page);
	kunmap(page);
	HFSPLUS_SB(sb).free_blocks += len;
	sb->s_dirt = 1;
	mutex_unlock(&HFSPLUS_SB(sb).alloc_file->i_mutex);

	return 0;

kaboom:
	printk(KERN_CRIT "hfsplus: unable to mark blocks free: error %ld\n",
	       PTR_ERR(page));
	mutex_unlock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
	return -EIO;
}
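
/*
 * A minimal sketch of how a caller might drive these two helpers.  The
 * function below is hypothetical (it is not part of this file or of the
 * hfsplus code); it only illustrates the calling convention: a return value
 * at or past the bitmap size means the allocation failed, and *max comes
 * back holding the length of the run actually allocated.
 *
 *	static int example_grow_by(struct super_block *sb, u32 hint, u32 want)
 *	{
 *		u32 len = want;
 *		u32 start;
 *
 *		start = hfsplus_block_allocate(sb, HFSPLUS_SB(sb).total_blocks,
 *					       hint, &len);
 *		if (start >= HFSPLUS_SB(sb).total_blocks)
 *			return -ENOSPC;	// bitmap full or read error
 *		// ... record [start, start + len) in the extent records ...
 *		// if that fails, give the blocks back:
 *		// hfsplus_block_free(sb, start, len);
 *		return 0;
 *	}
 */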