Phillip Lougher | f400e12 | 2009-01-05 08:46:26 +0000 | [diff] [blame] | 1 | /* |
| 2 | * Squashfs - a compressed read only filesystem for Linux |
| 3 | * |
| 4 | * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 |
| 5 | * Phillip Lougher <phillip@lougher.demon.co.uk> |
| 6 | * |
| 7 | * This program is free software; you can redistribute it and/or |
| 8 | * modify it under the terms of the GNU General Public License |
| 9 | * as published by the Free Software Foundation; either version 2, |
| 10 | * or (at your option) any later version. |
| 11 | * |
| 12 | * This program is distributed in the hope that it will be useful, |
| 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 15 | * GNU General Public License for more details. |
| 16 | * |
| 17 | * You should have received a copy of the GNU General Public License |
| 18 | * along with this program; if not, write to the Free Software |
| 19 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. |
| 20 | * |
| 21 | * cache.c |
| 22 | */ |
| 23 | |
| 24 | /* |
| 25 | * Blocks in Squashfs are compressed. To avoid repeatedly decompressing |
| 26 | * recently accessed data Squashfs uses two small metadata and fragment caches. |
| 27 | * |
| 28 | * This file implements a generic cache implementation used for both caches, |
 * plus functions layered on top of the generic cache implementation to
| 30 | * access the metadata and fragment caches. |
| 31 | * |
 * To avoid out of memory and fragmentation issues with vmalloc the cache
| 33 | * uses sequences of kmalloced PAGE_CACHE_SIZE buffers. |
| 34 | * |
| 35 | * It should be noted that the cache is not used for file datablocks, these |
| 36 | * are decompressed and cached in the page-cache in the normal way. The |
| 37 | * cache is only used to temporarily cache fragment and metadata blocks |
 * which have been read as a result of a metadata (i.e. inode or
| 39 | * directory) or fragment access. Because metadata and fragments are packed |
| 40 | * together into blocks (to gain greater compression) the read of a particular |
| 41 | * piece of metadata or fragment will retrieve other metadata/fragments which |
| 42 | * have been packed with it, these because of locality-of-reference may be read |
| 43 | * in the near future. Temporarily caching them ensures they are available for |
| 44 | * near future access without requiring an additional read and decompress. |
| 45 | */ |
| 46 | |
| 47 | #include <linux/fs.h> |
| 48 | #include <linux/vfs.h> |
| 49 | #include <linux/slab.h> |
| 50 | #include <linux/vmalloc.h> |
| 51 | #include <linux/sched.h> |
| 52 | #include <linux/spinlock.h> |
| 53 | #include <linux/wait.h> |
Phillip Lougher | f400e12 | 2009-01-05 08:46:26 +0000 | [diff] [blame] | 54 | #include <linux/pagemap.h> |
| 55 | |
| 56 | #include "squashfs_fs.h" |
| 57 | #include "squashfs_fs_sb.h" |
| 58 | #include "squashfs_fs_i.h" |
| 59 | #include "squashfs.h" |
| 60 | |
/*
 * Look-up block in cache, and increment usage count.  If not in cache, read
 * and decompress it from disk.
 *
 * Returns the cache entry with its reference count incremented; the caller
 * must release it with squashfs_cache_put().  On read failure the entry is
 * still returned, with entry->error holding the negative error code.
 */
struct squashfs_cache_entry *squashfs_cache_get(struct super_block *sb,
	struct squashfs_cache *cache, u64 block, int length)
{
	int i, n;
	struct squashfs_cache_entry *entry;

	spin_lock(&cache->lock);

	while (1) {
		/* Linear scan for an entry already caching this block. */
		for (i = 0; i < cache->entries; i++)
			if (cache->entry[i].block == block)
				break;

		if (i == cache->entries) {
			/*
			 * Block not in cache, if all cache entries are used
			 * go to sleep waiting for one to become available.
			 */
			if (cache->unused == 0) {
				cache->num_waiters++;
				spin_unlock(&cache->lock);
				/*
				 * cache->unused is tested here without the
				 * lock; that is only a wakeup condition —
				 * the loop re-checks all state under the
				 * lock after waking before acting on it.
				 */
				wait_event(cache->wait_queue, cache->unused);
				spin_lock(&cache->lock);
				cache->num_waiters--;
				continue;
			}

			/*
			 * At least one unused cache entry.  A simple
			 * round-robin strategy is used to choose the entry to
			 * be evicted from the cache.
			 */
			i = cache->next_blk;
			for (n = 0; n < cache->entries; n++) {
				/* Only entries nobody holds can be evicted. */
				if (cache->entry[i].refcount == 0)
					break;
				i = (i + 1) % cache->entries;
			}

			cache->next_blk = (i + 1) % cache->entries;
			entry = &cache->entry[i];

			/*
			 * Initialise chosen cache entry, and fill it in from
			 * disk.  entry->pending is set while still holding
			 * the lock so concurrent lookups of this block will
			 * sleep on entry->wait_queue instead of using
			 * half-filled data.
			 */
			cache->unused--;
			entry->block = block;
			entry->refcount = 1;
			entry->pending = 1;
			entry->num_waiters = 0;
			entry->error = 0;
			spin_unlock(&cache->lock);

			/* Disk read and decompress run without the lock. */
			entry->length = squashfs_read_data(sb, entry->data,
				block, length, &entry->next_index,
				cache->block_size, cache->pages);

			spin_lock(&cache->lock);

			/* A negative length is the read error code. */
			if (entry->length < 0)
				entry->error = entry->length;

			entry->pending = 0;

			/*
			 * While filling this entry one or more other processes
			 * have looked it up in the cache, and have slept
			 * waiting for it to become available.
			 */
			if (entry->num_waiters) {
				spin_unlock(&cache->lock);
				wake_up_all(&entry->wait_queue);
			} else
				spin_unlock(&cache->lock);

			goto out;
		}

		/*
		 * Block already in cache.  Increment refcount so it doesn't
		 * get reused until we're finished with it, if it was
		 * previously unused there's one less cache entry available
		 * for reuse.
		 */
		entry = &cache->entry[i];
		if (entry->refcount == 0)
			cache->unused--;
		entry->refcount++;

		/*
		 * If the entry is currently being filled in by another process
		 * go to sleep waiting for it to become available.
		 */
		if (entry->pending) {
			entry->num_waiters++;
			spin_unlock(&cache->lock);
			wait_event(entry->wait_queue, !entry->pending);
		} else
			spin_unlock(&cache->lock);

		goto out;
	}

out:
	TRACE("Got %s %d, start block %lld, refcount %d, error %d\n",
		cache->name, i, entry->block, entry->refcount, entry->error);

	if (entry->error)
		ERROR("Unable to read %s cache entry [%llx]\n", cache->name,
							block);
	return entry;
}
| 178 | |
| 179 | |
/*
 * Release cache entry, once usage count is zero it can be reused.
 *
 * Drops the reference taken by squashfs_cache_get().  When the count hits
 * zero the entry becomes eligible for round-robin eviction, and one process
 * sleeping in squashfs_cache_get() waiting for a free entry is woken.
 */
void squashfs_cache_put(struct squashfs_cache_entry *entry)
{
	struct squashfs_cache *cache = entry->cache;

	spin_lock(&cache->lock);
	entry->refcount--;
	if (entry->refcount == 0) {
		cache->unused++;
		/*
		 * If there's any processes waiting for a block to become
		 * available, wake one up.  The lock is released before the
		 * wake-up; the waiter re-validates state under the lock.
		 */
		if (cache->num_waiters) {
			spin_unlock(&cache->lock);
			wake_up(&cache->wait_queue);
			return;
		}
	}
	spin_unlock(&cache->lock);
}
| 203 | |
| 204 | /* |
| 205 | * Delete cache reclaiming all kmalloced buffers. |
| 206 | */ |
| 207 | void squashfs_cache_delete(struct squashfs_cache *cache) |
| 208 | { |
| 209 | int i, j; |
| 210 | |
| 211 | if (cache == NULL) |
| 212 | return; |
| 213 | |
| 214 | for (i = 0; i < cache->entries; i++) { |
| 215 | if (cache->entry[i].data) { |
| 216 | for (j = 0; j < cache->pages; j++) |
| 217 | kfree(cache->entry[i].data[j]); |
| 218 | kfree(cache->entry[i].data); |
| 219 | } |
| 220 | } |
| 221 | |
| 222 | kfree(cache->entry); |
| 223 | kfree(cache); |
| 224 | } |
| 225 | |
| 226 | |
/*
 * Initialise cache allocating the specified number of entries, each of
 * size block_size.  To avoid vmalloc fragmentation issues each entry
 * is allocated as a sequence of kmalloced PAGE_CACHE_SIZE buffers.
 *
 * Returns the new cache, or NULL on allocation failure.  All allocations
 * are zero-initialised (kzalloc/kcalloc), so on partial failure
 * squashfs_cache_delete() can safely walk and free whatever was built.
 */
struct squashfs_cache *squashfs_cache_init(char *name, int entries,
	int block_size)
{
	int i, j;
	struct squashfs_cache *cache = kzalloc(sizeof(*cache), GFP_KERNEL);

	if (cache == NULL) {
		ERROR("Failed to allocate %s cache\n", name);
		return NULL;
	}

	cache->entry = kcalloc(entries, sizeof(*(cache->entry)), GFP_KERNEL);
	if (cache->entry == NULL) {
		ERROR("Failed to allocate %s cache\n", name);
		goto cleanup;
	}

	cache->next_blk = 0;
	cache->unused = entries;
	cache->entries = entries;
	cache->block_size = block_size;
	cache->pages = block_size >> PAGE_CACHE_SHIFT;
	/* A block smaller than a page still needs one buffer. */
	cache->pages = cache->pages ? cache->pages : 1;
	cache->name = name;
	cache->num_waiters = 0;
	spin_lock_init(&cache->lock);
	init_waitqueue_head(&cache->wait_queue);

	for (i = 0; i < entries; i++) {
		struct squashfs_cache_entry *entry = &cache->entry[i];

		init_waitqueue_head(&cache->entry[i].wait_queue);
		entry->cache = cache;
		/*
		 * SQUASHFS_INVALID_BLK ensures an unfilled entry can never
		 * match a block look-up in squashfs_cache_get().
		 */
		entry->block = SQUASHFS_INVALID_BLK;
		entry->data = kcalloc(cache->pages, sizeof(void *), GFP_KERNEL);
		if (entry->data == NULL) {
			ERROR("Failed to allocate %s cache entry\n", name);
			goto cleanup;
		}

		for (j = 0; j < cache->pages; j++) {
			entry->data[j] = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
			if (entry->data[j] == NULL) {
				ERROR("Failed to allocate %s buffer\n", name);
				goto cleanup;
			}
		}
	}

	return cache;

cleanup:
	/* Frees everything allocated so far; tolerant of NULL members. */
	squashfs_cache_delete(cache);
	return NULL;
}
| 287 | |
| 288 | |
| 289 | /* |
| 290 | * Copy upto length bytes from cache entry to buffer starting at offset bytes |
| 291 | * into the cache entry. If there's not length bytes then copy the number of |
| 292 | * bytes available. In all cases return the number of bytes copied. |
| 293 | */ |
| 294 | int squashfs_copy_data(void *buffer, struct squashfs_cache_entry *entry, |
| 295 | int offset, int length) |
| 296 | { |
| 297 | int remaining = length; |
| 298 | |
| 299 | if (length == 0) |
| 300 | return 0; |
| 301 | else if (buffer == NULL) |
| 302 | return min(length, entry->length - offset); |
| 303 | |
| 304 | while (offset < entry->length) { |
| 305 | void *buff = entry->data[offset / PAGE_CACHE_SIZE] |
| 306 | + (offset % PAGE_CACHE_SIZE); |
| 307 | int bytes = min_t(int, entry->length - offset, |
| 308 | PAGE_CACHE_SIZE - (offset % PAGE_CACHE_SIZE)); |
| 309 | |
| 310 | if (bytes >= remaining) { |
| 311 | memcpy(buffer, buff, remaining); |
| 312 | remaining = 0; |
| 313 | break; |
| 314 | } |
| 315 | |
| 316 | memcpy(buffer, buff, bytes); |
| 317 | buffer += bytes; |
| 318 | remaining -= bytes; |
| 319 | offset += bytes; |
| 320 | } |
| 321 | |
| 322 | return length - remaining; |
| 323 | } |
| 324 | |
| 325 | |
| 326 | /* |
| 327 | * Read length bytes from metadata position <block, offset> (block is the |
| 328 | * start of the compressed block on disk, and offset is the offset into |
| 329 | * the block once decompressed). Data is packed into consecutive blocks, |
| 330 | * and length bytes may require reading more than one block. |
| 331 | */ |
| 332 | int squashfs_read_metadata(struct super_block *sb, void *buffer, |
| 333 | u64 *block, int *offset, int length) |
| 334 | { |
| 335 | struct squashfs_sb_info *msblk = sb->s_fs_info; |
| 336 | int bytes, copied = length; |
| 337 | struct squashfs_cache_entry *entry; |
| 338 | |
| 339 | TRACE("Entered squashfs_read_metadata [%llx:%x]\n", *block, *offset); |
| 340 | |
| 341 | while (length) { |
| 342 | entry = squashfs_cache_get(sb, msblk->block_cache, *block, 0); |
| 343 | if (entry->error) |
| 344 | return entry->error; |
| 345 | else if (*offset >= entry->length) |
| 346 | return -EIO; |
| 347 | |
| 348 | bytes = squashfs_copy_data(buffer, entry, *offset, length); |
| 349 | if (buffer) |
| 350 | buffer += bytes; |
| 351 | length -= bytes; |
| 352 | *offset += bytes; |
| 353 | |
| 354 | if (*offset == entry->length) { |
| 355 | *block = entry->next_index; |
| 356 | *offset = 0; |
| 357 | } |
| 358 | |
| 359 | squashfs_cache_put(entry); |
| 360 | } |
| 361 | |
| 362 | return copied; |
| 363 | } |
| 364 | |
| 365 | |
| 366 | /* |
| 367 | * Look-up in the fragmment cache the fragment located at <start_block> in the |
| 368 | * filesystem. If necessary read and decompress it from disk. |
| 369 | */ |
| 370 | struct squashfs_cache_entry *squashfs_get_fragment(struct super_block *sb, |
| 371 | u64 start_block, int length) |
| 372 | { |
| 373 | struct squashfs_sb_info *msblk = sb->s_fs_info; |
| 374 | |
| 375 | return squashfs_cache_get(sb, msblk->fragment_cache, start_block, |
| 376 | length); |
| 377 | } |
| 378 | |
| 379 | |
| 380 | /* |
| 381 | * Read and decompress the datablock located at <start_block> in the |
| 382 | * filesystem. The cache is used here to avoid duplicating locking and |
| 383 | * read/decompress code. |
| 384 | */ |
| 385 | struct squashfs_cache_entry *squashfs_get_datablock(struct super_block *sb, |
| 386 | u64 start_block, int length) |
| 387 | { |
| 388 | struct squashfs_sb_info *msblk = sb->s_fs_info; |
| 389 | |
| 390 | return squashfs_cache_get(sb, msblk->read_page, start_block, length); |
| 391 | } |
| 392 | |
| 393 | |
| 394 | /* |
| 395 | * Read a filesystem table (uncompressed sequence of bytes) from disk |
| 396 | */ |
| 397 | int squashfs_read_table(struct super_block *sb, void *buffer, u64 block, |
| 398 | int length) |
| 399 | { |
| 400 | int pages = (length + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; |
| 401 | int i, res; |
| 402 | void **data = kcalloc(pages, sizeof(void *), GFP_KERNEL); |
| 403 | if (data == NULL) |
| 404 | return -ENOMEM; |
| 405 | |
| 406 | for (i = 0; i < pages; i++, buffer += PAGE_CACHE_SIZE) |
| 407 | data[i] = buffer; |
| 408 | res = squashfs_read_data(sb, data, block, length | |
Phillip Lougher | 118e1ef | 2009-03-05 00:31:12 +0000 | [diff] [blame] | 409 | SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, length, pages); |
Phillip Lougher | f400e12 | 2009-01-05 08:46:26 +0000 | [diff] [blame] | 410 | kfree(data); |
| 411 | return res; |
| 412 | } |