/*
 * linux/fs/fat/cache.c
 *
 * Written 1992,1993 by Werner Almesberger
 *
 * Mar 1999. AV. Changed cache, so that it uses the starting cluster instead
 * of inode number.
 * May 1999. AV. Fixed the bogosity with FAT32 (read "FAT28"). Fscking lusers.
 */

#include <linux/fs.h>
#include <linux/msdos_fs.h>
#include <linux/buffer_head.h>

/* this must be > 0. */
#define FAT_MAX_CACHE	8

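/*
 * Each cache entry describes a contiguous run in the cluster chain:
 * file cluster "fcluster" maps to disk cluster "dcluster", and the
 * following "nr_contig" clusters of the file are contiguous on disk.
 */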
struct fat_cache {
	struct list_head cache_list;
	int nr_contig;	/* number of contiguous clusters */
	int fcluster;	/* cluster number in the file. */
	int dcluster;	/* cluster number on disk. */
};

struct fat_cache_id {
	unsigned int id;
	int nr_contig;
	int fcluster;
	int dcluster;
};

static inline int fat_max_cache(struct inode *inode)
{
	return FAT_MAX_CACHE;
}

static struct kmem_cache *fat_cache_cachep;

static void init_once(struct kmem_cache *cachep, void *foo)
{
	struct fat_cache *cache = (struct fat_cache *)foo;

	INIT_LIST_HEAD(&cache->cache_list);
}

int __init fat_cache_init(void)
{
	fat_cache_cachep = kmem_cache_create("fat_cache",
				sizeof(struct fat_cache),
				0, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
				init_once);
	if (fat_cache_cachep == NULL)
		return -ENOMEM;
	return 0;
}

void fat_cache_destroy(void)
{
	kmem_cache_destroy(fat_cache_cachep);
}

static inline struct fat_cache *fat_cache_alloc(struct inode *inode)
{
	return kmem_cache_alloc(fat_cache_cachep, GFP_KERNEL);
}

static inline void fat_cache_free(struct fat_cache *cache)
{
	BUG_ON(!list_empty(&cache->cache_list));
	kmem_cache_free(fat_cache_cachep, cache);
}

static inline void fat_cache_update_lru(struct inode *inode,
					struct fat_cache *cache)
{
	if (MSDOS_I(inode)->cache_lru.next != &cache->cache_list)
		list_move(&cache->cache_list, &MSDOS_I(inode)->cache_lru);
}

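/*
 * Look up file cluster "fclus" in the per-inode cache.  Returns -1 on a
 * miss.  On a hit, copies the matching entry into "cid", stores the
 * nearest cached mapping at or before "fclus" in *cached_fclus and
 * *cached_dclus, and returns that mapping's offset from the start of the
 * cached run so the caller can resume the chain walk from there.
 */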
static int fat_cache_lookup(struct inode *inode, int fclus,
			    struct fat_cache_id *cid,
			    int *cached_fclus, int *cached_dclus)
{
	static struct fat_cache nohit = { .fcluster = 0, };

	struct fat_cache *hit = &nohit, *p;
	int offset = -1;

	spin_lock(&MSDOS_I(inode)->cache_lru_lock);
	list_for_each_entry(p, &MSDOS_I(inode)->cache_lru, cache_list) {
		/* Find the cache of "fclus" or nearest cache. */
		if (p->fcluster <= fclus && hit->fcluster < p->fcluster) {
			hit = p;
			if ((hit->fcluster + hit->nr_contig) < fclus) {
				offset = hit->nr_contig;
			} else {
				offset = fclus - hit->fcluster;
				break;
			}
		}
	}
	if (hit != &nohit) {
		fat_cache_update_lru(inode, hit);

		cid->id = MSDOS_I(inode)->cache_valid_id;
		cid->nr_contig = hit->nr_contig;
		cid->fcluster = hit->fcluster;
		cid->dcluster = hit->dcluster;
		*cached_fclus = cid->fcluster + offset;
		*cached_dclus = cid->dcluster + offset;
	}
	spin_unlock(&MSDOS_I(inode)->cache_lru_lock);

	return offset;
}

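/*
 * If an entry starting at the same file cluster as "new" already exists,
 * extend its run length when "new" covers more clusters and return it;
 * otherwise return NULL.  Caller must hold cache_lru_lock.
 */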
static struct fat_cache *fat_cache_merge(struct inode *inode,
					 struct fat_cache_id *new)
{
	struct fat_cache *p;

	list_for_each_entry(p, &MSDOS_I(inode)->cache_lru, cache_list) {
		/* Find the same part as "new" in cluster-chain. */
		if (p->fcluster == new->fcluster) {
			BUG_ON(p->dcluster != new->dcluster);
			if (new->nr_contig > p->nr_contig)
				p->nr_contig = new->nr_contig;
			return p;
		}
	}
	return NULL;
}

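/*
 * Insert the mapping described by "new" into the per-inode cache, merging
 * with an existing entry when possible.  If the cache is full, the least
 * recently used entry is recycled.  Entries carrying a stale id (the
 * inode's cache was invalidated since the lookup) are dropped silently.
 */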
static void fat_cache_add(struct inode *inode, struct fat_cache_id *new)
{
	struct fat_cache *cache, *tmp;

	if (new->fcluster == -1) /* dummy cache */
		return;

	spin_lock(&MSDOS_I(inode)->cache_lru_lock);
	if (new->id != FAT_CACHE_VALID &&
	    new->id != MSDOS_I(inode)->cache_valid_id)
		goto out;	/* this cache was invalidated */

	cache = fat_cache_merge(inode, new);
	if (cache == NULL) {
		if (MSDOS_I(inode)->nr_caches < fat_max_cache(inode)) {
			MSDOS_I(inode)->nr_caches++;
			spin_unlock(&MSDOS_I(inode)->cache_lru_lock);

			tmp = fat_cache_alloc(inode);
			spin_lock(&MSDOS_I(inode)->cache_lru_lock);
			cache = fat_cache_merge(inode, new);
			if (cache != NULL) {
				MSDOS_I(inode)->nr_caches--;
				fat_cache_free(tmp);
				goto out_update_lru;
			}
			cache = tmp;
		} else {
			struct list_head *p = MSDOS_I(inode)->cache_lru.prev;
			cache = list_entry(p, struct fat_cache, cache_list);
		}
		cache->fcluster = new->fcluster;
		cache->dcluster = new->dcluster;
		cache->nr_contig = new->nr_contig;
	}
out_update_lru:
	fat_cache_update_lru(inode, cache);
out:
	spin_unlock(&MSDOS_I(inode)->cache_lru_lock);
}

/*
 * Cache invalidation occurs rarely, thus the LRU chain is not updated. It
 * fixes itself after a while.
 */
static void __fat_cache_inval_inode(struct inode *inode)
{
	struct msdos_inode_info *i = MSDOS_I(inode);
	struct fat_cache *cache;

	while (!list_empty(&i->cache_lru)) {
		cache = list_entry(i->cache_lru.next,
				   struct fat_cache, cache_list);
		list_del_init(&cache->cache_list);
		i->nr_caches--;
		fat_cache_free(cache);
	}
	/* Update. The copy of caches before this id is discarded. */
	i->cache_valid_id++;
	if (i->cache_valid_id == FAT_CACHE_VALID)
		i->cache_valid_id++;
}

void fat_cache_inval_inode(struct inode *inode)
{
	spin_lock(&MSDOS_I(inode)->cache_lru_lock);
	__fat_cache_inval_inode(inode);
	spin_unlock(&MSDOS_I(inode)->cache_lru_lock);
}

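/*
 * cache_contiguous() extends the run described by "cid" by one cluster
 * and reports whether "dclus" continues that run on disk; cache_init()
 * starts a new run at the (fclus, dclus) pair.
 */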
static inline int cache_contiguous(struct fat_cache_id *cid, int dclus)
{
	cid->nr_contig++;
	return ((cid->dcluster + cid->nr_contig) == dclus);
}

static inline void cache_init(struct fat_cache_id *cid, int fclus, int dclus)
{
	cid->id = FAT_CACHE_VALID;
	cid->fcluster = fclus;
	cid->dcluster = dclus;
	cid->nr_contig = 0;
}

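/*
 * Walk the cluster chain to find the disk cluster backing file cluster
 * "cluster".  Uses the per-inode cache to skip already-known runs and
 * records the newly discovered run on the way out.  Returns 0 on success
 * (with *fclus/*dclus set), FAT_ENT_EOF if the chain ends early, or a
 * negative errno on error.
 */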
int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus)
{
	struct super_block *sb = inode->i_sb;
	const int limit = sb->s_maxbytes >> MSDOS_SB(sb)->cluster_bits;
	struct fat_entry fatent;
	struct fat_cache_id cid;
	int nr;

	BUG_ON(MSDOS_I(inode)->i_start == 0);

	*fclus = 0;
	*dclus = MSDOS_I(inode)->i_start;
	if (cluster == 0)
		return 0;

	if (fat_cache_lookup(inode, cluster, &cid, fclus, dclus) < 0) {
		/*
		 * dummy, always not contiguous
		 * This is reinitialized by cache_init(), later.
		 */
		cache_init(&cid, -1, -1);
	}

	fatent_init(&fatent);
	while (*fclus < cluster) {
		/* prevent the infinite loop of cluster chain */
		if (*fclus > limit) {
			fat_fs_panic(sb, "%s: detected the cluster chain loop"
				     " (i_pos %lld)", __FUNCTION__,
				     MSDOS_I(inode)->i_pos);
			nr = -EIO;
			goto out;
		}

		nr = fat_ent_read(inode, &fatent, *dclus);
		if (nr < 0)
			goto out;
		else if (nr == FAT_ENT_FREE) {
			fat_fs_panic(sb, "%s: invalid cluster chain"
				     " (i_pos %lld)", __FUNCTION__,
				     MSDOS_I(inode)->i_pos);
			nr = -EIO;
			goto out;
		} else if (nr == FAT_ENT_EOF) {
			fat_cache_add(inode, &cid);
			goto out;
		}
		(*fclus)++;
		*dclus = nr;
		if (!cache_contiguous(&cid, *dclus))
			cache_init(&cid, *fclus, *dclus);
	}
	nr = 0;
	fat_cache_add(inode, &cid);
out:
	fatent_brelse(&fatent);
	return nr;
}

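/*
 * Resolve file cluster "cluster" to its disk cluster number.  Returns 0
 * for an inode with no data yet, a negative errno on error, and treats a
 * chain that ends before "cluster" as filesystem corruption.
 */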
static int fat_bmap_cluster(struct inode *inode, int cluster)
{
	struct super_block *sb = inode->i_sb;
	int ret, fclus, dclus;

	if (MSDOS_I(inode)->i_start == 0)
		return 0;

	ret = fat_get_cluster(inode, cluster, &fclus, &dclus);
	if (ret < 0)
		return ret;
	else if (ret == FAT_ENT_EOF) {
		fat_fs_panic(sb, "%s: request beyond EOF (i_pos %lld)",
			     __FUNCTION__, MSDOS_I(inode)->i_pos);
		return -EIO;
	}
	return dclus;
}

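/*
 * Map logical block "sector" of the inode to a physical block number in
 * *phys and report in *mapped_blocks how many blocks from that point are
 * known to be contiguous.  *phys == 0 on return means the block is not
 * mapped (a hole or beyond the initialized size).  The FAT12/FAT16 root
 * directory is handled specially since it lives outside the data area.
 */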
int fat_bmap(struct inode *inode, sector_t sector, sector_t *phys,
	     unsigned long *mapped_blocks)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	sector_t last_block;
	int cluster, offset;

	*phys = 0;
	*mapped_blocks = 0;
	if ((sbi->fat_bits != 32) && (inode->i_ino == MSDOS_ROOT_INO)) {
		if (sector < (sbi->dir_entries >> sbi->dir_per_block_bits)) {
			*phys = sector + sbi->dir_start;
			*mapped_blocks = 1;
		}
		return 0;
	}
	last_block = (MSDOS_I(inode)->mmu_private + (sb->s_blocksize - 1))
		>> sb->s_blocksize_bits;
	if (sector >= last_block)
		return 0;

	cluster = sector >> (sbi->cluster_bits - sb->s_blocksize_bits);
	offset  = sector & (sbi->sec_per_clus - 1);
	cluster = fat_bmap_cluster(inode, cluster);
	if (cluster < 0)
		return cluster;
	else if (cluster) {
		*phys = fat_clus_to_blknr(sbi, cluster) + offset;
		*mapped_blocks = sbi->sec_per_clus - offset;
		if (*mapped_blocks > last_block - sector)
			*mapped_blocks = last_block - sector;
	}
	return 0;
}