/*
 * vfsv0 quota IO operations on file
 */

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/dqblk_v2.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/quotaops.h>

#include <asm/byteorder.h>

#include "quota_tree.h"

MODULE_AUTHOR("Jan Kara");
MODULE_DESCRIPTION("Quota trie support");
MODULE_LICENSE("GPL");

#define __QUOTA_QT_PARANOIA

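/*
 * Layout used by the code below: the quota file is divided into blocks of
 * 1 << dqi_blocksize_bits bytes, of which the first dqi_usable_bs bytes are
 * used.  A radix tree of depth dqi_qtree_depth, rooted at block QT_TREEOFF
 * and indexed by quota id, leads to data blocks.  Each data block starts with
 * a struct qt_disk_dqdbheader followed by dqi_entry_size byte dquot entries.
 * Free blocks and data blocks with unused entries are kept on lists headed
 * at dqi_free_blk and dqi_free_entry respectively.
 */
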
typedef char *dqbuf_t;

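/*
 * Index of the reference to follow in a tree block at the given depth:
 * the id is interpreted as a number in base "references per block"
 * (dqi_usable_bs >> 2), with the most significant digit used at the root.
 */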
static int get_index(struct qtree_mem_dqinfo *info, qid_t id, int depth)
{
        unsigned int epb = info->dqi_usable_bs >> 2;

        depth = info->dqi_qtree_depth - depth - 1;
        while (depth--)
                id /= epb;
        return id % epb;
}

/* Number of entries in one block */
static inline int qtree_dqstr_in_blk(struct qtree_mem_dqinfo *info)
{
        return (info->dqi_usable_bs - sizeof(struct qt_disk_dqdbheader))
               / info->dqi_entry_size;
}

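/* Allocate/free a temporary buffer for quota block and entry I/O */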
static dqbuf_t getdqbuf(size_t size)
{
        dqbuf_t buf = kmalloc(size, GFP_NOFS);
        if (!buf)
                printk(KERN_WARNING "VFS: Not enough memory for quota buffers.\n");
        return buf;
}

static inline void freedqbuf(dqbuf_t buf)
{
        kfree(buf);
}

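/*
 * Read/write one quota block.  Blocks are addressed by number; the file
 * offset is blk << dqi_blocksize_bits and dqi_usable_bs bytes are transferred
 * through the filesystem's quota_read/quota_write methods.
 */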
static inline ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, dqbuf_t buf)
{
        struct super_block *sb = info->dqi_sb;

        memset(buf, 0, info->dqi_usable_bs);
        return sb->s_op->quota_read(sb, info->dqi_type, (char *)buf,
               info->dqi_usable_bs, blk << info->dqi_blocksize_bits);
}

static inline ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, dqbuf_t buf)
{
        struct super_block *sb = info->dqi_sb;

        return sb->s_op->quota_write(sb, info->dqi_type, (char *)buf,
               info->dqi_usable_bs, blk << info->dqi_blocksize_bits);
}

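/*
 * Free space management: dqi_free_blk heads a list of completely unused
 * blocks chained through dqdh_next_free, while dqi_free_entry heads a
 * doubly linked list (dqdh_next_free/dqdh_prev_free) of data blocks that
 * still have unused dquot entries.
 */
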
/* Remove empty block from the free list and return it, or allocate a new
 * block at the end of the file */
static int get_free_dqblk(struct qtree_mem_dqinfo *info)
{
        dqbuf_t buf = getdqbuf(info->dqi_usable_bs);
        struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
        int ret, blk;

        if (!buf)
                return -ENOMEM;
        if (info->dqi_free_blk) {
                blk = info->dqi_free_blk;
                ret = read_blk(info, blk, buf);
                if (ret < 0)
                        goto out_buf;
                info->dqi_free_blk = le32_to_cpu(dh->dqdh_next_free);
        } else {
                memset(buf, 0, info->dqi_usable_bs);
                /* Assure block allocation... */
                ret = write_blk(info, info->dqi_blocks, buf);
                if (ret < 0)
                        goto out_buf;
                blk = info->dqi_blocks++;
        }
        mark_info_dirty(info->dqi_sb, info->dqi_type);
        ret = blk;
out_buf:
        freedqbuf(buf);
        return ret;
}

/* Insert empty block into the free-block list */
static int put_free_dqblk(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint blk)
{
        struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
        int err;

        dh->dqdh_next_free = cpu_to_le32(info->dqi_free_blk);
        dh->dqdh_prev_free = cpu_to_le32(0);
        dh->dqdh_entries = cpu_to_le16(0);
        err = write_blk(info, blk, buf);
        if (err < 0)
                return err;
        info->dqi_free_blk = blk;
        mark_info_dirty(info->dqi_sb, info->dqi_type);
        return 0;
}

/* Remove given block from the list of blocks with free entries */
static int remove_free_dqentry(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint blk)
{
        dqbuf_t tmpbuf = getdqbuf(info->dqi_usable_bs);
        struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
        uint nextblk = le32_to_cpu(dh->dqdh_next_free);
        uint prevblk = le32_to_cpu(dh->dqdh_prev_free);
        int err;

        if (!tmpbuf)
                return -ENOMEM;
        if (nextblk) {
                err = read_blk(info, nextblk, tmpbuf);
                if (err < 0)
                        goto out_buf;
                ((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free =
                        dh->dqdh_prev_free;
                err = write_blk(info, nextblk, tmpbuf);
                if (err < 0)
                        goto out_buf;
        }
        if (prevblk) {
                err = read_blk(info, prevblk, tmpbuf);
                if (err < 0)
                        goto out_buf;
                ((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_next_free =
                        dh->dqdh_next_free;
                err = write_blk(info, prevblk, tmpbuf);
                if (err < 0)
                        goto out_buf;
        } else {
                info->dqi_free_entry = nextblk;
                mark_info_dirty(info->dqi_sb, info->dqi_type);
        }
        freedqbuf(tmpbuf);
        dh->dqdh_next_free = dh->dqdh_prev_free = cpu_to_le32(0);
        /* No matter whether the write succeeds, the block is out of the list */
        if (write_blk(info, blk, buf) < 0)
                printk(KERN_ERR "VFS: Can't write block (%u) with free entries.\n",
                       blk);
        return 0;
out_buf:
        freedqbuf(tmpbuf);
        return err;
}

/* Insert given block at the beginning of the list of blocks with free entries */
static int insert_free_dqentry(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint blk)
{
        dqbuf_t tmpbuf = getdqbuf(info->dqi_usable_bs);
        struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
        int err;

        if (!tmpbuf)
                return -ENOMEM;
        dh->dqdh_next_free = cpu_to_le32(info->dqi_free_entry);
        dh->dqdh_prev_free = cpu_to_le32(0);
        err = write_blk(info, blk, buf);
        if (err < 0)
                goto out_buf;
        if (info->dqi_free_entry) {
                err = read_blk(info, info->dqi_free_entry, tmpbuf);
                if (err < 0)
                        goto out_buf;
                ((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free =
                        cpu_to_le32(blk);
                err = write_blk(info, info->dqi_free_entry, tmpbuf);
                if (err < 0)
                        goto out_buf;
        }
        freedqbuf(tmpbuf);
        info->dqi_free_entry = blk;
        mark_info_dirty(info->dqi_sb, info->dqi_type);
        return 0;
out_buf:
        freedqbuf(tmpbuf);
        return err;
}

/* Is the entry in the block free? */
int qtree_entry_unused(struct qtree_mem_dqinfo *info, char *disk)
{
        int i;

        for (i = 0; i < info->dqi_entry_size; i++)
                if (disk[i])
                        return 0;
        return 1;
}
EXPORT_SYMBOL(qtree_entry_unused);

/* Find space for dquot */
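/*
 * Reserve space in a data block that still has a free entry (allocating a
 * fresh block if none exists), store the resulting file offset in
 * dquot->dq_off, and return the block number.  On failure 0 is returned and
 * *err is set.
 */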
static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
                              struct dquot *dquot, int *err)
{
        uint blk, i;
        struct qt_disk_dqdbheader *dh;
        dqbuf_t buf = getdqbuf(info->dqi_usable_bs);
        char *ddquot;

        *err = 0;
        if (!buf) {
                *err = -ENOMEM;
                return 0;
        }
        dh = (struct qt_disk_dqdbheader *)buf;
        if (info->dqi_free_entry) {
                blk = info->dqi_free_entry;
                *err = read_blk(info, blk, buf);
                if (*err < 0)
                        goto out_buf;
        } else {
                blk = get_free_dqblk(info);
                if ((int)blk < 0) {
                        *err = blk;
                        freedqbuf(buf);
                        return 0;
                }
                memset(buf, 0, info->dqi_usable_bs);
                /* This is enough as block is already zeroed and entry list
                 * is empty... */
                info->dqi_free_entry = blk;
                mark_info_dirty(dquot->dq_sb, dquot->dq_type);
        }
        /* Block will be full? */
        if (le16_to_cpu(dh->dqdh_entries) + 1 >= qtree_dqstr_in_blk(info)) {
                *err = remove_free_dqentry(info, buf, blk);
                if (*err < 0) {
                        printk(KERN_ERR "VFS: find_free_dqentry(): Can't "
                               "remove block (%u) from entry free list.\n",
                               blk);
                        goto out_buf;
                }
        }
        le16_add_cpu(&dh->dqdh_entries, 1);
        /* Find free structure in block */
        for (i = 0, ddquot = ((char *)buf) + sizeof(struct qt_disk_dqdbheader);
             i < qtree_dqstr_in_blk(info) && !qtree_entry_unused(info, ddquot);
             i++, ddquot += info->dqi_entry_size);
#ifdef __QUOTA_QT_PARANOIA
        if (i == qtree_dqstr_in_blk(info)) {
                printk(KERN_ERR "VFS: find_free_dqentry(): Data block full "
                       "but it shouldn't.\n");
                *err = -EIO;
                goto out_buf;
        }
#endif
        *err = write_blk(info, blk, buf);
        if (*err < 0) {
                printk(KERN_ERR "VFS: find_free_dqentry(): Can't write quota "
                       "data block %u.\n", blk);
                goto out_buf;
        }
        dquot->dq_off = (blk << info->dqi_blocksize_bits) +
                        sizeof(struct qt_disk_dqdbheader) +
                        i * info->dqi_entry_size;
        freedqbuf(buf);
        return blk;
out_buf:
        freedqbuf(buf);
        return 0;
}

/* Insert reference to structure into the trie */
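/*
 * Recursively descend the tree, allocating missing tree blocks on the way
 * down.  At the lowest level the dquot entry itself is placed via
 * find_free_dqentry().  "newson" means the reference at this level was empty
 * and must be filled in with the new child's block number; "newact" means
 * *treeblk was allocated here and is returned to the free list if the
 * insertion below fails.
 */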
static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
                          uint *treeblk, int depth)
{
        dqbuf_t buf = getdqbuf(info->dqi_usable_bs);
        int ret = 0, newson = 0, newact = 0;
        __le32 *ref;
        uint newblk;

        if (!buf)
                return -ENOMEM;
        if (!*treeblk) {
                ret = get_free_dqblk(info);
                if (ret < 0)
                        goto out_buf;
                *treeblk = ret;
                memset(buf, 0, info->dqi_usable_bs);
                newact = 1;
        } else {
                ret = read_blk(info, *treeblk, buf);
                if (ret < 0) {
                        printk(KERN_ERR "VFS: Can't read tree quota block "
                               "%u.\n", *treeblk);
                        goto out_buf;
                }
        }
        ref = (__le32 *)buf;
        newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
        if (!newblk)
                newson = 1;
        if (depth == info->dqi_qtree_depth - 1) {
#ifdef __QUOTA_QT_PARANOIA
                if (newblk) {
                        printk(KERN_ERR "VFS: Inserting already present quota "
                               "entry (block %u).\n",
                               le32_to_cpu(ref[get_index(info,
                                       dquot->dq_id, depth)]));
                        ret = -EIO;
                        goto out_buf;
                }
#endif
                newblk = find_free_dqentry(info, dquot, &ret);
        } else {
                ret = do_insert_tree(info, dquot, &newblk, depth + 1);
        }
        if (newson && ret >= 0) {
                ref[get_index(info, dquot->dq_id, depth)] =
                        cpu_to_le32(newblk);
                ret = write_blk(info, *treeblk, buf);
        } else if (newact && ret < 0) {
                put_free_dqblk(info, buf, *treeblk);
        }
out_buf:
        freedqbuf(buf);
        return ret;
}

/* Wrapper for inserting quota structure into tree */
static inline int dq_insert_tree(struct qtree_mem_dqinfo *info,
                                 struct dquot *dquot)
{
        int tmp = QT_TREEOFF;
        return do_insert_tree(info, dquot, &tmp, 0);
}

/*
 * We don't have to be afraid of deadlocks as we never have quotas on quota
 * files...
 */
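/*
 * Write the dquot to its place in the quota file, first allocating a tree
 * entry (and thus dq_off) for it if it does not have one yet.
 */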
int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
        int type = dquot->dq_type;
        struct super_block *sb = dquot->dq_sb;
        ssize_t ret;
        dqbuf_t ddquot = getdqbuf(info->dqi_entry_size);

        if (!ddquot)
                return -ENOMEM;

        /* dq_off is guarded by dqio_mutex */
        if (!dquot->dq_off) {
                ret = dq_insert_tree(info, dquot);
                if (ret < 0) {
                        printk(KERN_ERR "VFS: Error %zd occurred while "
                               "creating quota.\n", ret);
                        freedqbuf(ddquot);
                        return ret;
                }
        }
        spin_lock(&dq_data_lock);
        info->dqi_ops->mem2disk_dqblk(ddquot, dquot);
        spin_unlock(&dq_data_lock);
        ret = sb->s_op->quota_write(sb, type, (char *)ddquot,
                                    info->dqi_entry_size, dquot->dq_off);
        if (ret != info->dqi_entry_size) {
                printk(KERN_WARNING "VFS: dquota write failed on dev %s\n",
                       sb->s_id);
                if (ret >= 0)
                        ret = -ENOSPC;
        } else {
                ret = 0;
        }
        dqstats.writes++;
        freedqbuf(ddquot);

        return ret;
}
EXPORT_SYMBOL(qtree_write_dquot);

/* Free dquot entry in data block */
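/*
 * If this was the last used entry, the whole block is returned to the
 * free-block list; if the block was previously full, it is put back on the
 * list of blocks with free entries instead.
 */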
static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot,
                        uint blk)
{
        struct qt_disk_dqdbheader *dh;
        dqbuf_t buf = getdqbuf(info->dqi_usable_bs);
        int ret = 0;

        if (!buf)
                return -ENOMEM;
        if (dquot->dq_off >> info->dqi_blocksize_bits != blk) {
                printk(KERN_ERR "VFS: Quota structure has offset to other "
                       "block (%u) than it should (%u).\n", blk,
                       (uint)(dquot->dq_off >> info->dqi_blocksize_bits));
                goto out_buf;
        }
        ret = read_blk(info, blk, buf);
        if (ret < 0) {
                printk(KERN_ERR "VFS: Can't read quota data block %u\n", blk);
                goto out_buf;
        }
        dh = (struct qt_disk_dqdbheader *)buf;
        le16_add_cpu(&dh->dqdh_entries, -1);
        if (!le16_to_cpu(dh->dqdh_entries)) {   /* Block got free? */
                ret = remove_free_dqentry(info, buf, blk);
                if (ret >= 0)
                        ret = put_free_dqblk(info, buf, blk);
                if (ret < 0) {
                        printk(KERN_ERR "VFS: Can't move quota data block (%u) "
                               "to free list.\n", blk);
                        goto out_buf;
                }
        } else {
                memset(buf +
                       (dquot->dq_off & ((1 << info->dqi_blocksize_bits) - 1)),
                       0, info->dqi_entry_size);
                if (le16_to_cpu(dh->dqdh_entries) ==
                    qtree_dqstr_in_blk(info) - 1) {
                        /* Insert will write block itself */
                        ret = insert_free_dqentry(info, buf, blk);
                        if (ret < 0) {
                                printk(KERN_ERR "VFS: Can't insert quota data "
                                       "block (%u) to free entry list.\n", blk);
                                goto out_buf;
                        }
                } else {
                        ret = write_blk(info, blk, buf);
                        if (ret < 0) {
                                printk(KERN_ERR "VFS: Can't write quota data "
                                       "block %u\n", blk);
                                goto out_buf;
                        }
                }
        }
        dquot->dq_off = 0;      /* Quota is now unattached */
out_buf:
        freedqbuf(buf);
        return ret;
}

/* Remove reference to dquot from tree */
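/*
 * Walk down to the data block, free the dquot entry there, and on the way
 * back up release tree blocks that have become completely empty (the root
 * block at QT_TREEOFF is never freed).
 */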
static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
                       uint *blk, int depth)
{
        dqbuf_t buf = getdqbuf(info->dqi_usable_bs);
        int ret = 0;
        uint newblk;
        __le32 *ref = (__le32 *)buf;

        if (!buf)
                return -ENOMEM;
        ret = read_blk(info, *blk, buf);
        if (ret < 0) {
                printk(KERN_ERR "VFS: Can't read quota data block %u\n", *blk);
                goto out_buf;
        }
        newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
        if (depth == info->dqi_qtree_depth - 1) {
                ret = free_dqentry(info, dquot, newblk);
                newblk = 0;
        } else {
                ret = remove_tree(info, dquot, &newblk, depth + 1);
        }
        if (ret >= 0 && !newblk) {
                int i;
                ref[get_index(info, dquot->dq_id, depth)] = cpu_to_le32(0);
                /* Block got empty? */
                for (i = 0;
                     i < (info->dqi_usable_bs >> 2) && !ref[i];
                     i++);
                /* Don't put the root block into the free block list */
                if (i == (info->dqi_usable_bs >> 2)
                    && *blk != QT_TREEOFF) {
                        put_free_dqblk(info, buf, *blk);
                        *blk = 0;
                } else {
                        ret = write_blk(info, *blk, buf);
                        if (ret < 0)
                                printk(KERN_ERR "VFS: Can't write quota tree "
                                       "block %u.\n", *blk);
                }
        }
out_buf:
        freedqbuf(buf);
        return ret;
}

/* Delete dquot from tree */
int qtree_delete_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
        uint tmp = QT_TREEOFF;

        if (!dquot->dq_off)     /* Not even allocated? */
                return 0;
        return remove_tree(info, dquot, &tmp, 0);
}
EXPORT_SYMBOL(qtree_delete_dquot);

/* Find entry in block */
static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
                                 struct dquot *dquot, uint blk)
{
        dqbuf_t buf = getdqbuf(info->dqi_usable_bs);
        loff_t ret = 0;
        int i;
        char *ddquot;

        if (!buf)
                return -ENOMEM;
        ret = read_blk(info, blk, buf);
        if (ret < 0) {
                printk(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk);
                goto out_buf;
        }
        for (i = 0, ddquot = ((char *)buf) + sizeof(struct qt_disk_dqdbheader);
             i < qtree_dqstr_in_blk(info) && !info->dqi_ops->is_id(ddquot, dquot);
             i++, ddquot += info->dqi_entry_size);
        if (i == qtree_dqstr_in_blk(info)) {
                printk(KERN_ERR "VFS: Quota for id %u referenced "
                       "but not present.\n", dquot->dq_id);
                ret = -EIO;
                goto out_buf;
        } else {
                ret = (blk << info->dqi_blocksize_bits) +
                      sizeof(struct qt_disk_dqdbheader) +
                      i * info->dqi_entry_size;
        }
out_buf:
        freedqbuf(buf);
        return ret;
}

/* Find entry for given id in the tree */
static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info,
                                struct dquot *dquot, uint blk, int depth)
{
        dqbuf_t buf = getdqbuf(info->dqi_usable_bs);
        loff_t ret = 0;
        __le32 *ref = (__le32 *)buf;

        if (!buf)
                return -ENOMEM;
        ret = read_blk(info, blk, buf);
        if (ret < 0) {
                printk(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk);
                goto out_buf;
        }
        ret = 0;
        blk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
        if (!blk)       /* No reference? */
                goto out_buf;
        if (depth < info->dqi_qtree_depth - 1)
                ret = find_tree_dqentry(info, dquot, blk, depth + 1);
        else
                ret = find_block_dqentry(info, dquot, blk);
out_buf:
        freedqbuf(buf);
        return ret;
}

/* Find entry for given id in the tree - wrapper function */
static inline loff_t find_dqentry(struct qtree_mem_dqinfo *info,
                                  struct dquot *dquot)
{
        return find_tree_dqentry(info, dquot, QT_TREEOFF, 0);
}

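/*
 * Read the dquot from the quota file, looking up its offset in the tree
 * first if dq_off is not yet known.  A missing entry is not an error: the
 * dquot is zeroed and marked DQ_FAKE_B.
 */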
int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
        int type = dquot->dq_type;
        struct super_block *sb = dquot->dq_sb;
        loff_t offset;
        dqbuf_t ddquot;
        int ret = 0;

#ifdef __QUOTA_QT_PARANOIA
        /* Invalidated quota? */
        if (!sb_dqopt(dquot->dq_sb)->files[type]) {
                printk(KERN_ERR "VFS: Quota invalidated while reading!\n");
                return -EIO;
        }
#endif
        /* Do we know offset of the dquot entry in the quota file? */
        if (!dquot->dq_off) {
                offset = find_dqentry(info, dquot);
                if (offset <= 0) {      /* Entry not present? */
                        if (offset < 0)
                                printk(KERN_ERR "VFS: Can't read quota "
                                       "structure for id %u.\n", dquot->dq_id);
                        dquot->dq_off = 0;
                        set_bit(DQ_FAKE_B, &dquot->dq_flags);
                        memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
                        ret = offset;
                        goto out;
                }
                dquot->dq_off = offset;
        }
        ddquot = getdqbuf(info->dqi_entry_size);
        if (!ddquot)
                return -ENOMEM;
        ret = sb->s_op->quota_read(sb, type, (char *)ddquot,
                                   info->dqi_entry_size, dquot->dq_off);
        if (ret != info->dqi_entry_size) {
                if (ret >= 0)
                        ret = -EIO;
                printk(KERN_ERR "VFS: Error while reading quota "
                       "structure for id %u.\n", dquot->dq_id);
                set_bit(DQ_FAKE_B, &dquot->dq_flags);
                memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
                freedqbuf(ddquot);
                goto out;
        }
        spin_lock(&dq_data_lock);
        info->dqi_ops->disk2mem_dqblk(dquot, ddquot);
        if (!dquot->dq_dqb.dqb_bhardlimit &&
            !dquot->dq_dqb.dqb_bsoftlimit &&
            !dquot->dq_dqb.dqb_ihardlimit &&
            !dquot->dq_dqb.dqb_isoftlimit)
                set_bit(DQ_FAKE_B, &dquot->dq_flags);
        spin_unlock(&dq_data_lock);
        freedqbuf(ddquot);
out:
        dqstats.reads++;
        return ret;
}
EXPORT_SYMBOL(qtree_read_dquot);

/* Check whether the dquot should be deleted from the quota file and delete
 * it if so.  We know we are the only ones operating on the dquot (thanks to
 * dq_lock). */
int qtree_release_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
        if (test_bit(DQ_FAKE_B, &dquot->dq_flags) &&
            !(dquot->dq_dqb.dqb_curinodes | dquot->dq_dqb.dqb_curspace))
                return qtree_delete_dquot(info, dquot);
        return 0;
}
EXPORT_SYMBOL(qtree_release_dquot);