/*
 * linux/fs/inode.c
 *
 * (C) 1997 Linus Torvalds
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/dcache.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/writeback.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/rwsem.h>
#include <linux/hash.h>
#include <linux/swap.h>
#include <linux/security.h>
#include <linux/pagemap.h>
#include <linux/cdev.h>
#include <linux/bootmem.h>
#include <linux/fsnotify.h>
#include <linux/mount.h>
#include <linux/async.h>
#include <linux/posix_acl.h>

/*
 * This is needed for the following functions:
 *  - inode_has_buffers
 *  - invalidate_bdev
 *
 * FIXME: remove all knowledge of the buffer layer from this file
 */
#include <linux/buffer_head.h>

/*
 * New inode.c implementation.
 *
 * This implementation has the basic premise of trying
 * to be extremely low-overhead and SMP-safe, yet be
 * simple enough to be "obviously correct".
 *
 * Famous last words.
 */

/* inode dynamic allocation 1999, Andrea Arcangeli <andrea@suse.de> */

/* #define INODE_PARANOIA 1 */
/* #define INODE_DEBUG 1 */

/*
 * Inode lookup is no longer as critical as it used to be:
 * most of the lookups are going to be through the dcache.
 */
#define I_HASHBITS	i_hash_shift
#define I_HASHMASK	i_hash_mask

static unsigned int i_hash_mask __read_mostly;
static unsigned int i_hash_shift __read_mostly;

/*
 * Each inode can be on two separate lists. One is
 * the hash list of the inode, used for lookups. The
 * other linked list is the "type" list:
 *  "in_use" - valid inode, i_count > 0, i_nlink > 0
 *  "dirty"  - as "in_use" but also dirty
 *  "unused" - valid inode, i_count = 0
 *
 * A "dirty" list is maintained for each super block,
 * allowing for low-overhead inode sync() operations.
 */

static LIST_HEAD(inode_unused);
static struct hlist_head *inode_hashtable __read_mostly;

/*
 * A simple spinlock to protect the list manipulations.
 *
 * NOTE! You also have to own the lock if you change
 * the i_state of an inode while it is in use..
 */
DEFINE_SPINLOCK(inode_lock);

/*
 * iprune_sem provides exclusion between the kswapd or try_to_free_pages
 * icache shrinking path, and the umount path.  Without this exclusion,
 * by the time prune_icache calls iput for the inode whose pages it has
 * been invalidating, or by the time it calls clear_inode & destroy_inode
 * from its final dispose_list, the struct super_block they refer to
 * (for inode->i_sb->s_op) may already have been freed and reused.
 *
 * We make this an rwsem because the fastpath is icache shrinking. In
 * some cases a filesystem may be doing a significant amount of work in
 * its inode reclaim code, so this should improve parallelism.
 */
static DECLARE_RWSEM(iprune_sem);

/*
 * Statistics gathering..
 */
struct inodes_stat_t inodes_stat;

static struct percpu_counter nr_inodes __cacheline_aligned_in_smp;
static struct percpu_counter nr_inodes_unused __cacheline_aligned_in_smp;

static struct kmem_cache *inode_cachep __read_mostly;

static inline int get_nr_inodes(void)
{
	return percpu_counter_sum_positive(&nr_inodes);
}

static inline int get_nr_inodes_unused(void)
{
	return percpu_counter_sum_positive(&nr_inodes_unused);
}

int get_nr_dirty_inodes(void)
{
	int nr_dirty = get_nr_inodes() - get_nr_inodes_unused();
	return nr_dirty > 0 ? nr_dirty : 0;
}

/*
 * Handle nr_inode sysctl
 */
#ifdef CONFIG_SYSCTL
int proc_nr_inodes(ctl_table *table, int write,
		   void __user *buffer, size_t *lenp, loff_t *ppos)
{
	inodes_stat.nr_inodes = get_nr_inodes();
	inodes_stat.nr_unused = get_nr_inodes_unused();
	return proc_dointvec(table, write, buffer, lenp, ppos);
}
#endif

static void wake_up_inode(struct inode *inode)
{
	/*
	 * Prevent speculative execution through spin_unlock(&inode_lock);
	 */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_NEW);
}

/**
 * inode_init_always - perform inode structure initialisation
 * @sb: superblock inode belongs to
 * @inode: inode to initialise
 *
 * These are initializations that need to be done on every inode
 * allocation as the fields are not initialised by slab allocation.
 */
int inode_init_always(struct super_block *sb, struct inode *inode)
{
	static const struct address_space_operations empty_aops;
	static const struct inode_operations empty_iops;
	static const struct file_operations empty_fops;
	struct address_space *const mapping = &inode->i_data;

	inode->i_sb = sb;
	inode->i_blkbits = sb->s_blocksize_bits;
	inode->i_flags = 0;
	atomic_set(&inode->i_count, 1);
	inode->i_op = &empty_iops;
	inode->i_fop = &empty_fops;
	inode->i_nlink = 1;
	inode->i_uid = 0;
	inode->i_gid = 0;
	atomic_set(&inode->i_writecount, 0);
	inode->i_size = 0;
	inode->i_blocks = 0;
	inode->i_bytes = 0;
	inode->i_generation = 0;
#ifdef CONFIG_QUOTA
	memset(&inode->i_dquot, 0, sizeof(inode->i_dquot));
#endif
	inode->i_pipe = NULL;
	inode->i_bdev = NULL;
	inode->i_cdev = NULL;
	inode->i_rdev = 0;
	inode->dirtied_when = 0;

	if (security_inode_alloc(inode))
		goto out;
	spin_lock_init(&inode->i_lock);
	lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);

	mutex_init(&inode->i_mutex);
	lockdep_set_class(&inode->i_mutex, &sb->s_type->i_mutex_key);

	init_rwsem(&inode->i_alloc_sem);
	lockdep_set_class(&inode->i_alloc_sem, &sb->s_type->i_alloc_sem_key);

	mapping->a_ops = &empty_aops;
	mapping->host = inode;
	mapping->flags = 0;
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
	mapping->assoc_mapping = NULL;
	mapping->backing_dev_info = &default_backing_dev_info;
	mapping->writeback_index = 0;

	/*
	 * If the block_device provides a backing_dev_info for client
	 * inodes then use that.  Otherwise the inode shares the bdev's
	 * backing_dev_info.
	 */
	if (sb->s_bdev) {
		struct backing_dev_info *bdi;

		bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info;
		mapping->backing_dev_info = bdi;
	}
	inode->i_private = NULL;
	inode->i_mapping = mapping;
#ifdef CONFIG_FS_POSIX_ACL
	inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;
#endif

#ifdef CONFIG_FSNOTIFY
	inode->i_fsnotify_mask = 0;
#endif

	percpu_counter_inc(&nr_inodes);

	return 0;
out:
	return -ENOMEM;
}
EXPORT_SYMBOL(inode_init_always);

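/*
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * filesystem that recycles its own in-core inode objects instead of
 * allocating fresh ones can reinitialise the VFS part of the inode with
 * inode_init_always(), roughly:
 *
 *	error = inode_init_always(sb, inode);
 *	if (error)
 *		return error;
 *	(the caller then resets its filesystem-private fields)
 */
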
static struct inode *alloc_inode(struct super_block *sb)
{
	struct inode *inode;

	if (sb->s_op->alloc_inode)
		inode = sb->s_op->alloc_inode(sb);
	else
		inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);

	if (!inode)
		return NULL;

	if (unlikely(inode_init_always(sb, inode))) {
		if (inode->i_sb->s_op->destroy_inode)
			inode->i_sb->s_op->destroy_inode(inode);
		else
			kmem_cache_free(inode_cachep, inode);
		return NULL;
	}

	return inode;
}

void __destroy_inode(struct inode *inode)
{
	BUG_ON(inode_has_buffers(inode));
	security_inode_free(inode);
	fsnotify_inode_delete(inode);
#ifdef CONFIG_FS_POSIX_ACL
	if (inode->i_acl && inode->i_acl != ACL_NOT_CACHED)
		posix_acl_release(inode->i_acl);
	if (inode->i_default_acl && inode->i_default_acl != ACL_NOT_CACHED)
		posix_acl_release(inode->i_default_acl);
#endif
	percpu_counter_dec(&nr_inodes);
}
EXPORT_SYMBOL(__destroy_inode);

static void destroy_inode(struct inode *inode)
{
	__destroy_inode(inode);
	if (inode->i_sb->s_op->destroy_inode)
		inode->i_sb->s_op->destroy_inode(inode);
	else
		kmem_cache_free(inode_cachep, (inode));
}

/*
 * These are initializations that only need to be done
 * once, because the fields are idempotent across use
 * of the inode, so let the slab cache be aware of that.
 */
void inode_init_once(struct inode *inode)
{
	memset(inode, 0, sizeof(*inode));
	INIT_HLIST_NODE(&inode->i_hash);
	INIT_LIST_HEAD(&inode->i_dentry);
	INIT_LIST_HEAD(&inode->i_devices);
	INIT_LIST_HEAD(&inode->i_list);
	INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC);
	spin_lock_init(&inode->i_data.tree_lock);
	spin_lock_init(&inode->i_data.i_mmap_lock);
	INIT_LIST_HEAD(&inode->i_data.private_list);
	spin_lock_init(&inode->i_data.private_lock);
	INIT_RAW_PRIO_TREE_ROOT(&inode->i_data.i_mmap);
	INIT_LIST_HEAD(&inode->i_data.i_mmap_nonlinear);
	i_size_ordered_init(inode);
#ifdef CONFIG_FSNOTIFY
	INIT_HLIST_HEAD(&inode->i_fsnotify_marks);
#endif
}
EXPORT_SYMBOL(inode_init_once);

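/*
 * Illustrative sketch: a filesystem with its own inode slab cache normally
 * calls inode_init_once() on the embedded VFS inode from its slab
 * constructor, along the lines of (all names below are hypothetical):
 *
 *	static void example_init_once(void *foo)
 *	{
 *		struct example_inode_info *ei = foo;
 *
 *		inode_init_once(&ei->vfs_inode);
 *	}
 */
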
static void init_once(void *foo)
{
	struct inode *inode = (struct inode *) foo;

	inode_init_once(inode);
}

/*
 * inode_lock must be held
 */
void __iget(struct inode *inode)
{
	atomic_inc(&inode->i_count);
}

/*
 * get additional reference to inode; caller must already hold one.
 */
void ihold(struct inode *inode)
{
	WARN_ON(atomic_inc_return(&inode->i_count) < 2);
}
EXPORT_SYMBOL(ihold);

static void inode_lru_list_add(struct inode *inode)
{
	if (list_empty(&inode->i_list)) {
		list_add(&inode->i_list, &inode_unused);
		percpu_counter_inc(&nr_inodes_unused);
	}
}

static void inode_lru_list_del(struct inode *inode)
{
	if (!list_empty(&inode->i_list)) {
		list_del_init(&inode->i_list);
		percpu_counter_dec(&nr_inodes_unused);
	}
}

static inline void __inode_sb_list_add(struct inode *inode)
{
	list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
}

/**
 * inode_sb_list_add - add inode to the superblock list of inodes
 * @inode: inode to add
 */
void inode_sb_list_add(struct inode *inode)
{
	spin_lock(&inode_lock);
	__inode_sb_list_add(inode);
	spin_unlock(&inode_lock);
}
EXPORT_SYMBOL_GPL(inode_sb_list_add);

static inline void __inode_sb_list_del(struct inode *inode)
{
	list_del_init(&inode->i_sb_list);
}

static unsigned long hash(struct super_block *sb, unsigned long hashval)
{
	unsigned long tmp;

	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
			L1_CACHE_BYTES;
	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> I_HASHBITS);
	return tmp & I_HASHMASK;
}

/**
 * __insert_inode_hash - hash an inode
 * @inode: unhashed inode
 * @hashval: unsigned long value used to locate this object in the
 *		inode_hashtable.
 *
 * Add an inode to the inode hash for this superblock.
 */
void __insert_inode_hash(struct inode *inode, unsigned long hashval)
{
	struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);

	spin_lock(&inode_lock);
	hlist_add_head(&inode->i_hash, b);
	spin_unlock(&inode_lock);
}
EXPORT_SYMBOL(__insert_inode_hash);

/**
 * __remove_inode_hash - remove an inode from the hash
 * @inode: inode to unhash
 *
 * Remove an inode from the superblock.
 */
static void __remove_inode_hash(struct inode *inode)
{
	hlist_del_init(&inode->i_hash);
}

/**
 * remove_inode_hash - remove an inode from the hash
 * @inode: inode to unhash
 *
 * Remove an inode from the superblock.
 */
void remove_inode_hash(struct inode *inode)
{
	spin_lock(&inode_lock);
	hlist_del_init(&inode->i_hash);
	spin_unlock(&inode_lock);
}
EXPORT_SYMBOL(remove_inode_hash);

void end_writeback(struct inode *inode)
{
	might_sleep();
	BUG_ON(inode->i_data.nrpages);
	BUG_ON(!list_empty(&inode->i_data.private_list));
	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(inode->i_state & I_CLEAR);
	inode_sync_wait(inode);
	inode->i_state = I_FREEING | I_CLEAR;
}
EXPORT_SYMBOL(end_writeback);

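/*
 * Illustrative sketch: a typical ->evict_inode() implementation truncates
 * the pagecache and then calls end_writeback() before releasing its own
 * resources (the names below are hypothetical):
 *
 *	static void example_evict_inode(struct inode *inode)
 *	{
 *		truncate_inode_pages(&inode->i_data, 0);
 *		end_writeback(inode);
 *		example_free_private(inode);
 *	}
 */
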
static void evict(struct inode *inode)
{
	const struct super_operations *op = inode->i_sb->s_op;

	if (op->evict_inode) {
		op->evict_inode(inode);
	} else {
		if (inode->i_data.nrpages)
			truncate_inode_pages(&inode->i_data, 0);
		end_writeback(inode);
	}
	if (S_ISBLK(inode->i_mode) && inode->i_bdev)
		bd_forget(inode);
	if (S_ISCHR(inode->i_mode) && inode->i_cdev)
		cd_forget(inode);
}

/*
 * dispose_list - dispose of the contents of a local list
 * @head: the head of the list to free
 *
 * Dispose-list gets a local list with local inodes in it, so it doesn't
 * need to worry about list corruption and SMP locks.
 */
static void dispose_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct inode *inode;

		inode = list_first_entry(head, struct inode, i_list);
		list_del_init(&inode->i_list);

		evict(inode);

		spin_lock(&inode_lock);
		__remove_inode_hash(inode);
		__inode_sb_list_del(inode);
		spin_unlock(&inode_lock);

		wake_up_inode(inode);
		destroy_inode(inode);
	}
}

/*
 * Invalidate all inodes for a device.
 */
static int invalidate_list(struct list_head *head, struct list_head *dispose)
{
	struct list_head *next;
	int busy = 0;

	next = head->next;
	for (;;) {
		struct list_head *tmp = next;
		struct inode *inode;

		/*
		 * We can reschedule here without worrying about the list's
		 * consistency because the per-sb list of inodes must not
		 * change during umount anymore, and because iprune_sem keeps
		 * shrink_icache_memory() away.
		 */
		cond_resched_lock(&inode_lock);

		next = next->next;
		if (tmp == head)
			break;
		inode = list_entry(tmp, struct inode, i_sb_list);
		if (inode->i_state & I_NEW)
			continue;
		if (atomic_read(&inode->i_count)) {
			busy = 1;
			continue;
		}

		list_move(&inode->i_list, dispose);
		inode->i_state |= I_FREEING;
		if (!(inode->i_state & (I_DIRTY | I_SYNC)))
			percpu_counter_dec(&nr_inodes_unused);
	}
	return busy;
}

/**
 * invalidate_inodes - discard the inodes on a device
 * @sb: superblock
 *
 * Discard all of the inodes for a given superblock. If the discard
 * fails because there are busy inodes then a non-zero value is returned.
 * If the discard is successful all the inodes have been discarded.
 */
int invalidate_inodes(struct super_block *sb)
{
	int busy;
	LIST_HEAD(throw_away);

	down_write(&iprune_sem);
	spin_lock(&inode_lock);
	fsnotify_unmount_inodes(&sb->s_inodes);
	busy = invalidate_list(&sb->s_inodes, &throw_away);
	spin_unlock(&inode_lock);

	dispose_list(&throw_away);
	up_write(&iprune_sem);

	return busy;
}

static int can_unuse(struct inode *inode)
{
	if (inode->i_state & ~I_REFERENCED)
		return 0;
	if (inode_has_buffers(inode))
		return 0;
	if (atomic_read(&inode->i_count))
		return 0;
	if (inode->i_data.nrpages)
		return 0;
	return 1;
}

/*
 * Scan `nr_to_scan' inodes on the unused list for freeable ones. They are
 * moved to a temporary list and then are freed outside inode_lock by
 * dispose_list().
 *
 * Any inodes which are pinned purely because of attached pagecache have their
 * pagecache removed.  If the inode has metadata buffers attached to
 * mapping->private_list then try to remove them.
 *
 * If the inode has the I_REFERENCED flag set, then it means that it has been
 * used recently - the flag is set in iput_final(). When we encounter such an
 * inode, clear the flag and move it to the back of the LRU so it gets another
 * pass through the LRU before it gets reclaimed. This is necessary because we
 * are doing lazy LRU updates to minimise lock contention, so the LRU does not
 * have strict ordering. Hence we don't want to reclaim inodes with this flag
 * set because they are the inodes that are out of order.
 */
static void prune_icache(int nr_to_scan)
{
	LIST_HEAD(freeable);
	int nr_scanned;
	unsigned long reap = 0;

	down_read(&iprune_sem);
	spin_lock(&inode_lock);
	for (nr_scanned = 0; nr_scanned < nr_to_scan; nr_scanned++) {
		struct inode *inode;

		if (list_empty(&inode_unused))
			break;

		inode = list_entry(inode_unused.prev, struct inode, i_list);

		/*
		 * Referenced or dirty inodes are still in use. Give them
		 * another pass through the LRU as we cannot reclaim them now.
		 */
		if (atomic_read(&inode->i_count) ||
				(inode->i_state & ~I_REFERENCED)) {
			list_del_init(&inode->i_list);
			percpu_counter_dec(&nr_inodes_unused);
			continue;
		}

		/* recently referenced inodes get one more pass */
		if (inode->i_state & I_REFERENCED) {
			list_move(&inode->i_list, &inode_unused);
			inode->i_state &= ~I_REFERENCED;
			continue;
		}
		if (inode_has_buffers(inode) || inode->i_data.nrpages) {
			__iget(inode);
			spin_unlock(&inode_lock);
			if (remove_inode_buffers(inode))
				reap += invalidate_mapping_pages(&inode->i_data,
								0, -1);
			iput(inode);
			spin_lock(&inode_lock);

			if (inode != list_entry(inode_unused.next,
						struct inode, i_list))
				continue;	/* wrong inode or list_empty */
			if (!can_unuse(inode))
				continue;
		}
		list_move(&inode->i_list, &freeable);
		WARN_ON(inode->i_state & I_NEW);
		inode->i_state |= I_FREEING;
		percpu_counter_dec(&nr_inodes_unused);
	}
	if (current_is_kswapd())
		__count_vm_events(KSWAPD_INODESTEAL, reap);
	else
		__count_vm_events(PGINODESTEAL, reap);
	spin_unlock(&inode_lock);

	dispose_list(&freeable);
	up_read(&iprune_sem);
}

/*
 * shrink_icache_memory() will attempt to reclaim some unused inodes.  Here,
 * "unused" means that no dentries are referring to the inodes: the files are
 * not open and the dcache references to those inodes have already been
 * reclaimed.
 *
 * This function is passed the number of inodes to scan, and it returns the
 * total number of remaining possibly-reclaimable inodes.
 */
static int shrink_icache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
{
	if (nr) {
		/*
		 * Nasty deadlock avoidance.  We may hold various FS locks,
		 * and we don't want to recurse into the FS that called us
		 * in clear_inode() and friends..
		 */
		if (!(gfp_mask & __GFP_FS))
			return -1;
		prune_icache(nr);
	}
	return (get_nr_inodes_unused() / 100) * sysctl_vfs_cache_pressure;
}

static struct shrinker icache_shrinker = {
	.shrink = shrink_icache_memory,
	.seeks = DEFAULT_SEEKS,
};

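/*
 * Note: icache_shrinker is registered with the VM via register_shrinker()
 * from the inode cache initialisation code, so prune_icache() runs whenever
 * the system comes under memory pressure.
 */
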
static void __wait_on_freeing_inode(struct inode *inode);
/*
 * Called with the inode lock held.
 */
static struct inode *find_inode(struct super_block *sb,
				struct hlist_head *head,
				int (*test)(struct inode *, void *),
				void *data)
{
	struct hlist_node *node;
	struct inode *inode = NULL;

repeat:
	hlist_for_each_entry(inode, node, head, i_hash) {
		if (inode->i_sb != sb)
			continue;
		if (!test(inode, data))
			continue;
		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		__iget(inode);
		return inode;
	}
	return NULL;
}

/*
 * find_inode_fast is the fast path version of find_inode, see the comment at
 * iget_locked for details.
 */
static struct inode *find_inode_fast(struct super_block *sb,
				struct hlist_head *head, unsigned long ino)
{
	struct hlist_node *node;
	struct inode *inode = NULL;

repeat:
	hlist_for_each_entry(inode, node, head, i_hash) {
		if (inode->i_ino != ino)
			continue;
		if (inode->i_sb != sb)
			continue;
		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		__iget(inode);
		return inode;
	}
	return NULL;
}

/*
 * Each cpu owns a range of LAST_INO_BATCH numbers.
 * 'shared_last_ino' is dirtied only once out of LAST_INO_BATCH allocations,
 * to renew the exhausted range.
 *
 * This does not significantly increase overflow rate because every CPU can
 * consume at most LAST_INO_BATCH-1 unused inode numbers. So there is
 * NR_CPUS*(LAST_INO_BATCH-1) wastage. At 4096 and 1024, this is ~0.1% of the
 * 2^32 range, and is a worst-case. Even a 50% wastage would only increase
 * overflow rate by 2x, which does not seem too significant.
 *
 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
 * error if st_ino won't fit in target struct field. Use 32bit counter
 * here to attempt to avoid that.
 */
#define LAST_INO_BATCH 1024
static DEFINE_PER_CPU(unsigned int, last_ino);

unsigned int get_next_ino(void)
{
	unsigned int *p = &get_cpu_var(last_ino);
	unsigned int res = *p;

#ifdef CONFIG_SMP
	if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
		static atomic_t shared_last_ino;
		int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);

		res = next - LAST_INO_BATCH;
	}
#endif

	*p = ++res;
	put_cpu_var(last_ino);
	return res;
}
EXPORT_SYMBOL(get_next_ino);

/**
 * new_inode - obtain an inode
 * @sb: superblock
 *
 * Allocates a new inode for the given superblock. The default gfp_mask
 * for allocations related to inode->i_mapping is GFP_HIGHUSER_MOVABLE.
 * If HIGHMEM pages are unsuitable or it is known that pages allocated
 * for the page cache are not reclaimable or migratable,
 * mapping_set_gfp_mask() must be called with suitable flags on the
 * newly created inode's mapping.
 */
struct inode *new_inode(struct super_block *sb)
{
	struct inode *inode;

	spin_lock_prefetch(&inode_lock);

	inode = alloc_inode(sb);
	if (inode) {
		spin_lock(&inode_lock);
		__inode_sb_list_add(inode);
		inode->i_state = 0;
		spin_unlock(&inode_lock);
	}
	return inode;
}
EXPORT_SYMBOL(new_inode);

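/*
 * Illustrative sketch: a simple in-memory filesystem typically pairs
 * new_inode() with get_next_ino() when creating an inode, roughly:
 *
 *	struct inode *inode = new_inode(sb);
 *	if (inode) {
 *		inode->i_ino = get_next_ino();
 *		inode->i_mode = mode;
 *		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
 *	}
 */
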
void unlock_new_inode(struct inode *inode)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (S_ISDIR(inode->i_mode)) {
		struct file_system_type *type = inode->i_sb->s_type;

		/* Set new key only if filesystem hasn't already changed it */
		if (!lockdep_match_class(&inode->i_mutex,
		    &type->i_mutex_key)) {
			/*
			 * ensure nobody is actually holding i_mutex
			 */
			mutex_destroy(&inode->i_mutex);
			mutex_init(&inode->i_mutex);
			lockdep_set_class(&inode->i_mutex,
					  &type->i_mutex_dir_key);
		}
	}
#endif
	/*
	 * This is special!  We do not need the spinlock when clearing I_NEW,
	 * because we're guaranteed that nobody else tries to do anything about
	 * the state of the inode when it is locked, as we just created it (so
	 * there can be no old holders that haven't tested I_NEW).
	 * However we must emit the memory barrier so that other CPUs reliably
	 * see the clearing of I_NEW after the other inode initialisation has
	 * completed.
	 */
	smp_mb();
	WARN_ON(!(inode->i_state & I_NEW));
	inode->i_state &= ~I_NEW;
	wake_up_inode(inode);
}
EXPORT_SYMBOL(unlock_new_inode);

/*
 * This is called without the inode lock held.. Be careful.
 *
 * We no longer cache the sb_flags in i_flags - see fs.h
 *	-- rmk@arm.uk.linux.org
 */
static struct inode *get_new_inode(struct super_block *sb,
				struct hlist_head *head,
				int (*test)(struct inode *, void *),
				int (*set)(struct inode *, void *),
				void *data)
{
	struct inode *inode;

	inode = alloc_inode(sb);
	if (inode) {
		struct inode *old;

		spin_lock(&inode_lock);
		/* We released the lock, so.. */
		old = find_inode(sb, head, test, data);
		if (!old) {
			if (set(inode, data))
				goto set_failed;

			hlist_add_head(&inode->i_hash, head);
			__inode_sb_list_add(inode);
			inode->i_state = I_NEW;
			spin_unlock(&inode_lock);

			/* Return the locked inode with I_NEW set, the
			 * caller is responsible for filling in the contents
			 */
			return inode;
		}

		/*
		 * Uhhuh, somebody else created the same inode under
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
		spin_unlock(&inode_lock);
		destroy_inode(inode);
		inode = old;
		wait_on_inode(inode);
	}
	return inode;

set_failed:
	spin_unlock(&inode_lock);
	destroy_inode(inode);
	return NULL;
}

/*
 * get_new_inode_fast is the fast path version of get_new_inode, see the
 * comment at iget_locked for details.
 */
static struct inode *get_new_inode_fast(struct super_block *sb,
				struct hlist_head *head, unsigned long ino)
{
	struct inode *inode;

	inode = alloc_inode(sb);
	if (inode) {
		struct inode *old;

		spin_lock(&inode_lock);
		/* We released the lock, so.. */
		old = find_inode_fast(sb, head, ino);
		if (!old) {
			inode->i_ino = ino;
			hlist_add_head(&inode->i_hash, head);
			__inode_sb_list_add(inode);
			inode->i_state = I_NEW;
			spin_unlock(&inode_lock);

			/* Return the locked inode with I_NEW set, the
			 * caller is responsible for filling in the contents
			 */
			return inode;
		}

		/*
		 * Uhhuh, somebody else created the same inode under
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
		spin_unlock(&inode_lock);
		destroy_inode(inode);
		inode = old;
		wait_on_inode(inode);
	}
	return inode;
}

/*
 * search the inode cache for a matching inode number.
 * If we find one, then the inode number we are trying to
 * allocate is not unique and so we should not use it.
 *
 * Returns 1 if the inode number is unique, 0 if it is not.
 */
static int test_inode_iunique(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *b = inode_hashtable + hash(sb, ino);
	struct hlist_node *node;
	struct inode *inode;

	hlist_for_each_entry(inode, node, b, i_hash) {
		if (inode->i_ino == ino && inode->i_sb == sb)
			return 0;
	}

	return 1;
}

/**
 * iunique - get a unique inode number
 * @sb: superblock
 * @max_reserved: highest reserved inode number
 *
 * Obtain an inode number that is unique on the system for a given
 * superblock. This is used by file systems that have no natural
 * permanent inode numbering system. An inode number is returned that
 * is higher than the reserved limit but unique.
 *
 * BUGS:
 * With a large number of inodes live on the file system this function
 * currently becomes quite slow.
 */
ino_t iunique(struct super_block *sb, ino_t max_reserved)
{
	/*
	 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
	 * error if st_ino won't fit in target struct field. Use 32bit counter
	 * here to attempt to avoid that.
	 */
	static DEFINE_SPINLOCK(iunique_lock);
	static unsigned int counter;
	ino_t res;

	spin_lock(&inode_lock);
	spin_lock(&iunique_lock);
	do {
		if (counter <= max_reserved)
			counter = max_reserved + 1;
		res = counter++;
	} while (!test_inode_iunique(sb, res));
	spin_unlock(&iunique_lock);
	spin_unlock(&inode_lock);

	return res;
}
EXPORT_SYMBOL(iunique);

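/*
 * Illustrative sketch: a filesystem without stable on-disk inode numbers
 * (a synthetic or network filesystem, say) can number a new inode with
 * iunique(); EXAMPLE_MAX_RESERVED below is hypothetical:
 *
 *	inode->i_ino = iunique(sb, EXAMPLE_MAX_RESERVED);
 */
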
| 976 | struct inode *igrab(struct inode *inode) |
| 977 | { |
| 978 | spin_lock(&inode_lock); |
Al Viro | a4ffdde | 2010-06-02 17:38:30 -0400 | [diff] [blame] | 979 | if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 980 | __iget(inode); |
| 981 | else |
| 982 | /* |
| 983 | * Handle the case where s_op->clear_inode is not been |
| 984 | * called yet, and somebody is calling igrab |
| 985 | * while the inode is getting freed. |
| 986 | */ |
| 987 | inode = NULL; |
| 988 | spin_unlock(&inode_lock); |
| 989 | return inode; |
| 990 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 991 | EXPORT_SYMBOL(igrab); |
| 992 | |
| 993 | /** |
| 994 | * ifind - internal function, you want ilookup5() or iget5(). |
| 995 | * @sb: super block of file system to search |
| 996 | * @head: the head of the list to search |
| 997 | * @test: callback used for comparisons between inodes |
| 998 | * @data: opaque data pointer to pass to @test |
Anton Altaparmakov | 88bd512 | 2005-07-13 01:10:44 -0700 | [diff] [blame] | 999 | * @wait: if true wait for the inode to be unlocked, if false do not |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1000 | * |
| 1001 | * ifind() searches for the inode specified by @data in the inode |
| 1002 | * cache. This is a generalized version of ifind_fast() for file systems where |
| 1003 | * the inode number is not sufficient for unique identification of an inode. |
| 1004 | * |
| 1005 | * If the inode is in the cache, the inode is returned with an incremented |
| 1006 | * reference count. |
| 1007 | * |
| 1008 | * Otherwise NULL is returned. |
| 1009 | * |
| 1010 | * Note, @test is called with the inode_lock held, so can't sleep. |
| 1011 | */ |
Matt Mackall | 5d2bea4 | 2006-01-08 01:05:21 -0800 | [diff] [blame] | 1012 | static struct inode *ifind(struct super_block *sb, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1013 | struct hlist_head *head, int (*test)(struct inode *, void *), |
Anton Altaparmakov | 88bd512 | 2005-07-13 01:10:44 -0700 | [diff] [blame] | 1014 | void *data, const int wait) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1015 | { |
| 1016 | struct inode *inode; |
| 1017 | |
| 1018 | spin_lock(&inode_lock); |
| 1019 | inode = find_inode(sb, head, test, data); |
| 1020 | if (inode) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1021 | spin_unlock(&inode_lock); |
Anton Altaparmakov | 88bd512 | 2005-07-13 01:10:44 -0700 | [diff] [blame] | 1022 | if (likely(wait)) |
| 1023 | wait_on_inode(inode); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1024 | return inode; |
| 1025 | } |
| 1026 | spin_unlock(&inode_lock); |
| 1027 | return NULL; |
| 1028 | } |
| 1029 | |
| 1030 | /** |
| 1031 | * ifind_fast - internal function, you want ilookup() or iget(). |
| 1032 | * @sb: super block of file system to search |
| 1033 | * @head: head of the list to search |
| 1034 | * @ino: inode number to search for |
| 1035 | * |
| 1036 | * ifind_fast() searches for the inode @ino in the inode cache. This is for |
| 1037 | * file systems where the inode number is sufficient for unique identification |
| 1038 | * of an inode. |
| 1039 | * |
| 1040 | * If the inode is in the cache, the inode is returned with an incremented |
| 1041 | * reference count. |
| 1042 | * |
| 1043 | * Otherwise NULL is returned. |
| 1044 | */ |
Matt Mackall | 5d2bea4 | 2006-01-08 01:05:21 -0800 | [diff] [blame] | 1045 | static struct inode *ifind_fast(struct super_block *sb, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1046 | struct hlist_head *head, unsigned long ino) |
| 1047 | { |
| 1048 | struct inode *inode; |
| 1049 | |
| 1050 | spin_lock(&inode_lock); |
| 1051 | inode = find_inode_fast(sb, head, ino); |
| 1052 | if (inode) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1053 | spin_unlock(&inode_lock); |
| 1054 | wait_on_inode(inode); |
| 1055 | return inode; |
| 1056 | } |
| 1057 | spin_unlock(&inode_lock); |
| 1058 | return NULL; |
| 1059 | } |
| 1060 | |
| 1061 | /** |
Anton Altaparmakov | 88bd512 | 2005-07-13 01:10:44 -0700 | [diff] [blame] | 1062 | * ilookup5_nowait - search for an inode in the inode cache |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1063 | * @sb: super block of file system to search |
| 1064 | * @hashval: hash value (usually inode number) to search for |
| 1065 | * @test: callback used for comparisons between inodes |
| 1066 | * @data: opaque data pointer to pass to @test |
| 1067 | * |
| 1068 | * ilookup5_nowait() uses ifind() to search for the inode specified by |
| 1069 | * @hashval and @data in the inode cache. This is a generalized version of |
| 1070 | * ilookup() for file systems where the inode number is not sufficient for |
| 1071 | * unique identification of an inode. |
| 1072 | * |
| 1073 | * If the inode is in the cache, the inode is returned with an incremented |
Anton Altaparmakov | 88bd512 | 2005-07-13 01:10:44 -0700 | [diff] [blame] | 1074 | * reference count. Note, the inode lock is not waited upon so you have to be |
| 1075 | * very careful what you do with the returned inode. You probably should be |
| 1076 | * using ilookup5() instead. |
| 1077 | * |
| 1078 | * Otherwise NULL is returned. |
| 1079 | * |
| 1080 | * Note, @test is called with the inode_lock held, so can't sleep. |
| 1081 | */ |
| 1082 | struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval, |
| 1083 | int (*test)(struct inode *, void *), void *data) |
| 1084 | { |
| 1085 | struct hlist_head *head = inode_hashtable + hash(sb, hashval); |
| 1086 | |
| 1087 | return ifind(sb, head, test, data, 0); |
| 1088 | } |
Anton Altaparmakov | 88bd512 | 2005-07-13 01:10:44 -0700 | [diff] [blame] | 1089 | EXPORT_SYMBOL(ilookup5_nowait); |
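
/*
 * Example: one way a filesystem might use ilookup5_nowait() when it only
 * needs to know whether a matching inode is already cached and must not
 * block on I_NEW.  This is an illustrative sketch; the examplefs_* name is
 * hypothetical and @test is whatever callback the filesystem already uses
 * with ilookup5()/iget5_locked().
 */
static int examplefs_inode_is_cached(struct super_block *sb,
				     unsigned long hashval,
				     int (*test)(struct inode *, void *),
				     void *data)
{
	struct inode *inode;

	inode = ilookup5_nowait(sb, hashval, test, data);
	if (!inode)
		return 0;
	/*
	 * The inode may still be I_NEW; we only wanted a cached-or-not
	 * answer, so drop the extra reference straight away.
	 */
	iput(inode);
	return 1;
}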
| 1090 | |
| 1091 | /** |
| 1092 | * ilookup5 - search for an inode in the inode cache |
| 1093 | * @sb: super block of file system to search |
| 1094 | * @hashval: hash value (usually inode number) to search for |
| 1095 | * @test: callback used for comparisons between inodes |
| 1096 | * @data: opaque data pointer to pass to @test |
| 1097 | * |
| 1098 | * ilookup5() uses ifind() to search for the inode specified by @hashval and |
| 1099 | * @data in the inode cache. This is a generalized version of ilookup() for |
| 1100 | * file systems where the inode number is not sufficient for unique |
| 1101 | * identification of an inode. |
| 1102 | * |
| 1103 | * If the inode is in the cache, the inode lock is waited upon and the inode is |
| 1104 | * returned with an incremented reference count. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1105 | * |
| 1106 | * Otherwise NULL is returned. |
| 1107 | * |
| 1108 | * Note, @test is called with the inode_lock held, so can't sleep. |
| 1109 | */ |
| 1110 | struct inode *ilookup5(struct super_block *sb, unsigned long hashval, |
| 1111 | int (*test)(struct inode *, void *), void *data) |
| 1112 | { |
| 1113 | struct hlist_head *head = inode_hashtable + hash(sb, hashval); |
| 1114 | |
Anton Altaparmakov | 88bd512 | 2005-07-13 01:10:44 -0700 | [diff] [blame] | 1115 | return ifind(sb, head, test, data, 1); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1116 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1117 | EXPORT_SYMBOL(ilookup5); |
| 1118 | |
| 1119 | /** |
| 1120 | * ilookup - search for an inode in the inode cache |
| 1121 | * @sb: super block of file system to search |
| 1122 | * @ino: inode number to search for |
| 1123 | * |
| 1124 | * ilookup() uses ifind_fast() to search for the inode @ino in the inode cache. |
| 1125 | * This is for file systems where the inode number is sufficient for unique |
| 1126 | * identification of an inode. |
| 1127 | * |
| 1128 | * If the inode is in the cache, the inode is returned with an incremented |
| 1129 | * reference count. |
| 1130 | * |
| 1131 | * Otherwise NULL is returned. |
| 1132 | */ |
| 1133 | struct inode *ilookup(struct super_block *sb, unsigned long ino) |
| 1134 | { |
| 1135 | struct hlist_head *head = inode_hashtable + hash(sb, ino); |
| 1136 | |
| 1137 | return ifind_fast(sb, head, ino); |
| 1138 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1139 | EXPORT_SYMBOL(ilookup); |
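
/*
 * Example: peeking at an inode that may or may not be cached, e.g. from a
 * filesystem's own bookkeeping code.  Illustrative sketch only; the
 * examplefs_* name and the message are made up.
 */
static void examplefs_report_cached_size(struct super_block *sb,
					 unsigned long ino)
{
	struct inode *inode = ilookup(sb, ino);

	if (!inode)
		return;			/* not in the inode cache */
	printk(KERN_DEBUG "inode %lu: %lld bytes\n",
	       inode->i_ino, (long long)i_size_read(inode));
	iput(inode);			/* drop the reference ilookup() took */
}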
| 1140 | |
| 1141 | /** |
| 1142 | * iget5_locked - obtain an inode from a mounted file system |
| 1143 | * @sb: super block of file system |
| 1144 | * @hashval: hash value (usually inode number) to get |
| 1145 | * @test: callback used for comparisons between inodes |
| 1146 | * @set: callback used to initialize a new struct inode |
| 1147 | * @data: opaque data pointer to pass to @test and @set |
| 1148 | * |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1149 | * iget5_locked() uses ifind() to search for the inode specified by @hashval |
| 1150 | * and @data in the inode cache and, if present, it is returned with an |
| 1151 | * increased reference count. This is a generalized version of iget_locked() |
| 1152 | * for file systems where the inode number is not sufficient for unique |
| 1153 | * identification of an inode. |
| 1154 | * |
| 1155 | * If the inode is not in cache, get_new_inode() is called to allocate a new |
| 1156 | * inode and this is returned locked, hashed, and with the I_NEW flag set. The |
| 1157 | * file system gets to fill it in before unlocking it via unlock_new_inode(). |
| 1158 | * |
| 1159 | * Note, both @test and @set are called with the inode_lock held, so they cannot sleep. |
| 1160 | */ |
| 1161 | struct inode *iget5_locked(struct super_block *sb, unsigned long hashval, |
| 1162 | int (*test)(struct inode *, void *), |
| 1163 | int (*set)(struct inode *, void *), void *data) |
| 1164 | { |
| 1165 | struct hlist_head *head = inode_hashtable + hash(sb, hashval); |
| 1166 | struct inode *inode; |
| 1167 | |
Anton Altaparmakov | 88bd512 | 2005-07-13 01:10:44 -0700 | [diff] [blame] | 1168 | inode = ifind(sb, head, test, data, 1); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1169 | if (inode) |
| 1170 | return inode; |
| 1171 | /* |
| 1172 | * get_new_inode() will do the right thing, re-trying the search |
| 1173 | * in case it had to block at any point. |
| 1174 | */ |
| 1175 | return get_new_inode(sb, head, test, set, data); |
| 1176 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1177 | EXPORT_SYMBOL(iget5_locked); |
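
/*
 * Example: the usual calling pattern for iget5_locked() in a filesystem
 * whose on-disk object ids do not fit in i_ino.  Illustrative sketch only:
 * everything named examplefs_* (including the disk-read helper) is
 * hypothetical.
 */
struct examplefs_inode_info {
	u64		objid;			/* on-disk object id */
	struct inode	vfs_inode;
};

static inline struct examplefs_inode_info *EXAMPLEFS_I(struct inode *inode)
{
	return container_of(inode, struct examplefs_inode_info, vfs_inode);
}

static int examplefs_iget_test(struct inode *inode, void *opaque)
{
	return EXAMPLEFS_I(inode)->objid == *(u64 *)opaque;
}

static int examplefs_iget_set(struct inode *inode, void *opaque)
{
	u64 objid = *(u64 *)opaque;

	EXAMPLEFS_I(inode)->objid = objid;
	inode->i_ino = (unsigned long)objid;	/* best-effort i_ino */
	return 0;
}

static struct inode *examplefs_iget_by_objid(struct super_block *sb, u64 objid)
{
	struct inode *inode;

	inode = iget5_locked(sb, (unsigned long)objid,
			     examplefs_iget_test, examplefs_iget_set, &objid);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;			/* cache hit, already set up */

	if (examplefs_read_inode(inode)) {	/* hypothetical disk read */
		iget_failed(inode);		/* marks it bad, unlocks and puts it */
		return ERR_PTR(-EIO);
	}
	unlock_new_inode(inode);
	return inode;
}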
| 1178 | |
| 1179 | /** |
| 1180 | * iget_locked - obtain an inode from a mounted file system |
| 1181 | * @sb: super block of file system |
| 1182 | * @ino: inode number to get |
| 1183 | * |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1184 | * iget_locked() uses ifind_fast() to search for the inode specified by @ino in |
| 1185 | * the inode cache and, if present, it is returned with an increased reference |
| 1186 | * count. This is for file systems where the inode number is sufficient for |
| 1187 | * unique identification of an inode. |
| 1188 | * |
| 1189 | * If the inode is not in cache, get_new_inode_fast() is called to allocate a |
| 1190 | * new inode and this is returned locked, hashed, and with the I_NEW flag set. |
| 1191 | * The file system gets to fill it in before unlocking it via |
| 1192 | * unlock_new_inode(). |
| 1193 | */ |
| 1194 | struct inode *iget_locked(struct super_block *sb, unsigned long ino) |
| 1195 | { |
| 1196 | struct hlist_head *head = inode_hashtable + hash(sb, ino); |
| 1197 | struct inode *inode; |
| 1198 | |
| 1199 | inode = ifind_fast(sb, head, ino); |
| 1200 | if (inode) |
| 1201 | return inode; |
| 1202 | /* |
| 1203 | * get_new_inode_fast() will do the right thing, re-trying the search |
| 1204 | * in case it had to block at any point. |
| 1205 | */ |
| 1206 | return get_new_inode_fast(sb, head, ino); |
| 1207 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1208 | EXPORT_SYMBOL(iget_locked); |
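
/*
 * Example: the common iget_locked() pattern for a filesystem where i_ino
 * alone identifies the inode.  Illustrative sketch only; examplefs_iget()
 * and the disk-read step are hypothetical stand-ins for the filesystem's
 * own code.
 */
static struct inode *examplefs_iget(struct super_block *sb, unsigned long ino)
{
	struct inode *inode;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;		/* already cached and initialized */

	/* hypothetical: read the on-disk inode, set i_mode, i_op, i_size... */
	if (examplefs_read_raw_inode(inode)) {
		iget_failed(inode);	/* marks it bad, unlocks and puts it */
		return ERR_PTR(-EIO);
	}
	unlock_new_inode(inode);
	return inode;
}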
| 1209 | |
Al Viro | 261bca8 | 2008-12-30 01:48:21 -0500 | [diff] [blame] | 1210 | int insert_inode_locked(struct inode *inode) |
| 1211 | { |
| 1212 | struct super_block *sb = inode->i_sb; |
| 1213 | ino_t ino = inode->i_ino; |
| 1214 | struct hlist_head *head = inode_hashtable + hash(sb, ino); |
Al Viro | 261bca8 | 2008-12-30 01:48:21 -0500 | [diff] [blame] | 1215 | |
Christoph Hellwig | eaff807 | 2009-12-17 14:25:01 +0100 | [diff] [blame] | 1216 | inode->i_state |= I_NEW; |
Al Viro | 261bca8 | 2008-12-30 01:48:21 -0500 | [diff] [blame] | 1217 | while (1) { |
Al Viro | 72a43d6 | 2009-05-13 19:13:40 +0100 | [diff] [blame] | 1218 | struct hlist_node *node; |
| 1219 | struct inode *old = NULL; |
Al Viro | 261bca8 | 2008-12-30 01:48:21 -0500 | [diff] [blame] | 1220 | spin_lock(&inode_lock); |
Al Viro | 72a43d6 | 2009-05-13 19:13:40 +0100 | [diff] [blame] | 1221 | hlist_for_each_entry(old, node, head, i_hash) { |
| 1222 | if (old->i_ino != ino) |
| 1223 | continue; |
| 1224 | if (old->i_sb != sb) |
| 1225 | continue; |
Al Viro | a4ffdde | 2010-06-02 17:38:30 -0400 | [diff] [blame] | 1226 | if (old->i_state & (I_FREEING|I_WILL_FREE)) |
Al Viro | 72a43d6 | 2009-05-13 19:13:40 +0100 | [diff] [blame] | 1227 | continue; |
| 1228 | break; |
| 1229 | } |
| 1230 | if (likely(!node)) { |
Al Viro | 261bca8 | 2008-12-30 01:48:21 -0500 | [diff] [blame] | 1231 | hlist_add_head(&inode->i_hash, head); |
| 1232 | spin_unlock(&inode_lock); |
| 1233 | return 0; |
| 1234 | } |
| 1235 | __iget(old); |
| 1236 | spin_unlock(&inode_lock); |
| 1237 | wait_on_inode(old); |
Al Viro | 1d3382c | 2010-10-23 15:19:20 -0400 | [diff] [blame] | 1238 | if (unlikely(!inode_unhashed(old))) { |
Al Viro | 261bca8 | 2008-12-30 01:48:21 -0500 | [diff] [blame] | 1239 | iput(old); |
| 1240 | return -EBUSY; |
| 1241 | } |
| 1242 | iput(old); |
| 1243 | } |
| 1244 | } |
Al Viro | 261bca8 | 2008-12-30 01:48:21 -0500 | [diff] [blame] | 1245 | EXPORT_SYMBOL(insert_inode_locked); |
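
/*
 * Example: how a filesystem typically uses insert_inode_locked() when
 * allocating a brand new on-disk inode, so that concurrent lookups of the
 * same inode number block until the inode is fully set up.  Illustrative
 * sketch only; examplefs_alloc_ino() is a hypothetical allocator.
 */
static struct inode *examplefs_new_inode(struct super_block *sb, int mode)
{
	struct inode *inode = new_inode(sb);

	if (!inode)
		return ERR_PTR(-ENOMEM);
	inode->i_ino = examplefs_alloc_ino(sb);		/* hypothetical */
	inode->i_mode = mode;
	if (insert_inode_locked(inode) < 0) {
		/* the inode number is already in use by a live inode */
		unlock_new_inode(inode);
		iput(inode);
		return ERR_PTR(-EBUSY);
	}
	/* ... initialize the rest, then unlock_new_inode(inode) ... */
	return inode;
}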
| 1246 | |
| 1247 | int insert_inode_locked4(struct inode *inode, unsigned long hashval, |
| 1248 | int (*test)(struct inode *, void *), void *data) |
| 1249 | { |
| 1250 | struct super_block *sb = inode->i_sb; |
| 1251 | struct hlist_head *head = inode_hashtable + hash(sb, hashval); |
Al Viro | 261bca8 | 2008-12-30 01:48:21 -0500 | [diff] [blame] | 1252 | |
Christoph Hellwig | eaff807 | 2009-12-17 14:25:01 +0100 | [diff] [blame] | 1253 | inode->i_state |= I_NEW; |
Al Viro | 261bca8 | 2008-12-30 01:48:21 -0500 | [diff] [blame] | 1254 | |
| 1255 | while (1) { |
Al Viro | 72a43d6 | 2009-05-13 19:13:40 +0100 | [diff] [blame] | 1256 | struct hlist_node *node; |
| 1257 | struct inode *old = NULL; |
| 1258 | |
Al Viro | 261bca8 | 2008-12-30 01:48:21 -0500 | [diff] [blame] | 1259 | spin_lock(&inode_lock); |
Al Viro | 72a43d6 | 2009-05-13 19:13:40 +0100 | [diff] [blame] | 1260 | hlist_for_each_entry(old, node, head, i_hash) { |
| 1261 | if (old->i_sb != sb) |
| 1262 | continue; |
| 1263 | if (!test(old, data)) |
| 1264 | continue; |
Al Viro | a4ffdde | 2010-06-02 17:38:30 -0400 | [diff] [blame] | 1265 | if (old->i_state & (I_FREEING|I_WILL_FREE)) |
Al Viro | 72a43d6 | 2009-05-13 19:13:40 +0100 | [diff] [blame] | 1266 | continue; |
| 1267 | break; |
| 1268 | } |
| 1269 | if (likely(!node)) { |
Al Viro | 261bca8 | 2008-12-30 01:48:21 -0500 | [diff] [blame] | 1270 | hlist_add_head(&inode->i_hash, head); |
| 1271 | spin_unlock(&inode_lock); |
| 1272 | return 0; |
| 1273 | } |
| 1274 | __iget(old); |
| 1275 | spin_unlock(&inode_lock); |
| 1276 | wait_on_inode(old); |
Al Viro | 1d3382c | 2010-10-23 15:19:20 -0400 | [diff] [blame] | 1277 | if (unlikely(!inode_unhashed(old))) { |
Al Viro | 261bca8 | 2008-12-30 01:48:21 -0500 | [diff] [blame] | 1278 | iput(old); |
| 1279 | return -EBUSY; |
| 1280 | } |
| 1281 | iput(old); |
| 1282 | } |
| 1283 | } |
Al Viro | 261bca8 | 2008-12-30 01:48:21 -0500 | [diff] [blame] | 1284 | EXPORT_SYMBOL(insert_inode_locked4); |
| 1285 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1286 | |
Al Viro | 45321ac | 2010-06-07 13:43:19 -0400 | [diff] [blame] | 1287 | int generic_delete_inode(struct inode *inode) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1288 | { |
Al Viro | 45321ac | 2010-06-07 13:43:19 -0400 | [diff] [blame] | 1289 | return 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1290 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1291 | EXPORT_SYMBOL(generic_delete_inode); |
| 1292 | |
Al Viro | 45321ac | 2010-06-07 13:43:19 -0400 | [diff] [blame] | 1293 | /* |
| 1294 | * Normal UNIX filesystem behaviour: delete the inode |
| 1295 | * when the usage count drops to zero and i_nlink is |
| 1296 | * zero, or when the inode has been unhashed. |
Jan Kara | 22fe4042 | 2009-09-18 13:05:44 -0700 | [diff] [blame] | 1297 | */ |
Al Viro | 45321ac | 2010-06-07 13:43:19 -0400 | [diff] [blame] | 1298 | int generic_drop_inode(struct inode *inode) |
| 1299 | { |
Al Viro | 1d3382c | 2010-10-23 15:19:20 -0400 | [diff] [blame] | 1300 | return !inode->i_nlink || inode_unhashed(inode); |
Al Viro | 45321ac | 2010-06-07 13:43:19 -0400 | [diff] [blame] | 1301 | } |
| 1302 | EXPORT_SYMBOL_GPL(generic_drop_inode); |
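
/*
 * Example: a filesystem that never wants unused inodes to linger in the
 * inode cache can point ->drop_inode at generic_delete_inode(); leaving
 * the field NULL gives the generic_drop_inode() behaviour above.
 * Illustrative sketch only; the examplefs_* name is hypothetical.
 */
static const struct super_operations examplefs_super_ops = {
	.statfs		= simple_statfs,
	.drop_inode	= generic_delete_inode,	/* always evict on final iput */
};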
| 1303 | |
| 1304 | /* |
| 1305 | * Called when we're dropping the last reference |
| 1306 | * to an inode. |
| 1307 | * |
| 1308 | * Call the FS "drop_inode()" function, defaulting to |
| 1309 | * the legacy UNIX filesystem behaviour. If it tells |
| 1310 | * us to evict the inode, do so. Otherwise, retain the |
| 1311 | * inode in the cache if the fs is alive, or sync and |
| 1312 | * evict it if the fs is shutting down. |
| 1313 | */ |
| 1314 | static void iput_final(struct inode *inode) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1315 | { |
| 1316 | struct super_block *sb = inode->i_sb; |
Al Viro | 45321ac | 2010-06-07 13:43:19 -0400 | [diff] [blame] | 1317 | const struct super_operations *op = inode->i_sb->s_op; |
| 1318 | int drop; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1319 | |
Al Viro | 45321ac | 2010-06-07 13:43:19 -0400 | [diff] [blame] | 1320 | if (op && op->drop_inode) |
| 1321 | drop = op->drop_inode(inode); |
| 1322 | else |
| 1323 | drop = generic_drop_inode(inode); |
| 1324 | |
| 1325 | if (!drop) { |
Christoph Hellwig | acb0c85 | 2007-05-08 00:25:52 -0700 | [diff] [blame] | 1326 | if (sb->s_flags & MS_ACTIVE) { |
Nick Piggin | 9e38d86 | 2010-10-23 06:55:17 -0400 | [diff] [blame] | 1327 | inode->i_state |= I_REFERENCED; |
| 1328 | if (!(inode->i_state & (I_DIRTY|I_SYNC))) { |
| 1329 | inode_lru_list_add(inode); |
| 1330 | } |
Alexander Viro | 991114c | 2005-06-23 00:09:01 -0700 | [diff] [blame] | 1331 | spin_unlock(&inode_lock); |
Al Viro | 45321ac | 2010-06-07 13:43:19 -0400 | [diff] [blame] | 1332 | return; |
Alexander Viro | 991114c | 2005-06-23 00:09:01 -0700 | [diff] [blame] | 1333 | } |
Nick Piggin | 7ef0d73 | 2009-03-12 14:31:38 -0700 | [diff] [blame] | 1334 | WARN_ON(inode->i_state & I_NEW); |
Alexander Viro | 991114c | 2005-06-23 00:09:01 -0700 | [diff] [blame] | 1335 | inode->i_state |= I_WILL_FREE; |
| 1336 | spin_unlock(&inode_lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1337 | write_inode_now(inode, 1); |
| 1338 | spin_lock(&inode_lock); |
Nick Piggin | 7ef0d73 | 2009-03-12 14:31:38 -0700 | [diff] [blame] | 1339 | WARN_ON(inode->i_state & I_NEW); |
Alexander Viro | 991114c | 2005-06-23 00:09:01 -0700 | [diff] [blame] | 1340 | inode->i_state &= ~I_WILL_FREE; |
Dave Chinner | 4c51acb | 2010-10-23 06:58:09 -0400 | [diff] [blame] | 1341 | __remove_inode_hash(inode); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1342 | } |
Nick Piggin | 7ef0d73 | 2009-03-12 14:31:38 -0700 | [diff] [blame] | 1343 | WARN_ON(inode->i_state & I_NEW); |
Alexander Viro | 991114c | 2005-06-23 00:09:01 -0700 | [diff] [blame] | 1344 | inode->i_state |= I_FREEING; |
Nick Piggin | 9e38d86 | 2010-10-23 06:55:17 -0400 | [diff] [blame] | 1345 | |
| 1346 | /* |
| 1347 | * After we delete the inode from the LRU here, we avoid moving dirty |
| 1348 | * inodes back onto the LRU now because I_FREEING is set and hence |
| 1349 | * writeback_single_inode() won't move the inode around. |
| 1350 | */ |
| 1351 | inode_lru_list_del(inode); |
| 1352 | |
Christoph Hellwig | 646ec46 | 2010-10-23 07:15:32 -0400 | [diff] [blame] | 1353 | __inode_sb_list_del(inode); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1354 | spin_unlock(&inode_lock); |
Al Viro | 644da59 | 2010-06-07 13:21:05 -0400 | [diff] [blame] | 1355 | evict(inode); |
Dave Chinner | 4c51acb | 2010-10-23 06:58:09 -0400 | [diff] [blame] | 1356 | remove_inode_hash(inode); |
Andrea Arcangeli | 7f04c26 | 2005-10-30 15:03:05 -0800 | [diff] [blame] | 1357 | wake_up_inode(inode); |
Al Viro | 45321ac | 2010-06-07 13:43:19 -0400 | [diff] [blame] | 1358 | BUG_ON(inode->i_state != (I_FREEING | I_CLEAR)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1359 | destroy_inode(inode); |
| 1360 | } |
| 1361 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1362 | /** |
Manish Katiyar | 6b3304b | 2009-03-31 19:35:54 +0530 | [diff] [blame] | 1363 | * iput - put an inode |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1364 | * @inode: inode to put |
| 1365 | * |
| 1366 | * Puts an inode, dropping its usage count. If the inode use count hits |
| 1367 | * zero, the inode is then freed and may also be destroyed. |
| 1368 | * |
| 1369 | * Consequently, iput() can sleep. |
| 1370 | */ |
| 1371 | void iput(struct inode *inode) |
| 1372 | { |
| 1373 | if (inode) { |
Al Viro | a4ffdde | 2010-06-02 17:38:30 -0400 | [diff] [blame] | 1374 | BUG_ON(inode->i_state & I_CLEAR); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1375 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1376 | if (atomic_dec_and_lock(&inode->i_count, &inode_lock)) |
| 1377 | iput_final(inode); |
| 1378 | } |
| 1379 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1380 | EXPORT_SYMBOL(iput); |
| 1381 | |
| 1382 | /** |
| 1383 | * bmap - find a block number in a file |
| 1384 | * @inode: inode of file |
| 1385 | * @block: block to find |
| 1386 | * |
| 1387 | * Returns the block number on the device holding the inode that |
| 1388 | * is the disk block number for the block of the file requested. |
| 1389 | * That is, asked for block 4 of inode 1 the function will return the |
Manish Katiyar | 6b3304b | 2009-03-31 19:35:54 +0530 | [diff] [blame] | 1390 | * disk block relative to the disk start that holds that block of the |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1391 | * file. |
| 1392 | */ |
Manish Katiyar | 6b3304b | 2009-03-31 19:35:54 +0530 | [diff] [blame] | 1393 | sector_t bmap(struct inode *inode, sector_t block) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1394 | { |
| 1395 | sector_t res = 0; |
| 1396 | if (inode->i_mapping->a_ops->bmap) |
| 1397 | res = inode->i_mapping->a_ops->bmap(inode->i_mapping, block); |
| 1398 | return res; |
| 1399 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1400 | EXPORT_SYMBOL(bmap); |
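
/*
 * Example: mapping the first block of a file to its on-disk block number,
 * much as the FIBMAP ioctl does.  A return value of 0 means "no mapping"
 * (a hole, or a filesystem without a ->bmap method).  Illustrative sketch
 * only; the examplefs_* name is hypothetical.
 */
static sector_t examplefs_first_block(struct inode *inode)
{
	sector_t blk = bmap(inode, 0);

	if (blk == 0)
		printk(KERN_DEBUG "inode %lu: block 0 is not mapped\n",
		       inode->i_ino);
	return blk;
}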
| 1401 | |
Matthew Garrett | 11ff6f05 | 2009-03-26 17:32:14 +0000 | [diff] [blame] | 1402 | /* |
| 1403 | * With relative atime, only update atime if the previous atime is |
| 1404 | * earlier than either the ctime or mtime or if at least a day has |
| 1405 | * passed since the last atime update. |
| 1406 | */ |
| 1407 | static int relatime_need_update(struct vfsmount *mnt, struct inode *inode, |
| 1408 | struct timespec now) |
| 1409 | { |
| 1410 | |
| 1411 | if (!(mnt->mnt_flags & MNT_RELATIME)) |
| 1412 | return 1; |
| 1413 | /* |
| 1414 | * Is mtime younger than atime? If yes, update atime: |
| 1415 | */ |
| 1416 | if (timespec_compare(&inode->i_mtime, &inode->i_atime) >= 0) |
| 1417 | return 1; |
| 1418 | /* |
| 1419 | * Is ctime younger than atime? If yes, update atime: |
| 1420 | */ |
| 1421 | if (timespec_compare(&inode->i_ctime, &inode->i_atime) >= 0) |
| 1422 | return 1; |
| 1423 | |
| 1424 | /* |
| 1425 | * Is the previous atime value older than a day? If yes, |
| 1426 | * update atime: |
| 1427 | */ |
| 1428 | if ((long)(now.tv_sec - inode->i_atime.tv_sec) >= 24*60*60) |
| 1429 | return 1; |
| 1430 | /* |
| 1431 | * Good, we can skip the atime update: |
| 1432 | */ |
| 1433 | return 0; |
| 1434 | } |
| 1435 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1436 | /** |
Christoph Hellwig | 869243a | 2006-01-09 20:52:03 -0800 | [diff] [blame] | 1437 | * touch_atime - update the access time |
| 1438 | * @mnt: mount the inode is accessed on |
Martin Waitz | 7045f37 | 2006-02-01 03:06:57 -0800 | [diff] [blame] | 1439 | * @dentry: dentry accessed |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1440 | * |
| 1441 | * Update the accessed time on an inode and mark it for writeback. |
| 1442 | * This function automatically handles read-only file systems and media, |
| 1443 | * as well as the "noatime" flag and inode specific "noatime" markers. |
| 1444 | */ |
Christoph Hellwig | 869243a | 2006-01-09 20:52:03 -0800 | [diff] [blame] | 1445 | void touch_atime(struct vfsmount *mnt, struct dentry *dentry) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1446 | { |
Christoph Hellwig | 869243a | 2006-01-09 20:52:03 -0800 | [diff] [blame] | 1447 | struct inode *inode = dentry->d_inode; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1448 | struct timespec now; |
| 1449 | |
Andrew Morton | b227613 | 2006-12-13 00:34:33 -0800 | [diff] [blame] | 1450 | if (inode->i_flags & S_NOATIME) |
Andi Kleen | b12536c | 2009-09-18 13:05:47 -0700 | [diff] [blame] | 1451 | return; |
Eric Dumazet | 37756ce | 2007-02-10 01:44:49 -0800 | [diff] [blame] | 1452 | if (IS_NOATIME(inode)) |
Andi Kleen | b12536c | 2009-09-18 13:05:47 -0700 | [diff] [blame] | 1453 | return; |
Andrew Morton | b227613 | 2006-12-13 00:34:33 -0800 | [diff] [blame] | 1454 | if ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode)) |
Andi Kleen | b12536c | 2009-09-18 13:05:47 -0700 | [diff] [blame] | 1455 | return; |
Christoph Hellwig | fc33a7b | 2006-01-09 20:52:17 -0800 | [diff] [blame] | 1456 | |
Dave Hansen | cdb70f3 | 2008-02-15 14:37:41 -0800 | [diff] [blame] | 1457 | if (mnt->mnt_flags & MNT_NOATIME) |
Andi Kleen | b12536c | 2009-09-18 13:05:47 -0700 | [diff] [blame] | 1458 | return; |
Dave Hansen | cdb70f3 | 2008-02-15 14:37:41 -0800 | [diff] [blame] | 1459 | if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode)) |
Andi Kleen | b12536c | 2009-09-18 13:05:47 -0700 | [diff] [blame] | 1460 | return; |
Christoph Hellwig | fc33a7b | 2006-01-09 20:52:17 -0800 | [diff] [blame] | 1461 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1462 | now = current_fs_time(inode->i_sb); |
Matthew Garrett | 11ff6f05 | 2009-03-26 17:32:14 +0000 | [diff] [blame] | 1463 | |
| 1464 | if (!relatime_need_update(mnt, inode, now)) |
Andi Kleen | b12536c | 2009-09-18 13:05:47 -0700 | [diff] [blame] | 1465 | return; |
Matthew Garrett | 11ff6f05 | 2009-03-26 17:32:14 +0000 | [diff] [blame] | 1466 | |
Valerie Henson | 47ae32d | 2006-12-13 00:34:34 -0800 | [diff] [blame] | 1467 | if (timespec_equal(&inode->i_atime, &now)) |
Andi Kleen | b12536c | 2009-09-18 13:05:47 -0700 | [diff] [blame] | 1468 | return; |
| 1469 | |
| 1470 | if (mnt_want_write(mnt)) |
| 1471 | return; |
Valerie Henson | 47ae32d | 2006-12-13 00:34:34 -0800 | [diff] [blame] | 1472 | |
| 1473 | inode->i_atime = now; |
| 1474 | mark_inode_dirty_sync(inode); |
Dave Hansen | cdb70f3 | 2008-02-15 14:37:41 -0800 | [diff] [blame] | 1475 | mnt_drop_write(mnt); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1476 | } |
Christoph Hellwig | 869243a | 2006-01-09 20:52:03 -0800 | [diff] [blame] | 1477 | EXPORT_SYMBOL(touch_atime); |
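
/*
 * Example: touch_atime() is normally reached through the file_accessed()
 * helper at the end of a read path; the open-coded call below is the
 * equivalent for a regular struct file, minus file_accessed()'s O_NOATIME
 * check.  Illustrative sketch only; the examplefs_* name is hypothetical.
 */
static void examplefs_mark_accessed(struct file *file)
{
	touch_atime(file->f_path.mnt, file->f_path.dentry);
}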
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1478 | |
| 1479 | /** |
Christoph Hellwig | 870f481 | 2006-01-09 20:52:01 -0800 | [diff] [blame] | 1480 | * file_update_time - update mtime and ctime time |
| 1481 | * @file: file accessed |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1482 | * |
Christoph Hellwig | 870f481 | 2006-01-09 20:52:01 -0800 | [diff] [blame] | 1483 | * Update the mtime and ctime members of an inode and mark the inode |
| 1484 | * for writeback. Note that this function is meant exclusively for |
| 1485 | * usage in the file write path of filesystems, and filesystems may |
| 1486 | * choose to explicitly ignore updates via this function with the |
| 1487 | * S_NOCMTIME inode flag, e.g. for network filesystems where these |
Christoph Hellwig | 870f481 | 2006-01-09 20:52:01 -0800 | [diff] [blame] | 1488 | * timestamps are handled by the server. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1489 | */ |
Christoph Hellwig | 870f481 | 2006-01-09 20:52:01 -0800 | [diff] [blame] | 1491 | void file_update_time(struct file *file) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1492 | { |
Josef "Jeff" Sipek | 0f7fc9e | 2006-12-08 02:36:35 -0800 | [diff] [blame] | 1493 | struct inode *inode = file->f_path.dentry->d_inode; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1494 | struct timespec now; |
Andi Kleen | ce06e0b | 2009-09-18 13:05:48 -0700 | [diff] [blame] | 1495 | enum { S_MTIME = 1, S_CTIME = 2, S_VERSION = 4 } sync_it = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1496 | |
Andi Kleen | ce06e0b | 2009-09-18 13:05:48 -0700 | [diff] [blame] | 1497 | /* First try to exhaust all avenues to not sync */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1498 | if (IS_NOCMTIME(inode)) |
| 1499 | return; |
Dave Hansen | 20ddee2 | 2008-02-15 14:37:43 -0800 | [diff] [blame] | 1500 | |
Andi Kleen | ce06e0b | 2009-09-18 13:05:48 -0700 | [diff] [blame] | 1501 | now = current_fs_time(inode->i_sb); |
| 1502 | if (!timespec_equal(&inode->i_mtime, &now)) |
| 1503 | sync_it = S_MTIME; |
| 1504 | |
| 1505 | if (!timespec_equal(&inode->i_ctime, &now)) |
| 1506 | sync_it |= S_CTIME; |
| 1507 | |
| 1508 | if (IS_I_VERSION(inode)) |
| 1509 | sync_it |= S_VERSION; |
| 1510 | |
| 1511 | if (!sync_it) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1512 | return; |
| 1513 | |
Andi Kleen | ce06e0b | 2009-09-18 13:05:48 -0700 | [diff] [blame] | 1514 | /* Finally allowed to write? Takes lock. */ |
| 1515 | if (mnt_want_write_file(file)) |
| 1516 | return; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1517 | |
Andi Kleen | ce06e0b | 2009-09-18 13:05:48 -0700 | [diff] [blame] | 1518 | /* Only change inode inside the lock region */ |
| 1519 | if (sync_it & S_VERSION) |
Jean Noel Cordenner | 7a22422 | 2008-01-28 23:58:27 -0500 | [diff] [blame] | 1520 | inode_inc_iversion(inode); |
Andi Kleen | ce06e0b | 2009-09-18 13:05:48 -0700 | [diff] [blame] | 1521 | if (sync_it & S_CTIME) |
| 1522 | inode->i_ctime = now; |
| 1523 | if (sync_it & S_MTIME) |
| 1524 | inode->i_mtime = now; |
| 1525 | mark_inode_dirty_sync(inode); |
Dave Hansen | 20ddee2 | 2008-02-15 14:37:43 -0800 | [diff] [blame] | 1526 | mnt_drop_write(file->f_path.mnt); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1527 | } |
Christoph Hellwig | 870f481 | 2006-01-09 20:52:01 -0800 | [diff] [blame] | 1528 | EXPORT_SYMBOL(file_update_time); |
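
/*
 * Example: a write path calls file_update_time() before copying any data,
 * so mtime/ctime (and i_version where enabled) reflect the modification;
 * the generic path in __generic_file_aio_write() already does this for
 * filesystems that use it.  Illustrative sketch only; the examplefs_*
 * name is hypothetical.
 */
static void examplefs_prepare_write(struct file *file)
{
	file_update_time(file);
	/* ... then copy data into the page cache or to disk ... */
}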
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1529 | |
| 1530 | int inode_needs_sync(struct inode *inode) |
| 1531 | { |
| 1532 | if (IS_SYNC(inode)) |
| 1533 | return 1; |
| 1534 | if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode)) |
| 1535 | return 1; |
| 1536 | return 0; |
| 1537 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1538 | EXPORT_SYMBOL(inode_needs_sync); |
| 1539 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1540 | int inode_wait(void *word) |
| 1541 | { |
| 1542 | schedule(); |
| 1543 | return 0; |
| 1544 | } |
Stephen Rothwell | d44dab8 | 2008-11-10 17:06:05 +1100 | [diff] [blame] | 1545 | EXPORT_SYMBOL(inode_wait); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1546 | |
| 1547 | /* |
Miklos Szeredi | 168a9fd | 2005-07-12 13:58:10 -0700 | [diff] [blame] | 1548 | * If we try to find an inode in the inode hash while it is being |
| 1549 | * deleted, we have to wait until the filesystem completes its |
| 1550 | * deletion before reporting that it isn't found. This function waits |
| 1551 | * until the deletion _might_ have completed. Callers are responsible |
| 1552 | * for rechecking the inode state. |
| 1553 | * |
| 1554 | * It doesn't matter if I_NEW is not set initially; a call to |
| 1555 | * wake_up_inode() after removing from the hash list will do the right thing. |
| 1556 | * |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1557 | * This is called with inode_lock held. |
| 1558 | */ |
| 1559 | static void __wait_on_freeing_inode(struct inode *inode) |
| 1560 | { |
| 1561 | wait_queue_head_t *wq; |
Christoph Hellwig | eaff807 | 2009-12-17 14:25:01 +0100 | [diff] [blame] | 1562 | DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW); |
| 1563 | wq = bit_waitqueue(&inode->i_state, __I_NEW); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1564 | prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE); |
| 1565 | spin_unlock(&inode_lock); |
| 1566 | schedule(); |
| 1567 | finish_wait(wq, &wait.wait); |
| 1568 | spin_lock(&inode_lock); |
| 1569 | } |
| 1570 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1571 | static __initdata unsigned long ihash_entries; |
| 1572 | static int __init set_ihash_entries(char *str) |
| 1573 | { |
| 1574 | if (!str) |
| 1575 | return 0; |
| 1576 | ihash_entries = simple_strtoul(str, &str, 0); |
| 1577 | return 1; |
| 1578 | } |
| 1579 | __setup("ihash_entries=", set_ihash_entries); |
| 1580 | |
| 1581 | /* |
| 1582 | * Initialize the waitqueues and inode hash table. |
| 1583 | */ |
| 1584 | void __init inode_init_early(void) |
| 1585 | { |
| 1586 | int loop; |
| 1587 | |
| 1588 | /* If hashes are distributed across NUMA nodes, defer |
| 1589 | * hash allocation until vmalloc space is available. |
| 1590 | */ |
| 1591 | if (hashdist) |
| 1592 | return; |
| 1593 | |
| 1594 | inode_hashtable = |
| 1595 | alloc_large_system_hash("Inode-cache", |
| 1596 | sizeof(struct hlist_head), |
| 1597 | ihash_entries, |
| 1598 | 14, |
| 1599 | HASH_EARLY, |
| 1600 | &i_hash_shift, |
| 1601 | &i_hash_mask, |
| 1602 | 0); |
| 1603 | |
| 1604 | for (loop = 0; loop < (1 << i_hash_shift); loop++) |
| 1605 | INIT_HLIST_HEAD(&inode_hashtable[loop]); |
| 1606 | } |
| 1607 | |
Denis Cheng | 74bf17c | 2007-10-16 23:26:30 -0700 | [diff] [blame] | 1608 | void __init inode_init(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1609 | { |
| 1610 | int loop; |
| 1611 | |
| 1612 | /* inode slab cache */ |
Paul Jackson | b019600 | 2006-03-24 03:16:09 -0800 | [diff] [blame] | 1613 | inode_cachep = kmem_cache_create("inode_cache", |
| 1614 | sizeof(struct inode), |
| 1615 | 0, |
| 1616 | (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC| |
| 1617 | SLAB_MEM_SPREAD), |
Paul Mundt | 20c2df8 | 2007-07-20 10:11:58 +0900 | [diff] [blame] | 1618 | init_once); |
Rusty Russell | 8e1f936 | 2007-07-17 04:03:17 -0700 | [diff] [blame] | 1619 | register_shrinker(&icache_shrinker); |
Dave Chinner | cffbc8a | 2010-10-23 05:03:02 -0400 | [diff] [blame] | 1620 | percpu_counter_init(&nr_inodes, 0); |
| 1621 | percpu_counter_init(&nr_inodes_unused, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1622 | |
| 1623 | /* Hash may have been set up in inode_init_early */ |
| 1624 | if (!hashdist) |
| 1625 | return; |
| 1626 | |
| 1627 | inode_hashtable = |
| 1628 | alloc_large_system_hash("Inode-cache", |
| 1629 | sizeof(struct hlist_head), |
| 1630 | ihash_entries, |
| 1631 | 14, |
| 1632 | 0, |
| 1633 | &i_hash_shift, |
| 1634 | &i_hash_mask, |
| 1635 | 0); |
| 1636 | |
| 1637 | for (loop = 0; loop < (1 << i_hash_shift); loop++) |
| 1638 | INIT_HLIST_HEAD(&inode_hashtable[loop]); |
| 1639 | } |
| 1640 | |
| 1641 | void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev) |
| 1642 | { |
| 1643 | inode->i_mode = mode; |
| 1644 | if (S_ISCHR(mode)) { |
| 1645 | inode->i_fop = &def_chr_fops; |
| 1646 | inode->i_rdev = rdev; |
| 1647 | } else if (S_ISBLK(mode)) { |
| 1648 | inode->i_fop = &def_blk_fops; |
| 1649 | inode->i_rdev = rdev; |
| 1650 | } else if (S_ISFIFO(mode)) |
| 1651 | inode->i_fop = &def_fifo_fops; |
| 1652 | else if (S_ISSOCK(mode)) |
| 1653 | inode->i_fop = &bad_sock_fops; |
| 1654 | else |
Manish Katiyar | af0d9ae | 2009-09-18 13:05:43 -0700 | [diff] [blame] | 1655 | printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o) for" |
| 1656 | " inode %s:%lu\n", mode, inode->i_sb->s_id, |
| 1657 | inode->i_ino); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1658 | } |
| 1659 | EXPORT_SYMBOL(init_special_inode); |
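
/*
 * Example: the tail of a typical ->mknod() implementation, which relies on
 * init_special_inode() to wire up the right file operations for a device
 * node, FIFO or socket.  Illustrative sketch only; examplefs_get_inode()
 * is a hypothetical allocator and the directory update is omitted.
 */
static int examplefs_mknod(struct inode *dir, struct dentry *dentry,
			   int mode, dev_t rdev)
{
	struct inode *inode = examplefs_get_inode(dir->i_sb);	/* hypothetical */

	if (!inode)
		return -ENOMEM;
	inode_init_owner(inode, dir, mode);
	init_special_inode(inode, inode->i_mode, rdev);
	mark_inode_dirty(inode);
	d_instantiate(dentry, inode);
	return 0;
}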
Dmitry Monakhov | a1bd120 | 2010-03-04 17:29:14 +0300 | [diff] [blame] | 1660 | |
| 1661 | /** |
| 1662 | * inode_init_owner - Init uid,gid,mode for new inode according to POSIX standards |
| 1663 | * @inode: New inode |
| 1664 | * @dir: Directory inode |
| 1665 | * @mode: mode of the new inode |
| 1666 | */ |
| 1667 | void inode_init_owner(struct inode *inode, const struct inode *dir, |
| 1668 | mode_t mode) |
| 1669 | { |
| 1670 | inode->i_uid = current_fsuid(); |
| 1671 | if (dir && dir->i_mode & S_ISGID) { |
| 1672 | inode->i_gid = dir->i_gid; |
| 1673 | if (S_ISDIR(mode)) |
| 1674 | mode |= S_ISGID; |
| 1675 | } else |
| 1676 | inode->i_gid = current_fsgid(); |
| 1677 | inode->i_mode = mode; |
| 1678 | } |
| 1679 | EXPORT_SYMBOL(inode_init_owner); |
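
/*
 * Example: a ->create()/->mkdir() style helper that uses inode_init_owner()
 * so the new inode picks up the caller's fsuid, the parent's gid when the
 * setgid bit is set, and the requested mode.  Illustrative sketch only;
 * the examplefs_* operation tables are hypothetical.
 */
static struct inode *examplefs_make_inode(struct super_block *sb,
					  struct inode *dir, int mode)
{
	struct inode *inode = new_inode(sb);

	if (!inode)
		return NULL;
	inode_init_owner(inode, dir, mode);
	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	if (S_ISDIR(mode)) {
		inode->i_op = &examplefs_dir_inode_operations;	/* hypothetical */
		inode->i_fop = &examplefs_dir_operations;	/* hypothetical */
		inc_nlink(inode);	/* directories start with a "." link */
	} else {
		inode->i_op = &examplefs_file_inode_operations;	/* hypothetical */
		inode->i_fop = &examplefs_file_operations;	/* hypothetical */
	}
	return inode;
}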