/*
 * (C) 1997 Linus Torvalds
 * (C) 1999 Andrea Arcangeli <andrea@suse.de> (dynamic inode allocation)
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/dcache.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/writeback.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/rwsem.h>
#include <linux/hash.h>
#include <linux/swap.h>
#include <linux/security.h>
#include <linux/pagemap.h>
#include <linux/cdev.h>
#include <linux/bootmem.h>
#include <linux/fsnotify.h>
#include <linux/mount.h>
#include <linux/async.h>
#include <linux/posix_acl.h>
#include <linux/prefetch.h>
#include <linux/ima.h>
#include <linux/cred.h>
#include <linux/buffer_head.h> /* for inode_has_buffers */
#include "internal.h"

/*
 * Inode locking rules:
 *
 * inode->i_lock protects:
 *   inode->i_state, inode->i_hash, __iget()
 * inode_lru_lock protects:
 *   inode_lru, inode->i_lru
 * inode_sb_list_lock protects:
 *   sb->s_inodes, inode->i_sb_list
 * inode_wb_list_lock protects:
 *   bdi->wb.b_{dirty,io,more_io}, inode->i_wb_list
 * inode_hash_lock protects:
 *   inode_hashtable, inode->i_hash
 *
 * Lock ordering:
 *
 * inode_sb_list_lock
 *   inode->i_lock
 *     inode_lru_lock
 *
 * inode_wb_list_lock
 *   inode->i_lock
 *
 * inode_hash_lock
 *   inode_sb_list_lock
 *   inode->i_lock
 *
 * iunique_lock
 *   inode_hash_lock
 */
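
/*
 * Illustration of the ordering above (a sketch, not code in this file):
 * inode_lru_lock nests inside inode->i_lock, so anything that already
 * holds inode_lru_lock must not block on an i_lock and instead does
 *
 *	spin_lock(&inode_lru_lock);
 *	...
 *	if (!spin_trylock(&inode->i_lock))
 *		...	(back off rather than invert the lock order)
 *
 * as prune_icache() below does.
 */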

static unsigned int i_hash_mask __read_mostly;
static unsigned int i_hash_shift __read_mostly;
static struct hlist_head *inode_hashtable __read_mostly;
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);

static LIST_HEAD(inode_lru);
static DEFINE_SPINLOCK(inode_lru_lock);

__cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_sb_list_lock);
__cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_wb_list_lock);

/*
 * iprune_sem provides exclusion between the icache shrinking and the
 * umount path.
 *
 * We don't actually need it to protect anything in the umount path,
 * but only need to cycle through it to make sure any inode that
 * prune_icache took off the LRU list has been fully torn down by the
 * time we are past evict_inodes.
 */
static DECLARE_RWSEM(iprune_sem);

/*
 * Empty aops. Can be used for the cases where the user does not
 * define any of the address_space operations.
 */
const struct address_space_operations empty_aops = {
};
EXPORT_SYMBOL(empty_aops);

/*
 * Statistics gathering..
 */
struct inodes_stat_t inodes_stat;

static DEFINE_PER_CPU(unsigned int, nr_inodes);

static struct kmem_cache *inode_cachep __read_mostly;

static int get_nr_inodes(void)
{
	int i;
	int sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_inodes, i);
	return sum < 0 ? 0 : sum;
}

static inline int get_nr_inodes_unused(void)
{
	return inodes_stat.nr_unused;
}

int get_nr_dirty_inodes(void)
{
	/* not actually dirty inodes, but a wild approximation */
	int nr_dirty = get_nr_inodes() - get_nr_inodes_unused();
	return nr_dirty > 0 ? nr_dirty : 0;
}

/*
 * Handle nr_inodes sysctl
 */
#ifdef CONFIG_SYSCTL
int proc_nr_inodes(ctl_table *table, int write,
		   void __user *buffer, size_t *lenp, loff_t *ppos)
{
	inodes_stat.nr_inodes = get_nr_inodes();
	return proc_dointvec(table, write, buffer, lenp, ppos);
}
#endif

/**
 * inode_init_always - perform inode structure initialisation
 * @sb: superblock inode belongs to
 * @inode: inode to initialise
 *
 * These are initializations that need to be done on every inode
 * allocation as the fields are not initialised by slab allocation.
 */
int inode_init_always(struct super_block *sb, struct inode *inode)
{
	static const struct inode_operations empty_iops;
	static const struct file_operations empty_fops;
	struct address_space *const mapping = &inode->i_data;

	inode->i_sb = sb;
	inode->i_blkbits = sb->s_blocksize_bits;
	inode->i_flags = 0;
	atomic_set(&inode->i_count, 1);
	inode->i_op = &empty_iops;
	inode->i_fop = &empty_fops;
	inode->i_nlink = 1;
	inode->i_uid = 0;
	inode->i_gid = 0;
	atomic_set(&inode->i_writecount, 0);
	inode->i_size = 0;
	inode->i_blocks = 0;
	inode->i_bytes = 0;
	inode->i_generation = 0;
#ifdef CONFIG_QUOTA
	memset(&inode->i_dquot, 0, sizeof(inode->i_dquot));
#endif
	inode->i_pipe = NULL;
	inode->i_bdev = NULL;
	inode->i_cdev = NULL;
	inode->i_rdev = 0;
	inode->dirtied_when = 0;

	if (security_inode_alloc(inode))
		goto out;
	spin_lock_init(&inode->i_lock);
	lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);

	mutex_init(&inode->i_mutex);
	lockdep_set_class(&inode->i_mutex, &sb->s_type->i_mutex_key);

	init_rwsem(&inode->i_alloc_sem);
	lockdep_set_class(&inode->i_alloc_sem, &sb->s_type->i_alloc_sem_key);

	mapping->a_ops = &empty_aops;
	mapping->host = inode;
	mapping->flags = 0;
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
	mapping->assoc_mapping = NULL;
	mapping->backing_dev_info = &default_backing_dev_info;
	mapping->writeback_index = 0;

	/*
	 * If the block_device provides a backing_dev_info for client
	 * inodes then use that. Otherwise the inode shares the bdev's
	 * backing_dev_info.
	 */
	if (sb->s_bdev) {
		struct backing_dev_info *bdi;

		bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info;
		mapping->backing_dev_info = bdi;
	}
	inode->i_private = NULL;
	inode->i_mapping = mapping;
#ifdef CONFIG_FS_POSIX_ACL
	inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;
#endif

#ifdef CONFIG_FSNOTIFY
	inode->i_fsnotify_mask = 0;
#endif

	this_cpu_inc(nr_inodes);

	return 0;
out:
	return -ENOMEM;
}
EXPORT_SYMBOL(inode_init_always);
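
/*
 * Usage sketch (illustrative only; foo_free_inode() is a hypothetical
 * fs-private helper): a filesystem that recycles its own inode objects
 * can reinitialise the VFS part with inode_init_always() and must free
 * the object itself if that fails:
 *
 *	if (inode_init_always(sb, inode)) {
 *		foo_free_inode(inode);
 *		return NULL;
 *	}
 *
 * The generic alloc_inode() below follows the same pattern.
 */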

static struct inode *alloc_inode(struct super_block *sb)
{
	struct inode *inode;

	if (sb->s_op->alloc_inode)
		inode = sb->s_op->alloc_inode(sb);
	else
		inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);

	if (!inode)
		return NULL;

	if (unlikely(inode_init_always(sb, inode))) {
		if (inode->i_sb->s_op->destroy_inode)
			inode->i_sb->s_op->destroy_inode(inode);
		else
			kmem_cache_free(inode_cachep, inode);
		return NULL;
	}

	return inode;
}

void free_inode_nonrcu(struct inode *inode)
{
	kmem_cache_free(inode_cachep, inode);
}
EXPORT_SYMBOL(free_inode_nonrcu);

void __destroy_inode(struct inode *inode)
{
	BUG_ON(inode_has_buffers(inode));
	security_inode_free(inode);
	fsnotify_inode_delete(inode);
#ifdef CONFIG_FS_POSIX_ACL
	if (inode->i_acl && inode->i_acl != ACL_NOT_CACHED)
		posix_acl_release(inode->i_acl);
	if (inode->i_default_acl && inode->i_default_acl != ACL_NOT_CACHED)
		posix_acl_release(inode->i_default_acl);
#endif
	this_cpu_dec(nr_inodes);
}
EXPORT_SYMBOL(__destroy_inode);

static void i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	INIT_LIST_HEAD(&inode->i_dentry);
	kmem_cache_free(inode_cachep, inode);
}

static void destroy_inode(struct inode *inode)
{
	BUG_ON(!list_empty(&inode->i_lru));
	__destroy_inode(inode);
	if (inode->i_sb->s_op->destroy_inode)
		inode->i_sb->s_op->destroy_inode(inode);
	else
		call_rcu(&inode->i_rcu, i_callback);
}

void address_space_init_once(struct address_space *mapping)
{
	memset(mapping, 0, sizeof(*mapping));
	INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
	spin_lock_init(&mapping->tree_lock);
	mutex_init(&mapping->i_mmap_mutex);
	INIT_LIST_HEAD(&mapping->private_list);
	spin_lock_init(&mapping->private_lock);
	INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap);
	INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
}
EXPORT_SYMBOL(address_space_init_once);

/*
 * These are initializations that only need to be done
 * once, because the fields are idempotent across use
 * of the inode, so let the slab be aware of that.
 */
void inode_init_once(struct inode *inode)
{
	memset(inode, 0, sizeof(*inode));
	INIT_HLIST_NODE(&inode->i_hash);
	INIT_LIST_HEAD(&inode->i_dentry);
	INIT_LIST_HEAD(&inode->i_devices);
	INIT_LIST_HEAD(&inode->i_wb_list);
	INIT_LIST_HEAD(&inode->i_lru);
	address_space_init_once(&inode->i_data);
	i_size_ordered_init(inode);
#ifdef CONFIG_FSNOTIFY
	INIT_HLIST_HEAD(&inode->i_fsnotify_marks);
#endif
}
EXPORT_SYMBOL(inode_init_once);

static void init_once(void *foo)
{
	struct inode *inode = (struct inode *) foo;

	inode_init_once(inode);
}

/*
 * inode->i_lock must be held
 */
void __iget(struct inode *inode)
{
	atomic_inc(&inode->i_count);
}

/*
 * get additional reference to inode; caller must already hold one.
 */
void ihold(struct inode *inode)
{
	WARN_ON(atomic_inc_return(&inode->i_count) < 2);
}
EXPORT_SYMBOL(ihold);

static void inode_lru_list_add(struct inode *inode)
{
	spin_lock(&inode_lru_lock);
	if (list_empty(&inode->i_lru)) {
		list_add(&inode->i_lru, &inode_lru);
		inodes_stat.nr_unused++;
	}
	spin_unlock(&inode_lru_lock);
}

static void inode_lru_list_del(struct inode *inode)
{
	spin_lock(&inode_lru_lock);
	if (!list_empty(&inode->i_lru)) {
		list_del_init(&inode->i_lru);
		inodes_stat.nr_unused--;
	}
	spin_unlock(&inode_lru_lock);
}

/**
 * inode_sb_list_add - add inode to the superblock list of inodes
 * @inode: inode to add
 */
void inode_sb_list_add(struct inode *inode)
{
	spin_lock(&inode_sb_list_lock);
	list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
	spin_unlock(&inode_sb_list_lock);
}
EXPORT_SYMBOL_GPL(inode_sb_list_add);

static inline void inode_sb_list_del(struct inode *inode)
{
	spin_lock(&inode_sb_list_lock);
	list_del_init(&inode->i_sb_list);
	spin_unlock(&inode_sb_list_lock);
}

static unsigned long hash(struct super_block *sb, unsigned long hashval)
{
	unsigned long tmp;

	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
			L1_CACHE_BYTES;
	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift);
	return tmp & i_hash_mask;
}

/**
 * __insert_inode_hash - hash an inode
 * @inode: unhashed inode
 * @hashval: unsigned long value used to locate this object in the
 *		inode_hashtable.
 *
 * Add an inode to the inode hash for this superblock.
 */
void __insert_inode_hash(struct inode *inode, unsigned long hashval)
{
	struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);

	spin_lock(&inode_hash_lock);
	spin_lock(&inode->i_lock);
	hlist_add_head(&inode->i_hash, b);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__insert_inode_hash);

/**
 * remove_inode_hash - remove an inode from the hash
 * @inode: inode to unhash
 *
 * Remove an inode from the superblock.
 */
void remove_inode_hash(struct inode *inode)
{
	spin_lock(&inode_hash_lock);
	spin_lock(&inode->i_lock);
	hlist_del_init(&inode->i_hash);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(remove_inode_hash);

void end_writeback(struct inode *inode)
{
	might_sleep();
	BUG_ON(inode->i_data.nrpages);
	BUG_ON(!list_empty(&inode->i_data.private_list));
	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(inode->i_state & I_CLEAR);
	inode_sync_wait(inode);
	/* don't need i_lock here, no concurrent mods to i_state */
	inode->i_state = I_FREEING | I_CLEAR;
}
EXPORT_SYMBOL(end_writeback);

/*
 * Free the inode passed in, removing it from the lists it is still connected
 * to. We remove any pages still attached to the inode and wait for any IO that
 * is still in progress before finally destroying the inode.
 *
 * An inode must already be marked I_FREEING so that we avoid the inode being
 * moved back onto lists if we race with other code that manipulates the lists
 * (e.g. writeback_single_inode). The caller is responsible for setting this.
 *
 * An inode must already be removed from the LRU list before being evicted from
 * the cache. This should occur atomically with setting the I_FREEING state
 * flag, so no inodes here should ever be on the LRU when being evicted.
 */
static void evict(struct inode *inode)
{
	const struct super_operations *op = inode->i_sb->s_op;

	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(!list_empty(&inode->i_lru));

	inode_wb_list_del(inode);
	inode_sb_list_del(inode);

	if (op->evict_inode) {
		op->evict_inode(inode);
	} else {
		if (inode->i_data.nrpages)
			truncate_inode_pages(&inode->i_data, 0);
		end_writeback(inode);
	}
	if (S_ISBLK(inode->i_mode) && inode->i_bdev)
		bd_forget(inode);
	if (S_ISCHR(inode->i_mode) && inode->i_cdev)
		cd_forget(inode);

	remove_inode_hash(inode);

	spin_lock(&inode->i_lock);
	wake_up_bit(&inode->i_state, __I_NEW);
	BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
	spin_unlock(&inode->i_lock);

	destroy_inode(inode);
}

/*
 * dispose_list - dispose of the contents of a local list
 * @head: the head of the list to free
 *
 * Dispose-list gets a local list with local inodes in it, so it doesn't
 * need to worry about list corruption and SMP locks.
 */
static void dispose_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct inode *inode;

		inode = list_first_entry(head, struct inode, i_lru);
		list_del_init(&inode->i_lru);

		evict(inode);
	}
}

/**
 * evict_inodes - evict all evictable inodes for a superblock
 * @sb: superblock to operate on
 *
 * Make sure that no inodes with zero refcount are retained. This is
 * called by superblock shutdown after having MS_ACTIVE flag removed,
 * so any inode reaching zero refcount during or after that call will
 * be immediately evicted.
 */
void evict_inodes(struct super_block *sb)
{
	struct inode *inode, *next;
	LIST_HEAD(dispose);

	spin_lock(&inode_sb_list_lock);
	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
		if (atomic_read(&inode->i_count))
			continue;

		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			continue;
		}

		inode->i_state |= I_FREEING;
		inode_lru_list_del(inode);
		spin_unlock(&inode->i_lock);
		list_add(&inode->i_lru, &dispose);
	}
	spin_unlock(&inode_sb_list_lock);

	dispose_list(&dispose);

	/*
	 * Cycle through iprune_sem to make sure any inode that prune_icache
	 * moved off the list before we took the lock has been fully torn
	 * down.
	 */
	down_write(&iprune_sem);
	up_write(&iprune_sem);
}

/**
 * invalidate_inodes - attempt to free all inodes on a superblock
 * @sb: superblock to operate on
 * @kill_dirty: flag to guide handling of dirty inodes
 *
 * Attempts to free all inodes for a given superblock. If there were any
 * busy inodes return a non-zero value, else zero.
 * If @kill_dirty is set, discard dirty inodes too, otherwise treat
 * them as busy.
 */
int invalidate_inodes(struct super_block *sb, bool kill_dirty)
{
	int busy = 0;
	struct inode *inode, *next;
	LIST_HEAD(dispose);

	spin_lock(&inode_sb_list_lock);
	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (inode->i_state & I_DIRTY && !kill_dirty) {
			spin_unlock(&inode->i_lock);
			busy = 1;
			continue;
		}
		if (atomic_read(&inode->i_count)) {
			spin_unlock(&inode->i_lock);
			busy = 1;
			continue;
		}

		inode->i_state |= I_FREEING;
		inode_lru_list_del(inode);
		spin_unlock(&inode->i_lock);
		list_add(&inode->i_lru, &dispose);
	}
	spin_unlock(&inode_sb_list_lock);

	dispose_list(&dispose);

	return busy;
}

static int can_unuse(struct inode *inode)
{
	if (inode->i_state & ~I_REFERENCED)
		return 0;
	if (inode_has_buffers(inode))
		return 0;
	if (atomic_read(&inode->i_count))
		return 0;
	if (inode->i_data.nrpages)
		return 0;
	return 1;
}

/*
 * Scan `goal' inodes on the unused list for freeable ones. They are moved to a
 * temporary list and then are freed outside inode_lru_lock by dispose_list().
 *
 * Any inodes which are pinned purely because of attached pagecache have their
 * pagecache removed. If the inode has metadata buffers attached to
 * mapping->private_list then try to remove them.
 *
 * If the inode has the I_REFERENCED flag set, then it means that it has been
 * used recently - the flag is set in iput_final(). When we encounter such an
 * inode, clear the flag and move it to the back of the LRU so it gets another
 * pass through the LRU before it gets reclaimed. This is necessary because of
 * the fact we are doing lazy LRU updates to minimise lock contention so the
 * LRU does not have strict ordering. Hence we don't want to reclaim inodes
 * with this flag set because they are the inodes that are out of order.
 */
static void prune_icache(int nr_to_scan)
{
	LIST_HEAD(freeable);
	int nr_scanned;
	unsigned long reap = 0;

	down_read(&iprune_sem);
	spin_lock(&inode_lru_lock);
	for (nr_scanned = 0; nr_scanned < nr_to_scan; nr_scanned++) {
		struct inode *inode;

		if (list_empty(&inode_lru))
			break;

		inode = list_entry(inode_lru.prev, struct inode, i_lru);

		/*
		 * we are inverting the inode_lru_lock/inode->i_lock here,
		 * so use a trylock. If we fail to get the lock, just move the
		 * inode to the back of the list so we don't spin on it.
		 */
		if (!spin_trylock(&inode->i_lock)) {
			list_move(&inode->i_lru, &inode_lru);
			continue;
		}

		/*
		 * Referenced or dirty inodes are still in use. Give them
		 * another pass through the LRU as we cannot reclaim them now.
		 */
		if (atomic_read(&inode->i_count) ||
		    (inode->i_state & ~I_REFERENCED)) {
			list_del_init(&inode->i_lru);
			spin_unlock(&inode->i_lock);
			inodes_stat.nr_unused--;
			continue;
		}

		/* recently referenced inodes get one more pass */
		if (inode->i_state & I_REFERENCED) {
			inode->i_state &= ~I_REFERENCED;
			list_move(&inode->i_lru, &inode_lru);
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (inode_has_buffers(inode) || inode->i_data.nrpages) {
			__iget(inode);
			spin_unlock(&inode->i_lock);
			spin_unlock(&inode_lru_lock);
			if (remove_inode_buffers(inode))
				reap += invalidate_mapping_pages(&inode->i_data,
								0, -1);
			iput(inode);
			spin_lock(&inode_lru_lock);

			if (inode != list_entry(inode_lru.next,
						struct inode, i_lru))
				continue;	/* wrong inode or list_empty */
			/* avoid lock inversions with trylock */
			if (!spin_trylock(&inode->i_lock))
				continue;
			if (!can_unuse(inode)) {
				spin_unlock(&inode->i_lock);
				continue;
			}
		}
		WARN_ON(inode->i_state & I_NEW);
		inode->i_state |= I_FREEING;
		spin_unlock(&inode->i_lock);

		list_move(&inode->i_lru, &freeable);
		inodes_stat.nr_unused--;
	}
	if (current_is_kswapd())
		__count_vm_events(KSWAPD_INODESTEAL, reap);
	else
		__count_vm_events(PGINODESTEAL, reap);
	spin_unlock(&inode_lru_lock);

	dispose_list(&freeable);
	up_read(&iprune_sem);
}

/*
 * shrink_icache_memory() will attempt to reclaim some unused inodes. Here,
 * "unused" means that no dentries are referring to the inodes: the files are
 * not open and the dcache references to those inodes have already been
 * reclaimed.
 *
 * This function is passed the number of inodes to scan, and it returns the
 * total number of remaining possibly-reclaimable inodes.
 */
static int shrink_icache_memory(struct shrinker *shrink,
				struct shrink_control *sc)
{
	int nr = sc->nr_to_scan;
	gfp_t gfp_mask = sc->gfp_mask;

	if (nr) {
		/*
		 * Nasty deadlock avoidance. We may hold various FS locks,
		 * and we don't want to recurse into the FS that called us
		 * in clear_inode() and friends..
		 */
		if (!(gfp_mask & __GFP_FS))
			return -1;
		prune_icache(nr);
	}
	return (get_nr_inodes_unused() / 100) * sysctl_vfs_cache_pressure;
}

static struct shrinker icache_shrinker = {
	.shrink = shrink_icache_memory,
	.seeks = DEFAULT_SEEKS,
};

static void __wait_on_freeing_inode(struct inode *inode);
/*
 * Called with the inode_hash_lock held.
 */
static struct inode *find_inode(struct super_block *sb,
				struct hlist_head *head,
				int (*test)(struct inode *, void *),
				void *data)
{
	struct hlist_node *node;
	struct inode *inode = NULL;

repeat:
	hlist_for_each_entry(inode, node, head, i_hash) {
		spin_lock(&inode->i_lock);
		if (inode->i_sb != sb) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (!test(inode, data)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		return inode;
	}
	return NULL;
}

/*
 * find_inode_fast is the fast path version of find_inode, see the comment at
 * iget_locked for details.
 */
static struct inode *find_inode_fast(struct super_block *sb,
				struct hlist_head *head, unsigned long ino)
{
	struct hlist_node *node;
	struct inode *inode = NULL;

repeat:
	hlist_for_each_entry(inode, node, head, i_hash) {
		spin_lock(&inode->i_lock);
		if (inode->i_ino != ino) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (inode->i_sb != sb) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		return inode;
	}
	return NULL;
}

/*
 * Each cpu owns a range of LAST_INO_BATCH numbers.
 * 'shared_last_ino' is dirtied only once out of LAST_INO_BATCH allocations,
 * to renew the exhausted range.
 *
 * This does not significantly increase overflow rate because every CPU can
 * consume at most LAST_INO_BATCH-1 unused inode numbers. So there is
 * NR_CPUS*(LAST_INO_BATCH-1) wastage. At 4096 and 1024, this is ~0.1% of the
 * 2^32 range, and is a worst-case. Even a 50% wastage would only increase
 * overflow rate by 2x, which does not seem too significant.
 *
 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
 * error if st_ino won't fit in target struct field. Use 32bit counter
 * here to attempt to avoid that.
 */
#define LAST_INO_BATCH 1024
static DEFINE_PER_CPU(unsigned int, last_ino);

unsigned int get_next_ino(void)
{
	unsigned int *p = &get_cpu_var(last_ino);
	unsigned int res = *p;

#ifdef CONFIG_SMP
	if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
		static atomic_t shared_last_ino;
		int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);

		res = next - LAST_INO_BATCH;
	}
#endif

	*p = ++res;
	put_cpu_var(last_ino);
	return res;
}
EXPORT_SYMBOL(get_next_ino);
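
/*
 * Usage sketch (illustrative only): pseudo filesystems that have no
 * on-disk inode numbers typically assign one straight after new_inode(),
 * e.g.
 *
 *	inode = new_inode(sb);
 *	if (inode)
 *		inode->i_ino = get_next_ino();
 *
 * The helper only guarantees reasonable uniqueness within the 32-bit
 * range described in the comment above.
 */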

/**
 * new_inode - obtain an inode
 * @sb: superblock
 *
 * Allocates a new inode for given superblock. The default gfp_mask
 * for allocations related to inode->i_mapping is GFP_HIGHUSER_MOVABLE.
 * If HIGHMEM pages are unsuitable or it is known that pages allocated
 * for the page cache are not reclaimable or migratable,
 * mapping_set_gfp_mask() must be called with suitable flags on the
 * newly created inode's mapping
 *
 */
struct inode *new_inode(struct super_block *sb)
{
	struct inode *inode;

	spin_lock_prefetch(&inode_sb_list_lock);

	inode = alloc_inode(sb);
	if (inode) {
		spin_lock(&inode->i_lock);
		inode->i_state = 0;
		spin_unlock(&inode->i_lock);
		inode_sb_list_add(inode);
	}
	return inode;
}
EXPORT_SYMBOL(new_inode);
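
/*
 * Usage sketch (illustrative only): a filesystem whose page cache pages
 * must not come from highmem or be migrated would follow the advice in
 * the comment above, e.g.
 *
 *	inode = new_inode(sb);
 *	if (!inode)
 *		return -ENOMEM;
 *	mapping_set_gfp_mask(inode->i_mapping, GFP_USER);
 *
 * GFP_USER here is just one possible choice of flags.
 */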

/**
 * unlock_new_inode - clear the I_NEW state and wake up any waiters
 * @inode: new inode to unlock
 *
 * Called when the inode is fully initialised to clear the new state of the
 * inode and wake up anyone waiting for the inode to finish initialisation.
 */
void unlock_new_inode(struct inode *inode)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (S_ISDIR(inode->i_mode)) {
		struct file_system_type *type = inode->i_sb->s_type;

		/* Set new key only if filesystem hasn't already changed it */
		if (!lockdep_match_class(&inode->i_mutex,
		    &type->i_mutex_key)) {
			/*
			 * ensure nobody is actually holding i_mutex
			 */
			mutex_destroy(&inode->i_mutex);
			mutex_init(&inode->i_mutex);
			lockdep_set_class(&inode->i_mutex,
					  &type->i_mutex_dir_key);
		}
	}
#endif
	spin_lock(&inode->i_lock);
	WARN_ON(!(inode->i_state & I_NEW));
	inode->i_state &= ~I_NEW;
	wake_up_bit(&inode->i_state, __I_NEW);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(unlock_new_inode);

/**
 * iget5_locked - obtain an inode from a mounted file system
 * @sb: super block of file system
 * @hashval: hash value (usually inode number) to get
 * @test: callback used for comparisons between inodes
 * @set: callback used to initialize a new struct inode
 * @data: opaque data pointer to pass to @test and @set
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if present it is returned with an increased reference count. This is
 * a generalized version of iget_locked() for file systems where the inode
 * number is not sufficient for unique identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it locked,
 * hashed, and with the I_NEW flag set. The file system gets to fill it in
 * before unlocking it via unlock_new_inode().
 *
 * Note both @test and @set are called with the inode_hash_lock held, so can't
 * sleep.
 */
struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *),
		int (*set)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode(sb, head, test, data);
	spin_unlock(&inode_hash_lock);

	if (inode) {
		wait_on_inode(inode);
		return inode;
	}

	inode = alloc_inode(sb);
	if (inode) {
		struct inode *old;

		spin_lock(&inode_hash_lock);
		/* We released the lock, so.. */
		old = find_inode(sb, head, test, data);
		if (!old) {
			if (set(inode, data))
				goto set_failed;

			spin_lock(&inode->i_lock);
			inode->i_state = I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			inode_sb_list_add(inode);
			spin_unlock(&inode_hash_lock);

			/* Return the locked inode with I_NEW set, the
			 * caller is responsible for filling in the contents
			 */
			return inode;
		}

		/*
		 * Uhhuh, somebody else created the same inode under
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
		spin_unlock(&inode_hash_lock);
		destroy_inode(inode);
		inode = old;
		wait_on_inode(inode);
	}
	return inode;

set_failed:
	spin_unlock(&inode_hash_lock);
	destroy_inode(inode);
	return NULL;
}
EXPORT_SYMBOL(iget5_locked);
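
/*
 * Usage sketch (illustrative only; foo_test(), foo_set(), foo_read_inode()
 * and the args structure are hypothetical): a filesystem whose inodes are
 * not uniquely identified by i_ino alone supplies the callbacks itself,
 * e.g.
 *
 *	inode = iget5_locked(sb, hashval, foo_test, foo_set, &args);
 *	if (!inode)
 *		return ERR_PTR(-ENOMEM);
 *	if (inode->i_state & I_NEW) {
 *		foo_read_inode(inode, &args);
 *		unlock_new_inode(inode);
 *	}
 *
 * Remember the note above: @test and @set run under inode_hash_lock and
 * therefore must not sleep.
 */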

/**
 * iget_locked - obtain an inode from a mounted file system
 * @sb: super block of file system
 * @ino: inode number to get
 *
 * Search for the inode specified by @ino in the inode cache and if present
 * return it with an increased reference count. This is for file systems
 * where the inode number is sufficient for unique identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it locked,
 * hashed, and with the I_NEW flag set. The file system gets to fill it in
 * before unlocking it via unlock_new_inode().
 */
struct inode *iget_locked(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode_fast(sb, head, ino);
	spin_unlock(&inode_hash_lock);
	if (inode) {
		wait_on_inode(inode);
		return inode;
	}

	inode = alloc_inode(sb);
	if (inode) {
		struct inode *old;

		spin_lock(&inode_hash_lock);
		/* We released the lock, so.. */
		old = find_inode_fast(sb, head, ino);
		if (!old) {
			inode->i_ino = ino;
			spin_lock(&inode->i_lock);
			inode->i_state = I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			inode_sb_list_add(inode);
			spin_unlock(&inode_hash_lock);

			/* Return the locked inode with I_NEW set, the
			 * caller is responsible for filling in the contents
			 */
			return inode;
		}

		/*
		 * Uhhuh, somebody else created the same inode under
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
		spin_unlock(&inode_hash_lock);
		destroy_inode(inode);
		inode = old;
		wait_on_inode(inode);
	}
	return inode;
}
EXPORT_SYMBOL(iget_locked);
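
/*
 * Usage sketch (illustrative only; foo_fill_inode() is a hypothetical
 * helper that reads the on-disk inode): the common lookup pattern is
 *
 *	inode = iget_locked(sb, ino);
 *	if (!inode)
 *		return ERR_PTR(-ENOMEM);
 *	if (!(inode->i_state & I_NEW))
 *		return inode;		(already cached and initialised)
 *	foo_fill_inode(inode);
 *	unlock_new_inode(inode);
 *	return inode;
 */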

/*
 * search the inode cache for a matching inode number.
 * If we find one, then the inode number we are trying to
 * allocate is not unique and so we should not use it.
 *
 * Returns 1 if the inode number is unique, 0 if it is not.
 */
static int test_inode_iunique(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *b = inode_hashtable + hash(sb, ino);
	struct hlist_node *node;
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	hlist_for_each_entry(inode, node, b, i_hash) {
		if (inode->i_ino == ino && inode->i_sb == sb) {
			spin_unlock(&inode_hash_lock);
			return 0;
		}
	}
	spin_unlock(&inode_hash_lock);

	return 1;
}

/**
 * iunique - get a unique inode number
 * @sb: superblock
 * @max_reserved: highest reserved inode number
 *
 * Obtain an inode number that is unique on the system for a given
 * superblock. This is used by file systems that have no natural
 * permanent inode numbering system. An inode number is returned that
 * is higher than the reserved limit but unique.
 *
 * BUGS:
 * With a large number of inodes live on the file system this function
 * currently becomes quite slow.
 */
ino_t iunique(struct super_block *sb, ino_t max_reserved)
{
	/*
	 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
	 * error if st_ino won't fit in target struct field. Use 32bit counter
	 * here to attempt to avoid that.
	 */
	static DEFINE_SPINLOCK(iunique_lock);
	static unsigned int counter;
	ino_t res;

	spin_lock(&iunique_lock);
	do {
		if (counter <= max_reserved)
			counter = max_reserved + 1;
		res = counter++;
	} while (!test_inode_iunique(sb, res));
	spin_unlock(&iunique_lock);

	return res;
}
EXPORT_SYMBOL(iunique);
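
/*
 * Usage sketch (illustrative only): a filesystem with no permanent inode
 * numbering that reserves, say, the first 16 numbers for internal use
 * would do
 *
 *	inode->i_ino = iunique(sb, 16);
 *
 * As the BUGS note above says, this scans the hash and gets slow once a
 * large number of inodes are live on the file system.
 */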

struct inode *igrab(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) {
		__iget(inode);
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		/*
		 * Handle the case where s_op->clear_inode has not been
		 * called yet, and somebody is calling igrab
		 * while the inode is getting freed.
		 */
		inode = NULL;
	}
	return inode;
}
EXPORT_SYMBOL(igrab);

/**
 * ilookup5_nowait - search for an inode in the inode cache
 * @sb: super block of file system to search
 * @hashval: hash value (usually inode number) to search for
 * @test: callback used for comparisons between inodes
 * @data: opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode cache.
 * If the inode is in the cache, the inode is returned with an incremented
 * reference count.
 *
 * Note: I_NEW is not waited upon so you have to be very careful what you do
 * with the returned inode. You probably should be using ilookup5() instead.
 *
 * Note2: @test is called with the inode_hash_lock held, so can't sleep.
 */
struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode(sb, head, test, data);
	spin_unlock(&inode_hash_lock);

	return inode;
}
EXPORT_SYMBOL(ilookup5_nowait);
1152
1153/**
1154 * ilookup5 - search for an inode in the inode cache
1155 * @sb: super block of file system to search
1156 * @hashval: hash value (usually inode number) to search for
1157 * @test: callback used for comparisons between inodes
1158 * @data: opaque data pointer to pass to @test
1159 *
Christoph Hellwig0b2d0722011-03-23 15:03:28 -04001160 * Search for the inode specified by @hashval and @data in the inode cache,
1161 * and if the inode is in the cache, return the inode with an incremented
1162 * reference count. Waits on I_NEW before returning the inode.
Anton Altaparmakov88bd5122005-07-13 01:10:44 -07001163 * returned with an incremented reference count.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001164 *
Christoph Hellwig0b2d0722011-03-23 15:03:28 -04001165 * This is a generalized version of ilookup() for file systems where the
1166 * inode number is not sufficient for unique identification of an inode.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001167 *
Christoph Hellwig0b2d0722011-03-23 15:03:28 -04001168 * Note: @test is called with the inode_hash_lock held, so can't sleep.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001169 */
1170struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
1171 int (*test)(struct inode *, void *), void *data)
1172{
Christoph Hellwig0b2d0722011-03-23 15:03:28 -04001173 struct inode *inode = ilookup5_nowait(sb, hashval, test, data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001174
Christoph Hellwig0b2d0722011-03-23 15:03:28 -04001175 if (inode)
1176 wait_on_inode(inode);
1177 return inode;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001178}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001179EXPORT_SYMBOL(ilookup5);
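
/*
 * Illustrative sketch (not part of this file): a filesystem whose inode
 * number alone does not uniquely identify an inode would pair ilookup5()
 * with a private @test callback.  The foo_* names below are hypothetical;
 * only the ilookup5()/iput() calls are real.  Note that @test runs under
 * inode_hash_lock and therefore must not sleep.
 *
 *	struct foo_ikey {
 *		unsigned long	ino;
 *		u32		generation;
 *	};
 *
 *	static int foo_itest(struct inode *inode, void *data)
 *	{
 *		struct foo_ikey *key = data;
 *
 *		return inode->i_ino == key->ino &&
 *		       FOO_I(inode)->i_generation == key->generation;
 *	}
 *
 *	inode = ilookup5(sb, key.ino, foo_itest, &key);
 *	if (inode) {
 *		(use the inode, then drop the reference ilookup5() took)
 *		iput(inode);
 *	}
 */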
1180
1181/**
1182 * ilookup - search for an inode in the inode cache
1183 * @sb: super block of file system to search
1184 * @ino: inode number to search for
1185 *
Christoph Hellwig0b2d0722011-03-23 15:03:28 -04001186 * Search for the inode @ino in the inode cache, and if the inode is in the
1187 * cache, the inode is returned with an incremented reference count.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001188 */
1189struct inode *ilookup(struct super_block *sb, unsigned long ino)
1190{
1191 struct hlist_head *head = inode_hashtable + hash(sb, ino);
Christoph Hellwig0b2d0722011-03-23 15:03:28 -04001192 struct inode *inode;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001193
Christoph Hellwig0b2d0722011-03-23 15:03:28 -04001194 spin_lock(&inode_hash_lock);
1195 inode = find_inode_fast(sb, head, ino);
1196 spin_unlock(&inode_hash_lock);
1197
1198 if (inode)
1199 wait_on_inode(inode);
1200 return inode;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001201}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001202EXPORT_SYMBOL(ilookup);
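
/*
 * Illustrative sketch (hypothetical caller): when the inode number is
 * sufficient on its own, a cache-only lookup is simply
 *
 *	struct inode *inode = ilookup(sb, ino);
 *
 *	if (!inode)
 *		return -ENOENT;		(not cached; caller may read it in)
 *	(use the inode)
 *	iput(inode);
 */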
1203
Al Viro261bca82008-12-30 01:48:21 -05001204int insert_inode_locked(struct inode *inode)
1205{
1206 struct super_block *sb = inode->i_sb;
1207 ino_t ino = inode->i_ino;
1208 struct hlist_head *head = inode_hashtable + hash(sb, ino);
Al Viro261bca82008-12-30 01:48:21 -05001209
Al Viro261bca82008-12-30 01:48:21 -05001210 while (1) {
Al Viro72a43d62009-05-13 19:13:40 +01001211 struct hlist_node *node;
1212 struct inode *old = NULL;
Dave Chinner67a23c42011-03-22 22:23:42 +11001213 spin_lock(&inode_hash_lock);
Al Viro72a43d62009-05-13 19:13:40 +01001214 hlist_for_each_entry(old, node, head, i_hash) {
1215 if (old->i_ino != ino)
1216 continue;
1217 if (old->i_sb != sb)
1218 continue;
Dave Chinner250df6e2011-03-22 22:23:36 +11001219 spin_lock(&old->i_lock);
1220 if (old->i_state & (I_FREEING|I_WILL_FREE)) {
1221 spin_unlock(&old->i_lock);
Al Viro72a43d62009-05-13 19:13:40 +01001222 continue;
Dave Chinner250df6e2011-03-22 22:23:36 +11001223 }
Al Viro72a43d62009-05-13 19:13:40 +01001224 break;
1225 }
1226 if (likely(!node)) {
Dave Chinner250df6e2011-03-22 22:23:36 +11001227 spin_lock(&inode->i_lock);
1228 inode->i_state |= I_NEW;
Al Viro261bca82008-12-30 01:48:21 -05001229 hlist_add_head(&inode->i_hash, head);
Dave Chinner250df6e2011-03-22 22:23:36 +11001230 spin_unlock(&inode->i_lock);
Dave Chinner67a23c42011-03-22 22:23:42 +11001231 spin_unlock(&inode_hash_lock);
Al Viro261bca82008-12-30 01:48:21 -05001232 return 0;
1233 }
1234 __iget(old);
Dave Chinner250df6e2011-03-22 22:23:36 +11001235 spin_unlock(&old->i_lock);
Dave Chinner67a23c42011-03-22 22:23:42 +11001236 spin_unlock(&inode_hash_lock);
Al Viro261bca82008-12-30 01:48:21 -05001237 wait_on_inode(old);
Al Viro1d3382c2010-10-23 15:19:20 -04001238 if (unlikely(!inode_unhashed(old))) {
Al Viro261bca82008-12-30 01:48:21 -05001239 iput(old);
1240 return -EBUSY;
1241 }
1242 iput(old);
1243 }
1244}
Al Viro261bca82008-12-30 01:48:21 -05001245EXPORT_SYMBOL(insert_inode_locked);
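
/*
 * Illustrative sketch (hypothetical filesystem create path, not part of
 * this file): a filesystem allocating a fresh on-disk inode typically
 * hashes it with insert_inode_locked() before filling it in, and backs
 * out on -EBUSY.  Error handling is simplified here and fs-specific
 * setup is elided.
 *
 *	inode = new_inode(sb);
 *	if (!inode)
 *		return ERR_PTR(-ENOMEM);
 *	inode->i_ino = ino;			(chosen by the allocator)
 *	if (insert_inode_locked(inode) < 0) {
 *		iput(inode);			(someone else owns this number)
 *		return ERR_PTR(-EIO);
 *	}
 *	(initialise the rest of the inode)
 *	unlock_new_inode(inode);		(clears I_NEW, wakes waiters)
 */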
1246
1247int insert_inode_locked4(struct inode *inode, unsigned long hashval,
1248 int (*test)(struct inode *, void *), void *data)
1249{
1250 struct super_block *sb = inode->i_sb;
1251 struct hlist_head *head = inode_hashtable + hash(sb, hashval);
Al Viro261bca82008-12-30 01:48:21 -05001252
Al Viro261bca82008-12-30 01:48:21 -05001253 while (1) {
Al Viro72a43d62009-05-13 19:13:40 +01001254 struct hlist_node *node;
1255 struct inode *old = NULL;
1256
Dave Chinner67a23c42011-03-22 22:23:42 +11001257 spin_lock(&inode_hash_lock);
Al Viro72a43d62009-05-13 19:13:40 +01001258 hlist_for_each_entry(old, node, head, i_hash) {
1259 if (old->i_sb != sb)
1260 continue;
1261 if (!test(old, data))
1262 continue;
Dave Chinner250df6e2011-03-22 22:23:36 +11001263 spin_lock(&old->i_lock);
1264 if (old->i_state & (I_FREEING|I_WILL_FREE)) {
1265 spin_unlock(&old->i_lock);
Al Viro72a43d62009-05-13 19:13:40 +01001266 continue;
Dave Chinner250df6e2011-03-22 22:23:36 +11001267 }
Al Viro72a43d62009-05-13 19:13:40 +01001268 break;
1269 }
1270 if (likely(!node)) {
Dave Chinner250df6e2011-03-22 22:23:36 +11001271 spin_lock(&inode->i_lock);
1272 inode->i_state |= I_NEW;
Al Viro261bca82008-12-30 01:48:21 -05001273 hlist_add_head(&inode->i_hash, head);
Dave Chinner250df6e2011-03-22 22:23:36 +11001274 spin_unlock(&inode->i_lock);
Dave Chinner67a23c42011-03-22 22:23:42 +11001275 spin_unlock(&inode_hash_lock);
Al Viro261bca82008-12-30 01:48:21 -05001276 return 0;
1277 }
1278 __iget(old);
Dave Chinner250df6e2011-03-22 22:23:36 +11001279 spin_unlock(&old->i_lock);
Dave Chinner67a23c42011-03-22 22:23:42 +11001280 spin_unlock(&inode_hash_lock);
Al Viro261bca82008-12-30 01:48:21 -05001281 wait_on_inode(old);
Al Viro1d3382c2010-10-23 15:19:20 -04001282 if (unlikely(!inode_unhashed(old))) {
Al Viro261bca82008-12-30 01:48:21 -05001283 iput(old);
1284 return -EBUSY;
1285 }
1286 iput(old);
1287 }
1288}
Al Viro261bca82008-12-30 01:48:21 -05001289EXPORT_SYMBOL(insert_inode_locked4);
1290
Linus Torvalds1da177e2005-04-16 15:20:36 -07001291
Al Viro45321ac2010-06-07 13:43:19 -04001292int generic_delete_inode(struct inode *inode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001293{
Al Viro45321ac2010-06-07 13:43:19 -04001294 return 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001295}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001296EXPORT_SYMBOL(generic_delete_inode);
1297
Al Viro45321ac2010-06-07 13:43:19 -04001298/*
1299 * Normal UNIX filesystem behaviour: delete the
1300 * inode when the usage count drops to zero, and
1301 * i_nlink is zero.
Jan Kara22fe40422009-09-18 13:05:44 -07001302 */
Al Viro45321ac2010-06-07 13:43:19 -04001303int generic_drop_inode(struct inode *inode)
1304{
Al Viro1d3382c2010-10-23 15:19:20 -04001305 return !inode->i_nlink || inode_unhashed(inode);
Al Viro45321ac2010-06-07 13:43:19 -04001306}
1307EXPORT_SYMBOL_GPL(generic_drop_inode);
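
/*
 * Illustrative sketch (hypothetical): a filesystem that never wants to
 * cache unreferenced inodes can point ->drop_inode at
 * generic_delete_inode() in its super_operations, so that iput_final()
 * below always evicts; leaving ->drop_inode NULL selects the default
 * generic_drop_inode() behaviour instead.
 *
 *	static const struct super_operations foo_sops = {
 *		.drop_inode	= generic_delete_inode,
 *		(other methods)
 *	};
 */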
1308
1309/*
1310 * Called when we're dropping the last reference
1311 * to an inode.
1312 *
1313 * Call the FS "drop_inode()" function, defaulting to
1314 * the legacy UNIX filesystem behaviour. If it tells
1315 * us to evict the inode, do so.  Otherwise, retain the inode
1316 * in the cache if the fs is alive, and sync and evict it if the
1317 * fs is shutting down.
1318 */
1319static void iput_final(struct inode *inode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001320{
1321 struct super_block *sb = inode->i_sb;
Al Viro45321ac2010-06-07 13:43:19 -04001322 const struct super_operations *op = inode->i_sb->s_op;
1323 int drop;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001324
Dave Chinner250df6e2011-03-22 22:23:36 +11001325 WARN_ON(inode->i_state & I_NEW);
1326
Al Viro45321ac2010-06-07 13:43:19 -04001327 if (op && op->drop_inode)
1328 drop = op->drop_inode(inode);
1329 else
1330 drop = generic_drop_inode(inode);
1331
Dave Chinnerb2b2af82011-03-22 22:23:37 +11001332 if (!drop && (sb->s_flags & MS_ACTIVE)) {
1333 inode->i_state |= I_REFERENCED;
1334 if (!(inode->i_state & (I_DIRTY|I_SYNC)))
1335 inode_lru_list_add(inode);
1336 spin_unlock(&inode->i_lock);
Dave Chinnerb2b2af82011-03-22 22:23:37 +11001337 return;
1338 }
1339
Al Viro45321ac2010-06-07 13:43:19 -04001340 if (!drop) {
Alexander Viro991114c2005-06-23 00:09:01 -07001341 inode->i_state |= I_WILL_FREE;
Dave Chinner250df6e2011-03-22 22:23:36 +11001342 spin_unlock(&inode->i_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001343 write_inode_now(inode, 1);
Dave Chinner250df6e2011-03-22 22:23:36 +11001344 spin_lock(&inode->i_lock);
Nick Piggin7ef0d732009-03-12 14:31:38 -07001345 WARN_ON(inode->i_state & I_NEW);
Alexander Viro991114c2005-06-23 00:09:01 -07001346 inode->i_state &= ~I_WILL_FREE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001347 }
Nick Piggin7ccf19a2010-10-21 11:49:30 +11001348
Alexander Viro991114c2005-06-23 00:09:01 -07001349 inode->i_state |= I_FREEING;
Nick Piggin9e38d862010-10-23 06:55:17 -04001350 inode_lru_list_del(inode);
Dave Chinner250df6e2011-03-22 22:23:36 +11001351 spin_unlock(&inode->i_lock);
Dave Chinnerb2b2af82011-03-22 22:23:37 +11001352
1353 evict(inode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001354}
1355
Linus Torvalds1da177e2005-04-16 15:20:36 -07001356/**
Manish Katiyar6b3304b2009-03-31 19:35:54 +05301357 * iput - put an inode
Linus Torvalds1da177e2005-04-16 15:20:36 -07001358 * @inode: inode to put
1359 *
1360 * Puts an inode, dropping its usage count. If the inode use count hits
1361 * zero, the inode is then freed and may also be destroyed.
1362 *
1363 * Consequently, iput() can sleep.
1364 */
1365void iput(struct inode *inode)
1366{
1367 if (inode) {
Al Viroa4ffdde2010-06-02 17:38:30 -04001368 BUG_ON(inode->i_state & I_CLEAR);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001369
Dave Chinnerf283c862011-03-22 22:23:39 +11001370 if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001371 iput_final(inode);
1372 }
1373}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001374EXPORT_SYMBOL(iput);
1375
1376/**
1377 * bmap - find a block number in a file
1378 * @inode: inode of file
1379 * @block: block to find
1380 *
1381 * Returns the block number on the device holding the inode that
1382 * is the disk block number for the block of the file requested.
1383 * That is, if asked for block 4 of inode 1, the function will return the
Manish Katiyar6b3304b2009-03-31 19:35:54 +05301384 * disk block, relative to the start of the disk, that holds that block
Linus Torvalds1da177e2005-04-16 15:20:36 -07001385 * of the file.
1386 */
Manish Katiyar6b3304b2009-03-31 19:35:54 +05301387sector_t bmap(struct inode *inode, sector_t block)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001388{
1389 sector_t res = 0;
1390 if (inode->i_mapping->a_ops->bmap)
1391 res = inode->i_mapping->a_ops->bmap(inode->i_mapping, block);
1392 return res;
1393}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001394EXPORT_SYMBOL(bmap);
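
/*
 * Illustrative sketch (hypothetical caller): mapping logical block 4 of
 * a file to its on-disk block.  A result of 0 means "no mapping", either
 * because the filesystem provides no ->bmap method or, typically,
 * because the block is a hole.
 *
 *	sector_t phys = bmap(inode, 4);
 *
 *	if (!phys)
 *		(block 4 is unmapped)
 */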
1395
Matthew Garrett11ff6f052009-03-26 17:32:14 +00001396/*
1397 * With relative atime, only update atime if the previous atime is
1398 * earlier than either the ctime or mtime or if at least a day has
1399 * passed since the last atime update.
1400 */
1401static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,
1402 struct timespec now)
1403{
1404
1405 if (!(mnt->mnt_flags & MNT_RELATIME))
1406 return 1;
1407 /*
1408 * Is mtime younger than atime? If yes, update atime:
1409 */
1410 if (timespec_compare(&inode->i_mtime, &inode->i_atime) >= 0)
1411 return 1;
1412 /*
1413 * Is ctime younger than atime? If yes, update atime:
1414 */
1415 if (timespec_compare(&inode->i_ctime, &inode->i_atime) >= 0)
1416 return 1;
1417
1418 /*
1419 * Is the previous atime value older than a day? If yes,
1420 * update atime:
1421 */
1422 if ((long)(now.tv_sec - inode->i_atime.tv_sec) >= 24*60*60)
1423 return 1;
1424 /*
1425 * Good, we can skip the atime update:
1426 */
1427 return 0;
1428}
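
/*
 * Worked example of the relatime rule above (illustrative numbers, in
 * seconds): with atime = 1000, mtime = 900, ctime = 800 and now = 1500,
 * neither timespec_compare() test fires and less than a day has passed,
 * so relatime_need_update() returns 0 and the atime update is skipped.
 * If a write later makes mtime 2000 (younger than atime), or once now
 * reaches atime + 24*60*60, the next access updates atime again.
 */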
1429
Linus Torvalds1da177e2005-04-16 15:20:36 -07001430/**
Christoph Hellwig869243a2006-01-09 20:52:03 -08001431 * touch_atime - update the access time
1432 * @mnt: mount the inode is accessed on
Martin Waitz7045f372006-02-01 03:06:57 -08001433 * @dentry: dentry accessed
Linus Torvalds1da177e2005-04-16 15:20:36 -07001434 *
1435 * Update the accessed time on an inode and mark it for writeback.
1436 * This function automatically handles read-only file systems and media,
1437 * as well as the "noatime" flag and inode-specific "noatime" markers.
1438 */
Christoph Hellwig869243a2006-01-09 20:52:03 -08001439void touch_atime(struct vfsmount *mnt, struct dentry *dentry)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001440{
Christoph Hellwig869243a2006-01-09 20:52:03 -08001441 struct inode *inode = dentry->d_inode;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001442 struct timespec now;
1443
Andrew Mortonb2276132006-12-13 00:34:33 -08001444 if (inode->i_flags & S_NOATIME)
Andi Kleenb12536c2009-09-18 13:05:47 -07001445 return;
Eric Dumazet37756ce2007-02-10 01:44:49 -08001446 if (IS_NOATIME(inode))
Andi Kleenb12536c2009-09-18 13:05:47 -07001447 return;
Andrew Mortonb2276132006-12-13 00:34:33 -08001448 if ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))
Andi Kleenb12536c2009-09-18 13:05:47 -07001449 return;
Christoph Hellwigfc33a7b2006-01-09 20:52:17 -08001450
Dave Hansencdb70f32008-02-15 14:37:41 -08001451 if (mnt->mnt_flags & MNT_NOATIME)
Andi Kleenb12536c2009-09-18 13:05:47 -07001452 return;
Dave Hansencdb70f32008-02-15 14:37:41 -08001453 if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
Andi Kleenb12536c2009-09-18 13:05:47 -07001454 return;
Christoph Hellwigfc33a7b2006-01-09 20:52:17 -08001455
Linus Torvalds1da177e2005-04-16 15:20:36 -07001456 now = current_fs_time(inode->i_sb);
Matthew Garrett11ff6f052009-03-26 17:32:14 +00001457
1458 if (!relatime_need_update(mnt, inode, now))
Andi Kleenb12536c2009-09-18 13:05:47 -07001459 return;
Matthew Garrett11ff6f052009-03-26 17:32:14 +00001460
Valerie Henson47ae32d2006-12-13 00:34:34 -08001461 if (timespec_equal(&inode->i_atime, &now))
Andi Kleenb12536c2009-09-18 13:05:47 -07001462 return;
1463
1464 if (mnt_want_write(mnt))
1465 return;
Valerie Henson47ae32d2006-12-13 00:34:34 -08001466
1467 inode->i_atime = now;
1468 mark_inode_dirty_sync(inode);
Dave Hansencdb70f32008-02-15 14:37:41 -08001469 mnt_drop_write(mnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001470}
Christoph Hellwig869243a2006-01-09 20:52:03 -08001471EXPORT_SYMBOL(touch_atime);
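
/*
 * Illustrative sketch (hypothetical read path): callers pass the mount
 * and dentry the access came through, so that per-mount flags such as
 * MNT_NOATIME and MNT_RELATIME can be honoured; helpers such as
 * file_accessed() essentially do
 *
 *	touch_atime(file->f_path.mnt, file->f_path.dentry);
 */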
Linus Torvalds1da177e2005-04-16 15:20:36 -07001472
1473/**
Christoph Hellwig870f4812006-01-09 20:52:01 -08001474 * file_update_time - update mtime and ctime time
1475 * @file: file accessed
Linus Torvalds1da177e2005-04-16 15:20:36 -07001476 *
Christoph Hellwig870f4812006-01-09 20:52:01 -08001477 * Update the mtime and ctime members of an inode and mark the inode
1478 * for writeback. Note that this function is meant exclusively for
1479 * usage in the file write path of filesystems, and filesystems may
1480 * choose to explicitly ignore updates via this function with the
Wolfram Sang2eadfc02009-04-02 15:23:37 +02001481 * S_NOCMTIME inode flag, e.g. for network filesystems where these
Christoph Hellwig870f4812006-01-09 20:52:01 -08001482 * timestamps are handled by the server.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001483 */
1484
Christoph Hellwig870f4812006-01-09 20:52:01 -08001485void file_update_time(struct file *file)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001486{
Josef "Jeff" Sipek0f7fc9e2006-12-08 02:36:35 -08001487 struct inode *inode = file->f_path.dentry->d_inode;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001488 struct timespec now;
Andi Kleence06e0b2009-09-18 13:05:48 -07001489 enum { S_MTIME = 1, S_CTIME = 2, S_VERSION = 4 } sync_it = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001490
Andi Kleence06e0b2009-09-18 13:05:48 -07001491 /* First try to exhaust all avenues to not sync */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001492 if (IS_NOCMTIME(inode))
1493 return;
Dave Hansen20ddee22008-02-15 14:37:43 -08001494
Andi Kleence06e0b2009-09-18 13:05:48 -07001495 now = current_fs_time(inode->i_sb);
1496 if (!timespec_equal(&inode->i_mtime, &now))
1497 sync_it = S_MTIME;
1498
1499 if (!timespec_equal(&inode->i_ctime, &now))
1500 sync_it |= S_CTIME;
1501
1502 if (IS_I_VERSION(inode))
1503 sync_it |= S_VERSION;
1504
1505 if (!sync_it)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001506 return;
1507
Andi Kleence06e0b2009-09-18 13:05:48 -07001508 /* Finally allowed to write? Takes lock. */
1509 if (mnt_want_write_file(file))
1510 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001511
Andi Kleence06e0b2009-09-18 13:05:48 -07001512 /* Only change inode inside the lock region */
1513 if (sync_it & S_VERSION)
Jean Noel Cordenner7a224222008-01-28 23:58:27 -05001514 inode_inc_iversion(inode);
Andi Kleence06e0b2009-09-18 13:05:48 -07001515 if (sync_it & S_CTIME)
1516 inode->i_ctime = now;
1517 if (sync_it & S_MTIME)
1518 inode->i_mtime = now;
1519 mark_inode_dirty_sync(inode);
Dave Hansen20ddee22008-02-15 14:37:43 -08001520 mnt_drop_write(file->f_path.mnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001521}
Christoph Hellwig870f4812006-01-09 20:52:01 -08001522EXPORT_SYMBOL(file_update_time);
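
/*
 * Illustrative sketch (hypothetical write path): a filesystem's write
 * implementation typically stamps the inode just before copying data,
 * under i_mutex, e.g.
 *
 *	mutex_lock(&inode->i_mutex);
 *	file_update_time(file);
 *	(copy data into the page cache)
 *	mutex_unlock(&inode->i_mutex);
 */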
Linus Torvalds1da177e2005-04-16 15:20:36 -07001523
1524int inode_needs_sync(struct inode *inode)
1525{
1526 if (IS_SYNC(inode))
1527 return 1;
1528 if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
1529 return 1;
1530 return 0;
1531}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001532EXPORT_SYMBOL(inode_needs_sync);
1533
Linus Torvalds1da177e2005-04-16 15:20:36 -07001534int inode_wait(void *word)
1535{
1536 schedule();
1537 return 0;
1538}
Stephen Rothwelld44dab82008-11-10 17:06:05 +11001539EXPORT_SYMBOL(inode_wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001540
1541/*
Miklos Szeredi168a9fd2005-07-12 13:58:10 -07001542 * If we try to find an inode in the inode hash while it is being
1543 * deleted, we have to wait until the filesystem completes its
1544 * deletion before reporting that it isn't found. This function waits
1545 * until the deletion _might_ have completed. Callers are responsible
1546 * for rechecking the inode state.
1547 *
Christoph Hellwigeaff8072009-12-17 14:25:01 +01001548 * It doesn't matter if I_NEW is not set initially, a call to
Dave Chinner250df6e2011-03-22 22:23:36 +11001549 * wake_up_bit(&inode->i_state, __I_NEW) after removing from the hash list
1550 * will DTRT.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001551 */
1552static void __wait_on_freeing_inode(struct inode *inode)
1553{
1554 wait_queue_head_t *wq;
Christoph Hellwigeaff8072009-12-17 14:25:01 +01001555 DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
1556 wq = bit_waitqueue(&inode->i_state, __I_NEW);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001557 prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
Dave Chinner250df6e2011-03-22 22:23:36 +11001558 spin_unlock(&inode->i_lock);
Dave Chinner67a23c42011-03-22 22:23:42 +11001559 spin_unlock(&inode_hash_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001560 schedule();
1561 finish_wait(wq, &wait.wait);
Dave Chinner67a23c42011-03-22 22:23:42 +11001562 spin_lock(&inode_hash_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001563}
1564
Linus Torvalds1da177e2005-04-16 15:20:36 -07001565static __initdata unsigned long ihash_entries;
1566static int __init set_ihash_entries(char *str)
1567{
1568 if (!str)
1569 return 0;
1570 ihash_entries = simple_strtoul(str, &str, 0);
1571 return 1;
1572}
1573__setup("ihash_entries=", set_ihash_entries);
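
/*
 * Example: the hash table size chosen below can be overridden on the
 * kernel command line, e.g.
 *
 *	ihash_entries=131072
 *
 * which set_ihash_entries() parses with simple_strtoul() before the
 * table is allocated in inode_init_early()/inode_init().
 */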
1574
1575/*
1576 * Initialize the waitqueues and inode hash table.
1577 */
1578void __init inode_init_early(void)
1579{
1580 int loop;
1581
1582 /* If hashes are distributed across NUMA nodes, defer
1583 * hash allocation until vmalloc space is available.
1584 */
1585 if (hashdist)
1586 return;
1587
1588 inode_hashtable =
1589 alloc_large_system_hash("Inode-cache",
1590 sizeof(struct hlist_head),
1591 ihash_entries,
1592 14,
1593 HASH_EARLY,
1594 &i_hash_shift,
1595 &i_hash_mask,
1596 0);
1597
1598 for (loop = 0; loop < (1 << i_hash_shift); loop++)
1599 INIT_HLIST_HEAD(&inode_hashtable[loop]);
1600}
1601
Denis Cheng74bf17c2007-10-16 23:26:30 -07001602void __init inode_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001603{
1604 int loop;
1605
1606 /* inode slab cache */
Paul Jacksonb0196002006-03-24 03:16:09 -08001607 inode_cachep = kmem_cache_create("inode_cache",
1608 sizeof(struct inode),
1609 0,
1610 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
1611 SLAB_MEM_SPREAD),
Paul Mundt20c2df82007-07-20 10:11:58 +09001612 init_once);
Rusty Russell8e1f9362007-07-17 04:03:17 -07001613 register_shrinker(&icache_shrinker);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001614
1615 /* Hash may have been set up in inode_init_early */
1616 if (!hashdist)
1617 return;
1618
1619 inode_hashtable =
1620 alloc_large_system_hash("Inode-cache",
1621 sizeof(struct hlist_head),
1622 ihash_entries,
1623 14,
1624 0,
1625 &i_hash_shift,
1626 &i_hash_mask,
1627 0);
1628
1629 for (loop = 0; loop < (1 << i_hash_shift); loop++)
1630 INIT_HLIST_HEAD(&inode_hashtable[loop]);
1631}
1632
1633void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
1634{
1635 inode->i_mode = mode;
1636 if (S_ISCHR(mode)) {
1637 inode->i_fop = &def_chr_fops;
1638 inode->i_rdev = rdev;
1639 } else if (S_ISBLK(mode)) {
1640 inode->i_fop = &def_blk_fops;
1641 inode->i_rdev = rdev;
1642 } else if (S_ISFIFO(mode))
1643 inode->i_fop = &def_fifo_fops;
1644 else if (S_ISSOCK(mode))
1645 inode->i_fop = &bad_sock_fops;
1646 else
Manish Katiyaraf0d9ae2009-09-18 13:05:43 -07001647 printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o) for"
1648 " inode %s:%lu\n", mode, inode->i_sb->s_id,
1649 inode->i_ino);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001650}
1651EXPORT_SYMBOL(init_special_inode);
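
/*
 * Illustrative sketch (hypothetical ->mknod implementation, not part of
 * this file): device, FIFO and socket inodes get their file operations
 * wired up via init_special_inode().  foo_new_inode() is made up;
 * d_instantiate() is the usual way to attach the inode to the dentry.
 *
 *	static int foo_mknod(struct inode *dir, struct dentry *dentry,
 *			     int mode, dev_t rdev)
 *	{
 *		struct inode *inode = foo_new_inode(dir, mode);
 *
 *		if (IS_ERR(inode))
 *			return PTR_ERR(inode);
 *		init_special_inode(inode, inode->i_mode, rdev);
 *		d_instantiate(dentry, inode);
 *		return 0;
 *	}
 */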
Dmitry Monakhova1bd1202010-03-04 17:29:14 +03001652
1653/**
Ben Hutchingseaae6682011-02-15 12:48:09 +00001654 * inode_init_owner - Init uid, gid and mode for a new inode according to POSIX standards
Dmitry Monakhova1bd1202010-03-04 17:29:14 +03001655 * @inode: New inode
1656 * @dir: Directory inode
1657 * @mode: mode of the new inode
1658 */
1659void inode_init_owner(struct inode *inode, const struct inode *dir,
1660 mode_t mode)
1661{
1662 inode->i_uid = current_fsuid();
1663 if (dir && dir->i_mode & S_ISGID) {
1664 inode->i_gid = dir->i_gid;
1665 if (S_ISDIR(mode))
1666 mode |= S_ISGID;
1667 } else
1668 inode->i_gid = current_fsgid();
1669 inode->i_mode = mode;
1670}
1671EXPORT_SYMBOL(inode_init_owner);
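
/*
 * Illustrative sketch (hypothetical create path): filesystems call
 * inode_init_owner() right after allocating the in-core inode so that
 * setgid directories are handled consistently:
 *
 *	inode = new_inode(sb);
 *	if (!inode)
 *		return -ENOMEM;
 *	inode_init_owner(inode, dir, mode);
 *	(i_uid, i_gid and i_mode are now set; fill in the rest)
 */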
Serge E. Hallyne795b712011-03-23 16:43:25 -07001672
Serge E. Hallyn2e149672011-03-23 16:43:26 -07001673/**
1674 * inode_owner_or_capable - check current task permissions to inode
1675 * @inode: inode being checked
1676 *
1677 * Return true if current either has CAP_FOWNER in the inode's user
1678 * namespace, or owns the file.
Serge E. Hallyne795b712011-03-23 16:43:25 -07001679 */
Serge E. Hallyn2e149672011-03-23 16:43:26 -07001680bool inode_owner_or_capable(const struct inode *inode)
Serge E. Hallyne795b712011-03-23 16:43:25 -07001681{
1682 struct user_namespace *ns = inode_userns(inode);
1683
1684 if (current_user_ns() == ns && current_fsuid() == inode->i_uid)
1685 return true;
1686 if (ns_capable(ns, CAP_FOWNER))
1687 return true;
1688 return false;
1689}
Serge E. Hallyn2e149672011-03-23 16:43:26 -07001690EXPORT_SYMBOL(inode_owner_or_capable);
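
/*
 * Illustrative sketch (hypothetical ioctl handler): a typical use is
 * gating attribute changes, such as an FS_IOC_SETFLAGS-style ioctl, on
 * file ownership:
 *
 *	if (!inode_owner_or_capable(inode))
 *		return -EPERM;
 *	(apply the new flags)
 */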