/*
 * (C) 1997 Linus Torvalds
 * (C) 1999 Andrea Arcangeli <andrea@suse.de> (dynamic inode allocation)
 */
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/backing-dev.h>
#include <linux/hash.h>
#include <linux/swap.h>
#include <linux/security.h>
#include <linux/cdev.h>
#include <linux/bootmem.h>
#include <linux/fsnotify.h>
#include <linux/mount.h>
#include <linux/posix_acl.h>
#include <linux/prefetch.h>
#include <linux/buffer_head.h> /* for inode_has_buffers */
#include <linux/ratelimit.h>
#include <linux/list_lru.h>
#include <trace/events/writeback.h>
#include "internal.h"

/*
 * Inode locking rules:
 *
 * inode->i_lock protects:
 *   inode->i_state, inode->i_hash, __iget()
 * Inode LRU list locks protect:
 *   inode->i_sb->s_inode_lru, inode->i_lru
 * inode->i_sb->s_inode_list_lock protects:
 *   inode->i_sb->s_inodes, inode->i_sb_list
 * bdi->wb.list_lock protects:
 *   bdi->wb.b_{dirty,io,more_io,dirty_time}, inode->i_io_list
 * inode_hash_lock protects:
 *   inode_hashtable, inode->i_hash
 *
 * Lock ordering:
 *
 * inode->i_sb->s_inode_list_lock
 *   inode->i_lock
 *     Inode LRU list locks
 *
 * bdi->wb.list_lock
 *   inode->i_lock
 *
 * inode_hash_lock
 *   inode->i_sb->s_inode_list_lock
 *   inode->i_lock
 *
 * iunique_lock
 *   inode_hash_lock
 */
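
/*
 * For illustration only (this is not a helper in this file): code that
 * walks the per-sb inode list and then looks at per-inode state takes
 * the locks in the order documented above, e.g.
 *
 *	spin_lock(&sb->s_inode_list_lock);
 *	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
 *		spin_lock(&inode->i_lock);
 *		...
 *		spin_unlock(&inode->i_lock);
 *	}
 *	spin_unlock(&sb->s_inode_list_lock);
 *
 * evict_inodes() and invalidate_inodes() below follow exactly this
 * pattern.
 */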

static unsigned int i_hash_mask __read_mostly;
static unsigned int i_hash_shift __read_mostly;
static struct hlist_head *inode_hashtable __read_mostly;
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);

/*
 * Empty aops. Can be used for the cases where the user does not
 * define any of the address_space operations.
 */
const struct address_space_operations empty_aops = {
};
EXPORT_SYMBOL(empty_aops);

/*
 * Statistics gathering..
 */
struct inodes_stat_t inodes_stat;

static DEFINE_PER_CPU(unsigned long, nr_inodes);
static DEFINE_PER_CPU(unsigned long, nr_unused);

static struct kmem_cache *inode_cachep __read_mostly;

static long get_nr_inodes(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_inodes, i);
	return sum < 0 ? 0 : sum;
}

static inline long get_nr_inodes_unused(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_unused, i);
	return sum < 0 ? 0 : sum;
}

long get_nr_dirty_inodes(void)
{
	/* not actually dirty inodes, but a wild approximation */
	long nr_dirty = get_nr_inodes() - get_nr_inodes_unused();
	return nr_dirty > 0 ? nr_dirty : 0;
}

/*
 * Handle nr_inode sysctl
 */
#ifdef CONFIG_SYSCTL
int proc_nr_inodes(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp, loff_t *ppos)
{
	inodes_stat.nr_inodes = get_nr_inodes();
	inodes_stat.nr_unused = get_nr_inodes_unused();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#endif

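/*
 * no_open() is installed below as the default ->open in inode->i_fop,
 * so that opening an inode whose file operations were never set up by
 * the filesystem fails with -ENXIO instead of hitting a NULL i_fop.
 */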
static int no_open(struct inode *inode, struct file *file)
{
	return -ENXIO;
}

/**
 * inode_init_always - perform inode structure initialisation
 * @sb: superblock inode belongs to
 * @inode: inode to initialise
 *
 * These are initializations that need to be done on every inode
 * allocation as the fields are not initialised by slab allocation.
 */
int inode_init_always(struct super_block *sb, struct inode *inode)
{
	static const struct inode_operations empty_iops;
	static const struct file_operations no_open_fops = {.open = no_open};
	struct address_space *const mapping = &inode->i_data;

	inode->i_sb = sb;
	inode->i_blkbits = sb->s_blocksize_bits;
	inode->i_flags = 0;
	atomic_set(&inode->i_count, 1);
	inode->i_op = &empty_iops;
	inode->i_fop = &no_open_fops;
	inode->__i_nlink = 1;
	inode->i_opflags = 0;
	i_uid_write(inode, 0);
	i_gid_write(inode, 0);
	atomic_set(&inode->i_writecount, 0);
	inode->i_size = 0;
	inode->i_blocks = 0;
	inode->i_bytes = 0;
	inode->i_generation = 0;
	inode->i_pipe = NULL;
	inode->i_bdev = NULL;
	inode->i_cdev = NULL;
	inode->i_link = NULL;
	inode->i_rdev = 0;
	inode->dirtied_when = 0;

	if (security_inode_alloc(inode))
		goto out;
	spin_lock_init(&inode->i_lock);
	lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);

	mutex_init(&inode->i_mutex);
	lockdep_set_class(&inode->i_mutex, &sb->s_type->i_mutex_key);

	atomic_set(&inode->i_dio_count, 0);

	mapping->a_ops = &empty_aops;
	mapping->host = inode;
	mapping->flags = 0;
	atomic_set(&mapping->i_mmap_writable, 0);
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
	mapping->private_data = NULL;
	mapping->writeback_index = 0;
	inode->i_private = NULL;
	inode->i_mapping = mapping;
	INIT_HLIST_HEAD(&inode->i_dentry);	/* buggered by rcu freeing */
#ifdef CONFIG_FS_POSIX_ACL
	inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;
#endif

#ifdef CONFIG_FSNOTIFY
	inode->i_fsnotify_mask = 0;
#endif
	inode->i_flctx = NULL;
	this_cpu_inc(nr_inodes);

	return 0;
out:
	return -ENOMEM;
}
EXPORT_SYMBOL(inode_init_always);

static struct inode *alloc_inode(struct super_block *sb)
{
	struct inode *inode;

	if (sb->s_op->alloc_inode)
		inode = sb->s_op->alloc_inode(sb);
	else
		inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);

	if (!inode)
		return NULL;

	if (unlikely(inode_init_always(sb, inode))) {
		if (inode->i_sb->s_op->destroy_inode)
			inode->i_sb->s_op->destroy_inode(inode);
		else
			kmem_cache_free(inode_cachep, inode);
		return NULL;
	}

	return inode;
}

void free_inode_nonrcu(struct inode *inode)
{
	kmem_cache_free(inode_cachep, inode);
}
EXPORT_SYMBOL(free_inode_nonrcu);

void __destroy_inode(struct inode *inode)
{
	BUG_ON(inode_has_buffers(inode));
	inode_detach_wb(inode);
	security_inode_free(inode);
	fsnotify_inode_delete(inode);
	locks_free_lock_context(inode);
	if (!inode->i_nlink) {
		WARN_ON(atomic_long_read(&inode->i_sb->s_remove_count) == 0);
		atomic_long_dec(&inode->i_sb->s_remove_count);
	}

#ifdef CONFIG_FS_POSIX_ACL
	if (inode->i_acl && inode->i_acl != ACL_NOT_CACHED)
		posix_acl_release(inode->i_acl);
	if (inode->i_default_acl && inode->i_default_acl != ACL_NOT_CACHED)
		posix_acl_release(inode->i_default_acl);
#endif
	this_cpu_dec(nr_inodes);
}
EXPORT_SYMBOL(__destroy_inode);

static void i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(inode_cachep, inode);
}

static void destroy_inode(struct inode *inode)
{
	BUG_ON(!list_empty(&inode->i_lru));
	__destroy_inode(inode);
	if (inode->i_sb->s_op->destroy_inode)
		inode->i_sb->s_op->destroy_inode(inode);
	else
		call_rcu(&inode->i_rcu, i_callback);
}

/**
 * drop_nlink - directly drop an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink. In cases
 * where we are attempting to track writes to the
 * filesystem, a decrement to zero means an imminent
 * write when the file is truncated and actually unlinked
 * on the filesystem.
 */
void drop_nlink(struct inode *inode)
{
	WARN_ON(inode->i_nlink == 0);
	inode->__i_nlink--;
	if (!inode->i_nlink)
		atomic_long_inc(&inode->i_sb->s_remove_count);
}
EXPORT_SYMBOL(drop_nlink);

/**
 * clear_nlink - directly zero an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink. See
 * drop_nlink() for why we care about i_nlink hitting zero.
 */
void clear_nlink(struct inode *inode)
{
	if (inode->i_nlink) {
		inode->__i_nlink = 0;
		atomic_long_inc(&inode->i_sb->s_remove_count);
	}
}
EXPORT_SYMBOL(clear_nlink);

/**
 * set_nlink - directly set an inode's link count
 * @inode: inode
 * @nlink: new nlink (should be non-zero)
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.
 */
void set_nlink(struct inode *inode, unsigned int nlink)
{
	if (!nlink) {
		clear_nlink(inode);
	} else {
		/* Yes, some filesystems do change nlink from zero to one */
		if (inode->i_nlink == 0)
			atomic_long_dec(&inode->i_sb->s_remove_count);

		inode->__i_nlink = nlink;
	}
}
EXPORT_SYMBOL(set_nlink);

/**
 * inc_nlink - directly increment an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink. Currently,
 * it is only here for parity with dec_nlink().
 */
void inc_nlink(struct inode *inode)
{
	if (unlikely(inode->i_nlink == 0)) {
		WARN_ON(!(inode->i_state & I_LINKABLE));
		atomic_long_dec(&inode->i_sb->s_remove_count);
	}

	inode->__i_nlink++;
}
EXPORT_SYMBOL(inc_nlink);
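
/*
 * Illustrative only (not code from this file): a filesystem's unlink
 * path typically uses these helpers instead of touching i_nlink
 * directly, e.g.
 *
 *	drop_nlink(inode);
 *	inode->i_ctime = dir->i_ctime;
 *	mark_inode_dirty(inode);
 *
 * which keeps s_remove_count, used above, in step with pending removals.
 */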

void address_space_init_once(struct address_space *mapping)
{
	memset(mapping, 0, sizeof(*mapping));
	INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
	spin_lock_init(&mapping->tree_lock);
	init_rwsem(&mapping->i_mmap_rwsem);
	INIT_LIST_HEAD(&mapping->private_list);
	spin_lock_init(&mapping->private_lock);
	mapping->i_mmap = RB_ROOT;
}
EXPORT_SYMBOL(address_space_init_once);

/*
 * These are initializations that only need to be done
 * once, because the fields are idempotent across use
 * of the inode, so let the slab be aware of that.
 */
void inode_init_once(struct inode *inode)
{
	memset(inode, 0, sizeof(*inode));
	INIT_HLIST_NODE(&inode->i_hash);
	INIT_LIST_HEAD(&inode->i_devices);
	INIT_LIST_HEAD(&inode->i_io_list);
	INIT_LIST_HEAD(&inode->i_lru);
	address_space_init_once(&inode->i_data);
	i_size_ordered_init(inode);
#ifdef CONFIG_FSNOTIFY
	INIT_HLIST_HEAD(&inode->i_fsnotify_marks);
#endif
}
EXPORT_SYMBOL(inode_init_once);

static void init_once(void *foo)
{
	struct inode *inode = (struct inode *) foo;

	inode_init_once(inode);
}

/*
 * inode->i_lock must be held
 */
void __iget(struct inode *inode)
{
	atomic_inc(&inode->i_count);
}

/*
 * get additional reference to inode; caller must already hold one.
 */
void ihold(struct inode *inode)
{
	WARN_ON(atomic_inc_return(&inode->i_count) < 2);
}
EXPORT_SYMBOL(ihold);

static void inode_lru_list_add(struct inode *inode)
{
	if (list_lru_add(&inode->i_sb->s_inode_lru, &inode->i_lru))
		this_cpu_inc(nr_unused);
}

/*
 * Add inode to LRU if needed (inode is unused and clean).
 *
 * Needs inode->i_lock held.
 */
void inode_add_lru(struct inode *inode)
{
	if (!(inode->i_state & (I_DIRTY_ALL | I_SYNC |
				I_FREEING | I_WILL_FREE)) &&
	    !atomic_read(&inode->i_count) && inode->i_sb->s_flags & MS_ACTIVE)
		inode_lru_list_add(inode);
}

static void inode_lru_list_del(struct inode *inode)
{
	if (list_lru_del(&inode->i_sb->s_inode_lru, &inode->i_lru))
		this_cpu_dec(nr_unused);
}

/**
 * inode_sb_list_add - add inode to the superblock list of inodes
 * @inode: inode to add
 */
void inode_sb_list_add(struct inode *inode)
{
	spin_lock(&inode->i_sb->s_inode_list_lock);
	list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
	spin_unlock(&inode->i_sb->s_inode_list_lock);
}
EXPORT_SYMBOL_GPL(inode_sb_list_add);

static inline void inode_sb_list_del(struct inode *inode)
{
	if (!list_empty(&inode->i_sb_list)) {
		spin_lock(&inode->i_sb->s_inode_list_lock);
		list_del_init(&inode->i_sb_list);
		spin_unlock(&inode->i_sb->s_inode_list_lock);
	}
}

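/*
 * Hash the (superblock, hashval) pair into an index into inode_hashtable.
 * Mixing the superblock pointer into the hash keeps equal inode numbers
 * from different filesystems from all landing in the same chain.
 */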
static unsigned long hash(struct super_block *sb, unsigned long hashval)
{
	unsigned long tmp;

	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
			L1_CACHE_BYTES;
	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift);
	return tmp & i_hash_mask;
}

/**
 * __insert_inode_hash - hash an inode
 * @inode: unhashed inode
 * @hashval: unsigned long value used to locate this object in the
 *		inode_hashtable.
 *
 * Add an inode to the inode hash for this superblock.
 */
void __insert_inode_hash(struct inode *inode, unsigned long hashval)
{
	struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);

	spin_lock(&inode_hash_lock);
	spin_lock(&inode->i_lock);
	hlist_add_head(&inode->i_hash, b);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__insert_inode_hash);

/**
 * __remove_inode_hash - remove an inode from the hash
 * @inode: inode to unhash
 *
 * Remove an inode from the superblock.
 */
void __remove_inode_hash(struct inode *inode)
{
	spin_lock(&inode_hash_lock);
	spin_lock(&inode->i_lock);
	hlist_del_init(&inode->i_hash);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__remove_inode_hash);

void clear_inode(struct inode *inode)
{
	might_sleep();
	/*
	 * We have to cycle tree_lock here because reclaim can still be in the
	 * process of removing the last page (in __delete_from_page_cache())
	 * and we must not free the mapping under it.
	 */
	spin_lock_irq(&inode->i_data.tree_lock);
	BUG_ON(inode->i_data.nrpages);
	BUG_ON(inode->i_data.nrshadows);
	spin_unlock_irq(&inode->i_data.tree_lock);
	BUG_ON(!list_empty(&inode->i_data.private_list));
	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(inode->i_state & I_CLEAR);
	/* don't need i_lock here, no concurrent mods to i_state */
	inode->i_state = I_FREEING | I_CLEAR;
}
EXPORT_SYMBOL(clear_inode);

/*
 * Free the inode passed in, removing it from the lists it is still connected
 * to. We remove any pages still attached to the inode and wait for any IO that
 * is still in progress before finally destroying the inode.
 *
 * An inode must already be marked I_FREEING so that we avoid the inode being
 * moved back onto lists if we race with other code that manipulates the lists
 * (e.g. writeback_single_inode). The caller is responsible for setting this.
 *
 * An inode must already be removed from the LRU list before being evicted from
 * the cache. This should occur atomically with setting the I_FREEING state
 * flag, so no inodes here should ever be on the LRU when being evicted.
 */
static void evict(struct inode *inode)
{
	const struct super_operations *op = inode->i_sb->s_op;

	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(!list_empty(&inode->i_lru));

	if (!list_empty(&inode->i_io_list))
		inode_io_list_del(inode);

	inode_sb_list_del(inode);

	/*
	 * Wait for flusher thread to be done with the inode so that filesystem
	 * does not start destroying it while writeback is still running. Since
	 * the inode has I_FREEING set, flusher thread won't start new work on
	 * the inode. We just have to wait for running writeback to finish.
	 */
	inode_wait_for_writeback(inode);

	if (op->evict_inode) {
		op->evict_inode(inode);
	} else {
		truncate_inode_pages_final(&inode->i_data);
		clear_inode(inode);
	}
	if (S_ISBLK(inode->i_mode) && inode->i_bdev)
		bd_forget(inode);
	if (S_ISCHR(inode->i_mode) && inode->i_cdev)
		cd_forget(inode);

	remove_inode_hash(inode);

	spin_lock(&inode->i_lock);
	wake_up_bit(&inode->i_state, __I_NEW);
	BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
	spin_unlock(&inode->i_lock);

	destroy_inode(inode);
}

/*
 * dispose_list - dispose of the contents of a local list
 * @head: the head of the list to free
 *
 * Dispose-list gets a local list with local inodes in it, so it doesn't
 * need to worry about list corruption and SMP locks.
 */
static void dispose_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct inode *inode;

		inode = list_first_entry(head, struct inode, i_lru);
		list_del_init(&inode->i_lru);

		evict(inode);
		cond_resched();
	}
}

/**
 * evict_inodes - evict all evictable inodes for a superblock
 * @sb: superblock to operate on
 *
 * Make sure that no inodes with zero refcount are retained. This is
 * called by superblock shutdown after having MS_ACTIVE flag removed,
 * so any inode reaching zero refcount during or after that call will
 * be immediately evicted.
 */
void evict_inodes(struct super_block *sb)
{
	struct inode *inode, *next;
	LIST_HEAD(dispose);

again:
	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
		if (atomic_read(&inode->i_count))
			continue;

		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			continue;
		}

		inode->i_state |= I_FREEING;
		inode_lru_list_del(inode);
		spin_unlock(&inode->i_lock);
		list_add(&inode->i_lru, &dispose);

		/*
		 * We can have a ton of inodes to evict at unmount time given
		 * enough memory, check to see if we need to go to sleep for a
		 * bit so we don't livelock.
		 */
		if (need_resched()) {
			spin_unlock(&sb->s_inode_list_lock);
			cond_resched();
			dispose_list(&dispose);
			goto again;
		}
	}
	spin_unlock(&sb->s_inode_list_lock);

	dispose_list(&dispose);
}

/**
 * invalidate_inodes - attempt to free all inodes on a superblock
 * @sb: superblock to operate on
 * @kill_dirty: flag to guide handling of dirty inodes
 *
 * Attempts to free all inodes for a given superblock. If there were any
 * busy inodes return a non-zero value, else zero.
 * If @kill_dirty is set, discard dirty inodes too, otherwise treat
 * them as busy.
 */
int invalidate_inodes(struct super_block *sb, bool kill_dirty)
{
	int busy = 0;
	struct inode *inode, *next;
	LIST_HEAD(dispose);

	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (inode->i_state & I_DIRTY_ALL && !kill_dirty) {
			spin_unlock(&inode->i_lock);
			busy = 1;
			continue;
		}
		if (atomic_read(&inode->i_count)) {
			spin_unlock(&inode->i_lock);
			busy = 1;
			continue;
		}

		inode->i_state |= I_FREEING;
		inode_lru_list_del(inode);
		spin_unlock(&inode->i_lock);
		list_add(&inode->i_lru, &dispose);
	}
	spin_unlock(&sb->s_inode_list_lock);

	dispose_list(&dispose);

	return busy;
}

/*
 * Isolate the inode from the LRU in preparation for freeing it.
 *
 * Any inodes which are pinned purely because of attached pagecache have their
 * pagecache removed. If the inode has metadata buffers attached to
 * mapping->private_list then try to remove them.
 *
 * If the inode has the I_REFERENCED flag set, then it means that it has been
 * used recently - the flag is set in iput_final(). When we encounter such an
 * inode, clear the flag and move it to the back of the LRU so it gets another
 * pass through the LRU before it gets reclaimed. This is necessary because of
 * the fact we are doing lazy LRU updates to minimise lock contention so the
 * LRU does not have strict ordering. Hence we don't want to reclaim inodes
 * with this flag set because they are the inodes that are out of order.
 */
static enum lru_status inode_lru_isolate(struct list_head *item,
		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct inode *inode = container_of(item, struct inode, i_lru);

	/*
	 * we are inverting the lru lock/inode->i_lock here, so use a trylock.
	 * If we fail to get the lock, just skip it.
	 */
	if (!spin_trylock(&inode->i_lock))
		return LRU_SKIP;

	/*
	 * Referenced or dirty inodes are still in use. Give them another pass
	 * through the LRU as we cannot reclaim them now.
	 */
	if (atomic_read(&inode->i_count) ||
	    (inode->i_state & ~I_REFERENCED)) {
		list_lru_isolate(lru, &inode->i_lru);
		spin_unlock(&inode->i_lock);
		this_cpu_dec(nr_unused);
		return LRU_REMOVED;
	}

	/* recently referenced inodes get one more pass */
	if (inode->i_state & I_REFERENCED) {
		inode->i_state &= ~I_REFERENCED;
		spin_unlock(&inode->i_lock);
		return LRU_ROTATE;
	}

	if (inode_has_buffers(inode) || inode->i_data.nrpages) {
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(lru_lock);
		if (remove_inode_buffers(inode)) {
			unsigned long reap;
			reap = invalidate_mapping_pages(&inode->i_data, 0, -1);
			if (current_is_kswapd())
				__count_vm_events(KSWAPD_INODESTEAL, reap);
			else
				__count_vm_events(PGINODESTEAL, reap);
			if (current->reclaim_state)
				current->reclaim_state->reclaimed_slab += reap;
		}
		iput(inode);
		spin_lock(lru_lock);
		return LRU_RETRY;
	}

	WARN_ON(inode->i_state & I_NEW);
	inode->i_state |= I_FREEING;
	list_lru_isolate_move(lru, &inode->i_lru, freeable);
	spin_unlock(&inode->i_lock);

	this_cpu_dec(nr_unused);
	return LRU_REMOVED;
}

/*
 * Walk the superblock inode LRU for freeable inodes and attempt to free them.
 * This is called from the superblock shrinker function with a number of inodes
 * to trim from the LRU. Inodes to be freed are moved to a temporary list and
 * then are freed outside inode_lock by dispose_list().
 */
long prune_icache_sb(struct super_block *sb, struct shrink_control *sc)
{
	LIST_HEAD(freeable);
	long freed;

	freed = list_lru_shrink_walk(&sb->s_inode_lru, sc,
				     inode_lru_isolate, &freeable);
	dispose_list(&freeable);
	return freed;
}

static void __wait_on_freeing_inode(struct inode *inode);
/*
 * Called with the inode lock held.
 */
static struct inode *find_inode(struct super_block *sb,
				struct hlist_head *head,
				int (*test)(struct inode *, void *),
				void *data)
{
	struct inode *inode = NULL;

repeat:
	hlist_for_each_entry(inode, head, i_hash) {
		if (inode->i_sb != sb)
			continue;
		if (!test(inode, data))
			continue;
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		return inode;
	}
	return NULL;
}

/*
 * find_inode_fast is the fast path version of find_inode, see the comment at
 * iget_locked for details.
 */
static struct inode *find_inode_fast(struct super_block *sb,
				struct hlist_head *head, unsigned long ino)
{
	struct inode *inode = NULL;

repeat:
	hlist_for_each_entry(inode, head, i_hash) {
		if (inode->i_ino != ino)
			continue;
		if (inode->i_sb != sb)
			continue;
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		return inode;
	}
	return NULL;
}

/*
 * Each cpu owns a range of LAST_INO_BATCH numbers.
 * 'shared_last_ino' is dirtied only once out of LAST_INO_BATCH allocations,
 * to renew the exhausted range.
 *
 * This does not significantly increase overflow rate because every CPU can
 * consume at most LAST_INO_BATCH-1 unused inode numbers. So there is
 * NR_CPUS*(LAST_INO_BATCH-1) wastage. At 4096 and 1024, this is ~0.1% of the
 * 2^32 range, and is a worst-case. Even a 50% wastage would only increase
 * overflow rate by 2x, which does not seem too significant.
 *
 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
 * error if st_ino won't fit in target struct field. Use 32bit counter
 * here to attempt to avoid that.
 */
#define LAST_INO_BATCH 1024
static DEFINE_PER_CPU(unsigned int, last_ino);

unsigned int get_next_ino(void)
{
	unsigned int *p = &get_cpu_var(last_ino);
	unsigned int res = *p;

#ifdef CONFIG_SMP
	if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
		static atomic_t shared_last_ino;
		int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);

		res = next - LAST_INO_BATCH;
	}
#endif

	res++;
	/* get_next_ino should not provide a 0 inode number */
	if (unlikely(!res))
		res++;
	*p = res;
	put_cpu_var(last_ino);
	return res;
}
EXPORT_SYMBOL(get_next_ino);
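
/*
 * For context: get_next_ino() is what in-memory/pseudo filesystems
 * commonly use to pick i_ino for inodes created via new_inode() or
 * new_inode_pseudo(), since they have no on-disk inode numbers. The
 * value is only unique enough for that purpose; per the comment above,
 * the 32-bit counter can wrap on long-running systems.
 */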

/**
 * new_inode_pseudo - obtain an inode
 * @sb: superblock
 *
 * Allocates a new inode for given superblock.
 * The inode won't be chained in the superblock's s_inodes list.
 * This means:
 * - the fs can't be unmounted
 * - quotas, fsnotify, writeback can't work
 */
struct inode *new_inode_pseudo(struct super_block *sb)
{
	struct inode *inode = alloc_inode(sb);

	if (inode) {
		spin_lock(&inode->i_lock);
		inode->i_state = 0;
		spin_unlock(&inode->i_lock);
		INIT_LIST_HEAD(&inode->i_sb_list);
	}
	return inode;
}

/**
 * new_inode - obtain an inode
 * @sb: superblock
 *
 * Allocates a new inode for given superblock. The default gfp_mask
 * for allocations related to inode->i_mapping is GFP_HIGHUSER_MOVABLE.
 * If HIGHMEM pages are unsuitable or it is known that pages allocated
 * for the page cache are not reclaimable or migratable,
 * mapping_set_gfp_mask() must be called with suitable flags on the
 * newly created inode's mapping.
 */
struct inode *new_inode(struct super_block *sb)
{
	struct inode *inode;

	spin_lock_prefetch(&sb->s_inode_list_lock);

	inode = new_inode_pseudo(sb);
	if (inode)
		inode_sb_list_add(inode);
	return inode;
}
EXPORT_SYMBOL(new_inode);
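
/*
 * A sketch of typical use by a simple in-memory filesystem (illustrative,
 * not code from this file): allocate, pick an inode number, fill in the
 * fields the VFS does not default, then hand the inode to a dentry:
 *
 *	inode = new_inode(sb);
 *	if (!inode)
 *		return -ENOMEM;
 *	inode->i_ino = get_next_ino();
 *	inode->i_mode = mode;
 *	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
 *	d_instantiate(dentry, inode);
 */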

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void lockdep_annotate_inode_mutex_key(struct inode *inode)
{
	if (S_ISDIR(inode->i_mode)) {
		struct file_system_type *type = inode->i_sb->s_type;

		/* Set new key only if filesystem hasn't already changed it */
		if (lockdep_match_class(&inode->i_mutex, &type->i_mutex_key)) {
			/*
			 * ensure nobody is actually holding i_mutex
			 */
			mutex_destroy(&inode->i_mutex);
			mutex_init(&inode->i_mutex);
			lockdep_set_class(&inode->i_mutex,
					  &type->i_mutex_dir_key);
		}
	}
}
EXPORT_SYMBOL(lockdep_annotate_inode_mutex_key);
#endif

/**
 * unlock_new_inode - clear the I_NEW state and wake up any waiters
 * @inode: new inode to unlock
 *
 * Called when the inode is fully initialised to clear the new state of the
 * inode and wake up anyone waiting for the inode to finish initialisation.
 */
void unlock_new_inode(struct inode *inode)
{
	lockdep_annotate_inode_mutex_key(inode);
	spin_lock(&inode->i_lock);
	WARN_ON(!(inode->i_state & I_NEW));
	inode->i_state &= ~I_NEW;
	smp_mb();
	wake_up_bit(&inode->i_state, __I_NEW);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(unlock_new_inode);
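
/*
 * The usual I_NEW protocol, for illustration (see also iget_locked()
 * below): a filesystem that gets back a locked, hashed inode fills it
 * in and only then publishes it:
 *
 *	inode = iget_locked(sb, ino);
 *	if (!inode)
 *		return ERR_PTR(-ENOMEM);
 *	if (inode->i_state & I_NEW) {
 *		... read the on-disk inode and set up fields ...
 *		unlock_new_inode(inode);
 *	}
 *	return inode;
 */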

/**
 * lock_two_nondirectories - take two i_mutexes on non-directory objects
 *
 * Lock any non-NULL argument that is not a directory.
 * Zero, one or two objects may be locked by this function.
 *
 * @inode1: first inode to lock
 * @inode2: second inode to lock
 */
void lock_two_nondirectories(struct inode *inode1, struct inode *inode2)
{
	if (inode1 > inode2)
		swap(inode1, inode2);

	if (inode1 && !S_ISDIR(inode1->i_mode))
		inode_lock(inode1);
	if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
		inode_lock_nested(inode2, I_MUTEX_NONDIR2);
}
EXPORT_SYMBOL(lock_two_nondirectories);

/**
 * unlock_two_nondirectories - release locks from lock_two_nondirectories()
 * @inode1: first inode to unlock
 * @inode2: second inode to unlock
 */
void unlock_two_nondirectories(struct inode *inode1, struct inode *inode2)
{
	if (inode1 && !S_ISDIR(inode1->i_mode))
		inode_unlock(inode1);
	if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
		inode_unlock(inode2);
}
EXPORT_SYMBOL(unlock_two_nondirectories);

/**
 * iget5_locked - obtain an inode from a mounted file system
 * @sb: super block of file system
 * @hashval: hash value (usually inode number) to get
 * @test: callback used for comparisons between inodes
 * @set: callback used to initialize a new struct inode
 * @data: opaque data pointer to pass to @test and @set
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if present it is returned with an increased reference count. This is
 * a generalized version of iget_locked() for file systems where the inode
 * number is not sufficient for unique identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it locked,
 * hashed, and with the I_NEW flag set. The file system gets to fill it in
 * before unlocking it via unlock_new_inode().
 *
 * Note both @test and @set are called with the inode_hash_lock held, so can't
 * sleep.
 */
struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *),
		int (*set)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode(sb, head, test, data);
	spin_unlock(&inode_hash_lock);

	if (inode) {
		wait_on_inode(inode);
		return inode;
	}

	inode = alloc_inode(sb);
	if (inode) {
		struct inode *old;

		spin_lock(&inode_hash_lock);
		/* We released the lock, so.. */
		old = find_inode(sb, head, test, data);
		if (!old) {
			if (set(inode, data))
				goto set_failed;

			spin_lock(&inode->i_lock);
			inode->i_state = I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			inode_sb_list_add(inode);
			spin_unlock(&inode_hash_lock);

			/* Return the locked inode with I_NEW set, the
			 * caller is responsible for filling in the contents
			 */
			return inode;
		}

		/*
		 * Uhhuh, somebody else created the same inode under
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
		spin_unlock(&inode_hash_lock);
		destroy_inode(inode);
		inode = old;
		wait_on_inode(inode);
	}
	return inode;

set_failed:
	spin_unlock(&inode_hash_lock);
	destroy_inode(inode);
	return NULL;
}
EXPORT_SYMBOL(iget5_locked);
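
/*
 * A sketch of @test/@set callbacks (illustrative, not from this file):
 * a filesystem keying inodes on something wider than an inode number
 * might do
 *
 *	static int myfs_test(struct inode *inode, void *data)
 *	{
 *		return MYFS_I(inode)->object_id == *(u64 *)data;
 *	}
 *
 *	static int myfs_set(struct inode *inode, void *data)
 *	{
 *		MYFS_I(inode)->object_id = *(u64 *)data;
 *		return 0;
 *	}
 *
 * and call iget5_locked(sb, hash, myfs_test, myfs_set, &object_id).
 * Neither callback may sleep, per the locking note above.
 */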
Linus Torvalds1da177e2005-04-16 15:20:36 -07001067
Christoph Hellwig0b2d0722011-03-23 15:03:28 -04001068/**
1069 * iget_locked - obtain an inode from a mounted file system
1070 * @sb: super block of file system
1071 * @ino: inode number to get
1072 *
1073 * Search for the inode specified by @ino in the inode cache and if present
1074 * return it with an increased reference count. This is for file systems
1075 * where the inode number is sufficient for unique identification of an inode.
1076 *
1077 * If the inode is not in cache, allocate a new inode and return it locked,
1078 * hashed, and with the I_NEW flag set. The file system gets to fill it in
1079 * before unlocking it via unlock_new_inode().
Linus Torvalds1da177e2005-04-16 15:20:36 -07001080 */
Christoph Hellwig0b2d0722011-03-23 15:03:28 -04001081struct inode *iget_locked(struct super_block *sb, unsigned long ino)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001082{
Christoph Hellwig0b2d0722011-03-23 15:03:28 -04001083 struct hlist_head *head = inode_hashtable + hash(sb, ino);
Manish Katiyar6b3304b2009-03-31 19:35:54 +05301084 struct inode *inode;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001085
Christoph Hellwig0b2d0722011-03-23 15:03:28 -04001086 spin_lock(&inode_hash_lock);
1087 inode = find_inode_fast(sb, head, ino);
1088 spin_unlock(&inode_hash_lock);
1089 if (inode) {
1090 wait_on_inode(inode);
1091 return inode;
1092 }
1093
Linus Torvalds1da177e2005-04-16 15:20:36 -07001094 inode = alloc_inode(sb);
1095 if (inode) {
Manish Katiyar6b3304b2009-03-31 19:35:54 +05301096 struct inode *old;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001097
Dave Chinner67a23c42011-03-22 22:23:42 +11001098 spin_lock(&inode_hash_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001099 /* We released the lock, so.. */
1100 old = find_inode_fast(sb, head, ino);
1101 if (!old) {
1102 inode->i_ino = ino;
Dave Chinner250df6e2011-03-22 22:23:36 +11001103 spin_lock(&inode->i_lock);
Christoph Hellwigeaff8072009-12-17 14:25:01 +01001104 inode->i_state = I_NEW;
Dave Chinner250df6e2011-03-22 22:23:36 +11001105 hlist_add_head(&inode->i_hash, head);
1106 spin_unlock(&inode->i_lock);
Dave Chinner55fa6092011-03-22 22:23:40 +11001107 inode_sb_list_add(inode);
Dave Chinner67a23c42011-03-22 22:23:42 +11001108 spin_unlock(&inode_hash_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001109
1110 /* Return the locked inode with I_NEW set, the
1111 * caller is responsible for filling in the contents
1112 */
1113 return inode;
1114 }
1115
1116 /*
1117 * Uhhuh, somebody else created the same inode under
1118 * us. Use the old inode instead of the one we just
1119 * allocated.
1120 */
Dave Chinner67a23c42011-03-22 22:23:42 +11001121 spin_unlock(&inode_hash_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001122 destroy_inode(inode);
1123 inode = old;
1124 wait_on_inode(inode);
1125 }
1126 return inode;
1127}
Christoph Hellwig0b2d0722011-03-23 15:03:28 -04001128EXPORT_SYMBOL(iget_locked);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001129
Christoph Hellwigad5e1952010-10-23 07:00:16 -04001130/*
1131 * search the inode cache for a matching inode number.
1132 * If we find one, then the inode number we are trying to
1133 * allocate is not unique and so we should not use it.
1134 *
1135 * Returns 1 if the inode number is unique, 0 if it is not.
1136 */
1137static int test_inode_iunique(struct super_block *sb, unsigned long ino)
1138{
1139 struct hlist_head *b = inode_hashtable + hash(sb, ino);
Christoph Hellwigad5e1952010-10-23 07:00:16 -04001140 struct inode *inode;
1141
Dave Chinner67a23c42011-03-22 22:23:42 +11001142 spin_lock(&inode_hash_lock);
Sasha Levinb67bfe02013-02-27 17:06:00 -08001143 hlist_for_each_entry(inode, b, i_hash) {
Dave Chinner67a23c42011-03-22 22:23:42 +11001144 if (inode->i_ino == ino && inode->i_sb == sb) {
1145 spin_unlock(&inode_hash_lock);
Christoph Hellwigad5e1952010-10-23 07:00:16 -04001146 return 0;
Dave Chinner67a23c42011-03-22 22:23:42 +11001147 }
Christoph Hellwigad5e1952010-10-23 07:00:16 -04001148 }
Dave Chinner67a23c42011-03-22 22:23:42 +11001149 spin_unlock(&inode_hash_lock);
Christoph Hellwigad5e1952010-10-23 07:00:16 -04001150
1151 return 1;
1152}

/**
 * iunique - get a unique inode number
 * @sb: superblock
 * @max_reserved: highest reserved inode number
 *
 * Obtain an inode number that is unique on the system for a given
 * superblock. This is used by file systems that have no natural
 * permanent inode numbering system. An inode number is returned that
 * is higher than the reserved limit but unique.
 *
 * BUGS:
 * With a large number of live inodes on the file system this function
 * currently becomes quite slow.
 */
ino_t iunique(struct super_block *sb, ino_t max_reserved)
{
	/*
	 * On a 32-bit, non-LFS stat() call, glibc will generate an EOVERFLOW
	 * error if st_ino won't fit in the target struct field. Use a 32-bit
	 * counter here to attempt to avoid that.
	 */
	static DEFINE_SPINLOCK(iunique_lock);
	static unsigned int counter;
	ino_t res;

	spin_lock(&iunique_lock);
	do {
		if (counter <= max_reserved)
			counter = max_reserved + 1;
		res = counter++;
	} while (!test_inode_iunique(sb, res));
	spin_unlock(&iunique_lock);

	return res;
}
EXPORT_SYMBOL(iunique);
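
/*
 * Example usage (illustrative sketch, not part of the VFS proper): a
 * filesystem with no natural inode numbering might allocate numbers for
 * new in-memory inodes like this.  "examplefs_new_inode" and the
 * reserved range are hypothetical.
 */
static struct inode * __maybe_unused examplefs_new_inode(struct super_block *sb)
{
	struct inode *inode = new_inode(sb);

	if (!inode)
		return NULL;
	/* Assume inode numbers up to 10 are reserved for fixed objects. */
	inode->i_ino = iunique(sb, 10);
	return inode;
}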

struct inode *igrab(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) {
		__iget(inode);
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		/*
		 * Handle the case where s_op->clear_inode has not been
		 * called yet, and somebody is calling igrab
		 * while the inode is getting freed.
		 */
		inode = NULL;
	}
	return inode;
}
EXPORT_SYMBOL(igrab);
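
/*
 * Example usage (illustrative sketch): code holding only an unreferenced
 * inode pointer takes a proper reference with igrab() before using it.
 * "examplefs_start_io" is hypothetical.
 */
static int __maybe_unused examplefs_start_io(struct inode *inode)
{
	/* NULL means the inode was already on its way to being freed. */
	inode = igrab(inode);
	if (!inode)
		return -ESTALE;
	/* ... do work that needs the inode to stay around ... */
	iput(inode);	/* drop the reference igrab() took */
	return 0;
}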

/**
 * ilookup5_nowait - search for an inode in the inode cache
 * @sb: super block of file system to search
 * @hashval: hash value (usually inode number) to search for
 * @test: callback used for comparisons between inodes
 * @data: opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode cache.
 * If the inode is in the cache, the inode is returned with an incremented
 * reference count.
 *
 * Note: I_NEW is not waited upon so you have to be very careful what you do
 * with the returned inode.  You probably should be using ilookup5() instead.
 *
 * Note2: @test is called with the inode_hash_lock held, so can't sleep.
 */
struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode(sb, head, test, data);
	spin_unlock(&inode_hash_lock);

	return inode;
}
EXPORT_SYMBOL(ilookup5_nowait);

/**
 * ilookup5 - search for an inode in the inode cache
 * @sb: super block of file system to search
 * @hashval: hash value (usually inode number) to search for
 * @test: callback used for comparisons between inodes
 * @data: opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if the inode is in the cache, return the inode with an incremented
 * reference count.  Waits on I_NEW before returning the inode.
 *
 * This is a generalized version of ilookup() for file systems where the
 * inode number is not sufficient for unique identification of an inode.
 *
 * Note: @test is called with the inode_hash_lock held, so can't sleep.
 */
struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct inode *inode = ilookup5_nowait(sb, hashval, test, data);

	if (inode)
		wait_on_inode(inode);
	return inode;
}
EXPORT_SYMBOL(ilookup5);
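
/*
 * Example usage (illustrative sketch): a @test callback for ilookup5()
 * on a filesystem whose inodes are keyed by a 64-bit object id kept in
 * i_private.  All "examplefs_*" names are hypothetical.
 */
static int __maybe_unused examplefs_itest(struct inode *inode, void *data)
{
	u64 *object_id = data;
	u64 *stored = inode->i_private;

	/* Runs under inode_hash_lock, so it must not sleep. */
	return stored && *stored == *object_id;
}

static struct inode * __maybe_unused examplefs_ilookup(struct super_block *sb,
							u64 object_id)
{
	/* Any stable hash of the key works; truncation is enough here. */
	return ilookup5(sb, (unsigned long)object_id, examplefs_itest,
			&object_id);
}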

/**
 * ilookup - search for an inode in the inode cache
 * @sb: super block of file system to search
 * @ino: inode number to search for
 *
 * Search for the inode @ino in the inode cache, and if the inode is in the
 * cache, the inode is returned with an incremented reference count.
 */
struct inode *ilookup(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode_fast(sb, head, ino);
	spin_unlock(&inode_hash_lock);

	if (inode)
		wait_on_inode(inode);
	return inode;
}
EXPORT_SYMBOL(ilookup);

/**
 * find_inode_nowait - find an inode in the inode cache
 * @sb: super block of file system to search
 * @hashval: hash value (usually inode number) to search for
 * @match: callback used for comparisons between inodes
 * @data: opaque data pointer to pass to @match
 *
 * Search for the inode specified by @hashval and @data in the inode
 * cache, where the helper function @match will return 0 if the inode
 * does not match, 1 if the inode does match, and -1 if the search
 * should be stopped.  The @match function must be responsible for
 * taking the i_lock spin_lock and checking i_state for an inode being
 * freed or being initialized, and incrementing the reference count
 * before returning 1.  It also must not sleep, since it is called with
 * the inode_hash_lock spinlock held.
 *
 * This is an even more generalized version of ilookup5() for when the
 * function must never block --- find_inode() can block in
 * __wait_on_freeing_inode() --- or when the caller can not increment
 * the reference count because the resulting iput() might cause an
 * inode eviction.  The tradeoff is that the @match function must be
 * very carefully implemented.
 */
struct inode *find_inode_nowait(struct super_block *sb,
				unsigned long hashval,
				int (*match)(struct inode *, unsigned long,
					     void *),
				void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode, *ret_inode = NULL;
	int mval;

	spin_lock(&inode_hash_lock);
	hlist_for_each_entry(inode, head, i_hash) {
		if (inode->i_sb != sb)
			continue;
		mval = match(inode, hashval, data);
		if (mval == 0)
			continue;
		if (mval == 1)
			ret_inode = inode;
		goto out;
	}
out:
	spin_unlock(&inode_hash_lock);
	return ret_inode;
}
EXPORT_SYMBOL(find_inode_nowait);
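
/*
 * Example usage (illustrative sketch): a @match callback that does all
 * of its work inside the callback, in the style described above: it
 * takes i_lock, checks i_state, and never returns 1, so the caller gets
 * NULL back and no reference is ever taken.  Hypothetical code.
 */
static int __maybe_unused examplefs_touch_match(struct inode *inode,
						unsigned long ino, void *data)
{
	if (inode->i_ino != ino)
		return 0;		/* keep searching */
	spin_lock(&inode->i_lock);
	if (inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW)) {
		spin_unlock(&inode->i_lock);
		return 0;
	}
	inode->i_state |= I_REFERENCED;	/* e.g. keep it cached for longer */
	spin_unlock(&inode->i_lock);
	return -1;			/* done; stop the search */
}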

int insert_inode_locked(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	ino_t ino = inode->i_ino;
	struct hlist_head *head = inode_hashtable + hash(sb, ino);

	while (1) {
		struct inode *old = NULL;

		spin_lock(&inode_hash_lock);
		hlist_for_each_entry(old, head, i_hash) {
			if (old->i_ino != ino)
				continue;
			if (old->i_sb != sb)
				continue;
			spin_lock(&old->i_lock);
			if (old->i_state & (I_FREEING|I_WILL_FREE)) {
				spin_unlock(&old->i_lock);
				continue;
			}
			break;
		}
		if (likely(!old)) {
			spin_lock(&inode->i_lock);
			inode->i_state |= I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			spin_unlock(&inode_hash_lock);
			return 0;
		}
		__iget(old);
		spin_unlock(&old->i_lock);
		spin_unlock(&inode_hash_lock);
		wait_on_inode(old);
		if (unlikely(!inode_unhashed(old))) {
			iput(old);
			return -EBUSY;
		}
		iput(old);
	}
}
EXPORT_SYMBOL(insert_inode_locked);
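
/*
 * Example usage (illustrative sketch): a typical create path hashes the
 * new inode with I_NEW held via insert_inode_locked(), fills it in, and
 * only then unlocks it.  "examplefs_alloc_inode_nr" is hypothetical.
 */
static struct inode * __maybe_unused examplefs_alloc_inode_nr(struct super_block *sb,
							      unsigned long ino)
{
	struct inode *inode = new_inode(sb);

	if (!inode)
		return ERR_PTR(-ENOMEM);
	inode->i_ino = ino;
	if (insert_inode_locked(inode) < 0) {
		/* Another live inode already owns this number. */
		iput(inode);
		return ERR_PTR(-EBUSY);
	}
	/* ... initialize i_op, i_fop, sizes, timestamps ... */
	unlock_new_inode(inode);
	return inode;
}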

int insert_inode_locked4(struct inode *inode, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct super_block *sb = inode->i_sb;
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);

	while (1) {
		struct inode *old = NULL;

		spin_lock(&inode_hash_lock);
		hlist_for_each_entry(old, head, i_hash) {
			if (old->i_sb != sb)
				continue;
			if (!test(old, data))
				continue;
			spin_lock(&old->i_lock);
			if (old->i_state & (I_FREEING|I_WILL_FREE)) {
				spin_unlock(&old->i_lock);
				continue;
			}
			break;
		}
		if (likely(!old)) {
			spin_lock(&inode->i_lock);
			inode->i_state |= I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			spin_unlock(&inode_hash_lock);
			return 0;
		}
		__iget(old);
		spin_unlock(&old->i_lock);
		spin_unlock(&inode_hash_lock);
		wait_on_inode(old);
		if (unlikely(!inode_unhashed(old))) {
			iput(old);
			return -EBUSY;
		}
		iput(old);
	}
}
EXPORT_SYMBOL(insert_inode_locked4);

int generic_delete_inode(struct inode *inode)
{
	return 1;
}
EXPORT_SYMBOL(generic_delete_inode);
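
/*
 * Example usage (illustrative sketch): a filesystem that never wants
 * unreferenced inodes kept in the cache can point ->drop_inode at
 * generic_delete_inode.  The ops table below is hypothetical.
 */
static const struct super_operations examplefs_sops __maybe_unused = {
	.drop_inode	= generic_delete_inode,
};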

/*
 * Called when we're dropping the last reference
 * to an inode.
 *
 * Call the FS "drop_inode()" function, defaulting to
 * the legacy UNIX filesystem behaviour.  If it tells
 * us to evict the inode, do so.  Otherwise, retain the
 * inode in cache if the fs is alive, sync and evict it
 * if the fs is shutting down.
 */
static void iput_final(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	const struct super_operations *op = inode->i_sb->s_op;
	int drop;

	WARN_ON(inode->i_state & I_NEW);

	if (op->drop_inode)
		drop = op->drop_inode(inode);
	else
		drop = generic_drop_inode(inode);

	if (!drop && (sb->s_flags & MS_ACTIVE)) {
		inode->i_state |= I_REFERENCED;
		inode_add_lru(inode);
		spin_unlock(&inode->i_lock);
		return;
	}

	if (!drop) {
		inode->i_state |= I_WILL_FREE;
		spin_unlock(&inode->i_lock);
		write_inode_now(inode, 1);
		spin_lock(&inode->i_lock);
		WARN_ON(inode->i_state & I_NEW);
		inode->i_state &= ~I_WILL_FREE;
	}

	inode->i_state |= I_FREEING;
	if (!list_empty(&inode->i_lru))
		inode_lru_list_del(inode);
	spin_unlock(&inode->i_lock);

	evict(inode);
}

/**
 * iput - put an inode
 * @inode: inode to put
 *
 * Puts an inode, dropping its usage count. If the inode use count hits
 * zero, the inode is then freed and may also be destroyed.
 *
 * Consequently, iput() can sleep.
 */
void iput(struct inode *inode)
{
	if (!inode)
		return;
	BUG_ON(inode->i_state & I_CLEAR);
retry:
	if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock)) {
		if (inode->i_nlink && (inode->i_state & I_DIRTY_TIME)) {
			atomic_inc(&inode->i_count);
			inode->i_state &= ~I_DIRTY_TIME;
			spin_unlock(&inode->i_lock);
			trace_writeback_lazytime_iput(inode);
			mark_inode_dirty_sync(inode);
			goto retry;
		}
		iput_final(inode);
	}
}
EXPORT_SYMBOL(iput);

/**
 * bmap - find a block number in a file
 * @inode: inode of file
 * @block: block to find
 *
 * Returns the block number on the device holding the inode that
 * is the disk block number for the block of the file requested.
 * That is, if asked for block 4 of inode 1, the function will return
 * the disk block relative to the disk start that holds that block of
 * the file.
 */
sector_t bmap(struct inode *inode, sector_t block)
{
	sector_t res = 0;

	if (inode->i_mapping->a_ops->bmap)
		res = inode->i_mapping->a_ops->bmap(inode->i_mapping, block);
	return res;
}
EXPORT_SYMBOL(bmap);
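
/*
 * Example usage (illustrative sketch): the way an ioctl such as FIBMAP
 * consumes bmap(); a result of 0 conventionally means "no block here"
 * (a hole, or no ->bmap support).  Hypothetical helper.
 */
static bool __maybe_unused examplefs_block_mapped(struct inode *inode,
						  sector_t file_block)
{
	return bmap(inode, file_block) != 0;
}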

/*
 * With relative atime, only update atime if the previous atime is
 * earlier than either the ctime or mtime or if at least a day has
 * passed since the last atime update.
 */
static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,
				struct timespec now)
{
	if (!(mnt->mnt_flags & MNT_RELATIME))
		return 1;
	/*
	 * Is mtime younger than atime? If yes, update atime:
	 */
	if (timespec_compare(&inode->i_mtime, &inode->i_atime) >= 0)
		return 1;
	/*
	 * Is ctime younger than atime? If yes, update atime:
	 */
	if (timespec_compare(&inode->i_ctime, &inode->i_atime) >= 0)
		return 1;

	/*
	 * Is the previous atime value older than a day? If yes,
	 * update atime:
	 */
	if ((long)(now.tv_sec - inode->i_atime.tv_sec) >= 24*60*60)
		return 1;
	/*
	 * Good, we can skip the atime update:
	 */
	return 0;
}

int generic_update_time(struct inode *inode, struct timespec *time, int flags)
{
	int iflags = I_DIRTY_TIME;

	if (flags & S_ATIME)
		inode->i_atime = *time;
	if (flags & S_VERSION)
		inode_inc_iversion(inode);
	if (flags & S_CTIME)
		inode->i_ctime = *time;
	if (flags & S_MTIME)
		inode->i_mtime = *time;

	if (!(inode->i_sb->s_flags & MS_LAZYTIME) || (flags & S_VERSION))
		iflags |= I_DIRTY_SYNC;
	__mark_inode_dirty(inode, iflags);
	return 0;
}
EXPORT_SYMBOL(generic_update_time);

/*
 * This does the actual work of updating an inode's time or version.  The
 * caller must have called mnt_want_write() before calling this.
 */
static int update_time(struct inode *inode, struct timespec *time, int flags)
{
	int (*update_time)(struct inode *, struct timespec *, int);

	update_time = inode->i_op->update_time ? inode->i_op->update_time :
		generic_update_time;

	return update_time(inode, time, flags);
}
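
/*
 * Example usage (illustrative sketch): a filesystem that must reserve
 * metadata space before timestamps change can supply its own
 * ->update_time and delegate the common work to generic_update_time().
 * "examplefs_update_time" and the reservation step are hypothetical.
 */
static int __maybe_unused examplefs_update_time(struct inode *inode,
						struct timespec *time,
						int flags)
{
	/*
	 * A real implementation would reserve transaction/metadata space
	 * here and return an error on failure before touching the inode.
	 */
	return generic_update_time(inode, time, flags);
}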

/**
 * touch_atime - update the access time
 * @path: the &struct path to update
 * @inode: inode to update
 *
 * Update the accessed time on an inode and mark it for writeback.
 * This function automatically handles read only file systems and media,
 * as well as the "noatime" flag and inode specific "noatime" markers.
 */
bool atime_needs_update(const struct path *path, struct inode *inode)
{
	struct vfsmount *mnt = path->mnt;
	struct timespec now;

	if (inode->i_flags & S_NOATIME)
		return false;
	if (IS_NOATIME(inode))
		return false;
	if ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))
		return false;

	if (mnt->mnt_flags & MNT_NOATIME)
		return false;
	if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
		return false;

	now = current_fs_time(inode->i_sb);

	if (!relatime_need_update(mnt, inode, now))
		return false;

	if (timespec_equal(&inode->i_atime, &now))
		return false;

	return true;
}

void touch_atime(const struct path *path)
{
	struct vfsmount *mnt = path->mnt;
	struct inode *inode = d_inode(path->dentry);
	struct timespec now;

	if (!atime_needs_update(path, inode))
		return;

	if (!sb_start_write_trylock(inode->i_sb))
		return;

	if (__mnt_want_write(mnt) != 0)
		goto skip_update;
	/*
	 * File systems can error out when updating inodes if they need to
	 * allocate new space to modify an inode (such is the case for
	 * Btrfs), but since we touch atime while walking down the path we
	 * really don't care if we failed to update the atime of the file,
	 * so just ignore the return value.
	 * We may also fail on filesystems that have the ability to make parts
	 * of the fs read only, e.g. subvolumes in Btrfs.
	 */
	now = current_fs_time(inode->i_sb);
	update_time(inode, &now, S_ATIME);
	__mnt_drop_write(mnt);
skip_update:
	sb_end_write(inode->i_sb);
}
EXPORT_SYMBOL(touch_atime);

/*
 * The logic we want is
 *
 *	if suid or (sgid and xgrp)
 *		remove privs
 */
int should_remove_suid(struct dentry *dentry)
{
	umode_t mode = d_inode(dentry)->i_mode;
	int kill = 0;

	/* suid always must be killed */
	if (unlikely(mode & S_ISUID))
		kill = ATTR_KILL_SUID;

	/*
	 * sgid without any exec bits is just a mandatory locking mark; leave
	 * it alone. If some exec bits are set, it's a real sgid; kill it.
	 */
	if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
		kill |= ATTR_KILL_SGID;

	if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode)))
		return kill;

	return 0;
}
EXPORT_SYMBOL(should_remove_suid);

/*
 * Return mask of changes for notify_change() that need to be done as a
 * response to write or truncate.  Return 0 if nothing has to be changed.
 * Negative value on error (change should be denied).
 */
int dentry_needs_remove_privs(struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);
	int mask = 0;
	int ret;

	if (IS_NOSEC(inode))
		return 0;

	mask = should_remove_suid(dentry);
	ret = security_inode_need_killpriv(dentry);
	if (ret < 0)
		return ret;
	if (ret)
		mask |= ATTR_KILL_PRIV;
	return mask;
}
EXPORT_SYMBOL(dentry_needs_remove_privs);

static int __remove_privs(struct dentry *dentry, int kill)
{
	struct iattr newattrs;

	newattrs.ia_valid = ATTR_FORCE | kill;
	/*
	 * Note we call this on write, so notify_change will not
	 * encounter any conflicting delegations:
	 */
	return notify_change(dentry, &newattrs, NULL);
}

/*
 * Remove special file privileges (suid, capabilities) when a file is
 * written to or truncated.
 */
int file_remove_privs(struct file *file)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = d_inode(dentry);
	int kill;
	int error = 0;

	/* Fast path for nothing security related */
	if (IS_NOSEC(inode))
		return 0;

	kill = file_needs_remove_privs(file);
	if (kill < 0)
		return kill;
	if (kill)
		error = __remove_privs(dentry, kill);
	if (!error)
		inode_has_no_xattr(inode);

	return error;
}
EXPORT_SYMBOL(file_remove_privs);

/**
 * file_update_time - update mtime and ctime
 * @file: file accessed
 *
 * Update the mtime and ctime members of an inode and mark the inode
 * for writeback.  Note that this function is meant exclusively for
 * usage in the file write path of filesystems, and filesystems may
 * choose to explicitly ignore updates via this function with the
 * S_NOCMTIME inode flag, e.g. for network filesystems where these
 * timestamps are handled by the server.  This can return an error for
 * file systems that need to allocate space in order to update an inode.
 */
int file_update_time(struct file *file)
{
	struct inode *inode = file_inode(file);
	struct timespec now;
	int sync_it = 0;
	int ret;

	/* First try to exhaust all avenues to not sync */
	if (IS_NOCMTIME(inode))
		return 0;

	now = current_fs_time(inode->i_sb);
	if (!timespec_equal(&inode->i_mtime, &now))
		sync_it = S_MTIME;

	if (!timespec_equal(&inode->i_ctime, &now))
		sync_it |= S_CTIME;

	if (IS_I_VERSION(inode))
		sync_it |= S_VERSION;

	if (!sync_it)
		return 0;

	/* Finally allowed to write? Takes lock. */
	if (__mnt_want_write_file(file))
		return 0;

	ret = update_time(inode, &now, sync_it);
	__mnt_drop_write_file(file);

	return ret;
}
EXPORT_SYMBOL(file_update_time);
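
/*
 * Example usage (illustrative sketch): the conventional ordering in a
 * write path; privileges are stripped before data lands in the page
 * cache and timestamps are updated afterwards.  "examplefs_file_write"
 * is hypothetical and elides the actual data copy.
 */
static ssize_t __maybe_unused examplefs_file_write(struct file *file,
						   size_t count)
{
	int err;

	err = file_remove_privs(file);	/* drop suid/sgid/caps if needed */
	if (err)
		return err;
	/* ... copy the data into the page cache ... */
	err = file_update_time(file);	/* mtime, ctime, i_version */
	if (err)
		return err;
	return count;
}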

int inode_needs_sync(struct inode *inode)
{
	if (IS_SYNC(inode))
		return 1;
	if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
		return 1;
	return 0;
}
EXPORT_SYMBOL(inode_needs_sync);
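
/*
 * Example usage (illustrative sketch): honouring O_SYNC/dirsync
 * semantics after a metadata change.  "examplefs_commit_inode" is
 * hypothetical.
 */
static int __maybe_unused examplefs_commit_inode(struct inode *inode)
{
	if (inode_needs_sync(inode))
		return write_inode_now(inode, 1);	/* write synchronously */
	mark_inode_dirty(inode);	/* otherwise leave it to writeback */
	return 0;
}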

/*
 * If we try to find an inode in the inode hash while it is being
 * deleted, we have to wait until the filesystem completes its
 * deletion before reporting that it isn't found.  This function waits
 * until the deletion _might_ have completed.  Callers are responsible
 * for rechecking the inode state.
 *
 * It doesn't matter if I_NEW is not set initially, a call to
 * wake_up_bit(&inode->i_state, __I_NEW) after removing from the hash list
 * will DTRT.
 */
static void __wait_on_freeing_inode(struct inode *inode)
{
	wait_queue_head_t *wq;
	DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);

	wq = bit_waitqueue(&inode->i_state, __I_NEW);
	prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
	schedule();
	finish_wait(wq, &wait.wait);
	spin_lock(&inode_hash_lock);
}

static __initdata unsigned long ihash_entries;
static int __init set_ihash_entries(char *str)
{
	if (!str)
		return 0;
	ihash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("ihash_entries=", set_ihash_entries);

/*
 * Initialize the waitqueues and inode hash table.
 */
void __init inode_init_early(void)
{
	unsigned int loop;

	/* If hashes are distributed across NUMA nodes, defer
	 * hash allocation until vmalloc space is available.
	 */
	if (hashdist)
		return;

	inode_hashtable =
		alloc_large_system_hash("Inode-cache",
					sizeof(struct hlist_head),
					ihash_entries,
					14,
					HASH_EARLY,
					&i_hash_shift,
					&i_hash_mask,
					0,
					0);

	for (loop = 0; loop < (1U << i_hash_shift); loop++)
		INIT_HLIST_HEAD(&inode_hashtable[loop]);
}

void __init inode_init(void)
{
	unsigned int loop;

	/* inode slab cache */
	inode_cachep = kmem_cache_create("inode_cache",
					 sizeof(struct inode),
					 0,
					 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
					 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
					 init_once);

	/* Hash may have been set up in inode_init_early */
	if (!hashdist)
		return;

	inode_hashtable =
		alloc_large_system_hash("Inode-cache",
					sizeof(struct hlist_head),
					ihash_entries,
					14,
					0,
					&i_hash_shift,
					&i_hash_mask,
					0,
					0);

	for (loop = 0; loop < (1U << i_hash_shift); loop++)
		INIT_HLIST_HEAD(&inode_hashtable[loop]);
}

void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
{
	inode->i_mode = mode;
	if (S_ISCHR(mode)) {
		inode->i_fop = &def_chr_fops;
		inode->i_rdev = rdev;
	} else if (S_ISBLK(mode)) {
		inode->i_fop = &def_blk_fops;
		inode->i_rdev = rdev;
	} else if (S_ISFIFO(mode))
		inode->i_fop = &pipefifo_fops;
	else if (S_ISSOCK(mode))
		;	/* leave it no_open_fops */
	else
		printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o) for"
				  " inode %s:%lu\n", mode, inode->i_sb->s_id,
				  inode->i_ino);
}
EXPORT_SYMBOL(init_special_inode);

/**
 * inode_init_owner - Init uid,gid,mode for new inode according to posix standards
 * @inode: New inode
 * @dir: Directory inode
 * @mode: mode of the new inode
 */
void inode_init_owner(struct inode *inode, const struct inode *dir,
			umode_t mode)
{
	inode->i_uid = current_fsuid();
	if (dir && dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else
		inode->i_gid = current_fsgid();
	inode->i_mode = mode;
}
EXPORT_SYMBOL(inode_init_owner);
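
/*
 * Example usage (illustrative sketch): a ->create method calls
 * inode_init_owner() so setgid directories propagate their group, and
 * the S_ISGID bit, to new subdirectories.  "examplefs_create" is
 * hypothetical and heavily abridged.
 */
static int __maybe_unused examplefs_create(struct inode *dir,
					   struct dentry *dentry, umode_t mode)
{
	struct inode *inode = new_inode(dir->i_sb);

	if (!inode)
		return -ENOMEM;
	inode_init_owner(inode, dir, mode);
	/* ... set i_op/i_fop and allocate backing storage ... */
	d_instantiate(dentry, inode);
	return 0;
}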

/**
 * inode_owner_or_capable - check current task permissions to inode
 * @inode: inode being checked
 *
 * Return true if current either has CAP_FOWNER in a namespace with the
 * inode owner uid mapped, or owns the file.
 */
bool inode_owner_or_capable(const struct inode *inode)
{
	struct user_namespace *ns;

	if (uid_eq(current_fsuid(), inode->i_uid))
		return true;

	ns = current_user_ns();
	if (ns_capable(ns, CAP_FOWNER) && kuid_has_mapping(ns, inode->i_uid))
		return true;
	return false;
}
EXPORT_SYMBOL(inode_owner_or_capable);

/*
 * Direct i/o helper functions
 */
static void __inode_dio_wait(struct inode *inode)
{
	wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP);
	DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);

	do {
		prepare_to_wait(wq, &q.wait, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&inode->i_dio_count))
			schedule();
	} while (atomic_read(&inode->i_dio_count));
	finish_wait(wq, &q.wait);
}

/**
 * inode_dio_wait - wait for outstanding DIO requests to finish
 * @inode: inode to wait for
 *
 * Waits for all pending direct I/O requests to finish so that we can
 * proceed with a truncate or equivalent operation.
 *
 * Must be called under a lock that serializes taking new references
 * to i_dio_count, usually by inode->i_mutex.
 */
void inode_dio_wait(struct inode *inode)
{
	if (atomic_read(&inode->i_dio_count))
		__inode_dio_wait(inode);
}
EXPORT_SYMBOL(inode_dio_wait);
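
/*
 * Example usage (illustrative sketch): draining direct I/O before a
 * truncate, with i_mutex held by the VFS so no new references to
 * i_dio_count can appear.  "examplefs_setattr" is hypothetical.
 */
static int __maybe_unused examplefs_setattr(struct dentry *dentry,
					    struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);

	if (attr->ia_valid & ATTR_SIZE) {
		inode_dio_wait(inode);		/* wait out in-flight DIO */
		truncate_setsize(inode, attr->ia_size);
	}
	return 0;
}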

/*
 * inode_set_flags - atomically set some inode flags
 *
 * Note: the caller should be holding i_mutex, or else be sure that
 * they have exclusive access to the inode structure (i.e., while the
 * inode is being instantiated).  The reason for the cmpxchg() loop
 * --- which wouldn't be necessary if all code paths which modify
 * i_flags actually followed this rule --- is that there is at least
 * one code path which doesn't today, so we use cmpxchg() out of an
 * abundance of caution.
 *
 * In the long run, i_mutex is overkill, and we should probably look
 * at using the i_lock spinlock to protect i_flags, and then make sure
 * it is so documented in include/linux/fs.h and that all code follows
 * the locking convention!!
 */
void inode_set_flags(struct inode *inode, unsigned int flags,
		     unsigned int mask)
{
	unsigned int old_flags, new_flags;

	WARN_ON_ONCE(flags & ~mask);
	do {
		old_flags = ACCESS_ONCE(inode->i_flags);
		new_flags = (old_flags & ~mask) | flags;
	} while (unlikely(cmpxchg(&inode->i_flags, old_flags,
				  new_flags) != old_flags));
}
EXPORT_SYMBOL(inode_set_flags);
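
/*
 * Example usage (illustrative sketch): propagating on-disk flags into
 * i_flags without disturbing unrelated bits, in the spirit of
 * ext4_set_inode_flags().  The on-disk bit values are hypothetical.
 */
static void __maybe_unused examplefs_set_inode_flags(struct inode *inode,
						     unsigned int diskflags)
{
	unsigned int new_fl = 0;

	if (diskflags & 0x1)		/* hypothetical on-disk NOATIME bit */
		new_fl |= S_NOATIME;
	if (diskflags & 0x2)		/* hypothetical on-disk SYNC bit */
		new_fl |= S_SYNC;
	inode_set_flags(inode, new_fl, S_NOATIME | S_SYNC);
}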

void inode_nohighmem(struct inode *inode)
{
	mapping_set_gfp_mask(inode->i_mapping, GFP_USER);
}
EXPORT_SYMBOL(inode_nohighmem);