/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_acl.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_quota.h"
#include "xfs_utils.h"
#include "xfs_trans_priv.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_trace.h"


/*
 * Define xfs inode iolock lockdep classes. We need to ensure that all active
 * inodes are considered the same for lockdep purposes, including inodes that
 * are recycled through the XFS_IRECLAIMABLE state. This is the only way to
 * guarantee the locks are considered the same when there are multiple lock
 * initialisation sites. Also, define a reclaimable inode class so it is
 * obvious in lockdep reports which class the report is against.
 */
static struct lock_class_key xfs_iolock_active;
struct lock_class_key xfs_iolock_reclaimable;
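
/*
 * Illustrative sketch (not part of the original file): how the reclaimable
 * lockdep class declared above is expected to be paired with the active one.
 * The active class is applied in xfs_inode_alloc() below; the reclaimable
 * class is applied elsewhere in XFS when the VFS inode is torn down (e.g. on
 * the evict path), roughly like this. The helper name is hypothetical and
 * the block is guarded with #if 0 so it is documentation only.
 */
#if 0
static void
xfs_mark_iolock_reclaimable(
	struct xfs_inode	*ip)
{
	/* nobody should hold the iolock once the inode heads for reclaim */
	ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
	lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
			&xfs_iolock_reclaimable, "xfs_iolock_reclaimable");
}
#endif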
54
55/*
Christoph Hellwig24f211b2008-11-28 14:23:42 +110056 * Allocate and initialise an xfs_inode.
57 */
58STATIC struct xfs_inode *
59xfs_inode_alloc(
60 struct xfs_mount *mp,
61 xfs_ino_t ino)
62{
63 struct xfs_inode *ip;
64
65 /*
66 * if this didn't occur in transactions, we could use
67 * KM_MAYFAIL and return NULL here on ENOMEM. Set the
68 * code up to do this anyway.
69 */
70 ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
71 if (!ip)
72 return NULL;
Christoph Hellwig54e34622009-08-07 14:38:25 -030073 if (inode_init_always(mp->m_super, VFS_I(ip))) {
74 kmem_zone_free(xfs_inode_zone, ip);
75 return NULL;
76 }
Christoph Hellwig24f211b2008-11-28 14:23:42 +110077
Christoph Hellwig24f211b2008-11-28 14:23:42 +110078 ASSERT(atomic_read(&ip->i_pincount) == 0);
79 ASSERT(!spin_is_locked(&ip->i_flags_lock));
Christoph Hellwig474fce02011-12-18 20:00:09 +000080 ASSERT(!xfs_isiflocked(ip));
Dave Chinner1a3e8f32010-12-17 17:29:43 +110081 ASSERT(ip->i_ino == 0);
Christoph Hellwig033da482009-10-19 04:05:26 +000082
83 mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
Dave Chinnerdcfcf202010-12-23 11:57:13 +110084 lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
85 &xfs_iolock_active, "xfs_iolock_active");
Christoph Hellwig24f211b2008-11-28 14:23:42 +110086
Christoph Hellwig24f211b2008-11-28 14:23:42 +110087 /* initialise the xfs inode */
88 ip->i_ino = ino;
89 ip->i_mount = mp;
90 memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
91 ip->i_afp = NULL;
92 memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
93 ip->i_flags = 0;
Christoph Hellwig24f211b2008-11-28 14:23:42 +110094 ip->i_delayed_blks = 0;
95 memset(&ip->i_d, 0, sizeof(xfs_icdinode_t));
Christoph Hellwig24f211b2008-11-28 14:23:42 +110096
Christoph Hellwig24f211b2008-11-28 14:23:42 +110097 return ip;
98}

STATIC void
xfs_inode_free_callback(
	struct rcu_head		*head)
{
	struct inode		*inode = container_of(head, struct inode, i_rcu);
	struct xfs_inode	*ip = XFS_I(inode);

	kmem_zone_free(xfs_inode_zone, ip);
}

void
xfs_inode_free(
	struct xfs_inode	*ip)
{
	switch (ip->i_d.di_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(ip, XFS_DATA_FORK);
		break;
	}

	if (ip->i_afp)
		xfs_idestroy_fork(ip, XFS_ATTR_FORK);

	if (ip->i_itemp) {
		/*
		 * Only if we are shutting down the fs will we see an
		 * inode still in the AIL. If it is there, we should remove
		 * it to prevent a use-after-free from occurring.
		 */
		xfs_log_item_t	*lip = &ip->i_itemp->ili_item;
		struct xfs_ail	*ailp = lip->li_ailp;

		ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) ||
				       XFS_FORCED_SHUTDOWN(ip->i_mount));
		if (lip->li_flags & XFS_LI_IN_AIL) {
			spin_lock(&ailp->xa_lock);
			if (lip->li_flags & XFS_LI_IN_AIL)
				xfs_trans_ail_delete(ailp, lip);
			else
				spin_unlock(&ailp->xa_lock);
		}
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}

	/* asserts to verify all state is correct here */
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(!xfs_isiflocked(ip));

	/*
	 * Because we use RCU freeing we need to ensure the inode always
	 * appears to be reclaimed with an invalid inode number when in the
	 * free state. The ip->i_flags_lock provides the barrier against lookup
	 * races.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);

	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
}

/*
 * Check the validity of the inode we just found in the cache
 */
static int
xfs_iget_cache_hit(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip,
	xfs_ino_t		ino,
	int			flags,
	int			lock_flags) __releases(RCU)
{
	struct inode		*inode = VFS_I(ip);
	struct xfs_mount	*mp = ip->i_mount;
	int			error;

	/*
	 * check for re-use of an inode within an RCU grace period due to the
	 * radix tree nodes not being updated yet. We monitor for this by
	 * setting the inode number to zero before freeing the inode structure.
	 * If the inode has been reallocated and set up, then the inode number
	 * will not match, so check for that, too.
	 */
	spin_lock(&ip->i_flags_lock);
	if (ip->i_ino != ino) {
		trace_xfs_iget_skip(ip);
		XFS_STATS_INC(xs_ig_frecycle);
		error = EAGAIN;
		goto out_error;
	}

	/*
	 * If we are racing with another cache hit that is currently
	 * instantiating this inode or currently recycling it out of
	 * reclaimable state, wait for the initialisation to complete
	 * before continuing.
	 *
	 * XXX(hch): eventually we should do something equivalent to
	 *	     wait_on_inode to wait for these flags to be cleared
	 *	     instead of polling for it.
	 */
	if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
		trace_xfs_iget_skip(ip);
		XFS_STATS_INC(xs_ig_frecycle);
		error = EAGAIN;
		goto out_error;
	}

	/*
	 * If lookup is racing with unlink return an error immediately.
	 */
	if (ip->i_d.di_mode == 0 && !(flags & XFS_IGET_CREATE)) {
		error = ENOENT;
		goto out_error;
	}

	/*
	 * If IRECLAIMABLE is set, we've torn down the VFS inode already.
	 * Need to carefully get it back into usable state.
	 */
	if (ip->i_flags & XFS_IRECLAIMABLE) {
		trace_xfs_iget_reclaim(ip);

		/*
		 * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
		 * from stomping over us while we recycle the inode.  We can't
		 * clear the radix tree reclaimable tag yet as it requires
		 * pag_ici_lock to be held exclusive.
		 */
		ip->i_flags |= XFS_IRECLAIM;

		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();

		error = -inode_init_always(mp->m_super, inode);
		if (error) {
			/*
			 * Re-initializing the inode failed, and we are in deep
			 * trouble.  Try to re-add it to the reclaim list.
			 */
			rcu_read_lock();
			spin_lock(&ip->i_flags_lock);

			ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
			ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
			trace_xfs_iget_reclaim_fail(ip);
			goto out_error;
		}

		spin_lock(&pag->pag_ici_lock);
		spin_lock(&ip->i_flags_lock);

		/*
		 * Clear the per-lifetime state in the inode as we are now
		 * effectively a new inode and need to return to the initial
		 * state before reuse occurs.
		 */
		ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
		ip->i_flags |= XFS_INEW;
		__xfs_inode_clear_reclaim_tag(mp, pag, ip);
		inode->i_state = I_NEW;

		ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
		mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
		lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
				&xfs_iolock_active, "xfs_iolock_active");

		spin_unlock(&ip->i_flags_lock);
		spin_unlock(&pag->pag_ici_lock);
	} else {
		/* If the VFS inode is being torn down, pause and try again. */
		if (!igrab(inode)) {
			trace_xfs_iget_skip(ip);
			error = EAGAIN;
			goto out_error;
		}

		/* We've got a live one. */
		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();
		trace_xfs_iget_hit(ip);
	}

	if (lock_flags != 0)
		xfs_ilock(ip, lock_flags);

	xfs_iflags_clear(ip, XFS_ISTALE);
	XFS_STATS_INC(xs_ig_found);

	return 0;

out_error:
	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();
	return error;
}

static int
xfs_iget_cache_miss(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	xfs_trans_t		*tp,
	xfs_ino_t		ino,
	struct xfs_inode	**ipp,
	int			flags,
	int			lock_flags)
{
	struct xfs_inode	*ip;
	int			error;
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);

	ip = xfs_inode_alloc(mp, ino);
	if (!ip)
		return ENOMEM;

	error = xfs_iread(mp, tp, ip, flags);
	if (error)
		goto out_destroy;

	trace_xfs_iget_miss(ip);

	if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
		error = ENOENT;
		goto out_destroy;
	}

	/*
	 * Preload the radix tree so we can insert safely under the
	 * write spinlock. Note that we cannot sleep inside the preload
	 * region.
	 */
	if (radix_tree_preload(GFP_KERNEL)) {
		error = EAGAIN;
		goto out_destroy;
	}

	/*
	 * Because the inode hasn't been added to the radix-tree yet it can't
	 * be found by another thread, so we can do the non-sleeping lock here.
	 */
	if (lock_flags) {
		if (!xfs_ilock_nowait(ip, lock_flags))
			BUG();
	}

	/*
	 * These values must be set before inserting the inode into the radix
	 * tree as the moment it is inserted a concurrent lookup (allowed by the
	 * RCU locking mechanism) can find it and that lookup must see that this
	 * is an inode currently under construction (i.e. that XFS_INEW is set).
	 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
	 * memory barrier that ensures this detection works correctly at lookup
	 * time.
	 */
	ip->i_udquot = ip->i_gdquot = NULL;
	xfs_iflags_set(ip, XFS_INEW);

	/* insert the new inode */
	spin_lock(&pag->pag_ici_lock);
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);
		XFS_STATS_INC(xs_ig_dup);
		error = EAGAIN;
		goto out_preload_end;
	}
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();

	*ipp = ip;
	return 0;

out_preload_end:
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
out_destroy:
	__destroy_inode(VFS_I(ip));
	xfs_inode_free(ip);
	return error;
}

/*
 * Look up an inode by number in the given file system.
 * The inode is looked up in the cache held in each AG.
 * If the inode is found in the cache, initialise the vfs inode
 * if necessary.
 *
 * If it is not in core, read it in from the file system's device,
 * add it to the cache and initialise the vfs inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * This flag parameter indicates how and if the inode's IO lock and inode lock
 * should be taken.
 *
 * mp -- the mount point structure for the current file system.  It points
 *       to the per-AG in-core inode caches.
 * tp -- a pointer to the current transaction if there is one.  This is
 *       simply passed through to the xfs_iread() call.
 * ino -- the number of the inode desired.  This is the unique identifier
 *        within the file system for the inode being requested.
 * lock_flags -- flags indicating how to lock the inode.  See the comment
 *		 for xfs_ilock() for a list of valid values.
 */
int
xfs_iget(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	uint		flags,
	uint		lock_flags,
	xfs_inode_t	**ipp)
{
	xfs_inode_t	*ip;
	int		error;
	xfs_perag_t	*pag;
	xfs_agino_t	agino;

	/*
	 * xfs_reclaim_inode() uses the ILOCK to ensure an inode
	 * doesn't get freed while it's being referenced during a
	 * radix tree traversal here.  It assumes this function
	 * acquires only the ILOCK (and therefore it has no need to
	 * involve the IOLOCK in this synchronization).
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);

	/* reject inode numbers outside existing AGs */
	if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
		return EINVAL;

	/* get the perag structure and ensure that it's inode capable */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
	agino = XFS_INO_TO_AGINO(mp, ino);

again:
	error = 0;
	rcu_read_lock();
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);

	if (ip) {
		error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
		if (error)
			goto out_error_or_again;
	} else {
		rcu_read_unlock();
		XFS_STATS_INC(xs_ig_missed);

		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
							flags, lock_flags);
		if (error)
			goto out_error_or_again;
	}
	xfs_perag_put(pag);

	*ipp = ip;

	/*
	 * If we have a real type for an on-disk inode, we can set ops(&unlock)
	 * now.	 If it's a new inode being created, xfs_ialloc will handle it.
	 */
	if (xfs_iflags_test(ip, XFS_INEW) && ip->i_d.di_mode != 0)
		xfs_setup_inode(ip);
	return 0;

out_error_or_again:
	if (error == EAGAIN) {
		delay(1);
		goto again;
	}
	xfs_perag_put(pag);
	return error;
}

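/*
 * Illustrative sketch (not part of the original file): a minimal caller of
 * xfs_iget() as documented above, taking the ILOCK shared, using the inode,
 * and then dropping the lock and the reference. The function name is
 * hypothetical, error handling is reduced to the bare minimum, and IRELE()
 * is the usual reference-dropping helper declared elsewhere in XFS. Guarded
 * with #if 0 so it is documentation only.
 */
#if 0
STATIC int
xfs_example_lookup(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;
	int			error;

	/* no transaction, no special flags, take the ILOCK shared */
	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip);
	if (error)
		return error;

	/* ... read-only access to ip->i_d fields goes here ... */

	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	IRELE(ip);			/* drop the reference xfs_iget took */
	return 0;
}
#endif
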
/*
 * This is a wrapper routine around the xfs_ilock() routine
 * used to centralize some grungy code.  It is used in places
 * that wish to lock the inode solely for reading the extents.
 * The reason these places can't just call xfs_ilock(SHARED)
 * is that the inode lock also guards the bringing in of the
 * extents from disk for a file in b-tree format.  If the inode
 * is in b-tree format, then we need to lock the inode exclusively
 * until the extents are read in.  Locking it exclusively all
 * the time would limit our parallelism unnecessarily, though.
 * What we do instead is check to see if the extents have been
 * read in yet, and only lock the inode exclusively if they
 * have not.
 *
 * The function returns a value which should be given to the
 * corresponding xfs_iunlock_map_shared().  This value is
 * the mode in which the lock was actually taken.
 */
uint
xfs_ilock_map_shared(
	xfs_inode_t	*ip)
{
	uint	lock_mode;

	if ((ip->i_d.di_format == XFS_DINODE_FMT_BTREE) &&
	    ((ip->i_df.if_flags & XFS_IFEXTENTS) == 0)) {
		lock_mode = XFS_ILOCK_EXCL;
	} else {
		lock_mode = XFS_ILOCK_SHARED;
	}

	xfs_ilock(ip, lock_mode);

	return lock_mode;
}

/*
 * This is simply the unlock routine to go with xfs_ilock_map_shared().
 * All it does is call xfs_iunlock() with the given lock_mode.
 */
void
xfs_iunlock_map_shared(
	xfs_inode_t	*ip,
	unsigned int	lock_mode)
{
	xfs_iunlock(ip, lock_mode);
}

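/*
 * Illustrative sketch (not part of the original file): the intended pairing
 * of xfs_ilock_map_shared()/xfs_iunlock_map_shared() described above. The
 * returned lock_mode records whether the lock was actually taken shared or
 * exclusive, so the caller never needs to know which case applied. The
 * function name is hypothetical; guarded with #if 0 so it is documentation
 * only.
 */
#if 0
STATIC void
xfs_example_read_extents(
	xfs_inode_t	*ip)
{
	uint		lock_mode;

	lock_mode = xfs_ilock_map_shared(ip);
	/* ... walk the data fork extent list here ... */
	xfs_iunlock_map_shared(ip, lock_mode);
}
#endif
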
/*
 * The xfs inode contains 2 locks: a multi-reader lock called the
 * i_iolock and a multi-reader lock called the i_lock.  This routine
 * allows either or both of the locks to be obtained.
 *
 * The 2 locks should always be ordered so that the IO lock is
 * obtained first in order to prevent deadlock.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks
 *       to be locked.  It can be:
 *		XFS_IOLOCK_SHARED,
 *		XFS_IOLOCK_EXCL,
 *		XFS_ILOCK_SHARED,
 *		XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL
 */
void
xfs_ilock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mraccess_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));

	if (lock_flags & XFS_ILOCK_EXCL)
		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
	else if (lock_flags & XFS_ILOCK_SHARED)
		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));

	trace_xfs_ilock(ip, lock_flags, _RET_IP_);
}

/*
 * This is just like xfs_ilock(), except that the caller
 * is guaranteed not to sleep.  It returns 1 if it gets
 * the requested locks and 0 otherwise.  If the IO lock is
 * obtained but the inode lock cannot be, then the IO lock
 * is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks to be
 *       locked.  See the comment for xfs_ilock() for a list
 *	 of valid values.
 */
int
xfs_ilock_nowait(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		if (!mrtryupdate(&ip->i_iolock))
			goto out;
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		if (!mrtryaccess(&ip->i_iolock))
			goto out;
	}
	if (lock_flags & XFS_ILOCK_EXCL) {
		if (!mrtryupdate(&ip->i_lock))
			goto out_undo_iolock;
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		if (!mrtryaccess(&ip->i_lock))
			goto out_undo_iolock;
	}
	trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);
	return 1;

 out_undo_iolock:
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);
 out:
	return 0;
}

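/*
 * Illustrative sketch (not part of the original file): typical use of
 * xfs_ilock_nowait() as a trylock with a blocking fallback. As documented
 * above, a zero return means none of the requested locks are held on return.
 * The function name is hypothetical; guarded with #if 0 so it is
 * documentation only.
 */
#if 0
STATIC void
xfs_example_trylock(
	xfs_inode_t	*ip)
{
	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
		/* could not get it without sleeping, fall back to blocking */
		xfs_ilock(ip, XFS_ILOCK_EXCL);
	}

	/* ... modify the inode under XFS_ILOCK_EXCL ... */

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
}
#endif
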
/*
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates the inode's locks to be
 *       unlocked.  See the comment for xfs_ilock() for a list
 *	 of valid values for this parameter.
 */
void
xfs_iunlock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
	ASSERT(lock_flags != 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrunlock_excl(&ip->i_lock);
	else if (lock_flags & XFS_ILOCK_SHARED)
		mrunlock_shared(&ip->i_lock);

	trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
}

/*
 * Give up write locks.  The i/o lock cannot be held nested
 * if it is being demoted.
 */
void
xfs_ilock_demote(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrdemote(&ip->i_lock);
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrdemote(&ip->i_iolock);

	trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
}

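/*
 * Illustrative sketch (not part of the original file): demoting an exclusive
 * hold to shared with xfs_ilock_demote(), e.g. once the exclusive-only part
 * of an operation is done but the lock still needs to be held for readers.
 * The later xfs_iunlock() must then pass the shared flag. The function name
 * is hypothetical; guarded with #if 0 so it is documentation only.
 */
#if 0
STATIC void
xfs_example_demote(
	xfs_inode_t	*ip)
{
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	/* ... updates that require exclusive access ... */

	xfs_ilock_demote(ip, XFS_ILOCK_EXCL);
	/* ... remaining work only needs shared access ... */

	xfs_iunlock(ip, XFS_ILOCK_SHARED);
}
#endif
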
#ifdef DEBUG
int
xfs_isilocked(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
		if (!(lock_flags & XFS_ILOCK_SHARED))
			return !!ip->i_lock.mr_writer;
		return rwsem_is_locked(&ip->i_lock.mr_lock);
	}

	if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
		if (!(lock_flags & XFS_IOLOCK_SHARED))
			return !!ip->i_iolock.mr_writer;
		return rwsem_is_locked(&ip->i_iolock.mr_lock);
	}

	ASSERT(0);
	return 0;
}
#endif
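
/*
 * Illustrative sketch (not part of the original file): xfs_isilocked() is
 * intended for lock-state assertions in DEBUG builds, e.g. at the top of a
 * helper whose caller must already hold the ILOCK in either mode. The
 * function name is hypothetical; guarded with #if 0 so it is documentation
 * only.
 */
#if 0
STATIC void
xfs_example_requires_ilock(
	xfs_inode_t	*ip)
{
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL | XFS_ILOCK_SHARED));
	/* ... code that relies on the ILOCK being held ... */
}
#endif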

void
__xfs_iflock(
	struct xfs_inode	*ip)
{
	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IFLOCK_BIT);
	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IFLOCK_BIT);

	do {
		prepare_to_wait_exclusive(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
		if (xfs_isiflocked(ip))
			io_schedule();
	} while (!xfs_iflock_nowait(ip));

	finish_wait(wq, &wait.wait);
}
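
/*
 * Illustrative sketch (not part of the original file): __xfs_iflock() above
 * is only the sleeping slow path; callers normally go through the flush-lock
 * wrappers (xfs_iflock(), xfs_iflock_nowait(), xfs_ifunlock()) which, as an
 * assumption here, live in the xfs_inode.h header. A hedged example of the
 * lock/unlock pairing, with a hypothetical function name, guarded with #if 0
 * so it is documentation only.
 */
#if 0
STATIC void
xfs_example_flush_lock(
	struct xfs_inode	*ip)
{
	xfs_iflock(ip);		/* falls into __xfs_iflock() when contended */

	/* ... the inode is now exclusively held for flushing ... */

	xfs_ifunlock(ip);	/* wakes the next waiter queued above */
}
#endif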