/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_acl.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_quota.h"
#include "xfs_utils.h"
#include "xfs_trans_priv.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_trace.h"
#include "xfs_icache.h"

/*
 * Allocate and initialise an xfs_inode.
 */
STATIC struct xfs_inode *
xfs_inode_alloc(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;

	/*
	 * If this didn't occur in transactions, we could use
	 * KM_MAYFAIL and return NULL here on ENOMEM.  Set the
	 * code up to do this anyway.
	 */
	ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
	if (!ip)
		return NULL;
	if (inode_init_always(mp->m_super, VFS_I(ip))) {
		kmem_zone_free(xfs_inode_zone, ip);
		return NULL;
	}

	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(!xfs_isiflocked(ip));
	ASSERT(ip->i_ino == 0);

	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);

	/* initialise the xfs inode */
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	ip->i_afp = NULL;
	memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
	ip->i_flags = 0;
	ip->i_delayed_blks = 0;
	memset(&ip->i_d, 0, sizeof(xfs_icdinode_t));

	return ip;
}

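/*
 * RCU callback: performs the final free of an xfs_inode once the grace
 * period started by the call_rcu() in xfs_inode_free() has expired.
 * (Descriptive comment added here; the behaviour is exactly what the
 * function body below does.)
 */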
STATIC void
xfs_inode_free_callback(
	struct rcu_head		*head)
{
	struct inode		*inode = container_of(head, struct inode, i_rcu);
	struct xfs_inode	*ip = XFS_I(inode);

	kmem_zone_free(xfs_inode_zone, ip);
}

void
xfs_inode_free(
	struct xfs_inode	*ip)
{
	switch (ip->i_d.di_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(ip, XFS_DATA_FORK);
		break;
	}

	if (ip->i_afp)
		xfs_idestroy_fork(ip, XFS_ATTR_FORK);

	if (ip->i_itemp) {
		ASSERT(!(ip->i_itemp->ili_item.li_flags & XFS_LI_IN_AIL));
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}

	/* asserts to verify all state is correct here */
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(!xfs_isiflocked(ip));

	/*
	 * Because we use RCU freeing we need to ensure the inode always
	 * appears to be reclaimed with an invalid inode number when in the
	 * free state. The ip->i_flags_lock provides the barrier against lookup
	 * races.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);

	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
}

/*
 * Check the validity of the inode we just found in the cache.
 */
static int
xfs_iget_cache_hit(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip,
	xfs_ino_t		ino,
	int			flags,
	int			lock_flags) __releases(RCU)
{
	struct inode		*inode = VFS_I(ip);
	struct xfs_mount	*mp = ip->i_mount;
	int			error;

	/*
	 * Check for re-use of an inode within an RCU grace period due to the
	 * radix tree nodes not being updated yet. We monitor for this by
	 * setting the inode number to zero before freeing the inode structure.
	 * If the inode has been reallocated and set up, then the inode number
	 * will not match, so check for that, too.
	 */
	spin_lock(&ip->i_flags_lock);
	if (ip->i_ino != ino) {
		trace_xfs_iget_skip(ip);
		XFS_STATS_INC(xs_ig_frecycle);
		error = EAGAIN;
		goto out_error;
	}

	/*
	 * If we are racing with another cache hit that is currently
	 * instantiating this inode or currently recycling it out of
	 * reclaimable state, wait for the initialisation to complete
	 * before continuing.
	 *
	 * XXX(hch): eventually we should do something equivalent to
	 * wait_on_inode to wait for these flags to be cleared
	 * instead of polling for it.
	 */
	if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
		trace_xfs_iget_skip(ip);
		XFS_STATS_INC(xs_ig_frecycle);
		error = EAGAIN;
		goto out_error;
	}

	/*
	 * If lookup is racing with unlink return an error immediately.
	 */
	if (ip->i_d.di_mode == 0 && !(flags & XFS_IGET_CREATE)) {
		error = ENOENT;
		goto out_error;
	}

	/*
	 * If IRECLAIMABLE is set, we've torn down the VFS inode already.
	 * Need to carefully get it back into usable state.
	 */
	if (ip->i_flags & XFS_IRECLAIMABLE) {
		trace_xfs_iget_reclaim(ip);

		/*
		 * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
		 * from stomping over us while we recycle the inode. We can't
		 * clear the radix tree reclaimable tag yet as it requires
		 * pag_ici_lock to be held exclusive.
		 */
		ip->i_flags |= XFS_IRECLAIM;

		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();

		error = -inode_init_always(mp->m_super, inode);
		if (error) {
			/*
			 * Re-initializing the inode failed, and we are in deep
			 * trouble.  Try to re-add it to the reclaim list.
			 */
			rcu_read_lock();
			spin_lock(&ip->i_flags_lock);

			ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
			ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
			trace_xfs_iget_reclaim_fail(ip);
			goto out_error;
		}

		spin_lock(&pag->pag_ici_lock);
		spin_lock(&ip->i_flags_lock);

		/*
		 * Clear the per-lifetime state in the inode as we are now
		 * effectively a new inode and need to return to the initial
		 * state before reuse occurs.
		 */
		ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
		ip->i_flags |= XFS_INEW;
		__xfs_inode_clear_reclaim_tag(mp, pag, ip);
		inode->i_state = I_NEW;

		ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
		mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);

		spin_unlock(&ip->i_flags_lock);
		spin_unlock(&pag->pag_ici_lock);
	} else {
		/* If the VFS inode is being torn down, pause and try again. */
		if (!igrab(inode)) {
			trace_xfs_iget_skip(ip);
			error = EAGAIN;
			goto out_error;
		}

		/* We've got a live one. */
		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();
		trace_xfs_iget_hit(ip);
	}

	if (lock_flags != 0)
		xfs_ilock(ip, lock_flags);

	xfs_iflags_clear(ip, XFS_ISTALE | XFS_IDONTCACHE);
	XFS_STATS_INC(xs_ig_found);

	return 0;

out_error:
	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();
	return error;
}

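/*
 * Allocate a new in-core inode, read the on-disk inode in and, if that
 * succeeds, insert the new inode into the per-AG radix tree.
 * (Descriptive comment added; it summarises the function body below.)
 */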
static int
xfs_iget_cache_miss(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	xfs_trans_t		*tp,
	xfs_ino_t		ino,
	struct xfs_inode	**ipp,
	int			flags,
	int			lock_flags)
{
	struct xfs_inode	*ip;
	int			error;
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);
	int			iflags;

	ip = xfs_inode_alloc(mp, ino);
	if (!ip)
		return ENOMEM;

	error = xfs_iread(mp, tp, ip, flags);
	if (error)
		goto out_destroy;

	trace_xfs_iget_miss(ip);

	if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
		error = ENOENT;
		goto out_destroy;
	}

	/*
	 * Preload the radix tree so we can insert safely under the
	 * write spinlock. Note that we cannot sleep inside the preload
	 * region. Since we can be called from transaction context, don't
	 * recurse into the file system.
	 */
	if (radix_tree_preload(GFP_NOFS)) {
		error = EAGAIN;
		goto out_destroy;
	}

	/*
	 * Because the inode hasn't been added to the radix-tree yet it can't
	 * be found by another thread, so we can do the non-sleeping lock here.
	 */
	if (lock_flags) {
		if (!xfs_ilock_nowait(ip, lock_flags))
			BUG();
	}

	/*
	 * These values must be set before inserting the inode into the radix
	 * tree as the moment it is inserted a concurrent lookup (allowed by the
	 * RCU locking mechanism) can find it and that lookup must see that this
	 * is an inode currently under construction (i.e. that XFS_INEW is set).
	 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
	 * memory barrier that ensures this detection works correctly at lookup
	 * time.
	 */
	iflags = XFS_INEW;
	if (flags & XFS_IGET_DONTCACHE)
		iflags |= XFS_IDONTCACHE;
	ip->i_udquot = ip->i_gdquot = NULL;
	xfs_iflags_set(ip, iflags);

	/* insert the new inode */
	spin_lock(&pag->pag_ici_lock);
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);
		XFS_STATS_INC(xs_ig_dup);
		error = EAGAIN;
		goto out_preload_end;
	}
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();

	*ipp = ip;
	return 0;

out_preload_end:
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
out_destroy:
	__destroy_inode(VFS_I(ip));
	xfs_inode_free(ip);
	return error;
}

/*
 * Look up an inode by number in the given file system.
 * The inode is looked up in the cache held in each AG.
 * If the inode is found in the cache, initialise the vfs inode
 * if necessary.
 *
 * If it is not in core, read it in from the file system's device,
 * add it to the cache and initialise the vfs inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * This flag parameter indicates how and if the inode's IO lock and inode lock
 * should be taken.
 *
 * mp -- the mount point structure for the current file system.  It points
 *       to the inode hash table.
 * tp -- a pointer to the current transaction if there is one.  This is
 *       simply passed through to the xfs_iread() call.
 * ino -- the number of the inode desired.  This is the unique identifier
 *        within the file system for the inode being requested.
 * lock_flags -- flags indicating how to lock the inode.  See the comment
 *               for xfs_ilock() for a list of valid values.
 */
int
xfs_iget(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	uint		flags,
	uint		lock_flags,
	xfs_inode_t	**ipp)
{
	xfs_inode_t	*ip;
	int		error;
	xfs_perag_t	*pag;
	xfs_agino_t	agino;

	/*
	 * xfs_reclaim_inode() uses the ILOCK to ensure an inode
	 * doesn't get freed while it's being referenced during a
	 * radix tree traversal here.  It assumes this function
	 * acquires only the ILOCK (and therefore it has no need to
	 * involve the IOLOCK in this synchronization).
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);

	/* reject inode numbers outside existing AGs */
	if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
		return EINVAL;

	/* get the perag structure and ensure that it's inode capable */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
	agino = XFS_INO_TO_AGINO(mp, ino);

again:
	error = 0;
	rcu_read_lock();
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);

	if (ip) {
		error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
		if (error)
			goto out_error_or_again;
	} else {
		rcu_read_unlock();
		XFS_STATS_INC(xs_ig_missed);

		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
							flags, lock_flags);
		if (error)
			goto out_error_or_again;
	}
	xfs_perag_put(pag);

	*ipp = ip;

	/*
	 * If we have a real type for an on-disk inode, we can set ops(&unlock)
	 * now.  If it's a new inode being created, xfs_ialloc will handle it.
	 */
	if (xfs_iflags_test(ip, XFS_INEW) && ip->i_d.di_mode != 0)
		xfs_setup_inode(ip);
	return 0;

out_error_or_again:
	if (error == EAGAIN) {
		delay(1);
		goto again;
	}
	xfs_perag_put(pag);
	return error;
}
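
/*
 * Usage sketch (illustrative only, not part of this file): a caller that
 * already knows an inode number would typically do something like
 *
 *	error = xfs_iget(mp, tp, ino, 0, XFS_ILOCK_SHARED, &ip);
 *	if (error)
 *		return error;
 *	... work with ip while the ILOCK is held ...
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 *	IRELE(ip);
 *
 * IRELE() is the reference-release wrapper XFS of this vintage uses; the
 * exact release call varies by call site.  Note that EAGAIN races are
 * retried internally via the "again" loop above, so callers only ever see
 * hard errors such as EINVAL or ENOENT (positive errnos, per the internal
 * XFS convention of this era).
 */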