/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_acl.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_quota.h"
#include "xfs_utils.h"
#include "xfs_trans_priv.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_btree_trace.h"
#include "xfs_trace.h"


/*
 * Allocate and initialise an xfs_inode.
 */
STATIC struct xfs_inode *
xfs_inode_alloc(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;

	/*
	 * If this allocation didn't occur inside a transaction, we could
	 * use KM_MAYFAIL and return NULL here on ENOMEM.  Set the code
	 * up to do this anyway.
	 */
	ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
	if (!ip)
		return NULL;
	if (inode_init_always(mp->m_super, VFS_I(ip))) {
		kmem_zone_free(xfs_inode_zone, ip);
		return NULL;
	}

	ASSERT(atomic_read(&ip->i_iocount) == 0);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(completion_done(&ip->i_flush));

	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);

	/* initialise the xfs inode */
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	ip->i_afp = NULL;
	memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
	ip->i_flags = 0;
	ip->i_update_core = 0;
	ip->i_delayed_blks = 0;
	memset(&ip->i_d, 0, sizeof(xfs_icdinode_t));
	ip->i_size = 0;
	ip->i_new_size = 0;

	/* prevent anyone from using this yet */
	VFS_I(ip)->i_state = I_NEW;

	return ip;
}

void
xfs_inode_free(
	struct xfs_inode	*ip)
{
	switch (ip->i_d.di_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(ip, XFS_DATA_FORK);
		break;
	}

	if (ip->i_afp)
		xfs_idestroy_fork(ip, XFS_ATTR_FORK);

	if (ip->i_itemp) {
		/*
		 * Only if we are shutting down the fs will we see an
		 * inode still in the AIL. If it is there, we should remove
		 * it to prevent a use-after-free from occurring.
		 */
		xfs_log_item_t	*lip = &ip->i_itemp->ili_item;
		struct xfs_ail	*ailp = lip->li_ailp;

		ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) ||
		       XFS_FORCED_SHUTDOWN(ip->i_mount));
		if (lip->li_flags & XFS_LI_IN_AIL) {
			spin_lock(&ailp->xa_lock);
			if (lip->li_flags & XFS_LI_IN_AIL)
				xfs_trans_ail_delete(ailp, lip);
			else
				spin_unlock(&ailp->xa_lock);
		}
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}

	/* asserts to verify all state is correct here */
	ASSERT(atomic_read(&ip->i_iocount) == 0);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(completion_done(&ip->i_flush));

	kmem_zone_free(xfs_inode_zone, ip);
}

/*
 * Check the validity of the inode we just found in the cache.
 */
static int
xfs_iget_cache_hit(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip,
	int			flags,
	int			lock_flags) __releases(pag->pag_ici_lock)
{
	struct inode		*inode = VFS_I(ip);
	struct xfs_mount	*mp = ip->i_mount;
	int			error;

	spin_lock(&ip->i_flags_lock);

	/*
	 * If we are racing with another cache hit that is currently
	 * instantiating this inode or currently recycling it out of
	 * reclaimable state, wait for the initialisation to complete
	 * before continuing.
	 *
	 * XXX(hch): eventually we should do something equivalent to
	 * wait_on_inode to wait for these flags to be cleared
	 * instead of polling for it.
	 */
	if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
		trace_xfs_iget_skip(ip);
		XFS_STATS_INC(xs_ig_frecycle);
		error = EAGAIN;
		goto out_error;
	}

	/*
	 * If lookup is racing with unlink return an error immediately.
	 */
	if (ip->i_d.di_mode == 0 && !(flags & XFS_IGET_CREATE)) {
		error = ENOENT;
		goto out_error;
	}

	/*
	 * If IRECLAIMABLE is set, we've torn down the VFS inode already.
	 * Need to carefully get it back into usable state.
	 */
	if (ip->i_flags & XFS_IRECLAIMABLE) {
		trace_xfs_iget_reclaim(ip);

		/*
		 * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
		 * from stomping over us while we recycle the inode.  We can't
		 * clear the radix tree reclaimable tag yet as it requires
		 * pag_ici_lock to be held exclusive.
		 */
		ip->i_flags |= XFS_IRECLAIM;

		spin_unlock(&ip->i_flags_lock);
		read_unlock(&pag->pag_ici_lock);

		error = -inode_init_always(mp->m_super, inode);
		if (error) {
			/*
			 * Re-initializing the inode failed, and we are in deep
			 * trouble.  Try to re-add it to the reclaim list.
			 */
			read_lock(&pag->pag_ici_lock);
			spin_lock(&ip->i_flags_lock);

			ip->i_flags &= ~XFS_INEW;
			ip->i_flags |= XFS_IRECLAIMABLE;
			__xfs_inode_set_reclaim_tag(pag, ip);
			trace_xfs_iget_reclaim_fail(ip);
			goto out_error;
		}

		write_lock(&pag->pag_ici_lock);
		spin_lock(&ip->i_flags_lock);
		ip->i_flags &= ~(XFS_IRECLAIMABLE | XFS_IRECLAIM);
		ip->i_flags |= XFS_INEW;
		__xfs_inode_clear_reclaim_tag(mp, pag, ip);
		inode->i_state = I_NEW;
		spin_unlock(&ip->i_flags_lock);
		write_unlock(&pag->pag_ici_lock);
	} else {
		/* If the VFS inode is being torn down, pause and try again. */
		if (!igrab(inode)) {
			trace_xfs_iget_skip(ip);
			error = EAGAIN;
			goto out_error;
		}

		/* We've got a live one. */
		spin_unlock(&ip->i_flags_lock);
		read_unlock(&pag->pag_ici_lock);
		trace_xfs_iget_hit(ip);
	}

	if (lock_flags != 0)
		xfs_ilock(ip, lock_flags);

	xfs_iflags_clear(ip, XFS_ISTALE);
	XFS_STATS_INC(xs_ig_found);

	return 0;

out_error:
	spin_unlock(&ip->i_flags_lock);
	read_unlock(&pag->pag_ici_lock);
	return error;
}


static int
xfs_iget_cache_miss(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	xfs_trans_t		*tp,
	xfs_ino_t		ino,
	struct xfs_inode	**ipp,
	int			flags,
	int			lock_flags)
{
	struct xfs_inode	*ip;
	int			error;
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);

	ip = xfs_inode_alloc(mp, ino);
	if (!ip)
		return ENOMEM;

	error = xfs_iread(mp, tp, ip, flags);
	if (error)
		goto out_destroy;

	trace_xfs_iget_miss(ip);

	if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
		error = ENOENT;
		goto out_destroy;
	}

	/*
	 * Preload the radix tree so we can insert safely under the
	 * write spinlock. Note that we cannot sleep inside the preload
	 * region.
	 */
	if (radix_tree_preload(GFP_KERNEL)) {
		error = EAGAIN;
		goto out_destroy;
	}

	/*
	 * Because the inode hasn't been added to the radix-tree yet it can't
	 * be found by another thread, so we can do the non-sleeping lock here.
	 */
	if (lock_flags) {
		if (!xfs_ilock_nowait(ip, lock_flags))
			BUG();
	}

	write_lock(&pag->pag_ici_lock);

	/* insert the new inode */
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);
		XFS_STATS_INC(xs_ig_dup);
		error = EAGAIN;
		goto out_preload_end;
	}

	/* These values _must_ be set before releasing the radix tree lock! */
	ip->i_udquot = ip->i_gdquot = NULL;
	xfs_iflags_set(ip, XFS_INEW);

	write_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();

	*ipp = ip;
	return 0;

out_preload_end:
	write_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
out_destroy:
	__destroy_inode(VFS_I(ip));
	xfs_inode_free(ip);
	return error;
}
/*
 * Look up an inode by number in the given file system.
 * The inode is looked up in the cache held in each AG.
 * If the inode is found in the cache, initialise the vfs inode
 * if necessary.
 *
 * If it is not in core, read it in from the file system's device,
 * add it to the cache and initialise the vfs inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * This flag parameter indicates how and if the inode's IO lock and inode lock
 * should be taken.
 *
 * mp -- the mount point structure for the current file system.  It points
 *       to the per-AG inode caches.
 * tp -- a pointer to the current transaction if there is one.  This is
 *       simply passed through to the xfs_iread() call.
 * ino -- the number of the inode desired.  This is the unique identifier
 *        within the file system for the inode being requested.
 * lock_flags -- flags indicating how to lock the inode.  See the comment
 *               for xfs_ilock() for a list of valid values.
 */
int
xfs_iget(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	uint		flags,
	uint		lock_flags,
	xfs_inode_t	**ipp)
{
	xfs_inode_t	*ip;
	int		error;
	xfs_perag_t	*pag;
	xfs_agino_t	agino;

	/* the radix tree exists only in inode capable AGs */
	if (XFS_INO_TO_AGNO(mp, ino) >= mp->m_maxagi)
		return EINVAL;

	/* get the perag structure and ensure that it's inode capable */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
	agino = XFS_INO_TO_AGINO(mp, ino);

again:
	error = 0;
	read_lock(&pag->pag_ici_lock);
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);

	if (ip) {
		error = xfs_iget_cache_hit(pag, ip, flags, lock_flags);
		if (error)
			goto out_error_or_again;
	} else {
		read_unlock(&pag->pag_ici_lock);
		XFS_STATS_INC(xs_ig_missed);

		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
							flags, lock_flags);
		if (error)
			goto out_error_or_again;
	}
	xfs_perag_put(pag);

	*ipp = ip;

	ASSERT(ip->i_df.if_ext_max ==
	       XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t));
	/*
	 * If we have a real type for an on-disk inode, we can set the inode
	 * operations (and unlock) now.  If it's a new inode being created,
	 * xfs_ialloc will handle it.
	 */
	if (xfs_iflags_test(ip, XFS_INEW) && ip->i_d.di_mode != 0)
		xfs_setup_inode(ip);
	return 0;

out_error_or_again:
	if (error == EAGAIN) {
		delay(1);
		goto again;
	}
	xfs_perag_put(pag);
	return error;
}
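
/*
 * Illustrative only: a minimal sketch of an xfs_iget() caller, assuming
 * no transaction (tp == NULL) and that the inode reference is dropped
 * with IRELE(), which is defined elsewhere in XFS.  The work done under
 * the lock is elided; this is not a verbatim caller from the tree:
 *
 *	struct xfs_inode	*ip;
 *	int			error;
 *
 *	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip);
 *	if (error)
 *		return error;
 *	(read inode fields under the shared ilock)
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 *	IRELE(ip);
 */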

/*
 * This is a wrapper routine around the xfs_ilock() routine
 * used to centralize some grungy code.  It is used in places
 * that wish to lock the inode solely for reading the extents.
 * The reason these places can't just call xfs_ilock(SHARED)
 * is that the inode lock also guards the reading in of the
 * extents from disk for a file in b-tree format.  If the inode
 * is in b-tree format, then we need to lock the inode exclusively
 * until the extents are read in.  Locking it exclusively all
 * the time would limit our parallelism unnecessarily, though.
 * What we do instead is check to see if the extents have been
 * read in yet, and only lock the inode exclusively if they
 * have not.
 *
 * The function returns a value which should be given to the
 * corresponding xfs_iunlock_map_shared().  This value is
 * the mode in which the lock was actually taken.
 */
uint
xfs_ilock_map_shared(
	xfs_inode_t	*ip)
{
	uint	lock_mode;

	if ((ip->i_d.di_format == XFS_DINODE_FMT_BTREE) &&
	    ((ip->i_df.if_flags & XFS_IFEXTENTS) == 0)) {
		lock_mode = XFS_ILOCK_EXCL;
	} else {
		lock_mode = XFS_ILOCK_SHARED;
	}

	xfs_ilock(ip, lock_mode);

	return lock_mode;
}

/*
 * This is simply the unlock routine to go with xfs_ilock_map_shared().
 * All it does is call xfs_iunlock() with the given lock_mode.
 */
void
xfs_iunlock_map_shared(
	xfs_inode_t	*ip,
	unsigned int	lock_mode)
{
	xfs_iunlock(ip, lock_mode);
}
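
/*
 * Illustrative only: readers of the extent list take whichever lock mode
 * xfs_ilock_map_shared() chose and must hand that same mode back to
 * xfs_iunlock_map_shared(), e.g.:
 *
 *	uint	lock_mode;
 *
 *	lock_mode = xfs_ilock_map_shared(ip);
 *	(walk the in-core extent list here)
 *	xfs_iunlock_map_shared(ip, lock_mode);
 */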

/*
 * The xfs inode contains 2 locks: a multi-reader lock called the
 * i_iolock and a multi-reader lock called the i_lock.  This routine
 * allows either or both of the locks to be obtained.
 *
 * The 2 locks should always be ordered so that the IO lock is
 * obtained first in order to prevent deadlock.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks
 *       to be locked.  It can be:
 *		XFS_IOLOCK_SHARED,
 *		XFS_IOLOCK_EXCL,
 *		XFS_ILOCK_SHARED,
 *		XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL
 */
void
xfs_ilock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mraccess_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));

	if (lock_flags & XFS_ILOCK_EXCL)
		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
	else if (lock_flags & XFS_ILOCK_SHARED)
		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));

	trace_xfs_ilock(ip, lock_flags, _RET_IP_);
}
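
/*
 * Illustrative only: both locks can be taken in a single call, which
 * preserves the IO-lock-before-inode-lock ordering described above; the
 * matching xfs_iunlock() must be passed the same flags:
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 *	(modify the inode)
 *	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 */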

/*
 * This is just like xfs_ilock(), except that the caller
 * is guaranteed not to sleep.  It returns 1 if it gets
 * the requested locks and 0 otherwise.  If the IO lock is
 * obtained but the inode lock cannot be, then the IO lock
 * is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks to be
 *       locked.  See the comment for xfs_ilock() for a list
 *       of valid values.
 */
int
xfs_ilock_nowait(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		if (!mrtryupdate(&ip->i_iolock))
			goto out;
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		if (!mrtryaccess(&ip->i_iolock))
			goto out;
	}
	if (lock_flags & XFS_ILOCK_EXCL) {
		if (!mrtryupdate(&ip->i_lock))
			goto out_undo_iolock;
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		if (!mrtryaccess(&ip->i_lock))
			goto out_undo_iolock;
	}
	trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);
	return 1;

out_undo_iolock:
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);
out:
	return 0;
}
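
/*
 * Illustrative only: a caller that must not sleep, such as the lock
 * attempt in xfs_iget_cache_miss() above, tries the non-blocking variant
 * and backs off on failure:
 *
 *	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
 *		return EAGAIN;
 *	(do the locked work)
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 */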

/*
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates the inode's locks to be
 *       unlocked.  See the comment for xfs_ilock() for a list
 *       of valid values for this parameter.
 */
void
xfs_iunlock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_IUNLOCK_NONOTIFY |
			XFS_LOCK_DEP_MASK)) == 0);
	ASSERT(lock_flags != 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrunlock_excl(&ip->i_lock);
	else if (lock_flags & XFS_ILOCK_SHARED)
		mrunlock_shared(&ip->i_lock);

	if ((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) &&
	    !(lock_flags & XFS_IUNLOCK_NONOTIFY) && ip->i_itemp) {
		/*
		 * Let the AIL know that this item has been unlocked in case
		 * it is in the AIL and anyone is waiting on it.  Don't do
		 * this if the caller has asked us not to.
		 */
		xfs_trans_unlocked_item(ip->i_itemp->ili_item.li_ailp,
					(xfs_log_item_t*)(ip->i_itemp));
	}
	trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
}

/*
 * Give up write locks.  The I/O lock cannot be held nested
 * if it is being demoted.
 */
void
xfs_ilock_demote(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrdemote(&ip->i_lock);
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrdemote(&ip->i_iolock);

	trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
}
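
/*
 * Illustrative only: a holder that has finished the exclusive part of its
 * work can demote to shared mode instead of dropping and re-taking the
 * lock, and must then unlock in the demoted mode:
 *
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	(exclusive setup)
 *	xfs_ilock_demote(ip, XFS_ILOCK_EXCL);
 *	(keep working under the shared lock)
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 */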

#ifdef DEBUG
int
xfs_isilocked(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
		if (!(lock_flags & XFS_ILOCK_SHARED))
			return !!ip->i_lock.mr_writer;
		return rwsem_is_locked(&ip->i_lock.mr_lock);
	}

	if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
		if (!(lock_flags & XFS_IOLOCK_SHARED))
			return !!ip->i_iolock.mr_writer;
		return rwsem_is_locked(&ip->i_iolock.mr_lock);
	}

	ASSERT(0);
	return 0;
}
#endif
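
/*
 * Illustrative only: xfs_isilocked() backs lock-state assertions in DEBUG
 * builds, typically at the top of a function that requires its caller to
 * already hold the inode lock:
 *
 *	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 */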