/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include <linux/log2.h>

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_dir2.h"
#include "xfs_attr_sf.h"
#include "xfs_attr.h"
#include "xfs_trans_space.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_filestream.h"
#include "xfs_cksum.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_symlink.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_bmap_btree.h"
#include "xfs_reflink.h"
#include "xfs_dir2_priv.h"

kmem_zone_t *xfs_inode_zone;

/*
 * Used in xfs_itruncate_extents().  This is the maximum number of extents
 * freed from a file in a single transaction.
 */
#define	XFS_ITRUNC_MAX_EXTENTS	2

STATIC int xfs_iflush_int(struct xfs_inode *, struct xfs_buf *);
STATIC int xfs_iunlink(struct xfs_trans *, struct xfs_inode *);
STATIC int xfs_iunlink_remove(struct xfs_trans *, struct xfs_inode *);

/*
 * helper function to extract extent size hint from inode
 */
xfs_extlen_t
xfs_get_extsz_hint(
	struct xfs_inode	*ip)
{
	if ((ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) && ip->i_d.di_extsize)
		return ip->i_d.di_extsize;
	if (XFS_IS_REALTIME_INODE(ip))
		return ip->i_mount->m_sb.sb_rextsize;
	return 0;
}

/*
 * Helper function to extract CoW extent size hint from inode.
 * Between the extent size hint and the CoW extent size hint, we
 * return the greater of the two.  If the value is zero (automatic),
 * use the default size.
 */
xfs_extlen_t
xfs_get_cowextsz_hint(
	struct xfs_inode	*ip)
{
	xfs_extlen_t		a, b;

	a = 0;
	if (ip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
		a = ip->i_d.di_cowextsize;
	b = xfs_get_extsz_hint(ip);

	a = max(a, b);
	if (a == 0)
		return XFS_DEFAULT_COWEXTSZ_HINT;
	return a;
}

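/*
 * Worked example (editor's sketch, not from the original source): for an
 * inode with di_cowextsize = 32 blocks and a regular extent size hint of
 * 128 blocks, xfs_get_cowextsz_hint() returns 128, the greater of the two;
 * if both hints are zero it falls back to XFS_DEFAULT_COWEXTSZ_HINT.
 */
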
/*
 * These two are wrapper routines around the xfs_ilock() routine used to
 * centralize some grungy code.  They are used in places that wish to lock the
 * inode solely for reading the extents.  The reason these places can't just
 * call xfs_ilock(ip, XFS_ILOCK_SHARED) is that the inode lock also guards the
 * bringing in of the extents from disk for a file in b-tree format.  If the
 * inode is in b-tree format, then we need to lock the inode exclusively until
 * the extents are read in.  Locking it exclusively all the time would limit
 * our parallelism unnecessarily, though.  What we do instead is check to see
 * if the extents have been read in yet, and only lock the inode exclusively
 * if they have not.
 *
 * The functions return a value which should be given to the corresponding
 * xfs_iunlock() call.
 */
uint
xfs_ilock_data_map_shared(
	struct xfs_inode	*ip)
{
	uint			lock_mode = XFS_ILOCK_SHARED;

	if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE &&
	    (ip->i_df.if_flags & XFS_IFEXTENTS) == 0)
		lock_mode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lock_mode);
	return lock_mode;
}

uint
xfs_ilock_attr_map_shared(
	struct xfs_inode	*ip)
{
	uint			lock_mode = XFS_ILOCK_SHARED;

	if (ip->i_d.di_aformat == XFS_DINODE_FMT_BTREE &&
	    (ip->i_afp->if_flags & XFS_IFEXTENTS) == 0)
		lock_mode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lock_mode);
	return lock_mode;
}

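/*
 * Usage sketch (editor's illustration, not part of the original file):
 * callers that only read the extent list hand the returned lock mode back
 * to xfs_iunlock(), since the wrapper may have taken either the shared or
 * the exclusive ilock:
 *
 *	uint	lock_mode;
 *
 *	lock_mode = xfs_ilock_data_map_shared(ip);
 *	(read the data fork extent list here)
 *	xfs_iunlock(ip, lock_mode);
 */
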
/*
 * In addition to i_rwsem in the VFS inode, the xfs inode contains 2
 * multi-reader locks: i_mmap_lock and the i_lock.  This routine allows
 * various combinations of the locks to be obtained.
 *
 * The 3 locks should always be ordered so that the IO lock is obtained first,
 * the mmap lock second and the ilock last in order to prevent deadlock.
 *
 * Basic locking order:
 *
 * i_rwsem -> i_mmap_lock -> page_lock -> i_ilock
 *
 * mmap_sem locking order:
 *
 * i_rwsem -> page lock -> mmap_sem
 * mmap_sem -> i_mmap_lock -> page_lock
 *
 * The difference in mmap_sem locking order means that we cannot hold the
 * i_mmap_lock over syscall based read(2)/write(2) based IO. These IO paths can
 * fault in pages during copy in/out (for buffered IO) or require the mmap_sem
 * in get_user_pages() to map the user pages into the kernel address space for
 * direct IO. Similarly the i_rwsem cannot be taken inside a page fault because
 * page faults already hold the mmap_sem.
 *
 * Hence to serialise fully against both syscall and mmap based IO, we need to
 * take both the i_rwsem and the i_mmap_lock. These locks should *only* be both
 * taken in places where we need to invalidate the page cache in a race
 * free manner (e.g. truncate, hole punch and other extent manipulation
 * functions).
 */
void
xfs_ilock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	trace_xfs_ilock(ip, lock_flags, _RET_IP_);

	/*
	 * You can't set both SHARED and EXCL for the same lock, and only
	 * the XFS_IOLOCK_*, XFS_MMAPLOCK_* and XFS_ILOCK_* shared/exclusive
	 * values are valid to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		down_write_nested(&VFS_I(ip)->i_rwsem,
				  XFS_IOLOCK_DEP(lock_flags));
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		down_read_nested(&VFS_I(ip)->i_rwsem,
				 XFS_IOLOCK_DEP(lock_flags));
	}

	if (lock_flags & XFS_MMAPLOCK_EXCL)
		mrupdate_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));
	else if (lock_flags & XFS_MMAPLOCK_SHARED)
		mraccess_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));

	if (lock_flags & XFS_ILOCK_EXCL)
		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
	else if (lock_flags & XFS_ILOCK_SHARED)
		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
}

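/*
 * Editor's sketch (hypothetical caller, not from this file): an extent
 * manipulation path that must invalidate the page cache race-free can take
 * all three locks in a single call; xfs_ilock() acquires them in the order
 * documented above:
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL);
 *	(truncate/hole punch work goes here)
 *	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL);
 */
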
/*
 * This is just like xfs_ilock(), except that the caller
 * is guaranteed not to sleep.  It returns 1 if it gets
 * the requested locks and 0 otherwise.  If the IO lock is
 * obtained but the inode lock cannot be, then the IO lock
 * is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks to be
 *	 locked.  See the comment for xfs_ilock() for a list
 *	 of valid values.
 */
int
xfs_ilock_nowait(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);

	/*
	 * You can't set both SHARED and EXCL for the same lock, and only
	 * the XFS_IOLOCK_*, XFS_MMAPLOCK_* and XFS_ILOCK_* shared/exclusive
	 * values are valid to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		if (!down_write_trylock(&VFS_I(ip)->i_rwsem))
			goto out;
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		if (!down_read_trylock(&VFS_I(ip)->i_rwsem))
			goto out;
	}

	if (lock_flags & XFS_MMAPLOCK_EXCL) {
		if (!mrtryupdate(&ip->i_mmaplock))
			goto out_undo_iolock;
	} else if (lock_flags & XFS_MMAPLOCK_SHARED) {
		if (!mrtryaccess(&ip->i_mmaplock))
			goto out_undo_iolock;
	}

	if (lock_flags & XFS_ILOCK_EXCL) {
		if (!mrtryupdate(&ip->i_lock))
			goto out_undo_mmaplock;
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		if (!mrtryaccess(&ip->i_lock))
			goto out_undo_mmaplock;
	}
	return 1;

out_undo_mmaplock:
	if (lock_flags & XFS_MMAPLOCK_EXCL)
		mrunlock_excl(&ip->i_mmaplock);
	else if (lock_flags & XFS_MMAPLOCK_SHARED)
		mrunlock_shared(&ip->i_mmaplock);
out_undo_iolock:
	if (lock_flags & XFS_IOLOCK_EXCL)
		up_write(&VFS_I(ip)->i_rwsem);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		up_read(&VFS_I(ip)->i_rwsem);
out:
	return 0;
}

/*
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates the inode's locks to be
 *	 unlocked.  See the comment for xfs_ilock() for a list
 *	 of valid values for this parameter.
 */
void
xfs_iunlock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock, and only
	 * the XFS_IOLOCK_*, XFS_MMAPLOCK_* and XFS_ILOCK_* shared/exclusive
	 * values are valid to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
	ASSERT(lock_flags != 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		up_write(&VFS_I(ip)->i_rwsem);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		up_read(&VFS_I(ip)->i_rwsem);

	if (lock_flags & XFS_MMAPLOCK_EXCL)
		mrunlock_excl(&ip->i_mmaplock);
	else if (lock_flags & XFS_MMAPLOCK_SHARED)
		mrunlock_shared(&ip->i_mmaplock);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrunlock_excl(&ip->i_lock);
	else if (lock_flags & XFS_ILOCK_SHARED)
		mrunlock_shared(&ip->i_lock);

	trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
}

/*
 * give up write locks.  the i/o lock cannot be held nested
 * if it is being demoted.
 */
void
xfs_ilock_demote(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL));
	ASSERT((lock_flags &
		~(XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrdemote(&ip->i_lock);
	if (lock_flags & XFS_MMAPLOCK_EXCL)
		mrdemote(&ip->i_mmaplock);
	if (lock_flags & XFS_IOLOCK_EXCL)
		downgrade_write(&VFS_I(ip)->i_rwsem);

	trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
}

#if defined(DEBUG) || defined(XFS_WARN)
int
xfs_isilocked(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
		if (!(lock_flags & XFS_ILOCK_SHARED))
			return !!ip->i_lock.mr_writer;
		return rwsem_is_locked(&ip->i_lock.mr_lock);
	}

	if (lock_flags & (XFS_MMAPLOCK_EXCL|XFS_MMAPLOCK_SHARED)) {
		if (!(lock_flags & XFS_MMAPLOCK_SHARED))
			return !!ip->i_mmaplock.mr_writer;
		return rwsem_is_locked(&ip->i_mmaplock.mr_lock);
	}

	if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
		if (!(lock_flags & XFS_IOLOCK_SHARED))
			return !debug_locks ||
				lockdep_is_held_type(&VFS_I(ip)->i_rwsem, 0);
		return rwsem_is_locked(&VFS_I(ip)->i_rwsem);
	}

	ASSERT(0);
	return 0;
}
#endif

#ifdef DEBUG
int xfs_locked_n;
int xfs_small_retries;
int xfs_middle_retries;
int xfs_lots_retries;
int xfs_lock_delays;
#endif

/*
 * xfs_lockdep_subclass_ok() is only used in an ASSERT, so is only called when
 * DEBUG or XFS_WARN is set. And MAX_LOCKDEP_SUBCLASSES is then only defined
 * when CONFIG_LOCKDEP is set. Hence the complex define below to avoid build
 * errors and warnings.
 */
#if (defined(DEBUG) || defined(XFS_WARN)) && defined(CONFIG_LOCKDEP)
static bool
xfs_lockdep_subclass_ok(
	int subclass)
{
	return subclass < MAX_LOCKDEP_SUBCLASSES;
}
#else
#define xfs_lockdep_subclass_ok(subclass)	(true)
#endif

/*
 * Bump the subclass so xfs_lock_inodes() acquires each lock with a different
 * value. This can be called for any type of inode lock combination, including
 * parent locking. Care must be taken to ensure we don't overrun the subclass
 * storage fields in the class mask we build.
 */
static inline int
xfs_lock_inumorder(int lock_mode, int subclass)
{
	int	class = 0;

	ASSERT(!(lock_mode & (XFS_ILOCK_PARENT | XFS_ILOCK_RTBITMAP |
			      XFS_ILOCK_RTSUM)));
	ASSERT(xfs_lockdep_subclass_ok(subclass));

	if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
		ASSERT(subclass <= XFS_IOLOCK_MAX_SUBCLASS);
		class += subclass << XFS_IOLOCK_SHIFT;
	}

	if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) {
		ASSERT(subclass <= XFS_MMAPLOCK_MAX_SUBCLASS);
		class += subclass << XFS_MMAPLOCK_SHIFT;
	}

	if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) {
		ASSERT(subclass <= XFS_ILOCK_MAX_SUBCLASS);
		class += subclass << XFS_ILOCK_SHIFT;
	}

	return (lock_mode & ~XFS_LOCK_SUBCLASS_MASK) | class;
}

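/*
 * Editor's example (derived from the code above, not from the original
 * source): locking the third inode of a set (subclass 2) with
 * XFS_ILOCK_EXCL gives
 *
 *	xfs_lock_inumorder(XFS_ILOCK_EXCL, 2)
 *		== XFS_ILOCK_EXCL | (2 << XFS_ILOCK_SHIFT)
 *
 * i.e. the same lock type with a distinct lockdep subclass encoded into
 * the flags word, which xfs_ilock() later extracts via XFS_ILOCK_DEP().
 */
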
/*
 * The following routine will lock n inodes in exclusive mode.  We assume the
 * caller calls us with the inodes in i_ino order.
 *
 * We need to detect deadlock where an inode that we lock is in the AIL and we
 * start waiting for another inode that is locked by a thread in a long running
 * transaction (such as truncate). This can result in deadlock since the long
 * running trans might need to wait for the inode we just locked in order to
 * push the tail and free space in the log.
 *
 * xfs_lock_inodes() can only be used to lock one type of lock at a time -
 * the iolock, the mmaplock or the ilock, but not more than one at a time. If we
 * lock more than one at a time, lockdep will report false positives saying we
 * have violated locking orders.
 */
static void
xfs_lock_inodes(
	xfs_inode_t	**ips,
	int		inodes,
	uint		lock_mode)
{
	int		attempts = 0, i, j, try_lock;
	xfs_log_item_t	*lp;

	/*
	 * Currently supports between 2 and 5 inodes with exclusive locking. We
	 * support an arbitrary depth of locking here, but absolute limits on
	 * inodes depend on the type of locking and the limits placed by
	 * lockdep annotations in xfs_lock_inumorder.  These are all checked by
	 * the asserts.
	 */
	ASSERT(ips && inodes >= 2 && inodes <= 5);
	ASSERT(lock_mode & (XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL |
			    XFS_ILOCK_EXCL));
	ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED | XFS_MMAPLOCK_SHARED |
			      XFS_ILOCK_SHARED)));
	ASSERT(!(lock_mode & XFS_MMAPLOCK_EXCL) ||
		inodes <= XFS_MMAPLOCK_MAX_SUBCLASS + 1);
	ASSERT(!(lock_mode & XFS_ILOCK_EXCL) ||
		inodes <= XFS_ILOCK_MAX_SUBCLASS + 1);

	if (lock_mode & XFS_IOLOCK_EXCL) {
		ASSERT(!(lock_mode & (XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL)));
	} else if (lock_mode & XFS_MMAPLOCK_EXCL)
		ASSERT(!(lock_mode & XFS_ILOCK_EXCL));

	try_lock = 0;
	i = 0;
again:
	for (; i < inodes; i++) {
		ASSERT(ips[i]);

		if (i && (ips[i] == ips[i - 1]))	/* Already locked */
			continue;

		/*
		 * If try_lock is not set yet, make sure all locked inodes are
		 * not in the AIL.  If any are, set try_lock to be used later.
		 */
		if (!try_lock) {
			for (j = (i - 1); j >= 0 && !try_lock; j--) {
				lp = (xfs_log_item_t *)ips[j]->i_itemp;
				if (lp && (lp->li_flags & XFS_LI_IN_AIL))
					try_lock++;
			}
		}

		/*
		 * If any of the previous locks we have locked is in the AIL,
		 * we must TRY to get the second and subsequent locks. If
		 * we can't get any, we must release all we have
		 * and try again.
		 */
		if (!try_lock) {
			xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i));
			continue;
		}

		/* try_lock means we have an inode locked that is in the AIL. */
		ASSERT(i != 0);
		if (xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i)))
			continue;

		/*
		 * Unlock all previous guys and try again.  xfs_iunlock will try
		 * to push the tail if the inode is in the AIL.
		 */
		attempts++;
		for (j = i - 1; j >= 0; j--) {
			/*
			 * Check to see if we've already unlocked this one.  Not
			 * the first one going back, and the inode ptr is the
			 * same.
			 */
			if (j != (i - 1) && ips[j] == ips[j + 1])
				continue;

			xfs_iunlock(ips[j], lock_mode);
		}

		if ((attempts % 5) == 0) {
			delay(1); /* Don't just spin the CPU */
#ifdef DEBUG
			xfs_lock_delays++;
#endif
		}
		i = 0;
		try_lock = 0;
		goto again;
	}

#ifdef DEBUG
	if (attempts) {
		if (attempts < 5) xfs_small_retries++;
		else if (attempts < 100) xfs_middle_retries++;
		else xfs_lots_retries++;
	} else {
		xfs_locked_n++;
	}
#endif
}

/*
 * xfs_lock_two_inodes() can only be used to lock one type of lock at a time -
 * the iolock, the mmaplock or the ilock, but not more than one at a time. If we
 * lock more than one at a time, lockdep will report false positives saying we
 * have violated locking orders.
 */
void
xfs_lock_two_inodes(
	xfs_inode_t		*ip0,
	xfs_inode_t		*ip1,
	uint			lock_mode)
{
	xfs_inode_t		*temp;
	int			attempts = 0;
	xfs_log_item_t		*lp;

	ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
	if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL))
		ASSERT(!(lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));

	ASSERT(ip0->i_ino != ip1->i_ino);

	if (ip0->i_ino > ip1->i_ino) {
		temp = ip0;
		ip0 = ip1;
		ip1 = temp;
	}

 again:
	xfs_ilock(ip0, xfs_lock_inumorder(lock_mode, 0));

	/*
	 * If the first lock we have locked is in the AIL, we must TRY to get
	 * the second lock. If we can't get it, we must release the first one
	 * and try again.
	 */
	lp = (xfs_log_item_t *)ip0->i_itemp;
	if (lp && (lp->li_flags & XFS_LI_IN_AIL)) {
		if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(lock_mode, 1))) {
			xfs_iunlock(ip0, lock_mode);
			if ((++attempts % 5) == 0)
				delay(1); /* Don't just spin the CPU */
			goto again;
		}
	} else {
		xfs_ilock(ip1, xfs_lock_inumorder(lock_mode, 1));
	}
}

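/*
 * Usage sketch (editor's illustration of a hypothetical caller, not from
 * this file): operations on an inode pair, such as link or rename paths,
 * lock both inodes through this helper so the i_ino ordering and the AIL
 * backoff above are handled in one place:
 *
 *	xfs_lock_two_inodes(dp, ip, XFS_ILOCK_EXCL);
 *	(joint update under one transaction goes here)
 *	xfs_iunlock(dp, XFS_ILOCK_EXCL);
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 */
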
void
__xfs_iflock(
	struct xfs_inode	*ip)
{
	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IFLOCK_BIT);
	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IFLOCK_BIT);

	do {
		prepare_to_wait_exclusive(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
		if (xfs_isiflocked(ip))
			io_schedule();
	} while (!xfs_iflock_nowait(ip));

	finish_wait(wq, &wait.wq_entry);
}

STATIC uint
_xfs_dic2xflags(
	uint16_t		di_flags,
	uint64_t		di_flags2,
	bool			has_attr)
{
	uint			flags = 0;

	if (di_flags & XFS_DIFLAG_ANY) {
		if (di_flags & XFS_DIFLAG_REALTIME)
			flags |= FS_XFLAG_REALTIME;
		if (di_flags & XFS_DIFLAG_PREALLOC)
			flags |= FS_XFLAG_PREALLOC;
		if (di_flags & XFS_DIFLAG_IMMUTABLE)
			flags |= FS_XFLAG_IMMUTABLE;
		if (di_flags & XFS_DIFLAG_APPEND)
			flags |= FS_XFLAG_APPEND;
		if (di_flags & XFS_DIFLAG_SYNC)
			flags |= FS_XFLAG_SYNC;
		if (di_flags & XFS_DIFLAG_NOATIME)
			flags |= FS_XFLAG_NOATIME;
		if (di_flags & XFS_DIFLAG_NODUMP)
			flags |= FS_XFLAG_NODUMP;
		if (di_flags & XFS_DIFLAG_RTINHERIT)
			flags |= FS_XFLAG_RTINHERIT;
		if (di_flags & XFS_DIFLAG_PROJINHERIT)
			flags |= FS_XFLAG_PROJINHERIT;
		if (di_flags & XFS_DIFLAG_NOSYMLINKS)
			flags |= FS_XFLAG_NOSYMLINKS;
		if (di_flags & XFS_DIFLAG_EXTSIZE)
			flags |= FS_XFLAG_EXTSIZE;
		if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
			flags |= FS_XFLAG_EXTSZINHERIT;
		if (di_flags & XFS_DIFLAG_NODEFRAG)
			flags |= FS_XFLAG_NODEFRAG;
		if (di_flags & XFS_DIFLAG_FILESTREAM)
			flags |= FS_XFLAG_FILESTREAM;
	}

	if (di_flags2 & XFS_DIFLAG2_ANY) {
		if (di_flags2 & XFS_DIFLAG2_DAX)
			flags |= FS_XFLAG_DAX;
		if (di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
			flags |= FS_XFLAG_COWEXTSIZE;
	}

	if (has_attr)
		flags |= FS_XFLAG_HASATTR;

	return flags;
}

uint
xfs_ip2xflags(
	struct xfs_inode	*ip)
{
	struct xfs_icdinode	*dic = &ip->i_d;

	return _xfs_dic2xflags(dic->di_flags, dic->di_flags2, XFS_IFORK_Q(ip));
}

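/*
 * Worked example (editor's sketch, not from the original source): an inode
 * with XFS_DIFLAG_REALTIME and XFS_DIFLAG_EXTSIZE set in di_flags and an
 * attribute fork present translates to
 * FS_XFLAG_REALTIME | FS_XFLAG_EXTSIZE | FS_XFLAG_HASATTR, the generic
 * flag encoding reported to userspace (e.g. by the FS_IOC_FSGETXATTR
 * ioctl).
 */
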
/*
 * Looks up an inode from "name". If ci_name is not NULL, then a CI match
 * is allowed, otherwise it has to be an exact match. If a CI match is found,
 * ci_name->name will point to the actual name (caller must free) or
 * will be set to NULL if an exact match is found.
 */
int
xfs_lookup(
	xfs_inode_t		*dp,
	struct xfs_name		*name,
	xfs_inode_t		**ipp,
	struct xfs_name		*ci_name)
{
	xfs_ino_t		inum;
	int			error;

	trace_xfs_lookup(dp, name);

	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
		return -EIO;

	error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
	if (error)
		goto out_unlock;

	error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
	if (error)
		goto out_free_name;

	return 0;

out_free_name:
	if (ci_name)
		kmem_free(ci_name->name);
out_unlock:
	*ipp = NULL;
	return error;
}

/*
 * Allocate an inode on disk and return a copy of its in-core version.
 * The in-core inode is locked exclusively.  Set mode, nlink, and rdev
 * appropriately within the inode.  The uid and gid for the inode are
 * set according to the contents of the given cred structure.
 *
 * Use xfs_dialloc() to allocate the on-disk inode. If xfs_dialloc()
 * has a free inode available, call xfs_iget() to obtain the in-core
 * version of the allocated inode.  Finally, fill in the inode and
 * log its initial contents.  In this case, ialloc_context would be
 * set to NULL.
 *
 * If xfs_dialloc() does not have an available inode, it will replenish
 * its supply by doing an allocation. Since we can only do one
 * allocation within a transaction without deadlocks, we must commit
 * the current transaction before returning the inode itself.
 * In this case, therefore, we will set ialloc_context and return.
 * The caller should then commit the current transaction, start a new
 * transaction, and call xfs_ialloc() again to actually get the inode.
 *
 * To ensure that some other process does not grab the inode that
 * was allocated during the first call to xfs_ialloc(), this routine
 * also returns the [locked] bp pointing to the head of the freelist
 * as ialloc_context.  The caller should hold this buffer across
 * the commit and pass it back into this routine on the second call.
 *
 * If we are allocating quota inodes, we do not have a parent inode
 * to attach to or associate with (i.e. pip == NULL) because they
 * are not linked into the directory structure - they are attached
 * directly to the superblock - and so have no parent.
 */
static int
xfs_ialloc(
	xfs_trans_t	*tp,
	xfs_inode_t	*pip,
	umode_t		mode,
	xfs_nlink_t	nlink,
	xfs_dev_t	rdev,
	prid_t		prid,
	int		okalloc,
	xfs_buf_t	**ialloc_context,
	xfs_inode_t	**ipp)
{
	struct xfs_mount *mp = tp->t_mountp;
	xfs_ino_t	ino;
	xfs_inode_t	*ip;
	uint		flags;
	int		error;
	struct timespec	tv;
	struct inode	*inode;

	/*
	 * Call the space management code to pick
	 * the on-disk inode to be allocated.
	 */
	error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode, okalloc,
			    ialloc_context, &ino);
	if (error)
		return error;
	if (*ialloc_context || ino == NULLFSINO) {
		*ipp = NULL;
		return 0;
	}
	ASSERT(*ialloc_context == NULL);

	/*
	 * Get the in-core inode with the lock held exclusively.
	 * This is because we're setting fields here we need
	 * to prevent others from looking at until we're done.
	 */
	error = xfs_iget(mp, tp, ino, XFS_IGET_CREATE,
			 XFS_ILOCK_EXCL, &ip);
	if (error)
		return error;
	ASSERT(ip != NULL);
	inode = VFS_I(ip);

	/*
	 * We always convert v1 inodes to v2 now - we only support filesystems
	 * with >= v2 inode capability, so there is no reason for ever leaving
	 * an inode in v1 format.
	 */
	if (ip->i_d.di_version == 1)
		ip->i_d.di_version = 2;

	inode->i_mode = mode;
	set_nlink(inode, nlink);
	ip->i_d.di_uid = xfs_kuid_to_uid(current_fsuid());
	ip->i_d.di_gid = xfs_kgid_to_gid(current_fsgid());
	xfs_set_projid(ip, prid);

	if (pip && XFS_INHERIT_GID(pip)) {
		ip->i_d.di_gid = pip->i_d.di_gid;
		if ((VFS_I(pip)->i_mode & S_ISGID) && S_ISDIR(mode))
			inode->i_mode |= S_ISGID;
	}

	/*
	 * If the group ID of the new file does not match the effective group
	 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
	 * (and only if the irix_sgid_inherit compatibility variable is set).
	 */
	if ((irix_sgid_inherit) &&
	    (inode->i_mode & S_ISGID) &&
	    (!in_group_p(xfs_gid_to_kgid(ip->i_d.di_gid))))
		inode->i_mode &= ~S_ISGID;

	ip->i_d.di_size = 0;
	ip->i_d.di_nextents = 0;
	ASSERT(ip->i_d.di_nblocks == 0);

	tv = current_time(inode);
	inode->i_mtime = tv;
	inode->i_atime = tv;
	inode->i_ctime = tv;

	ip->i_d.di_extsize = 0;
	ip->i_d.di_dmevmask = 0;
	ip->i_d.di_dmstate = 0;
	ip->i_d.di_flags = 0;

	if (ip->i_d.di_version == 3) {
		inode->i_version = 1;
		ip->i_d.di_flags2 = 0;
		ip->i_d.di_cowextsize = 0;
		ip->i_d.di_crtime.t_sec = (int32_t)tv.tv_sec;
		ip->i_d.di_crtime.t_nsec = (int32_t)tv.tv_nsec;
	}

	flags = XFS_ILOG_CORE;
	switch (mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		ip->i_d.di_format = XFS_DINODE_FMT_DEV;
		ip->i_df.if_u2.if_rdev = rdev;
		ip->i_df.if_flags = 0;
		flags |= XFS_ILOG_DEV;
		break;
	case S_IFREG:
	case S_IFDIR:
		if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
			uint		di_flags = 0;

			if (S_ISDIR(mode)) {
				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
					di_flags |= XFS_DIFLAG_RTINHERIT;
				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
					di_flags |= XFS_DIFLAG_EXTSZINHERIT;
					ip->i_d.di_extsize = pip->i_d.di_extsize;
				}
				if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
					di_flags |= XFS_DIFLAG_PROJINHERIT;
			} else if (S_ISREG(mode)) {
				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
					di_flags |= XFS_DIFLAG_REALTIME;
				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
					di_flags |= XFS_DIFLAG_EXTSIZE;
					ip->i_d.di_extsize = pip->i_d.di_extsize;
				}
			}
			if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
			    xfs_inherit_noatime)
				di_flags |= XFS_DIFLAG_NOATIME;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) &&
			    xfs_inherit_nodump)
				di_flags |= XFS_DIFLAG_NODUMP;
			if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) &&
			    xfs_inherit_sync)
				di_flags |= XFS_DIFLAG_SYNC;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
			    xfs_inherit_nosymlinks)
				di_flags |= XFS_DIFLAG_NOSYMLINKS;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
			    xfs_inherit_nodefrag)
				di_flags |= XFS_DIFLAG_NODEFRAG;
			if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM)
				di_flags |= XFS_DIFLAG_FILESTREAM;

			ip->i_d.di_flags |= di_flags;
		}
		if (pip &&
		    (pip->i_d.di_flags2 & XFS_DIFLAG2_ANY) &&
		    pip->i_d.di_version == 3 &&
		    ip->i_d.di_version == 3) {
			uint64_t	di_flags2 = 0;

			if (pip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) {
				di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;
				ip->i_d.di_cowextsize = pip->i_d.di_cowextsize;
			}
			if (pip->i_d.di_flags2 & XFS_DIFLAG2_DAX)
				di_flags2 |= XFS_DIFLAG2_DAX;

			ip->i_d.di_flags2 |= di_flags2;
		}
		/* FALLTHROUGH */
	case S_IFLNK:
		ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
		ip->i_df.if_flags = XFS_IFEXTENTS;
		ip->i_df.if_bytes = ip->i_df.if_real_bytes = 0;
		ip->i_df.if_u1.if_extents = NULL;
		break;
	default:
		ASSERT(0);
	}
	/*
	 * Attribute fork settings for new inode.
	 */
	ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
	ip->i_d.di_anextents = 0;

	/*
	 * Log the new values stuffed into the inode.
	 */
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, flags);

	/* now that we have an i_mode we can setup the inode structure */
	xfs_setup_inode(ip);

	*ipp = ip;
	return 0;
}

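/*
 * Editor's sketch of the caller protocol described above (pseudo-code,
 * not a verbatim caller): when xfs_ialloc() hands back an ialloc_context,
 * the caller holds that buffer across a transaction roll and retries:
 *
 *	error = xfs_ialloc(tp, dp, mode, ..., &ialloc_context, &ip);
 *	if (!error && ialloc_context) {
 *		xfs_trans_bhold(tp, ialloc_context);
 *		error = xfs_trans_roll(&tp);
 *		xfs_trans_bjoin(tp, ialloc_context);
 *		error = xfs_ialloc(tp, dp, mode, ...,
 *				   &ialloc_context, &ip);
 *	}
 *
 * xfs_dir_ialloc() below implements exactly this dance, plus the quota
 * bookkeeping around the roll.
 */
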
/*
 * Allocates a new inode from disk and returns a pointer to the
 * incore copy. This routine will internally commit the current
 * transaction and allocate a new one if the Space Manager needed
 * to do an allocation to replenish the inode free-list.
 *
 * This routine is designed to be called from xfs_create and
 * xfs_create_dir.
 */
int
xfs_dir_ialloc(
	xfs_trans_t	**tpp,		/* input: current transaction;
					   output: may be a new transaction. */
	xfs_inode_t	*dp,		/* directory within which to allocate
					   the inode. */
	umode_t		mode,
	xfs_nlink_t	nlink,
	xfs_dev_t	rdev,
	prid_t		prid,		/* project id */
	int		okalloc,	/* ok to allocate new space */
	xfs_inode_t	**ipp,		/* pointer to inode; it will be
					   locked. */
	int		*committed)

{
	xfs_trans_t	*tp;
	xfs_inode_t	*ip;
	xfs_buf_t	*ialloc_context = NULL;
	int		code;
	void		*dqinfo;
	uint		tflags;

	tp = *tpp;
	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);

	/*
	 * xfs_ialloc will return a pointer to an incore inode if
	 * the Space Manager has an available inode on the free
	 * list. Otherwise, it will do an allocation and replenish
	 * the freelist.  Since we can only do one allocation per
	 * transaction without deadlocks, we will need to commit the
	 * current transaction and start a new one.  We will then
	 * need to call xfs_ialloc again to get the inode.
	 *
	 * If xfs_ialloc did an allocation to replenish the freelist,
	 * it returns the bp containing the head of the freelist as
	 * ialloc_context. We will hold a lock on it across the
	 * transaction commit so that no other process can steal
	 * the inode(s) that we've just allocated.
	 */
	code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, okalloc,
			  &ialloc_context, &ip);

	/*
	 * Return an error if we were unable to allocate a new inode.
	 * This should only happen if we run out of space on disk or
	 * encounter a disk error.
	 */
	if (code) {
		*ipp = NULL;
		return code;
	}
	if (!ialloc_context && !ip) {
		*ipp = NULL;
		return -ENOSPC;
	}

	/*
	 * If the AGI buffer is non-NULL, then we were unable to get an
	 * inode in one operation.  We need to commit the current
	 * transaction and call xfs_ialloc() again.  It is guaranteed
	 * to succeed the second time.
	 */
	if (ialloc_context) {
		/*
		 * Normally, xfs_trans_commit releases all the locks.
		 * We call bhold to hang on to the ialloc_context across
		 * the commit.  Holding this buffer prevents any other
		 * processes from doing any allocations in this
		 * allocation group.
		 */
		xfs_trans_bhold(tp, ialloc_context);

		/*
		 * We want the quota changes to be associated with the next
		 * transaction, NOT this one. So, detach the dqinfo from this
		 * and attach it to the next transaction.
		 */
		dqinfo = NULL;
		tflags = 0;
		if (tp->t_dqinfo) {
			dqinfo = (void *)tp->t_dqinfo;
			tp->t_dqinfo = NULL;
			tflags = tp->t_flags & XFS_TRANS_DQ_DIRTY;
			tp->t_flags &= ~(XFS_TRANS_DQ_DIRTY);
		}

		code = xfs_trans_roll(&tp);
		if (committed != NULL)
			*committed = 1;

		/*
		 * Re-attach the quota info that we detached from prev trx.
		 */
		if (dqinfo) {
			tp->t_dqinfo = dqinfo;
			tp->t_flags |= tflags;
		}

		if (code) {
			xfs_buf_relse(ialloc_context);
			*tpp = tp;
			*ipp = NULL;
			return code;
		}
		xfs_trans_bjoin(tp, ialloc_context);

		/*
		 * Call ialloc again. Since we've locked out all
		 * other allocations in this allocation group,
		 * this call should always succeed.
		 */
		code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid,
				  okalloc, &ialloc_context, &ip);

		/*
		 * If we get an error at this point, return to the caller
		 * so that the current transaction can be aborted.
		 */
		if (code) {
			*tpp = tp;
			*ipp = NULL;
			return code;
		}
		ASSERT(!ialloc_context && ip);

	} else {
		if (committed != NULL)
			*committed = 0;
	}

	*ipp = ip;
	*tpp = tp;

	return 0;
}

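/*
 * Editor's note with an illustrative call (mirroring xfs_create() below):
 * a new directory starts with link count 2 (its "." entry plus the entry
 * in the parent), a regular file with link count 1:
 *
 *	error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev,
 *			       prid, resblks > 0, &ip, NULL);
 */
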
/*
 * Decrement the link count on an inode & log the change.  If this causes the
 * link count to go to zero, move the inode to AGI unlinked list so that it can
 * be freed when the last active reference goes away via xfs_inactive().
 */
static int			/* error */
xfs_droplink(
	xfs_trans_t *tp,
	xfs_inode_t *ip)
{
	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);

	drop_nlink(VFS_I(ip));
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	if (VFS_I(ip)->i_nlink)
		return 0;

	return xfs_iunlink(tp, ip);
}

/*
 * Increment the link count on an inode & log the change.
 */
static int
xfs_bumplink(
	xfs_trans_t *tp,
	xfs_inode_t *ip)
{
	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);

	ASSERT(ip->i_d.di_version > 1);
	inc_nlink(VFS_I(ip));
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	return 0;
}

Dave Chinnerc24b5df2013-08-12 20:49:45 +10001145int
1146xfs_create(
1147 xfs_inode_t *dp,
1148 struct xfs_name *name,
1149 umode_t mode,
1150 xfs_dev_t rdev,
1151 xfs_inode_t **ipp)
1152{
1153 int is_dir = S_ISDIR(mode);
1154 struct xfs_mount *mp = dp->i_mount;
1155 struct xfs_inode *ip = NULL;
1156 struct xfs_trans *tp = NULL;
1157 int error;
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10001158 struct xfs_defer_ops dfops;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001159 xfs_fsblock_t first_block;
1160 bool unlock_dp_on_error = false;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001161 prid_t prid;
1162 struct xfs_dquot *udqp = NULL;
1163 struct xfs_dquot *gdqp = NULL;
1164 struct xfs_dquot *pdqp = NULL;
Brian Foster062647a2014-11-28 14:00:16 +11001165 struct xfs_trans_res *tres;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001166 uint resblks;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001167
1168 trace_xfs_create(dp, name);
1169
1170 if (XFS_FORCED_SHUTDOWN(mp))
Dave Chinner24513372014-06-25 14:58:08 +10001171 return -EIO;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001172
Zhi Yong Wu163467d2013-12-18 08:22:39 +08001173 prid = xfs_get_initial_prid(dp);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001174
1175 /*
1176 * Make sure that we have allocated dquot(s) on disk.
1177 */
Dwight Engen7aab1b22013-08-15 14:08:01 -04001178 error = xfs_qm_vop_dqalloc(dp, xfs_kuid_to_uid(current_fsuid()),
1179 xfs_kgid_to_gid(current_fsgid()), prid,
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001180 XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
1181 &udqp, &gdqp, &pdqp);
1182 if (error)
1183 return error;
1184
1185 if (is_dir) {
1186 rdev = 0;
1187 resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
Brian Foster062647a2014-11-28 14:00:16 +11001188 tres = &M_RES(mp)->tr_mkdir;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001189 } else {
1190 resblks = XFS_CREATE_SPACE_RES(mp, name->len);
Brian Foster062647a2014-11-28 14:00:16 +11001191 tres = &M_RES(mp)->tr_create;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001192 }
1193
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001194 /*
1195 * Initially assume that the file does not exist and
1196 * reserve the resources for that case. If that is not
1197 * the case we'll drop the one we have and get a more
1198 * appropriate transaction later.
1199 */
Christoph Hellwig253f4912016-04-06 09:19:55 +10001200 error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
Dave Chinner24513372014-06-25 14:58:08 +10001201 if (error == -ENOSPC) {
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001202 /* flush outstanding delalloc blocks and retry */
1203 xfs_flush_inodes(mp);
Christoph Hellwig253f4912016-04-06 09:19:55 +10001204 error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001205 }
Dave Chinner24513372014-06-25 14:58:08 +10001206 if (error == -ENOSPC) {
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001207 /* No space at all so try a "no-allocation" reservation */
1208 resblks = 0;
Christoph Hellwig253f4912016-04-06 09:19:55 +10001209 error = xfs_trans_alloc(mp, tres, 0, 0, 0, &tp);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001210 }
Christoph Hellwig4906e212015-06-04 13:47:56 +10001211 if (error)
Christoph Hellwig253f4912016-04-06 09:19:55 +10001212 goto out_release_inode;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001213
Christoph Hellwig65523212016-11-30 14:33:25 +11001214 xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001215 unlock_dp_on_error = true;
1216
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10001217 xfs_defer_init(&dfops, &first_block);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001218
1219 /*
1220 * Reserve disk quota and the inode.
1221 */
1222 error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
1223 pdqp, resblks, 1, 0);
1224 if (error)
1225 goto out_trans_cancel;
1226
Eric Sandeen94f3cad2014-09-09 11:57:52 +10001227 if (!resblks) {
1228 error = xfs_dir_canenter(tp, dp, name);
1229 if (error)
1230 goto out_trans_cancel;
1231 }
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001232
1233 /*
1234 * A newly created regular or special file just has one directory
1235 * entry pointing to them, but a directory also the "." entry
1236 * pointing to itself.
1237 */
1238 error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev,
Eric Sandeenf6106ef2016-01-11 11:34:01 +11001239 prid, resblks > 0, &ip, NULL);
Jan Karad6077aa2015-07-29 11:52:08 +10001240 if (error)
Christoph Hellwig4906e212015-06-04 13:47:56 +10001241 goto out_trans_cancel;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001242
1243 /*
1244 * Now we join the directory inode to the transaction. We do not do it
1245 * earlier because xfs_dir_ialloc might commit the previous transaction
1246 * (and release all the locks). An error from here on will result in
1247 * the transaction cancel unlocking dp so don't do it explicitly in the
1248 * error path.
1249 */
Christoph Hellwig65523212016-11-30 14:33:25 +11001250 xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001251 unlock_dp_on_error = false;
1252
1253 error = xfs_dir_createname(tp, dp, name, ip->i_ino,
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10001254 &first_block, &dfops, resblks ?
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001255 resblks - XFS_IALLOC_SPACE_RES(mp) : 0);
1256 if (error) {
Dave Chinner24513372014-06-25 14:58:08 +10001257 ASSERT(error != -ENOSPC);
Christoph Hellwig4906e212015-06-04 13:47:56 +10001258 goto out_trans_cancel;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001259 }
1260 xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1261 xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
1262
1263 if (is_dir) {
1264 error = xfs_dir_init(tp, ip, dp);
1265 if (error)
1266 goto out_bmap_cancel;
1267
1268 error = xfs_bumplink(tp, dp);
1269 if (error)
1270 goto out_bmap_cancel;
1271 }
1272
1273 /*
1274 * If this is a synchronous mount, make sure that the
1275 * create transaction goes to disk before returning to
1276 * the user.
1277 */
1278 if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
1279 xfs_trans_set_sync(tp);
1280
1281 /*
1282 * Attach the dquot(s) to the inodes and modify them incore.
1283	 * The ids of the inode couldn't have changed since the new
1284 * inode has been locked ever since it was created.
1285 */
1286 xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
1287
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10001288 error = xfs_defer_finish(&tp, &dfops, NULL);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001289 if (error)
1290 goto out_bmap_cancel;
1291
Christoph Hellwig70393312015-06-04 13:48:08 +10001292 error = xfs_trans_commit(tp);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001293 if (error)
1294 goto out_release_inode;
1295
1296 xfs_qm_dqrele(udqp);
1297 xfs_qm_dqrele(gdqp);
1298 xfs_qm_dqrele(pdqp);
1299
1300 *ipp = ip;
1301 return 0;
1302
1303 out_bmap_cancel:
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10001304 xfs_defer_cancel(&dfops);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001305 out_trans_cancel:
Christoph Hellwig4906e212015-06-04 13:47:56 +10001306 xfs_trans_cancel(tp);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001307 out_release_inode:
1308 /*
Dave Chinner58c90472015-02-23 22:38:08 +11001309 * Wait until after the current transaction is aborted to finish the
1310 * setup of the inode and release the inode. This prevents recursive
1311 * transactions and deadlocks from xfs_inactive.
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001312 */
Dave Chinner58c90472015-02-23 22:38:08 +11001313 if (ip) {
1314 xfs_finish_inode_setup(ip);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001315 IRELE(ip);
Dave Chinner58c90472015-02-23 22:38:08 +11001316 }
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001317
1318 xfs_qm_dqrele(udqp);
1319 xfs_qm_dqrele(gdqp);
1320 xfs_qm_dqrele(pdqp);
1321
1322 if (unlock_dp_on_error)
Christoph Hellwig65523212016-11-30 14:33:25 +11001323 xfs_iunlock(dp, XFS_ILOCK_EXCL);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001324 return error;
1325}
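
/*
 * Illustrative sketch, not part of the original file: the ENOSPC fallback
 * used by xfs_create() above, shown in isolation. The helper name
 * xfs_example_create_trans_alloc is hypothetical; xfs_trans_alloc() and
 * xfs_flush_inodes() are the interfaces the real code calls.
 */
static int
xfs_example_create_trans_alloc(
	struct xfs_mount	*mp,
	struct xfs_trans_res	*tres,
	uint			*resblks,
	struct xfs_trans	**tpp)
{
	int			error;

	/* Assume the file does not exist and reserve blocks for that case. */
	error = xfs_trans_alloc(mp, tres, *resblks, 0, 0, tpp);
	if (error == -ENOSPC) {
		/* Flushing outstanding delalloc blocks may free up space. */
		xfs_flush_inodes(mp);
		error = xfs_trans_alloc(mp, tres, *resblks, 0, 0, tpp);
	}
	if (error == -ENOSPC) {
		/*
		 * No space at all: fall back to a "no-allocation"
		 * reservation. Clearing *resblks tells the caller to
		 * verify free directory space with xfs_dir_canenter().
		 */
		*resblks = 0;
		error = xfs_trans_alloc(mp, tres, 0, 0, 0, tpp);
	}
	return error;
}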
1326
1327int
Zhi Yong Wu99b64362013-12-18 08:22:40 +08001328xfs_create_tmpfile(
1329 struct xfs_inode *dp,
1330 struct dentry *dentry,
Brian Foster330033d2014-04-17 08:15:30 +10001331 umode_t mode,
1332 struct xfs_inode **ipp)
Zhi Yong Wu99b64362013-12-18 08:22:40 +08001333{
1334 struct xfs_mount *mp = dp->i_mount;
1335 struct xfs_inode *ip = NULL;
1336 struct xfs_trans *tp = NULL;
1337 int error;
Zhi Yong Wu99b64362013-12-18 08:22:40 +08001338 prid_t prid;
1339 struct xfs_dquot *udqp = NULL;
1340 struct xfs_dquot *gdqp = NULL;
1341 struct xfs_dquot *pdqp = NULL;
1342 struct xfs_trans_res *tres;
1343 uint resblks;
1344
1345 if (XFS_FORCED_SHUTDOWN(mp))
Dave Chinner24513372014-06-25 14:58:08 +10001346 return -EIO;
Zhi Yong Wu99b64362013-12-18 08:22:40 +08001347
1348 prid = xfs_get_initial_prid(dp);
1349
1350 /*
1351 * Make sure that we have allocated dquot(s) on disk.
1352 */
1353 error = xfs_qm_vop_dqalloc(dp, xfs_kuid_to_uid(current_fsuid()),
1354 xfs_kgid_to_gid(current_fsgid()), prid,
1355 XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
1356 &udqp, &gdqp, &pdqp);
1357 if (error)
1358 return error;
1359
1360 resblks = XFS_IALLOC_SPACE_RES(mp);
Zhi Yong Wu99b64362013-12-18 08:22:40 +08001361 tres = &M_RES(mp)->tr_create_tmpfile;
Christoph Hellwig253f4912016-04-06 09:19:55 +10001362
1363 error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
Dave Chinner24513372014-06-25 14:58:08 +10001364 if (error == -ENOSPC) {
Zhi Yong Wu99b64362013-12-18 08:22:40 +08001365 /* No space at all so try a "no-allocation" reservation */
1366 resblks = 0;
Christoph Hellwig253f4912016-04-06 09:19:55 +10001367 error = xfs_trans_alloc(mp, tres, 0, 0, 0, &tp);
Zhi Yong Wu99b64362013-12-18 08:22:40 +08001368 }
Christoph Hellwig4906e212015-06-04 13:47:56 +10001369 if (error)
Christoph Hellwig253f4912016-04-06 09:19:55 +10001370 goto out_release_inode;
Zhi Yong Wu99b64362013-12-18 08:22:40 +08001371
1372 error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
1373 pdqp, resblks, 1, 0);
1374 if (error)
1375 goto out_trans_cancel;
1376
1377 error = xfs_dir_ialloc(&tp, dp, mode, 1, 0,
1378 prid, resblks > 0, &ip, NULL);
Jan Karad6077aa2015-07-29 11:52:08 +10001379 if (error)
Christoph Hellwig4906e212015-06-04 13:47:56 +10001380 goto out_trans_cancel;
Zhi Yong Wu99b64362013-12-18 08:22:40 +08001381
1382 if (mp->m_flags & XFS_MOUNT_WSYNC)
1383 xfs_trans_set_sync(tp);
1384
1385 /*
1386 * Attach the dquot(s) to the inodes and modify them incore.
1387	 * The ids of the inode couldn't have changed since the new
1388 * inode has been locked ever since it was created.
1389 */
1390 xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
1391
Zhi Yong Wu99b64362013-12-18 08:22:40 +08001392 error = xfs_iunlink(tp, ip);
1393 if (error)
Christoph Hellwig4906e212015-06-04 13:47:56 +10001394 goto out_trans_cancel;
Zhi Yong Wu99b64362013-12-18 08:22:40 +08001395
Christoph Hellwig70393312015-06-04 13:48:08 +10001396 error = xfs_trans_commit(tp);
Zhi Yong Wu99b64362013-12-18 08:22:40 +08001397 if (error)
1398 goto out_release_inode;
1399
1400 xfs_qm_dqrele(udqp);
1401 xfs_qm_dqrele(gdqp);
1402 xfs_qm_dqrele(pdqp);
1403
Brian Foster330033d2014-04-17 08:15:30 +10001404 *ipp = ip;
Zhi Yong Wu99b64362013-12-18 08:22:40 +08001405 return 0;
1406
Zhi Yong Wu99b64362013-12-18 08:22:40 +08001407 out_trans_cancel:
Christoph Hellwig4906e212015-06-04 13:47:56 +10001408 xfs_trans_cancel(tp);
Zhi Yong Wu99b64362013-12-18 08:22:40 +08001409 out_release_inode:
1410 /*
Dave Chinner58c90472015-02-23 22:38:08 +11001411 * Wait until after the current transaction is aborted to finish the
1412 * setup of the inode and release the inode. This prevents recursive
1413 * transactions and deadlocks from xfs_inactive.
Zhi Yong Wu99b64362013-12-18 08:22:40 +08001414 */
Dave Chinner58c90472015-02-23 22:38:08 +11001415 if (ip) {
1416 xfs_finish_inode_setup(ip);
Zhi Yong Wu99b64362013-12-18 08:22:40 +08001417 IRELE(ip);
Dave Chinner58c90472015-02-23 22:38:08 +11001418 }
Zhi Yong Wu99b64362013-12-18 08:22:40 +08001419
1420 xfs_qm_dqrele(udqp);
1421 xfs_qm_dqrele(gdqp);
1422 xfs_qm_dqrele(pdqp);
1423
1424 return error;
1425}
1426
1427int
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001428xfs_link(
1429 xfs_inode_t *tdp,
1430 xfs_inode_t *sip,
1431 struct xfs_name *target_name)
1432{
1433 xfs_mount_t *mp = tdp->i_mount;
1434 xfs_trans_t *tp;
1435 int error;
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10001436 struct xfs_defer_ops dfops;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001437 xfs_fsblock_t first_block;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001438 int resblks;
1439
1440 trace_xfs_link(tdp, target_name);
1441
Dave Chinnerc19b3b052016-02-09 16:54:58 +11001442 ASSERT(!S_ISDIR(VFS_I(sip)->i_mode));
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001443
1444 if (XFS_FORCED_SHUTDOWN(mp))
Dave Chinner24513372014-06-25 14:58:08 +10001445 return -EIO;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001446
1447 error = xfs_qm_dqattach(sip, 0);
1448 if (error)
1449 goto std_return;
1450
1451 error = xfs_qm_dqattach(tdp, 0);
1452 if (error)
1453 goto std_return;
1454
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001455 resblks = XFS_LINK_SPACE_RES(mp, target_name->len);
Christoph Hellwig253f4912016-04-06 09:19:55 +10001456 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, resblks, 0, 0, &tp);
Dave Chinner24513372014-06-25 14:58:08 +10001457 if (error == -ENOSPC) {
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001458 resblks = 0;
Christoph Hellwig253f4912016-04-06 09:19:55 +10001459 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, 0, 0, 0, &tp);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001460 }
Christoph Hellwig4906e212015-06-04 13:47:56 +10001461 if (error)
Christoph Hellwig253f4912016-04-06 09:19:55 +10001462 goto std_return;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001463
1464 xfs_lock_two_inodes(sip, tdp, XFS_ILOCK_EXCL);
1465
1466 xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL);
Christoph Hellwig65523212016-11-30 14:33:25 +11001467 xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001468
1469 /*
1470 * If we are using project inheritance, we only allow hard link
1471 * creation in our tree when the project IDs are the same; else
1472 * the tree quota mechanism could be circumvented.
1473 */
1474 if (unlikely((tdp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
1475 (xfs_get_projid(tdp) != xfs_get_projid(sip)))) {
Dave Chinner24513372014-06-25 14:58:08 +10001476 error = -EXDEV;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001477 goto error_return;
1478 }
1479
Eric Sandeen94f3cad2014-09-09 11:57:52 +10001480 if (!resblks) {
1481 error = xfs_dir_canenter(tp, tdp, target_name);
1482 if (error)
1483 goto error_return;
1484 }
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001485
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10001486 xfs_defer_init(&dfops, &first_block);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001487
Dave Chinner54d7b5c2016-02-09 16:54:58 +11001488 /*
1489 * Handle initial link state of O_TMPFILE inode
1490 */
1491 if (VFS_I(sip)->i_nlink == 0) {
Zhi Yong Wuab297432013-12-18 08:22:41 +08001492 error = xfs_iunlink_remove(tp, sip);
1493 if (error)
Christoph Hellwig4906e212015-06-04 13:47:56 +10001494 goto error_return;
Zhi Yong Wuab297432013-12-18 08:22:41 +08001495 }
1496
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001497 error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino,
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10001498 &first_block, &dfops, resblks);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001499 if (error)
Christoph Hellwig4906e212015-06-04 13:47:56 +10001500 goto error_return;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001501 xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1502 xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);
1503
1504 error = xfs_bumplink(tp, sip);
1505 if (error)
Christoph Hellwig4906e212015-06-04 13:47:56 +10001506 goto error_return;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001507
1508 /*
1509 * If this is a synchronous mount, make sure that the
1510 * link transaction goes to disk before returning to
1511 * the user.
1512 */
Eric Sandeenf6106ef2016-01-11 11:34:01 +11001513 if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001514 xfs_trans_set_sync(tp);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001515
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10001516 error = xfs_defer_finish(&tp, &dfops, NULL);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001517 if (error) {
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10001518 xfs_defer_cancel(&dfops);
Christoph Hellwig4906e212015-06-04 13:47:56 +10001519 goto error_return;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001520 }
1521
Christoph Hellwig70393312015-06-04 13:48:08 +10001522 return xfs_trans_commit(tp);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001523
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001524 error_return:
Christoph Hellwig4906e212015-06-04 13:47:56 +10001525 xfs_trans_cancel(tp);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001526 std_return:
1527 return error;
1528}
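
/*
 * Illustrative sketch, not part of the original file: the project
 * inheritance test xfs_link() performs above, factored into a
 * hypothetical predicate. XFS_DIFLAG_PROJINHERIT and xfs_get_projid()
 * are the flag and accessor the real code uses.
 */
static bool
xfs_example_link_crosses_project(
	struct xfs_inode	*tdp,
	struct xfs_inode	*sip)
{
	/*
	 * A hard link crossing a project boundary would let blocks escape
	 * the tree quota, so the caller must refuse it with -EXDEV when
	 * the target directory inherits its project ID.
	 */
	return (tdp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
	       xfs_get_projid(tdp) != xfs_get_projid(sip);
}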
1529
Linus Torvalds1da177e2005-04-16 15:20:36 -07001530/*
Christoph Hellwig8f04c472011-07-08 14:34:34 +02001531 * Free up the underlying blocks past new_size. The new size must be smaller
1532	 * than the current size. This routine can be used for both the attribute and
1533	 * data forks, and does not modify the inode size, which is left to the caller.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001534 *
David Chinnerf6485052008-04-17 16:50:04 +10001535 * The transaction passed to this routine must have made a permanent log
1536 * reservation of at least XFS_ITRUNCATE_LOG_RES. This routine may commit the
1537 * given transaction and start new ones, so make sure everything involved in
1538	 * the transaction is tidy before calling here. A transaction will be
1539 * returned to the caller to be committed. The incoming transaction must
1540 * already include the inode, and both inode locks must be held exclusively.
1541 * The inode must also be "held" within the transaction. On return the inode
1542 * will be "held" within the returned transaction. This routine does NOT
1543 * require any disk space to be reserved for it within the transaction.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001544 *
David Chinnerf6485052008-04-17 16:50:04 +10001545 * If we get an error, we must return with the inode locked and linked into the
1546 * current transaction. This keeps things simple for the higher level code,
1547 * because it always knows that the inode is locked and held in the transaction
1548 * that returns to it whether errors occur or not. We don't mark the inode
1549 * dirty on error so that transactions can be easily aborted if possible.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001550 */
1551int
Christoph Hellwig8f04c472011-07-08 14:34:34 +02001552xfs_itruncate_extents(
1553 struct xfs_trans **tpp,
1554 struct xfs_inode *ip,
1555 int whichfork,
1556 xfs_fsize_t new_size)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001557{
Christoph Hellwig8f04c472011-07-08 14:34:34 +02001558 struct xfs_mount *mp = ip->i_mount;
1559 struct xfs_trans *tp = *tpp;
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10001560 struct xfs_defer_ops dfops;
Christoph Hellwig8f04c472011-07-08 14:34:34 +02001561 xfs_fsblock_t first_block;
1562 xfs_fileoff_t first_unmap_block;
1563 xfs_fileoff_t last_block;
1564 xfs_filblks_t unmap_len;
Christoph Hellwig8f04c472011-07-08 14:34:34 +02001565 int error = 0;
1566 int done = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001567
Christoph Hellwig0b561852012-07-04 11:13:31 -04001568 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1569 ASSERT(!atomic_read(&VFS_I(ip)->i_count) ||
1570 xfs_isilocked(ip, XFS_IOLOCK_EXCL));
Christoph Hellwigce7ae1512011-12-18 20:00:11 +00001571 ASSERT(new_size <= XFS_ISIZE(ip));
Christoph Hellwig8f04c472011-07-08 14:34:34 +02001572 ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001573 ASSERT(ip->i_itemp != NULL);
Christoph Hellwig898621d2010-06-24 11:36:58 +10001574 ASSERT(ip->i_itemp->ili_lock_flags == 0);
Christoph Hellwig8f04c472011-07-08 14:34:34 +02001575 ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001576
Christoph Hellwig673e8e52011-12-18 20:00:04 +00001577 trace_xfs_itruncate_extents_start(ip, new_size);
1578
Linus Torvalds1da177e2005-04-16 15:20:36 -07001579 /*
1580 * Since it is possible for space to become allocated beyond
1581 * the end of the file (in a crash where the space is allocated
1582 * but the inode size is not yet updated), simply remove any
1583 * blocks which show up between the new EOF and the maximum
1584 * possible file size. If the first block to be removed is
1585	 * beyond the maximum file size (i.e. it is the same as last_block),
1586 * then there is nothing to do.
1587 */
Christoph Hellwig8f04c472011-07-08 14:34:34 +02001588 first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
Dave Chinner32972382012-06-08 15:44:54 +10001589 last_block = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
Christoph Hellwig8f04c472011-07-08 14:34:34 +02001590 if (first_unmap_block == last_block)
1591 return 0;
1592
1593 ASSERT(first_unmap_block < last_block);
1594 unmap_len = last_block - first_unmap_block + 1;
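	/*
	 * Worked example (illustrative, assuming 4k filesystem blocks):
	 * for new_size = 6000, XFS_B_TO_FSB() rounds up so that
	 * first_unmap_block = 2. Blocks 0 and 1, which still back data
	 * below the new EOF, are preserved, and everything from block 2
	 * to the maximum file offset is unmapped by the loop below.
	 */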
Linus Torvalds1da177e2005-04-16 15:20:36 -07001595 while (!done) {
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10001596 xfs_defer_init(&dfops, &first_block);
Christoph Hellwig8f04c472011-07-08 14:34:34 +02001597 error = xfs_bunmapi(tp, ip,
Olaf Weber3e57ecf2006-06-09 14:48:12 +10001598 first_unmap_block, unmap_len,
Christoph Hellwig8f04c472011-07-08 14:34:34 +02001599 xfs_bmapi_aflag(whichfork),
Linus Torvalds1da177e2005-04-16 15:20:36 -07001600 XFS_ITRUNC_MAX_EXTENTS,
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10001601 &first_block, &dfops,
Christoph Hellwigb4e91812010-06-23 18:11:15 +10001602 &done);
Christoph Hellwig8f04c472011-07-08 14:34:34 +02001603 if (error)
1604 goto out_bmap_cancel;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001605
1606 /*
1607 * Duplicate the transaction that has the permanent
1608 * reservation and commit the old transaction.
1609 */
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10001610 error = xfs_defer_finish(&tp, &dfops, ip);
Christoph Hellwig8f04c472011-07-08 14:34:34 +02001611 if (error)
1612 goto out_bmap_cancel;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001613
Christoph Hellwig411350d2017-08-28 10:21:03 -07001614 error = xfs_trans_roll_inode(&tp, ip);
David Chinnerf6485052008-04-17 16:50:04 +10001615 if (error)
Christoph Hellwig8f04c472011-07-08 14:34:34 +02001616 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001617 }
Christoph Hellwig8f04c472011-07-08 14:34:34 +02001618
Darrick J. Wongaa8968f2016-10-03 09:11:38 -07001619 /* Remove all pending CoW reservations. */
1620 error = xfs_reflink_cancel_cow_blocks(ip, &tp, first_unmap_block,
Christoph Hellwig3802a342017-03-07 16:45:58 -08001621 last_block, true);
Darrick J. Wongaa8968f2016-10-03 09:11:38 -07001622 if (error)
1623 goto out;
1624
1625 /*
1626 * Clear the reflink flag if we truncated everything.
1627 */
Darrick J. Wong83104d42016-10-03 09:11:46 -07001628 if (ip->i_d.di_nblocks == 0 && xfs_is_reflink_inode(ip)) {
Darrick J. Wongaa8968f2016-10-03 09:11:38 -07001629 ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
Darrick J. Wong83104d42016-10-03 09:11:46 -07001630 xfs_inode_clear_cowblocks_tag(ip);
1631 }
Darrick J. Wongaa8968f2016-10-03 09:11:38 -07001632
Christoph Hellwig673e8e52011-12-18 20:00:04 +00001633 /*
1634 * Always re-log the inode so that our permanent transaction can keep
1635 * on rolling it forward in the log.
1636 */
1637 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1638
1639 trace_xfs_itruncate_extents_end(ip, new_size);
1640
Christoph Hellwig8f04c472011-07-08 14:34:34 +02001641out:
1642 *tpp = tp;
1643 return error;
1644out_bmap_cancel:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001645 /*
Christoph Hellwig8f04c472011-07-08 14:34:34 +02001646 * If the bunmapi call encounters an error, return to the caller where
1647 * the transaction can be properly aborted. We just need to make sure
1648 * we're not holding any resources that we were not when we came in.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001649 */
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10001650 xfs_defer_cancel(&dfops);
Christoph Hellwig8f04c472011-07-08 14:34:34 +02001651 goto out;
1652}
1653
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001654int
1655xfs_release(
1656 xfs_inode_t *ip)
1657{
1658 xfs_mount_t *mp = ip->i_mount;
1659 int error;
1660
Dave Chinnerc19b3b052016-02-09 16:54:58 +11001661 if (!S_ISREG(VFS_I(ip)->i_mode) || (VFS_I(ip)->i_mode == 0))
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001662 return 0;
1663
1664 /* If this is a read-only mount, don't do this (would generate I/O) */
1665 if (mp->m_flags & XFS_MOUNT_RDONLY)
1666 return 0;
1667
1668 if (!XFS_FORCED_SHUTDOWN(mp)) {
1669 int truncated;
1670
1671 /*
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001672 * If we previously truncated this file and removed old data
1673 * in the process, we want to initiate "early" writeout on
1674 * the last close. This is an attempt to combat the notorious
1675 * NULL files problem which is particularly noticeable from a
1676 * truncate down, buffered (re-)write (delalloc), followed by
1677 * a crash. What we are effectively doing here is
1678 * significantly reducing the time window where we'd otherwise
1679 * be exposed to that problem.
1680 */
1681 truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
1682 if (truncated) {
1683 xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE);
Dave Chinnereac152b2014-08-04 13:22:49 +10001684 if (ip->i_delayed_blks > 0) {
Dave Chinner24513372014-06-25 14:58:08 +10001685 error = filemap_flush(VFS_I(ip)->i_mapping);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001686 if (error)
1687 return error;
1688 }
1689 }
1690 }
1691
Dave Chinner54d7b5c2016-02-09 16:54:58 +11001692 if (VFS_I(ip)->i_nlink == 0)
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001693 return 0;
1694
1695 if (xfs_can_free_eofblocks(ip, false)) {
1696
1697 /*
Brian Fostera36b9262017-01-27 23:22:55 -08001698		 * If the inode is being opened, written and closed
1699 * frequently and we have delayed allocation blocks outstanding
1700 * (e.g. streaming writes from the NFS server), truncating the
1701 * blocks past EOF will cause fragmentation to occur.
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001702 *
Brian Fostera36b9262017-01-27 23:22:55 -08001703 * In this case don't do the truncation, but we have to be
1704 * careful how we detect this case. Blocks beyond EOF show up as
1705 * i_delayed_blks even when the inode is clean, so we need to
1706 * truncate them away first before checking for a dirty release.
1707 * Hence on the first dirty close we will still remove the
1708 * speculative allocation, but after that we will leave it in
1709 * place.
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001710 */
1711 if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
1712 return 0;
Brian Fostera36b9262017-01-27 23:22:55 -08001713 /*
1714 * If we can't get the iolock just skip truncating the blocks
1715 * past EOF because we could deadlock with the mmap_sem
1716 * otherwise. We'll get another chance to drop them once the
1717 * last reference to the inode is dropped, so we'll never leak
1718 * blocks permanently.
1719 */
1720 if (xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
1721 error = xfs_free_eofblocks(ip);
1722 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
1723 if (error)
1724 return error;
1725 }
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001726
1727 /* delalloc blocks after truncation means it really is dirty */
1728 if (ip->i_delayed_blks)
1729 xfs_iflags_set(ip, XFS_IDIRTY_RELEASE);
1730 }
1731 return 0;
1732}
1733
1734/*
Brian Fosterf7be2d72013-09-20 11:06:10 -04001735 * xfs_inactive_truncate
1736 *
1737 * Called to perform a truncate when an inode becomes unlinked.
1738 */
1739STATIC int
1740xfs_inactive_truncate(
1741 struct xfs_inode *ip)
1742{
1743 struct xfs_mount *mp = ip->i_mount;
1744 struct xfs_trans *tp;
1745 int error;
1746
Christoph Hellwig253f4912016-04-06 09:19:55 +10001747 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
Brian Fosterf7be2d72013-09-20 11:06:10 -04001748 if (error) {
1749 ASSERT(XFS_FORCED_SHUTDOWN(mp));
Brian Fosterf7be2d72013-09-20 11:06:10 -04001750 return error;
1751 }
1752
1753 xfs_ilock(ip, XFS_ILOCK_EXCL);
1754 xfs_trans_ijoin(tp, ip, 0);
1755
1756 /*
1757 * Log the inode size first to prevent stale data exposure in the event
1758 * of a system crash before the truncate completes. See the related
Jan Kara69bca802016-05-26 14:46:43 +02001759 * comment in xfs_vn_setattr_size() for details.
Brian Fosterf7be2d72013-09-20 11:06:10 -04001760 */
1761 ip->i_d.di_size = 0;
1762 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1763
1764 error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
1765 if (error)
1766 goto error_trans_cancel;
1767
1768 ASSERT(ip->i_d.di_nextents == 0);
1769
Christoph Hellwig70393312015-06-04 13:48:08 +10001770 error = xfs_trans_commit(tp);
Brian Fosterf7be2d72013-09-20 11:06:10 -04001771 if (error)
1772 goto error_unlock;
1773
1774 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1775 return 0;
1776
1777error_trans_cancel:
Christoph Hellwig4906e212015-06-04 13:47:56 +10001778 xfs_trans_cancel(tp);
Brian Fosterf7be2d72013-09-20 11:06:10 -04001779error_unlock:
1780 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1781 return error;
1782}
1783
1784/*
Brian Foster88877d22013-09-20 11:06:11 -04001785 * xfs_inactive_ifree()
1786 *
1787 * Perform the inode free when an inode is unlinked.
1788 */
1789STATIC int
1790xfs_inactive_ifree(
1791 struct xfs_inode *ip)
1792{
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10001793 struct xfs_defer_ops dfops;
Brian Foster88877d22013-09-20 11:06:11 -04001794 xfs_fsblock_t first_block;
Brian Foster88877d22013-09-20 11:06:11 -04001795 struct xfs_mount *mp = ip->i_mount;
1796 struct xfs_trans *tp;
1797 int error;
1798
Brian Foster9d43b182014-04-24 16:00:52 +10001799 /*
Christoph Hellwig76d771b2017-01-25 07:49:35 -08001800 * We try to use a per-AG reservation for any block needed by the finobt
1801 * tree, but as the finobt feature predates the per-AG reservation
1802 * support a degraded file system might not have enough space for the
1803 * reservation at mount time. In that case try to dip into the reserved
1804 * pool and pray.
Brian Foster9d43b182014-04-24 16:00:52 +10001805 *
1806 * Send a warning if the reservation does happen to fail, as the inode
1807 * now remains allocated and sits on the unlinked list until the fs is
1808 * repaired.
1809 */
Christoph Hellwig76d771b2017-01-25 07:49:35 -08001810 if (unlikely(mp->m_inotbt_nores)) {
1811 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
1812 XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE,
1813 &tp);
1814 } else {
1815 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 0, 0, 0, &tp);
1816 }
Brian Foster88877d22013-09-20 11:06:11 -04001817 if (error) {
Dave Chinner24513372014-06-25 14:58:08 +10001818 if (error == -ENOSPC) {
Brian Foster9d43b182014-04-24 16:00:52 +10001819 xfs_warn_ratelimited(mp,
1820 "Failed to remove inode(s) from unlinked list. "
1821 "Please free space, unmount and run xfs_repair.");
1822 } else {
1823 ASSERT(XFS_FORCED_SHUTDOWN(mp));
1824 }
Brian Foster88877d22013-09-20 11:06:11 -04001825 return error;
1826 }
1827
1828 xfs_ilock(ip, XFS_ILOCK_EXCL);
1829 xfs_trans_ijoin(tp, ip, 0);
1830
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10001831 xfs_defer_init(&dfops, &first_block);
1832 error = xfs_ifree(tp, ip, &dfops);
Brian Foster88877d22013-09-20 11:06:11 -04001833 if (error) {
1834 /*
1835 * If we fail to free the inode, shut down. The cancel
1836 * might do that, we need to make sure. Otherwise the
1837 * inode might be lost for a long time or forever.
1838 */
1839 if (!XFS_FORCED_SHUTDOWN(mp)) {
1840 xfs_notice(mp, "%s: xfs_ifree returned error %d",
1841 __func__, error);
1842 xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
1843 }
Christoph Hellwig4906e212015-06-04 13:47:56 +10001844 xfs_trans_cancel(tp);
Brian Foster88877d22013-09-20 11:06:11 -04001845 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1846 return error;
1847 }
1848
1849 /*
1850 * Credit the quota account(s). The inode is gone.
1851 */
1852 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);
1853
1854 /*
Brian Fosterd4a97a02015-08-19 10:01:40 +10001855 * Just ignore errors at this point. There is nothing we can do except
1856 * to try to keep going. Make sure it's not a silent error.
Brian Foster88877d22013-09-20 11:06:11 -04001857 */
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10001858 error = xfs_defer_finish(&tp, &dfops, NULL);
Brian Fosterd4a97a02015-08-19 10:01:40 +10001859 if (error) {
Darrick J. Wong310a75a2016-08-03 11:18:10 +10001860 xfs_notice(mp, "%s: xfs_defer_finish returned error %d",
Brian Foster88877d22013-09-20 11:06:11 -04001861 __func__, error);
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10001862 xfs_defer_cancel(&dfops);
Brian Fosterd4a97a02015-08-19 10:01:40 +10001863 }
Christoph Hellwig70393312015-06-04 13:48:08 +10001864 error = xfs_trans_commit(tp);
Brian Foster88877d22013-09-20 11:06:11 -04001865 if (error)
1866 xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
1867 __func__, error);
1868
1869 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1870 return 0;
1871}
1872
1873/*
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001874 * xfs_inactive
1875 *
1876 * This is called when the reference count for the vnode
1877 * goes to zero. If the file has been unlinked, then it must
1878 * now be truncated. Also, we clear all of the read-ahead state
1879 * kept for the inode here since the file is now closed.
1880 */
Brian Foster74564fb2013-09-20 11:06:12 -04001881void
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001882xfs_inactive(
1883 xfs_inode_t *ip)
1884{
Jie Liu3d3c8b52013-08-12 20:49:59 +10001885 struct xfs_mount *mp;
Jie Liu3d3c8b52013-08-12 20:49:59 +10001886 int error;
1887 int truncate = 0;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001888
1889 /*
1890 * If the inode is already free, then there can be nothing
1891 * to clean up here.
1892 */
Dave Chinnerc19b3b052016-02-09 16:54:58 +11001893 if (VFS_I(ip)->i_mode == 0) {
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001894 ASSERT(ip->i_df.if_real_bytes == 0);
1895 ASSERT(ip->i_df.if_broot_bytes == 0);
Brian Foster74564fb2013-09-20 11:06:12 -04001896 return;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001897 }
1898
1899 mp = ip->i_mount;
Darrick J. Wong17c12bc2016-10-03 09:11:29 -07001900 ASSERT(!xfs_iflags_test(ip, XFS_IRECOVERY));
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001901
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001902 /* If this is a read-only mount, don't do this (would generate I/O) */
1903 if (mp->m_flags & XFS_MOUNT_RDONLY)
Brian Foster74564fb2013-09-20 11:06:12 -04001904 return;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001905
Dave Chinner54d7b5c2016-02-09 16:54:58 +11001906 if (VFS_I(ip)->i_nlink != 0) {
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001907 /*
1908 * force is true because we are evicting an inode from the
1909 * cache. Post-eof blocks must be freed, lest we end up with
1910 * broken free space accounting.
Brian Foster3b4683c2017-04-11 10:50:05 -07001911 *
1912 * Note: don't bother with iolock here since lockdep complains
1913 * about acquiring it in reclaim context. We have the only
1914 * reference to the inode at this point anyways.
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001915 */
Brian Foster3b4683c2017-04-11 10:50:05 -07001916 if (xfs_can_free_eofblocks(ip, true))
Brian Fostera36b9262017-01-27 23:22:55 -08001917 xfs_free_eofblocks(ip);
Brian Foster74564fb2013-09-20 11:06:12 -04001918
1919 return;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001920 }
1921
Dave Chinnerc19b3b052016-02-09 16:54:58 +11001922 if (S_ISREG(VFS_I(ip)->i_mode) &&
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001923 (ip->i_d.di_size != 0 || XFS_ISIZE(ip) != 0 ||
1924 ip->i_d.di_nextents > 0 || ip->i_delayed_blks > 0))
1925 truncate = 1;
1926
1927 error = xfs_qm_dqattach(ip, 0);
1928 if (error)
Brian Foster74564fb2013-09-20 11:06:12 -04001929 return;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001930
Dave Chinnerc19b3b052016-02-09 16:54:58 +11001931 if (S_ISLNK(VFS_I(ip)->i_mode))
Brian Foster36b21dd2013-09-20 11:06:09 -04001932 error = xfs_inactive_symlink(ip);
Brian Fosterf7be2d72013-09-20 11:06:10 -04001933 else if (truncate)
1934 error = xfs_inactive_truncate(ip);
1935 if (error)
Brian Foster74564fb2013-09-20 11:06:12 -04001936 return;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001937
1938 /*
1939 * If there are attributes associated with the file then blow them away
1940 * now. The code calls a routine that recursively deconstructs the
Dave Chinner6dfe5a02015-05-29 07:40:08 +10001941	 * attribute fork. It also blows away the in-core attribute fork.
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001942 */
Dave Chinner6dfe5a02015-05-29 07:40:08 +10001943 if (XFS_IFORK_Q(ip)) {
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001944 error = xfs_attr_inactive(ip);
1945 if (error)
Brian Foster74564fb2013-09-20 11:06:12 -04001946 return;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001947 }
1948
Dave Chinner6dfe5a02015-05-29 07:40:08 +10001949 ASSERT(!ip->i_afp);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001950 ASSERT(ip->i_d.di_anextents == 0);
Dave Chinner6dfe5a02015-05-29 07:40:08 +10001951 ASSERT(ip->i_d.di_forkoff == 0);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001952
1953 /*
1954 * Free the inode.
1955 */
Brian Foster88877d22013-09-20 11:06:11 -04001956 error = xfs_inactive_ifree(ip);
1957 if (error)
Brian Foster74564fb2013-09-20 11:06:12 -04001958 return;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001959
1960 /*
1961 * Release the dquots held by inode, if any.
1962 */
1963 xfs_qm_dqdetach(ip);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001964}
1965
Linus Torvalds1da177e2005-04-16 15:20:36 -07001966/*
Dave Chinner54d7b5c2016-02-09 16:54:58 +11001967 * This is called when the inode's link count goes to 0 or we are creating a
1968 * tmpfile via O_TMPFILE. In the tmpfile case the link count is dropped to
1969 * zero by the VFS after we've created the file successfully, so we have to
1970 * add it to the unlinked list while the link count is still non-zero.
1972 *
1973 * We place the on-disk inode on a list in the AGI. It will be pulled from this
1974 * list when the inode is freed.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001975 */
Dave Chinner54d7b5c2016-02-09 16:54:58 +11001976STATIC int
Linus Torvalds1da177e2005-04-16 15:20:36 -07001977xfs_iunlink(
Dave Chinner54d7b5c2016-02-09 16:54:58 +11001978 struct xfs_trans *tp,
1979 struct xfs_inode *ip)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001980{
Dave Chinner54d7b5c2016-02-09 16:54:58 +11001981 xfs_mount_t *mp = tp->t_mountp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001982 xfs_agi_t *agi;
1983 xfs_dinode_t *dip;
1984 xfs_buf_t *agibp;
1985 xfs_buf_t *ibp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001986 xfs_agino_t agino;
1987 short bucket_index;
1988 int offset;
1989 int error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001990
Dave Chinnerc19b3b052016-02-09 16:54:58 +11001991 ASSERT(VFS_I(ip)->i_mode != 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001992
Linus Torvalds1da177e2005-04-16 15:20:36 -07001993 /*
1994 * Get the agi buffer first. It ensures lock ordering
1995 * on the list.
1996 */
Christoph Hellwig5e1be0f2008-11-28 14:23:37 +11001997 error = xfs_read_agi(mp, tp, XFS_INO_TO_AGNO(mp, ip->i_ino), &agibp);
Vlad Apostolov859d7182007-10-11 17:44:18 +10001998 if (error)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001999 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002000 agi = XFS_BUF_TO_AGI(agibp);
Christoph Hellwig5e1be0f2008-11-28 14:23:37 +11002001
Linus Torvalds1da177e2005-04-16 15:20:36 -07002002 /*
2003 * Get the index into the agi hash table for the
2004 * list this inode will go on.
2005 */
2006 agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
2007 ASSERT(agino != 0);
2008 bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
2009 ASSERT(agi->agi_unlinked[bucket_index]);
Christoph Hellwig16259e72005-11-02 15:11:25 +11002010 ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != agino);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002011
Christoph Hellwig69ef9212011-07-08 14:36:05 +02002012 if (agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002013 /*
2014 * There is already another inode in the bucket we need
2015 * to add ourselves to. Add us at the front of the list.
2016 * Here we put the head pointer into our next pointer,
2017 * and then we fall through to point the head at us.
2018 */
Christoph Hellwig475ee412012-07-03 12:21:22 -04002019 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
2020 0, 0);
Vlad Apostolovc319b582007-11-23 16:27:51 +11002021 if (error)
2022 return error;
2023
Christoph Hellwig69ef9212011-07-08 14:36:05 +02002024 ASSERT(dip->di_next_unlinked == cpu_to_be32(NULLAGINO));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002025 dip->di_next_unlinked = agi->agi_unlinked[bucket_index];
Christoph Hellwig92bfc6e2008-11-28 14:23:41 +11002026 offset = ip->i_imap.im_boffset +
Linus Torvalds1da177e2005-04-16 15:20:36 -07002027 offsetof(xfs_dinode_t, di_next_unlinked);
Dave Chinner0a32c262013-06-05 12:09:08 +10002028
2029 /* need to recalc the inode CRC if appropriate */
2030 xfs_dinode_calc_crc(mp, dip);
2031
Linus Torvalds1da177e2005-04-16 15:20:36 -07002032 xfs_trans_inode_buf(tp, ibp);
2033 xfs_trans_log_buf(tp, ibp, offset,
2034 (offset + sizeof(xfs_agino_t) - 1));
2035 xfs_inobp_check(mp, ibp);
2036 }
2037
2038 /*
2039 * Point the bucket head pointer at the inode being inserted.
2040 */
2041 ASSERT(agino != 0);
Christoph Hellwig16259e72005-11-02 15:11:25 +11002042 agi->agi_unlinked[bucket_index] = cpu_to_be32(agino);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002043 offset = offsetof(xfs_agi_t, agi_unlinked) +
2044 (sizeof(xfs_agino_t) * bucket_index);
2045 xfs_trans_log_buf(tp, agibp, offset,
2046 (offset + sizeof(xfs_agino_t) - 1));
2047 return 0;
2048}
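
/*
 * Illustrative sketch, not part of the original file: the structure
 * xfs_iunlink() maintains is an array of singly linked list heads in the
 * AGI, with each inode's di_next_unlinked field acting as the "next"
 * pointer. Reduced to plain in-memory list manipulation (the struct and
 * helper below are hypothetical), the insert looks like this:
 */
struct example_agi_buckets {
	__be32		unlinked[XFS_AGI_UNLINKED_BUCKETS];
};

static void
example_unlinked_push(
	struct example_agi_buckets	*agi,
	xfs_agino_t			agino,
	__be32				*di_next_unlinked)
{
	short	bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;

	/* The new inode's next pointer takes over the old bucket head... */
	*di_next_unlinked = agi->unlinked[bucket_index];
	/* ...and the bucket head now points at the new inode. */
	agi->unlinked[bucket_index] = cpu_to_be32(agino);
}

/*
 * The real code above additionally skips the di_next_unlinked write when
 * the bucket was empty (a freshly allocated inode already has NULLAGINO
 * there) and logs the modified ranges of the inode and AGI buffers.
 */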
2049
2050/*
2051 * Pull the on-disk inode from the AGI unlinked list.
2052 */
2053STATIC int
2054xfs_iunlink_remove(
2055 xfs_trans_t *tp,
2056 xfs_inode_t *ip)
2057{
2058 xfs_ino_t next_ino;
2059 xfs_mount_t *mp;
2060 xfs_agi_t *agi;
2061 xfs_dinode_t *dip;
2062 xfs_buf_t *agibp;
2063 xfs_buf_t *ibp;
2064 xfs_agnumber_t agno;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002065 xfs_agino_t agino;
2066 xfs_agino_t next_agino;
2067 xfs_buf_t *last_ibp;
Nathan Scott6fdf8cc2006-06-28 10:13:52 +10002068 xfs_dinode_t *last_dip = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002069 short bucket_index;
Nathan Scott6fdf8cc2006-06-28 10:13:52 +10002070 int offset, last_offset = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002071 int error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002072
Linus Torvalds1da177e2005-04-16 15:20:36 -07002073 mp = tp->t_mountp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002074 agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002075
2076 /*
2077 * Get the agi buffer first. It ensures lock ordering
2078 * on the list.
2079 */
Christoph Hellwig5e1be0f2008-11-28 14:23:37 +11002080 error = xfs_read_agi(mp, tp, agno, &agibp);
2081 if (error)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002082 return error;
Christoph Hellwig5e1be0f2008-11-28 14:23:37 +11002083
Linus Torvalds1da177e2005-04-16 15:20:36 -07002084 agi = XFS_BUF_TO_AGI(agibp);
Christoph Hellwig5e1be0f2008-11-28 14:23:37 +11002085
Linus Torvalds1da177e2005-04-16 15:20:36 -07002086 /*
2087 * Get the index into the agi hash table for the
2088 * list this inode will go on.
2089 */
2090 agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
2091 ASSERT(agino != 0);
2092 bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
Christoph Hellwig69ef9212011-07-08 14:36:05 +02002093 ASSERT(agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002094 ASSERT(agi->agi_unlinked[bucket_index]);
2095
Christoph Hellwig16259e72005-11-02 15:11:25 +11002096 if (be32_to_cpu(agi->agi_unlinked[bucket_index]) == agino) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002097 /*
Christoph Hellwig475ee412012-07-03 12:21:22 -04002098 * We're at the head of the list. Get the inode's on-disk
2099 * buffer to see if there is anyone after us on the list.
2100 * Only modify our next pointer if it is not already NULLAGINO.
2101 * This saves us the overhead of dealing with the buffer when
2102 * there is no need to change it.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002103 */
Christoph Hellwig475ee412012-07-03 12:21:22 -04002104 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
2105 0, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002106 if (error) {
Christoph Hellwig475ee412012-07-03 12:21:22 -04002107 xfs_warn(mp, "%s: xfs_imap_to_bp returned error %d.",
Dave Chinner0b932cc2011-03-07 10:08:35 +11002108 __func__, error);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002109 return error;
2110 }
Christoph Hellwig347d1c02007-08-28 13:57:51 +10002111 next_agino = be32_to_cpu(dip->di_next_unlinked);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002112 ASSERT(next_agino != 0);
2113 if (next_agino != NULLAGINO) {
Christoph Hellwig347d1c02007-08-28 13:57:51 +10002114 dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
Christoph Hellwig92bfc6e2008-11-28 14:23:41 +11002115 offset = ip->i_imap.im_boffset +
Linus Torvalds1da177e2005-04-16 15:20:36 -07002116 offsetof(xfs_dinode_t, di_next_unlinked);
Dave Chinner0a32c262013-06-05 12:09:08 +10002117
2118 /* need to recalc the inode CRC if appropriate */
2119 xfs_dinode_calc_crc(mp, dip);
2120
Linus Torvalds1da177e2005-04-16 15:20:36 -07002121 xfs_trans_inode_buf(tp, ibp);
2122 xfs_trans_log_buf(tp, ibp, offset,
2123 (offset + sizeof(xfs_agino_t) - 1));
2124 xfs_inobp_check(mp, ibp);
2125 } else {
2126 xfs_trans_brelse(tp, ibp);
2127 }
2128 /*
2129 * Point the bucket head pointer at the next inode.
2130 */
2131 ASSERT(next_agino != 0);
2132 ASSERT(next_agino != agino);
Christoph Hellwig16259e72005-11-02 15:11:25 +11002133 agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002134 offset = offsetof(xfs_agi_t, agi_unlinked) +
2135 (sizeof(xfs_agino_t) * bucket_index);
2136 xfs_trans_log_buf(tp, agibp, offset,
2137 (offset + sizeof(xfs_agino_t) - 1));
2138 } else {
2139 /*
2140 * We need to search the list for the inode being freed.
2141 */
Christoph Hellwig16259e72005-11-02 15:11:25 +11002142 next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002143 last_ibp = NULL;
2144 while (next_agino != agino) {
Christoph Hellwig129dbc92012-07-03 12:21:51 -04002145 struct xfs_imap imap;
2146
2147 if (last_ibp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002148 xfs_trans_brelse(tp, last_ibp);
Christoph Hellwig129dbc92012-07-03 12:21:51 -04002149
2150 imap.im_blkno = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002151 next_ino = XFS_AGINO_TO_INO(mp, agno, next_agino);
Christoph Hellwig129dbc92012-07-03 12:21:51 -04002152
2153 error = xfs_imap(mp, tp, next_ino, &imap, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002154 if (error) {
Dave Chinner0b932cc2011-03-07 10:08:35 +11002155 xfs_warn(mp,
Christoph Hellwig129dbc92012-07-03 12:21:51 -04002156 "%s: xfs_imap returned error %d.",
2157 __func__, error);
2158 return error;
2159 }
2160
2161 error = xfs_imap_to_bp(mp, tp, &imap, &last_dip,
2162 &last_ibp, 0, 0);
2163 if (error) {
2164 xfs_warn(mp,
2165 "%s: xfs_imap_to_bp returned error %d.",
Dave Chinner0b932cc2011-03-07 10:08:35 +11002166 __func__, error);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002167 return error;
2168 }
Christoph Hellwig129dbc92012-07-03 12:21:51 -04002169
2170 last_offset = imap.im_boffset;
Christoph Hellwig347d1c02007-08-28 13:57:51 +10002171 next_agino = be32_to_cpu(last_dip->di_next_unlinked);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002172 ASSERT(next_agino != NULLAGINO);
2173 ASSERT(next_agino != 0);
2174 }
Christoph Hellwig475ee412012-07-03 12:21:22 -04002175
Linus Torvalds1da177e2005-04-16 15:20:36 -07002176 /*
Christoph Hellwig475ee412012-07-03 12:21:22 -04002177 * Now last_ibp points to the buffer previous to us on the
2178 * unlinked list. Pull us from the list.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002179 */
Christoph Hellwig475ee412012-07-03 12:21:22 -04002180 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
2181 0, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002182 if (error) {
Christoph Hellwig475ee412012-07-03 12:21:22 -04002183 xfs_warn(mp, "%s: xfs_imap_to_bp(2) returned error %d.",
Dave Chinner0b932cc2011-03-07 10:08:35 +11002184 __func__, error);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002185 return error;
2186 }
Christoph Hellwig347d1c02007-08-28 13:57:51 +10002187 next_agino = be32_to_cpu(dip->di_next_unlinked);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002188 ASSERT(next_agino != 0);
2189 ASSERT(next_agino != agino);
2190 if (next_agino != NULLAGINO) {
Christoph Hellwig347d1c02007-08-28 13:57:51 +10002191 dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
Christoph Hellwig92bfc6e2008-11-28 14:23:41 +11002192 offset = ip->i_imap.im_boffset +
Linus Torvalds1da177e2005-04-16 15:20:36 -07002193 offsetof(xfs_dinode_t, di_next_unlinked);
Dave Chinner0a32c262013-06-05 12:09:08 +10002194
2195 /* need to recalc the inode CRC if appropriate */
2196 xfs_dinode_calc_crc(mp, dip);
2197
Linus Torvalds1da177e2005-04-16 15:20:36 -07002198 xfs_trans_inode_buf(tp, ibp);
2199 xfs_trans_log_buf(tp, ibp, offset,
2200 (offset + sizeof(xfs_agino_t) - 1));
2201 xfs_inobp_check(mp, ibp);
2202 } else {
2203 xfs_trans_brelse(tp, ibp);
2204 }
2205 /*
2206 * Point the previous inode on the list to the next inode.
2207 */
Christoph Hellwig347d1c02007-08-28 13:57:51 +10002208 last_dip->di_next_unlinked = cpu_to_be32(next_agino);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002209 ASSERT(next_agino != 0);
2210 offset = last_offset + offsetof(xfs_dinode_t, di_next_unlinked);
Dave Chinner0a32c262013-06-05 12:09:08 +10002211
2212 /* need to recalc the inode CRC if appropriate */
2213 xfs_dinode_calc_crc(mp, last_dip);
2214
Linus Torvalds1da177e2005-04-16 15:20:36 -07002215 xfs_trans_inode_buf(tp, last_ibp);
2216 xfs_trans_log_buf(tp, last_ibp, offset,
2217 (offset + sizeof(xfs_agino_t) - 1));
2218 xfs_inobp_check(mp, last_ibp);
2219 }
2220 return 0;
2221}
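
/*
 * Illustrative sketch, not part of the original file: removal from an AGI
 * bucket mirrors ordinary singly-linked-list deletion. In hypothetical
 * in-memory form (reusing the example structure from the sketch after
 * xfs_iunlink(), with the predecessor's next pointer passed in rather
 * than found by walking the chain buffer by buffer):
 */
static void
example_unlinked_remove(
	struct example_agi_buckets	*agi,
	xfs_agino_t			agino,
	__be32				*prev_next,
	__be32				*di_next_unlinked)
{
	short	bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;

	if (be32_to_cpu(agi->unlinked[bucket_index]) == agino) {
		/* Head of the list: point the bucket at our successor. */
		agi->unlinked[bucket_index] = *di_next_unlinked;
	} else {
		/* Interior: splice us out via the predecessor. */
		*prev_next = *di_next_unlinked;
	}
	/* Terminate our own next pointer. */
	*di_next_unlinked = cpu_to_be32(NULLAGINO);
}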
2222
Dave Chinner5b3eed72010-08-24 11:42:41 +10002223/*
Zhi Yong Wu0b8182d2013-08-12 03:14:59 +00002224 * A big issue when freeing the inode cluster is that we _cannot_ skip any
Dave Chinner5b3eed72010-08-24 11:42:41 +10002225 * inodes that are in memory - they all must be marked stale and attached to
2226 * the cluster buffer.
2227 */
Chandra Seetharaman2a30f36d2011-09-20 13:56:55 +00002228STATIC int
Linus Torvalds1da177e2005-04-16 15:20:36 -07002229xfs_ifree_cluster(
Brian Foster09b56602015-05-29 09:26:03 +10002230 xfs_inode_t *free_ip,
2231 xfs_trans_t *tp,
2232 struct xfs_icluster *xic)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002233{
2234 xfs_mount_t *mp = free_ip->i_mount;
2235 int blks_per_cluster;
Jie Liu982e9392013-12-13 15:51:49 +11002236 int inodes_per_cluster;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002237 int nbufs;
Dave Chinner5b257b42010-06-03 16:22:29 +10002238 int i, j;
Brian Foster3cdaa182015-06-04 13:03:34 +10002239 int ioffset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002240 xfs_daddr_t blkno;
2241 xfs_buf_t *bp;
Dave Chinner5b257b42010-06-03 16:22:29 +10002242 xfs_inode_t *ip;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002243 xfs_inode_log_item_t *iip;
2244 xfs_log_item_t *lip;
Dave Chinner5017e972010-01-11 11:47:40 +00002245 struct xfs_perag *pag;
Brian Foster09b56602015-05-29 09:26:03 +10002246 xfs_ino_t inum;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002247
Brian Foster09b56602015-05-29 09:26:03 +10002248 inum = xic->first_ino;
Dave Chinner5017e972010-01-11 11:47:40 +00002249 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum));
Jie Liu982e9392013-12-13 15:51:49 +11002250 blks_per_cluster = xfs_icluster_size_fsb(mp);
2251 inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog;
2252 nbufs = mp->m_ialloc_blks / blks_per_cluster;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002253
Jie Liu982e9392013-12-13 15:51:49 +11002254 for (j = 0; j < nbufs; j++, inum += inodes_per_cluster) {
Brian Foster09b56602015-05-29 09:26:03 +10002255 /*
2256 * The allocation bitmap tells us which inodes of the chunk were
2257 * physically allocated. Skip the cluster if an inode falls into
2258 * a sparse region.
2259 */
Brian Foster3cdaa182015-06-04 13:03:34 +10002260 ioffset = inum - xic->first_ino;
2261 if ((xic->alloc & XFS_INOBT_MASK(ioffset)) == 0) {
2262 ASSERT(do_mod(ioffset, inodes_per_cluster) == 0);
Brian Foster09b56602015-05-29 09:26:03 +10002263 continue;
2264 }
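		/*
		 * Worked example (illustrative): with a 64-inode chunk and
		 * 32 inodes per cluster, a sparse chunk whose second
		 * cluster alone is allocated has only bits 32-63 set in
		 * xic->alloc, so the first pass (ioffset 0) tests bit 0,
		 * finds it clear and skips that cluster entirely.
		 */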
2265
Linus Torvalds1da177e2005-04-16 15:20:36 -07002266 blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
2267 XFS_INO_TO_AGBNO(mp, inum));
2268
Linus Torvalds1da177e2005-04-16 15:20:36 -07002269 /*
Dave Chinner5b257b42010-06-03 16:22:29 +10002270 * We obtain and lock the backing buffer first in the process
2271 * here, as we have to ensure that any dirty inode that we
2272 * can't get the flush lock on is attached to the buffer.
2273 * If we scan the in-memory inodes first, then buffer IO can
2274 * complete before we get a lock on it, and hence we may fail
2275 * to mark all the active inodes on the buffer stale.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002276 */
Dave Chinner5b257b42010-06-03 16:22:29 +10002277 bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
Dave Chinnerb6aff292012-11-02 11:38:42 +11002278 mp->m_bsize * blks_per_cluster,
2279 XBF_UNMAPPED);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002280
Chandra Seetharaman2a30f36d2011-09-20 13:56:55 +00002281 if (!bp)
Dave Chinner24513372014-06-25 14:58:08 +10002282 return -ENOMEM;
Dave Chinnerb0f539d2012-11-14 17:53:49 +11002283
2284 /*
2285 * This buffer may not have been correctly initialised as we
2286 * didn't read it from disk. That's not important because we are
2287		 * only using it to mark the buffer as stale in the log, and to
2288 * attach stale cached inodes on it. That means it will never be
2289 * dispatched for IO. If it is, we want to know about it, and we
2290		 * want it to fail. We can achieve this by adding a write
2291 * verifier to the buffer.
2292 */
Dave Chinner1813dd62012-11-14 17:54:40 +11002293 bp->b_ops = &xfs_inode_buf_ops;
Dave Chinnerb0f539d2012-11-14 17:53:49 +11002294
Dave Chinner5b257b42010-06-03 16:22:29 +10002295 /*
2296 * Walk the inodes already attached to the buffer and mark them
2297 * stale. These will all have the flush locks held, so an
Dave Chinner5b3eed72010-08-24 11:42:41 +10002298 * in-memory inode walk can't lock them. By marking them all
2299 * stale first, we will not attempt to lock them in the loop
2300 * below as the XFS_ISTALE flag will be set.
Dave Chinner5b257b42010-06-03 16:22:29 +10002301 */
Christoph Hellwigadadbee2011-07-13 13:43:49 +02002302 lip = bp->b_fspriv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002303 while (lip) {
2304 if (lip->li_type == XFS_LI_INODE) {
2305 iip = (xfs_inode_log_item_t *)lip;
2306 ASSERT(iip->ili_logged == 1);
Christoph Hellwigca30b2a2010-06-23 18:11:15 +10002307 lip->li_cb = xfs_istale_done;
David Chinner7b2e2a32008-10-30 17:39:12 +11002308 xfs_trans_ail_copy_lsn(mp->m_ail,
2309 &iip->ili_flush_lsn,
2310 &iip->ili_item.li_lsn);
David Chinnere5ffd2b2006-11-21 18:55:33 +11002311 xfs_iflags_set(iip->ili_inode, XFS_ISTALE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002312 }
2313 lip = lip->li_bio_list;
2314 }
2315
Dave Chinner5b3eed72010-08-24 11:42:41 +10002316
Dave Chinner5b257b42010-06-03 16:22:29 +10002317 /*
2318 * For each inode in memory attempt to add it to the inode
2319 * buffer and set it up for being staled on buffer IO
2320 * completion. This is safe as we've locked out tail pushing
2321 * and flushing by locking the buffer.
2322 *
2323 * We have already marked every inode that was part of a
2324 * transaction stale above, which means there is no point in
2325 * even trying to lock them.
2326 */
Jie Liu982e9392013-12-13 15:51:49 +11002327 for (i = 0; i < inodes_per_cluster; i++) {
Dave Chinner5b3eed72010-08-24 11:42:41 +10002328retry:
Dave Chinner1a3e8f32010-12-17 17:29:43 +11002329 rcu_read_lock();
Dave Chinner5b257b42010-06-03 16:22:29 +10002330 ip = radix_tree_lookup(&pag->pag_ici_root,
2331 XFS_INO_TO_AGINO(mp, (inum + i)));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002332
Dave Chinner1a3e8f32010-12-17 17:29:43 +11002333 /* Inode not in memory, nothing to do */
2334 if (!ip) {
2335 rcu_read_unlock();
Dave Chinner5b257b42010-06-03 16:22:29 +10002336 continue;
2337 }
2338
Dave Chinner5b3eed72010-08-24 11:42:41 +10002339 /*
Dave Chinner1a3e8f32010-12-17 17:29:43 +11002340			 * Because this is an RCU-protected lookup, we could
2341 * find a recently freed or even reallocated inode
2342 * during the lookup. We need to check under the
2343 * i_flags_lock for a valid inode here. Skip it if it
2344 * is not valid, the wrong inode or stale.
2345 */
2346 spin_lock(&ip->i_flags_lock);
2347 if (ip->i_ino != inum + i ||
2348 __xfs_iflags_test(ip, XFS_ISTALE)) {
2349 spin_unlock(&ip->i_flags_lock);
2350 rcu_read_unlock();
2351 continue;
2352 }
2353 spin_unlock(&ip->i_flags_lock);
2354
2355 /*
Dave Chinner5b3eed72010-08-24 11:42:41 +10002356 * Don't try to lock/unlock the current inode, but we
2357 * _cannot_ skip the other inodes that we did not find
2358 * in the list attached to the buffer and are not
2359 * already marked stale. If we can't lock it, back off
2360 * and retry.
2361 */
Omar Sandovalf2e9ad22017-08-25 10:05:26 -07002362 if (ip != free_ip) {
2363 if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
2364 rcu_read_unlock();
2365 delay(1);
2366 goto retry;
2367 }
2368
2369 /*
2370 * Check the inode number again in case we're
2371 * racing with freeing in xfs_reclaim_inode().
2372 * See the comments in that function for more
2373 * information as to why the initial check is
2374 * not sufficient.
2375 */
2376 if (ip->i_ino != inum + i) {
2377 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2378 continue;
2379 }
Dave Chinner5b257b42010-06-03 16:22:29 +10002380 }
Dave Chinner1a3e8f32010-12-17 17:29:43 +11002381 rcu_read_unlock();
Dave Chinner5b257b42010-06-03 16:22:29 +10002382
Dave Chinner5b3eed72010-08-24 11:42:41 +10002383 xfs_iflock(ip);
Dave Chinner5b257b42010-06-03 16:22:29 +10002384 xfs_iflags_set(ip, XFS_ISTALE);
Dave Chinner5b257b42010-06-03 16:22:29 +10002385
Dave Chinner5b3eed72010-08-24 11:42:41 +10002386 /*
2387 * we don't need to attach clean inodes or those only
2388 * with unlogged changes (which we throw away, anyway).
2389 */
Dave Chinner5b257b42010-06-03 16:22:29 +10002390 iip = ip->i_itemp;
Dave Chinner5b3eed72010-08-24 11:42:41 +10002391 if (!iip || xfs_inode_clean(ip)) {
Dave Chinner5b257b42010-06-03 16:22:29 +10002392 ASSERT(ip != free_ip);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002393 xfs_ifunlock(ip);
2394 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2395 continue;
2396 }
2397
Christoph Hellwigf5d8d5c2012-02-29 09:53:54 +00002398 iip->ili_last_fields = iip->ili_fields;
2399 iip->ili_fields = 0;
Dave Chinnerfc0561c2015-11-03 13:14:59 +11002400 iip->ili_fsync_fields = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002401 iip->ili_logged = 1;
David Chinner7b2e2a32008-10-30 17:39:12 +11002402 xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
2403 &iip->ili_item.li_lsn);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002404
Christoph Hellwigca30b2a2010-06-23 18:11:15 +10002405 xfs_buf_attach_iodone(bp, xfs_istale_done,
2406 &iip->ili_item);
Dave Chinner5b257b42010-06-03 16:22:29 +10002407
2408 if (ip != free_ip)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002409 xfs_iunlock(ip, XFS_ILOCK_EXCL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002410 }
2411
Dave Chinner5b3eed72010-08-24 11:42:41 +10002412 xfs_trans_stale_inode_buf(tp, bp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002413 xfs_trans_binval(tp, bp);
2414 }
2415
Dave Chinner5017e972010-01-11 11:47:40 +00002416 xfs_perag_put(pag);
Chandra Seetharaman2a30f36d2011-09-20 13:56:55 +00002417 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002418}
2419
2420/*
2421 * This is called to return an inode to the inode free list.
2422 * The inode should already be truncated to 0 length and have
2423 * no pages associated with it. This routine also assumes that
2424 * the inode is already a part of the transaction.
2425 *
2426 * The on-disk copy of the inode will have been added to the list
2427 * of unlinked inodes in the AGI. We need to remove the inode from
2428 * that list atomically with respect to freeing it here.
2429 */
2430int
2431xfs_ifree(
2432 xfs_trans_t *tp,
2433 xfs_inode_t *ip,
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10002434 struct xfs_defer_ops *dfops)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002435{
2436 int error;
Brian Foster09b56602015-05-29 09:26:03 +10002437 struct xfs_icluster xic = { 0 };
Linus Torvalds1da177e2005-04-16 15:20:36 -07002438
Christoph Hellwig579aa9c2008-04-22 17:34:00 +10002439 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
Dave Chinner54d7b5c2016-02-09 16:54:58 +11002440 ASSERT(VFS_I(ip)->i_nlink == 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002441 ASSERT(ip->i_d.di_nextents == 0);
2442 ASSERT(ip->i_d.di_anextents == 0);
Dave Chinnerc19b3b052016-02-09 16:54:58 +11002443 ASSERT(ip->i_d.di_size == 0 || !S_ISREG(VFS_I(ip)->i_mode));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002444 ASSERT(ip->i_d.di_nblocks == 0);
2445
2446 /*
2447 * Pull the on-disk inode from the AGI unlinked list.
2448 */
2449 error = xfs_iunlink_remove(tp, ip);
Dave Chinner1baaed82013-06-27 16:04:50 +10002450 if (error)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002451 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002452
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10002453 error = xfs_difree(tp, ip->i_ino, dfops, &xic);
Dave Chinner1baaed82013-06-27 16:04:50 +10002454 if (error)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002455 return error;
Dave Chinner1baaed82013-06-27 16:04:50 +10002456
Dave Chinnerc19b3b052016-02-09 16:54:58 +11002457 VFS_I(ip)->i_mode = 0; /* mark incore inode as free */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002458 ip->i_d.di_flags = 0;
2459 ip->i_d.di_dmevmask = 0;
2460 ip->i_d.di_forkoff = 0; /* mark the attr fork not in use */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002461 ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
2462 ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
2463 /*
2464 * Bump the generation count so no one will be confused
2465 * by reincarnations of this inode.
2466 */
Dave Chinner9e9a2672016-02-09 16:54:58 +11002467 VFS_I(ip)->i_generation++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002468 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
2469
Brian Foster09b56602015-05-29 09:26:03 +10002470 if (xic.deleted)
2471 error = xfs_ifree_cluster(ip, tp, &xic);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002472
Chandra Seetharaman2a30f36d2011-09-20 13:56:55 +00002473 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002474}
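/*
 * Illustrative sketch, compiled out: a minimal caller of xfs_ifree() that
 * honours the contract documented above -- the inode is already truncated,
 * locked exclusively and joined to the transaction.  The helper name is
 * hypothetical and the tr_ifree reservation is assumed from xfs_trans_resv;
 * see xfs_inactive_ifree() for the real caller.
 */
#if 0
static int
xfs_ifree_sketch(
	struct xfs_mount	*mp,
	struct xfs_inode	*ip)
{
	struct xfs_defer_ops	dfops;
	xfs_fsblock_t		first_block;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	xfs_defer_init(&dfops, &first_block);
	error = xfs_ifree(tp, ip, &dfops);
	if (error)
		goto out_cancel;

	error = xfs_defer_finish(&tp, &dfops, NULL);
	if (error)
		goto out_cancel;

	/* commit (or cancel) also drops the ILOCK joined above */
	return xfs_trans_commit(tp);

out_cancel:
	xfs_defer_cancel(&dfops);
	xfs_trans_cancel(tp);
	return error;
}
#endif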
2475
2476/*
Christoph Hellwig60ec6782010-02-17 19:43:56 +00002477 * This is called to unpin an inode. The caller must have the inode locked
2478 * in at least shared mode so that the buffer cannot be subsequently pinned
2479 * once someone is waiting for it to be unpinned.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002480 */
Christoph Hellwig60ec6782010-02-17 19:43:56 +00002481static void
Christoph Hellwigf392e632011-12-18 20:00:10 +00002482xfs_iunpin(
Christoph Hellwig60ec6782010-02-17 19:43:56 +00002483 struct xfs_inode *ip)
David Chinnera3f74ff2008-03-06 13:43:42 +11002484{
Christoph Hellwig579aa9c2008-04-22 17:34:00 +10002485 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
David Chinnera3f74ff2008-03-06 13:43:42 +11002486
Dave Chinner4aaf15d2010-03-08 11:24:07 +11002487 trace_xfs_inode_unpin_nowait(ip, _RET_IP_);
2488
David Chinnera3f74ff2008-03-06 13:43:42 +11002489 /* Give the log a push to start the unpinning I/O */
Christoph Hellwig60ec6782010-02-17 19:43:56 +00002490 xfs_log_force_lsn(ip->i_mount, ip->i_itemp->ili_last_lsn, 0);
Christoph Hellwiga14a3482010-01-19 09:56:46 +00002491
David Chinnera3f74ff2008-03-06 13:43:42 +11002492}
2493
Christoph Hellwigf392e632011-12-18 20:00:10 +00002494static void
2495__xfs_iunpin_wait(
2496 struct xfs_inode *ip)
2497{
2498 wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT);
2499 DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT);
2500
2501 xfs_iunpin(ip);
2502
2503 do {
Ingo Molnar21417132017-03-05 11:25:39 +01002504 prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
Christoph Hellwigf392e632011-12-18 20:00:10 +00002505 if (xfs_ipincount(ip))
2506 io_schedule();
2507 } while (xfs_ipincount(ip));
Ingo Molnar21417132017-03-05 11:25:39 +01002508 finish_wait(wq, &wait.wq_entry);
Christoph Hellwigf392e632011-12-18 20:00:10 +00002509}
2510
Dave Chinner777df5a2010-02-06 12:37:26 +11002511void
Linus Torvalds1da177e2005-04-16 15:20:36 -07002512xfs_iunpin_wait(
Christoph Hellwig60ec6782010-02-17 19:43:56 +00002513 struct xfs_inode *ip)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002514{
Christoph Hellwigf392e632011-12-18 20:00:10 +00002515 if (xfs_ipincount(ip))
2516 __xfs_iunpin_wait(ip);
David Chinnera3f74ff2008-03-06 13:43:42 +11002517}
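/*
 * Illustrative sketch, compiled out: the locking contract documented above
 * xfs_iunpin() -- hold the inode lock at least shared around the unpin wait
 * so the item cannot be repinned while we sleep.  The helper name is
 * hypothetical.
 */
#if 0
static void
xfs_iunpin_wait_sketch(
	struct xfs_inode	*ip)
{
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	xfs_iunpin_wait(ip);	/* sleeps until the pin count drops to zero */
	ASSERT(xfs_ipincount(ip) == 0);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
}
#endif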
Linus Torvalds1da177e2005-04-16 15:20:36 -07002518
Dave Chinner27320362013-10-29 22:11:44 +11002519/*
2520 * Removing an inode from the namespace involves removing the directory entry
2521 * and dropping the link count on the inode. Removing the directory entry can
 2522 * result in locking an AGF (directory blocks were freed) and dropping the link
 2523 * count can result in placing the inode on an unlinked list, which results in
 2524 * locking an AGI.
2525 *
2526 * The big problem here is that we have an ordering constraint on AGF and AGI
2527 * locking - inode allocation locks the AGI, then can allocate a new extent for
2528 * new inodes, locking the AGF after the AGI. Similarly, freeing the inode
2529 * removes the inode from the unlinked list, requiring that we lock the AGI
2530 * first, and then freeing the inode can result in an inode chunk being freed
2531 * and hence freeing disk space requiring that we lock an AGF.
2532 *
2533 * Hence the ordering that is imposed by other parts of the code is AGI before
2534 * AGF. This means we cannot remove the directory entry before we drop the inode
2535 * reference count and put it on the unlinked list as this results in a lock
2536 * order of AGF then AGI, and this can deadlock against inode allocation and
2537 * freeing. Therefore we must drop the link counts before we remove the
2538 * directory entry.
2539 *
2540 * This is still safe from a transactional point of view - it is not until we
Darrick J. Wong310a75a2016-08-03 11:18:10 +10002541 * get to xfs_defer_finish() that we have the possibility of multiple
Dave Chinner27320362013-10-29 22:11:44 +11002542 * transactions in this operation. Hence as long as we remove the directory
2543 * entry and drop the link count in the first transaction of the remove
2544 * operation, there are no transactional constraints on the ordering here.
2545 */
Dave Chinnerc24b5df2013-08-12 20:49:45 +10002546int
2547xfs_remove(
2548 xfs_inode_t *dp,
2549 struct xfs_name *name,
2550 xfs_inode_t *ip)
2551{
2552 xfs_mount_t *mp = dp->i_mount;
2553 xfs_trans_t *tp = NULL;
Dave Chinnerc19b3b052016-02-09 16:54:58 +11002554 int is_dir = S_ISDIR(VFS_I(ip)->i_mode);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10002555 int error = 0;
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10002556 struct xfs_defer_ops dfops;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10002557 xfs_fsblock_t first_block;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10002558 uint resblks;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10002559
2560 trace_xfs_remove(dp, name);
2561
2562 if (XFS_FORCED_SHUTDOWN(mp))
Dave Chinner24513372014-06-25 14:58:08 +10002563 return -EIO;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10002564
2565 error = xfs_qm_dqattach(dp, 0);
2566 if (error)
2567 goto std_return;
2568
2569 error = xfs_qm_dqattach(ip, 0);
2570 if (error)
2571 goto std_return;
2572
Dave Chinnerc24b5df2013-08-12 20:49:45 +10002573 /*
 2574	 * We try to get the real space reservation first, allowing
 2575	 * for directory btree deletion(s) implying possible bmap
 2576	 * insert(s).  If we can't get the space reservation then we
 2577	 * use 0 instead, and avoid the bmap btree insert(s) in the
 2578	 * directory code: if a bmap insert would otherwise be needed,
 2579	 * the directory code trims the LAST block from the
 2580	 * directory instead.
2581 */
2582 resblks = XFS_REMOVE_SPACE_RES(mp);
Christoph Hellwig253f4912016-04-06 09:19:55 +10002583 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, resblks, 0, 0, &tp);
Dave Chinner24513372014-06-25 14:58:08 +10002584 if (error == -ENOSPC) {
Dave Chinnerc24b5df2013-08-12 20:49:45 +10002585 resblks = 0;
Christoph Hellwig253f4912016-04-06 09:19:55 +10002586 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, 0, 0, 0,
2587 &tp);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10002588 }
2589 if (error) {
Dave Chinner24513372014-06-25 14:58:08 +10002590 ASSERT(error != -ENOSPC);
Christoph Hellwig253f4912016-04-06 09:19:55 +10002591 goto std_return;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10002592 }
2593
2594 xfs_lock_two_inodes(dp, ip, XFS_ILOCK_EXCL);
2595
Christoph Hellwig65523212016-11-30 14:33:25 +11002596 xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10002597 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
2598
2599 /*
 2600	 * If we're removing a directory, perform some additional validation.
2601 */
2602 if (is_dir) {
Dave Chinner54d7b5c2016-02-09 16:54:58 +11002603 ASSERT(VFS_I(ip)->i_nlink >= 2);
2604 if (VFS_I(ip)->i_nlink != 2) {
Dave Chinner24513372014-06-25 14:58:08 +10002605 error = -ENOTEMPTY;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10002606 goto out_trans_cancel;
2607 }
2608 if (!xfs_dir_isempty(ip)) {
Dave Chinner24513372014-06-25 14:58:08 +10002609 error = -ENOTEMPTY;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10002610 goto out_trans_cancel;
2611 }
Dave Chinnerc24b5df2013-08-12 20:49:45 +10002612
Dave Chinner27320362013-10-29 22:11:44 +11002613 /* Drop the link from ip's "..". */
Dave Chinnerc24b5df2013-08-12 20:49:45 +10002614 error = xfs_droplink(tp, dp);
2615 if (error)
Dave Chinner27320362013-10-29 22:11:44 +11002616 goto out_trans_cancel;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10002617
Dave Chinner27320362013-10-29 22:11:44 +11002618 /* Drop the "." link from ip to self. */
Dave Chinnerc24b5df2013-08-12 20:49:45 +10002619 error = xfs_droplink(tp, ip);
2620 if (error)
Dave Chinner27320362013-10-29 22:11:44 +11002621 goto out_trans_cancel;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10002622 } else {
2623 /*
2624 * When removing a non-directory we need to log the parent
2625 * inode here. For a directory this is done implicitly
2626 * by the xfs_droplink call for the ".." entry.
2627 */
2628 xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
2629 }
Dave Chinner27320362013-10-29 22:11:44 +11002630 xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10002631
Dave Chinner27320362013-10-29 22:11:44 +11002632 /* Drop the link from dp to ip. */
Dave Chinnerc24b5df2013-08-12 20:49:45 +10002633 error = xfs_droplink(tp, ip);
2634 if (error)
Dave Chinner27320362013-10-29 22:11:44 +11002635 goto out_trans_cancel;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10002636
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10002637 xfs_defer_init(&dfops, &first_block);
Dave Chinner27320362013-10-29 22:11:44 +11002638 error = xfs_dir_removename(tp, dp, name, ip->i_ino,
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10002639 &first_block, &dfops, resblks);
Dave Chinner27320362013-10-29 22:11:44 +11002640 if (error) {
Dave Chinner24513372014-06-25 14:58:08 +10002641 ASSERT(error != -ENOENT);
Dave Chinner27320362013-10-29 22:11:44 +11002642 goto out_bmap_cancel;
2643 }
2644
Dave Chinnerc24b5df2013-08-12 20:49:45 +10002645 /*
2646 * If this is a synchronous mount, make sure that the
2647 * remove transaction goes to disk before returning to
2648 * the user.
2649 */
2650 if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
2651 xfs_trans_set_sync(tp);
2652
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10002653 error = xfs_defer_finish(&tp, &dfops, NULL);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10002654 if (error)
2655 goto out_bmap_cancel;
2656
Christoph Hellwig70393312015-06-04 13:48:08 +10002657 error = xfs_trans_commit(tp);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10002658 if (error)
2659 goto std_return;
2660
Christoph Hellwig2cd2ef62014-04-23 07:11:51 +10002661 if (is_dir && xfs_inode_is_filestream(ip))
Dave Chinnerc24b5df2013-08-12 20:49:45 +10002662 xfs_filestream_deassociate(ip);
2663
2664 return 0;
2665
2666 out_bmap_cancel:
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10002667 xfs_defer_cancel(&dfops);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10002668 out_trans_cancel:
Christoph Hellwig4906e212015-06-04 13:47:56 +10002669 xfs_trans_cancel(tp);
Dave Chinnerc24b5df2013-08-12 20:49:45 +10002670 std_return:
2671 return error;
2672}
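/*
 * Illustrative sketch, compiled out: the AGI-before-AGF ordering described
 * above xfs_remove() reduces to this call order inside the first transaction
 * of a remove -- drop the link count (AGI side) before removing the directory
 * entry (which may lock an AGF).  The helper name is hypothetical; the
 * arguments mirror the calls in xfs_remove() itself.
 */
#if 0
static int
xfs_remove_order_sketch(
	struct xfs_trans	*tp,
	struct xfs_inode	*dp,
	struct xfs_name		*name,
	struct xfs_inode	*ip,
	xfs_fsblock_t		*first_block,
	struct xfs_defer_ops	*dfops,
	uint			resblks)
{
	int			error;

	/* AGI first: this may put the inode on the unlinked list */
	error = xfs_droplink(tp, ip);
	if (error)
		return error;

	/* AGF second: directory block frees happen after the AGI is held */
	return xfs_dir_removename(tp, dp, name, ip->i_ino,
				  first_block, dfops, resblks);
}
#endif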
2673
Dave Chinnerf6bba202013-08-12 20:49:46 +10002674/*
2675 * Enter all inodes for a rename transaction into a sorted array.
2676 */
Dave Chinner95afcf52015-03-25 14:03:32 +11002677#define __XFS_SORT_INODES 5
Dave Chinnerf6bba202013-08-12 20:49:46 +10002678STATIC void
2679xfs_sort_for_rename(
Dave Chinner95afcf52015-03-25 14:03:32 +11002680 struct xfs_inode *dp1, /* in: old (source) directory inode */
2681 struct xfs_inode *dp2, /* in: new (target) directory inode */
2682 struct xfs_inode *ip1, /* in: inode of old entry */
2683 struct xfs_inode *ip2, /* in: inode of new entry */
2684 struct xfs_inode *wip, /* in: whiteout inode */
2685 struct xfs_inode **i_tab,/* out: sorted array of inodes */
2686 int *num_inodes) /* in/out: inodes in array */
Dave Chinnerf6bba202013-08-12 20:49:46 +10002687{
Dave Chinnerf6bba202013-08-12 20:49:46 +10002688 int i, j;
2689
Dave Chinner95afcf52015-03-25 14:03:32 +11002690 ASSERT(*num_inodes == __XFS_SORT_INODES);
2691 memset(i_tab, 0, *num_inodes * sizeof(struct xfs_inode *));
2692
Dave Chinnerf6bba202013-08-12 20:49:46 +10002693 /*
 2694	 * i_tab contains a list of pointers to inodes.  We initialize
 2695	 * the table here and sort it.  We then use it to order the
 2696	 * acquisition of the inode locks.
2697 *
2698 * Note that the table may contain duplicates. e.g., dp1 == dp2.
2699 */
Dave Chinner95afcf52015-03-25 14:03:32 +11002700 i = 0;
2701 i_tab[i++] = dp1;
2702 i_tab[i++] = dp2;
2703 i_tab[i++] = ip1;
2704 if (ip2)
2705 i_tab[i++] = ip2;
2706 if (wip)
2707 i_tab[i++] = wip;
2708 *num_inodes = i;
Dave Chinnerf6bba202013-08-12 20:49:46 +10002709
2710 /*
2711 * Sort the elements via bubble sort. (Remember, there are at
Dave Chinner95afcf52015-03-25 14:03:32 +11002712 * most 5 elements to sort, so this is adequate.)
Dave Chinnerf6bba202013-08-12 20:49:46 +10002713 */
2714 for (i = 0; i < *num_inodes; i++) {
2715 for (j = 1; j < *num_inodes; j++) {
2716 if (i_tab[j]->i_ino < i_tab[j-1]->i_ino) {
Dave Chinner95afcf52015-03-25 14:03:32 +11002717 struct xfs_inode *temp = i_tab[j];
Dave Chinnerf6bba202013-08-12 20:49:46 +10002718 i_tab[j] = i_tab[j-1];
2719 i_tab[j-1] = temp;
2720 }
2721 }
2722 }
2723}
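/*
 * Illustrative sketch, compiled out: how the sorted table is consumed.
 * Sorting by inode number gives every rename the same global lock order, so
 * xfs_lock_inodes() can take all the locks without deadlocking against a
 * concurrent rename of the same inodes.  A plain rename with no target and
 * no whiteout is assumed; the helper name is hypothetical.
 */
#if 0
static void
xfs_rename_lock_sketch(
	struct xfs_inode	*src_dp,
	struct xfs_inode	*target_dp,
	struct xfs_inode	*src_ip)
{
	struct xfs_inode	*inodes[__XFS_SORT_INODES];
	int			num_inodes = __XFS_SORT_INODES;

	xfs_sort_for_rename(src_dp, target_dp, src_ip, NULL, NULL,
			    inodes, &num_inodes);
	xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL);
}
#endif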
2724
Dave Chinner310606b2015-03-25 14:06:07 +11002725static int
2726xfs_finish_rename(
2727 struct xfs_trans *tp,
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10002728 struct xfs_defer_ops *dfops)
Dave Chinner310606b2015-03-25 14:06:07 +11002729{
Dave Chinner310606b2015-03-25 14:06:07 +11002730 int error;
2731
2732 /*
2733 * If this is a synchronous mount, make sure that the rename transaction
2734 * goes to disk before returning to the user.
2735 */
2736 if (tp->t_mountp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
2737 xfs_trans_set_sync(tp);
2738
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10002739 error = xfs_defer_finish(&tp, dfops, NULL);
Dave Chinner310606b2015-03-25 14:06:07 +11002740 if (error) {
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10002741 xfs_defer_cancel(dfops);
Christoph Hellwig4906e212015-06-04 13:47:56 +10002742 xfs_trans_cancel(tp);
Dave Chinner310606b2015-03-25 14:06:07 +11002743 return error;
2744 }
2745
Christoph Hellwig70393312015-06-04 13:48:08 +10002746 return xfs_trans_commit(tp);
Dave Chinner310606b2015-03-25 14:06:07 +11002747}
2748
Dave Chinnerf6bba202013-08-12 20:49:46 +10002749/*
Carlos Maiolinod31a1822014-12-24 08:51:42 +11002750 * xfs_cross_rename()
2751 *
 2752 * Responsible for handling the RENAME_EXCHANGE flag in the renameat2() syscall.
2753 */
2754STATIC int
2755xfs_cross_rename(
2756 struct xfs_trans *tp,
2757 struct xfs_inode *dp1,
2758 struct xfs_name *name1,
2759 struct xfs_inode *ip1,
2760 struct xfs_inode *dp2,
2761 struct xfs_name *name2,
2762 struct xfs_inode *ip2,
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10002763 struct xfs_defer_ops *dfops,
Carlos Maiolinod31a1822014-12-24 08:51:42 +11002764 xfs_fsblock_t *first_block,
2765 int spaceres)
2766{
2767 int error = 0;
2768 int ip1_flags = 0;
2769 int ip2_flags = 0;
2770 int dp2_flags = 0;
2771
2772 /* Swap inode number for dirent in first parent */
2773 error = xfs_dir_replace(tp, dp1, name1,
2774 ip2->i_ino,
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10002775 first_block, dfops, spaceres);
Carlos Maiolinod31a1822014-12-24 08:51:42 +11002776 if (error)
Dave Chinnereeacd322015-03-25 14:08:07 +11002777 goto out_trans_abort;
Carlos Maiolinod31a1822014-12-24 08:51:42 +11002778
2779 /* Swap inode number for dirent in second parent */
2780 error = xfs_dir_replace(tp, dp2, name2,
2781 ip1->i_ino,
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10002782 first_block, dfops, spaceres);
Carlos Maiolinod31a1822014-12-24 08:51:42 +11002783 if (error)
Dave Chinnereeacd322015-03-25 14:08:07 +11002784 goto out_trans_abort;
Carlos Maiolinod31a1822014-12-24 08:51:42 +11002785
2786 /*
2787 * If we're renaming one or more directories across different parents,
2788 * update the respective ".." entries (and link counts) to match the new
2789 * parents.
2790 */
2791 if (dp1 != dp2) {
2792 dp2_flags = XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2793
Dave Chinnerc19b3b052016-02-09 16:54:58 +11002794 if (S_ISDIR(VFS_I(ip2)->i_mode)) {
Carlos Maiolinod31a1822014-12-24 08:51:42 +11002795 error = xfs_dir_replace(tp, ip2, &xfs_name_dotdot,
2796 dp1->i_ino, first_block,
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10002797 dfops, spaceres);
Carlos Maiolinod31a1822014-12-24 08:51:42 +11002798 if (error)
Dave Chinnereeacd322015-03-25 14:08:07 +11002799 goto out_trans_abort;
Carlos Maiolinod31a1822014-12-24 08:51:42 +11002800
2801 /* transfer ip2 ".." reference to dp1 */
Dave Chinnerc19b3b052016-02-09 16:54:58 +11002802 if (!S_ISDIR(VFS_I(ip1)->i_mode)) {
Carlos Maiolinod31a1822014-12-24 08:51:42 +11002803 error = xfs_droplink(tp, dp2);
2804 if (error)
Dave Chinnereeacd322015-03-25 14:08:07 +11002805 goto out_trans_abort;
Carlos Maiolinod31a1822014-12-24 08:51:42 +11002806 error = xfs_bumplink(tp, dp1);
2807 if (error)
Dave Chinnereeacd322015-03-25 14:08:07 +11002808 goto out_trans_abort;
Carlos Maiolinod31a1822014-12-24 08:51:42 +11002809 }
2810
2811 /*
 2812			 * Although ip1 isn't changed here, userspace needs
 2813			 * to be warned about the change, so that applications
 2814			 * relying on it (like backup programs) will properly
 2815			 * notice the change.
2816 */
2817 ip1_flags |= XFS_ICHGTIME_CHG;
2818 ip2_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2819 }
2820
Dave Chinnerc19b3b052016-02-09 16:54:58 +11002821 if (S_ISDIR(VFS_I(ip1)->i_mode)) {
Carlos Maiolinod31a1822014-12-24 08:51:42 +11002822 error = xfs_dir_replace(tp, ip1, &xfs_name_dotdot,
2823 dp2->i_ino, first_block,
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10002824 dfops, spaceres);
Carlos Maiolinod31a1822014-12-24 08:51:42 +11002825 if (error)
Dave Chinnereeacd322015-03-25 14:08:07 +11002826 goto out_trans_abort;
Carlos Maiolinod31a1822014-12-24 08:51:42 +11002827
2828 /* transfer ip1 ".." reference to dp2 */
Dave Chinnerc19b3b052016-02-09 16:54:58 +11002829 if (!S_ISDIR(VFS_I(ip2)->i_mode)) {
Carlos Maiolinod31a1822014-12-24 08:51:42 +11002830 error = xfs_droplink(tp, dp1);
2831 if (error)
Dave Chinnereeacd322015-03-25 14:08:07 +11002832 goto out_trans_abort;
Carlos Maiolinod31a1822014-12-24 08:51:42 +11002833 error = xfs_bumplink(tp, dp2);
2834 if (error)
Dave Chinnereeacd322015-03-25 14:08:07 +11002835 goto out_trans_abort;
Carlos Maiolinod31a1822014-12-24 08:51:42 +11002836 }
2837
2838 /*
 2839			 * Although ip2 isn't changed here, userspace needs
 2840			 * to be warned about the change, so that applications
 2841			 * relying on it (like backup programs) will properly
 2842			 * notice the change.
2843 */
2844 ip1_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2845 ip2_flags |= XFS_ICHGTIME_CHG;
2846 }
2847 }
2848
2849 if (ip1_flags) {
2850 xfs_trans_ichgtime(tp, ip1, ip1_flags);
2851 xfs_trans_log_inode(tp, ip1, XFS_ILOG_CORE);
2852 }
2853 if (ip2_flags) {
2854 xfs_trans_ichgtime(tp, ip2, ip2_flags);
2855 xfs_trans_log_inode(tp, ip2, XFS_ILOG_CORE);
2856 }
2857 if (dp2_flags) {
2858 xfs_trans_ichgtime(tp, dp2, dp2_flags);
2859 xfs_trans_log_inode(tp, dp2, XFS_ILOG_CORE);
2860 }
2861 xfs_trans_ichgtime(tp, dp1, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2862 xfs_trans_log_inode(tp, dp1, XFS_ILOG_CORE);
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10002863 return xfs_finish_rename(tp, dfops);
Dave Chinnereeacd322015-03-25 14:08:07 +11002864
2865out_trans_abort:
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10002866 xfs_defer_cancel(dfops);
Christoph Hellwig4906e212015-06-04 13:47:56 +10002867 xfs_trans_cancel(tp);
Carlos Maiolinod31a1822014-12-24 08:51:42 +11002868 return error;
2869}
2870
2871/*
Dave Chinner7dcf5c32015-03-25 14:08:08 +11002872 * xfs_rename_alloc_whiteout()
2873 *
2874 * Return a referenced, unlinked, unlocked inode that that can be used as a
2875 * whiteout in a rename transaction. We use a tmpfile inode here so that if we
2876 * crash between allocating the inode and linking it into the rename transaction
2877 * recovery will free the inode and we won't leak it.
2878 */
2879static int
2880xfs_rename_alloc_whiteout(
2881 struct xfs_inode *dp,
2882 struct xfs_inode **wip)
2883{
2884 struct xfs_inode *tmpfile;
2885 int error;
2886
2887 error = xfs_create_tmpfile(dp, NULL, S_IFCHR | WHITEOUT_MODE, &tmpfile);
2888 if (error)
2889 return error;
2890
Brian Foster22419ac2015-05-29 08:14:55 +10002891 /*
2892 * Prepare the tmpfile inode as if it were created through the VFS.
2893 * Otherwise, the link increment paths will complain about nlink 0->1.
2894 * Drop the link count as done by d_tmpfile(), complete the inode setup
2895 * and flag it as linkable.
2896 */
2897 drop_nlink(VFS_I(tmpfile));
Christoph Hellwig2b3d1d42016-04-06 07:48:27 +10002898 xfs_setup_iops(tmpfile);
Dave Chinner7dcf5c32015-03-25 14:08:08 +11002899 xfs_finish_inode_setup(tmpfile);
2900 VFS_I(tmpfile)->i_state |= I_LINKABLE;
2901
2902 *wip = tmpfile;
2903 return 0;
2904}
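/*
 * Illustrative sketch, compiled out: the whiteout life cycle once xfs_rename()
 * has the tmpfile.  The source dirent is pointed at the whiteout, then the
 * link count is bumped and the inode pulled off the AGI unlinked list --
 * the same steps the RENAME_WHITEOUT branch of xfs_rename() below performs.
 * The helper name is hypothetical.
 */
#if 0
static int
xfs_whiteout_attach_sketch(
	struct xfs_trans	*tp,
	struct xfs_inode	*src_dp,
	struct xfs_name		*src_name,
	struct xfs_inode	*wip,
	xfs_fsblock_t		*first_block,
	struct xfs_defer_ops	*dfops,
	int			spaceres)
{
	int			error;

	/* point the source entry at the whiteout inode */
	error = xfs_dir_replace(tp, src_dp, src_name, wip->i_ino,
				first_block, dfops, spaceres);
	if (error)
		return error;

	error = xfs_bumplink(tp, wip);		/* nlink 0 -> 1 */
	if (error)
		return error;

	error = xfs_iunlink_remove(tp, wip);	/* off the unlinked list */
	if (error)
		return error;

	VFS_I(wip)->i_state &= ~I_LINKABLE;	/* a real link now exists */
	return 0;
}
#endif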
2905
2906/*
Dave Chinnerf6bba202013-08-12 20:49:46 +10002907 * xfs_rename
2908 */
2909int
2910xfs_rename(
Dave Chinner7dcf5c32015-03-25 14:08:08 +11002911 struct xfs_inode *src_dp,
2912 struct xfs_name *src_name,
2913 struct xfs_inode *src_ip,
2914 struct xfs_inode *target_dp,
2915 struct xfs_name *target_name,
2916 struct xfs_inode *target_ip,
2917 unsigned int flags)
Dave Chinnerf6bba202013-08-12 20:49:46 +10002918{
Dave Chinner7dcf5c32015-03-25 14:08:08 +11002919 struct xfs_mount *mp = src_dp->i_mount;
2920 struct xfs_trans *tp;
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10002921 struct xfs_defer_ops dfops;
Dave Chinner7dcf5c32015-03-25 14:08:08 +11002922 xfs_fsblock_t first_block;
2923 struct xfs_inode *wip = NULL; /* whiteout inode */
2924 struct xfs_inode *inodes[__XFS_SORT_INODES];
2925 int num_inodes = __XFS_SORT_INODES;
Dave Chinner2b936812015-03-25 15:12:30 +11002926 bool new_parent = (src_dp != target_dp);
Dave Chinnerc19b3b052016-02-09 16:54:58 +11002927 bool src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode);
Dave Chinner7dcf5c32015-03-25 14:08:08 +11002928 int spaceres;
2929 int error;
Dave Chinnerf6bba202013-08-12 20:49:46 +10002930
2931 trace_xfs_rename(src_dp, target_dp, src_name, target_name);
2932
Dave Chinnereeacd322015-03-25 14:08:07 +11002933 if ((flags & RENAME_EXCHANGE) && !target_ip)
2934 return -EINVAL;
Dave Chinnerf6bba202013-08-12 20:49:46 +10002935
Dave Chinner7dcf5c32015-03-25 14:08:08 +11002936 /*
2937 * If we are doing a whiteout operation, allocate the whiteout inode
2938 * we will be placing at the target and ensure the type is set
2939 * appropriately.
2940 */
2941 if (flags & RENAME_WHITEOUT) {
2942 ASSERT(!(flags & (RENAME_NOREPLACE | RENAME_EXCHANGE)));
2943 error = xfs_rename_alloc_whiteout(target_dp, &wip);
2944 if (error)
2945 return error;
Dave Chinnerf6bba202013-08-12 20:49:46 +10002946
Dave Chinner7dcf5c32015-03-25 14:08:08 +11002947 /* setup target dirent info as whiteout */
2948 src_name->type = XFS_DIR3_FT_CHRDEV;
2949 }
2950
2951 xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, wip,
Dave Chinnerf6bba202013-08-12 20:49:46 +10002952 inodes, &num_inodes);
2953
Dave Chinnerf6bba202013-08-12 20:49:46 +10002954 spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len);
Christoph Hellwig253f4912016-04-06 09:19:55 +10002955 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, spaceres, 0, 0, &tp);
Dave Chinner24513372014-06-25 14:58:08 +10002956 if (error == -ENOSPC) {
Dave Chinnerf6bba202013-08-12 20:49:46 +10002957 spaceres = 0;
Christoph Hellwig253f4912016-04-06 09:19:55 +10002958 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, 0, 0, 0,
2959 &tp);
Dave Chinnerf6bba202013-08-12 20:49:46 +10002960 }
Dave Chinner445883e2015-03-25 14:05:43 +11002961 if (error)
Christoph Hellwig253f4912016-04-06 09:19:55 +10002962 goto out_release_wip;
Dave Chinnerf6bba202013-08-12 20:49:46 +10002963
2964 /*
2965 * Attach the dquots to the inodes
2966 */
2967 error = xfs_qm_vop_rename_dqattach(inodes);
Dave Chinner445883e2015-03-25 14:05:43 +11002968 if (error)
2969 goto out_trans_cancel;
Dave Chinnerf6bba202013-08-12 20:49:46 +10002970
2971 /*
2972 * Lock all the participating inodes. Depending upon whether
2973 * the target_name exists in the target directory, and
2974 * whether the target directory is the same as the source
2975 * directory, we can lock from 2 to 4 inodes.
2976 */
2977 xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL);
2978
2979 /*
2980 * Join all the inodes to the transaction. From this point on,
2981 * we can rely on either trans_commit or trans_cancel to unlock
2982 * them.
2983 */
Christoph Hellwig65523212016-11-30 14:33:25 +11002984 xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL);
Dave Chinnerf6bba202013-08-12 20:49:46 +10002985 if (new_parent)
Christoph Hellwig65523212016-11-30 14:33:25 +11002986 xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL);
Dave Chinnerf6bba202013-08-12 20:49:46 +10002987 xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL);
2988 if (target_ip)
2989 xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL);
Dave Chinner7dcf5c32015-03-25 14:08:08 +11002990 if (wip)
2991 xfs_trans_ijoin(tp, wip, XFS_ILOCK_EXCL);
Dave Chinnerf6bba202013-08-12 20:49:46 +10002992
2993 /*
2994 * If we are using project inheritance, we only allow renames
2995 * into our tree when the project IDs are the same; else the
2996 * tree quota mechanism would be circumvented.
2997 */
2998 if (unlikely((target_dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
2999 (xfs_get_projid(target_dp) != xfs_get_projid(src_ip)))) {
Dave Chinner24513372014-06-25 14:58:08 +10003000 error = -EXDEV;
Dave Chinner445883e2015-03-25 14:05:43 +11003001 goto out_trans_cancel;
Dave Chinnerf6bba202013-08-12 20:49:46 +10003002 }
3003
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10003004 xfs_defer_init(&dfops, &first_block);
Dave Chinner445883e2015-03-25 14:05:43 +11003005
Dave Chinnereeacd322015-03-25 14:08:07 +11003006 /* RENAME_EXCHANGE is unique from here on. */
3007 if (flags & RENAME_EXCHANGE)
3008 return xfs_cross_rename(tp, src_dp, src_name, src_ip,
3009 target_dp, target_name, target_ip,
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10003010 &dfops, &first_block, spaceres);
Carlos Maiolinod31a1822014-12-24 08:51:42 +11003011
3012 /*
Dave Chinnerf6bba202013-08-12 20:49:46 +10003013 * Set up the target.
3014 */
3015 if (target_ip == NULL) {
3016 /*
3017 * If there's no space reservation, check the entry will
3018 * fit before actually inserting it.
3019 */
Eric Sandeen94f3cad2014-09-09 11:57:52 +10003020 if (!spaceres) {
3021 error = xfs_dir_canenter(tp, target_dp, target_name);
3022 if (error)
Dave Chinner445883e2015-03-25 14:05:43 +11003023 goto out_trans_cancel;
Eric Sandeen94f3cad2014-09-09 11:57:52 +10003024 }
Dave Chinnerf6bba202013-08-12 20:49:46 +10003025 /*
3026 * If target does not exist and the rename crosses
3027 * directories, adjust the target directory link count
3028 * to account for the ".." reference from the new entry.
3029 */
3030 error = xfs_dir_createname(tp, target_dp, target_name,
3031 src_ip->i_ino, &first_block,
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10003032 &dfops, spaceres);
Dave Chinnerf6bba202013-08-12 20:49:46 +10003033 if (error)
Christoph Hellwig4906e212015-06-04 13:47:56 +10003034 goto out_bmap_cancel;
Dave Chinnerf6bba202013-08-12 20:49:46 +10003035
3036 xfs_trans_ichgtime(tp, target_dp,
3037 XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3038
3039 if (new_parent && src_is_directory) {
3040 error = xfs_bumplink(tp, target_dp);
3041 if (error)
Christoph Hellwig4906e212015-06-04 13:47:56 +10003042 goto out_bmap_cancel;
Dave Chinnerf6bba202013-08-12 20:49:46 +10003043 }
3044 } else { /* target_ip != NULL */
3045 /*
3046 * If target exists and it's a directory, check that both
3047 * target and source are directories and that target can be
3048 * destroyed, or that neither is a directory.
3049 */
Dave Chinnerc19b3b052016-02-09 16:54:58 +11003050 if (S_ISDIR(VFS_I(target_ip)->i_mode)) {
Dave Chinnerf6bba202013-08-12 20:49:46 +10003051 /*
3052 * Make sure target dir is empty.
3053 */
3054 if (!(xfs_dir_isempty(target_ip)) ||
Dave Chinner54d7b5c2016-02-09 16:54:58 +11003055 (VFS_I(target_ip)->i_nlink > 2)) {
Dave Chinner24513372014-06-25 14:58:08 +10003056 error = -EEXIST;
Dave Chinner445883e2015-03-25 14:05:43 +11003057 goto out_trans_cancel;
Dave Chinnerf6bba202013-08-12 20:49:46 +10003058 }
3059 }
3060
3061 /*
3062 * Link the source inode under the target name.
3063 * If the source inode is a directory and we are moving
3064 * it across directories, its ".." entry will be
3065 * inconsistent until we replace that down below.
3066 *
3067 * In case there is already an entry with the same
3068 * name at the destination directory, remove it first.
3069 */
3070 error = xfs_dir_replace(tp, target_dp, target_name,
3071 src_ip->i_ino,
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10003072 &first_block, &dfops, spaceres);
Dave Chinnerf6bba202013-08-12 20:49:46 +10003073 if (error)
Christoph Hellwig4906e212015-06-04 13:47:56 +10003074 goto out_bmap_cancel;
Dave Chinnerf6bba202013-08-12 20:49:46 +10003075
3076 xfs_trans_ichgtime(tp, target_dp,
3077 XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3078
3079 /*
3080 * Decrement the link count on the target since the target
3081 * dir no longer points to it.
3082 */
3083 error = xfs_droplink(tp, target_ip);
3084 if (error)
Christoph Hellwig4906e212015-06-04 13:47:56 +10003085 goto out_bmap_cancel;
Dave Chinnerf6bba202013-08-12 20:49:46 +10003086
3087 if (src_is_directory) {
3088 /*
3089 * Drop the link from the old "." entry.
3090 */
3091 error = xfs_droplink(tp, target_ip);
3092 if (error)
Christoph Hellwig4906e212015-06-04 13:47:56 +10003093 goto out_bmap_cancel;
Dave Chinnerf6bba202013-08-12 20:49:46 +10003094 }
3095 } /* target_ip != NULL */
3096
3097 /*
3098 * Remove the source.
3099 */
3100 if (new_parent && src_is_directory) {
3101 /*
3102 * Rewrite the ".." entry to point to the new
3103 * directory.
3104 */
3105 error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot,
3106 target_dp->i_ino,
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10003107 &first_block, &dfops, spaceres);
Dave Chinner24513372014-06-25 14:58:08 +10003108 ASSERT(error != -EEXIST);
Dave Chinnerf6bba202013-08-12 20:49:46 +10003109 if (error)
Christoph Hellwig4906e212015-06-04 13:47:56 +10003110 goto out_bmap_cancel;
Dave Chinnerf6bba202013-08-12 20:49:46 +10003111 }
3112
3113 /*
3114 * We always want to hit the ctime on the source inode.
3115 *
3116 * This isn't strictly required by the standards since the source
3117 * inode isn't really being changed, but old unix file systems did
3118 * it and some incremental backup programs won't work without it.
3119 */
3120 xfs_trans_ichgtime(tp, src_ip, XFS_ICHGTIME_CHG);
3121 xfs_trans_log_inode(tp, src_ip, XFS_ILOG_CORE);
3122
3123 /*
3124 * Adjust the link count on src_dp. This is necessary when
3125 * renaming a directory, either within one parent when
3126 * the target existed, or across two parent directories.
3127 */
3128 if (src_is_directory && (new_parent || target_ip != NULL)) {
3129
3130 /*
3131 * Decrement link count on src_directory since the
3132 * entry that's moved no longer points to it.
3133 */
3134 error = xfs_droplink(tp, src_dp);
3135 if (error)
Christoph Hellwig4906e212015-06-04 13:47:56 +10003136 goto out_bmap_cancel;
Dave Chinnerf6bba202013-08-12 20:49:46 +10003137 }
3138
Dave Chinner7dcf5c32015-03-25 14:08:08 +11003139 /*
3140 * For whiteouts, we only need to update the source dirent with the
3141 * inode number of the whiteout inode rather than removing it
3142 * altogether.
3143 */
3144 if (wip) {
3145 error = xfs_dir_replace(tp, src_dp, src_name, wip->i_ino,
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10003146 &first_block, &dfops, spaceres);
Dave Chinner7dcf5c32015-03-25 14:08:08 +11003147 } else
3148 error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino,
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10003149 &first_block, &dfops, spaceres);
Dave Chinnerf6bba202013-08-12 20:49:46 +10003150 if (error)
Christoph Hellwig4906e212015-06-04 13:47:56 +10003151 goto out_bmap_cancel;
Dave Chinnerf6bba202013-08-12 20:49:46 +10003152
Dave Chinner7dcf5c32015-03-25 14:08:08 +11003153 /*
3154 * For whiteouts, we need to bump the link count on the whiteout inode.
3155 * This means that failures all the way up to this point leave the inode
3156 * on the unlinked list and so cleanup is a simple matter of dropping
3157 * the remaining reference to it. If we fail here after bumping the link
3158 * count, we're shutting down the filesystem so we'll never see the
3159 * intermediate state on disk.
3160 */
3161 if (wip) {
Dave Chinner54d7b5c2016-02-09 16:54:58 +11003162 ASSERT(VFS_I(wip)->i_nlink == 0);
Dave Chinner7dcf5c32015-03-25 14:08:08 +11003163 error = xfs_bumplink(tp, wip);
3164 if (error)
Christoph Hellwig4906e212015-06-04 13:47:56 +10003165 goto out_bmap_cancel;
Dave Chinner7dcf5c32015-03-25 14:08:08 +11003166 error = xfs_iunlink_remove(tp, wip);
3167 if (error)
Christoph Hellwig4906e212015-06-04 13:47:56 +10003168 goto out_bmap_cancel;
Dave Chinner7dcf5c32015-03-25 14:08:08 +11003169 xfs_trans_log_inode(tp, wip, XFS_ILOG_CORE);
3170
3171 /*
3172 * Now we have a real link, clear the "I'm a tmpfile" state
3173 * flag from the inode so it doesn't accidentally get misused in
3174 * future.
3175 */
3176 VFS_I(wip)->i_state &= ~I_LINKABLE;
3177 }
Dave Chinnerf6bba202013-08-12 20:49:46 +10003178
3179 xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3180 xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE);
3181 if (new_parent)
3182 xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);
3183
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10003184 error = xfs_finish_rename(tp, &dfops);
Dave Chinner7dcf5c32015-03-25 14:08:08 +11003185 if (wip)
3186 IRELE(wip);
3187 return error;
Dave Chinnerf6bba202013-08-12 20:49:46 +10003188
Dave Chinner445883e2015-03-25 14:05:43 +11003189out_bmap_cancel:
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10003190 xfs_defer_cancel(&dfops);
Dave Chinner445883e2015-03-25 14:05:43 +11003191out_trans_cancel:
Christoph Hellwig4906e212015-06-04 13:47:56 +10003192 xfs_trans_cancel(tp);
Christoph Hellwig253f4912016-04-06 09:19:55 +10003193out_release_wip:
Dave Chinner7dcf5c32015-03-25 14:08:08 +11003194 if (wip)
3195 IRELE(wip);
Dave Chinnerf6bba202013-08-12 20:49:46 +10003196 return error;
3197}
3198
David Chinnerbad55842008-03-06 13:43:49 +11003199STATIC int
3200xfs_iflush_cluster(
Dave Chinner19429362016-05-18 14:09:46 +10003201 struct xfs_inode *ip,
3202 struct xfs_buf *bp)
David Chinnerbad55842008-03-06 13:43:49 +11003203{
Dave Chinner19429362016-05-18 14:09:46 +10003204 struct xfs_mount *mp = ip->i_mount;
Dave Chinner5017e972010-01-11 11:47:40 +00003205 struct xfs_perag *pag;
David Chinnerbad55842008-03-06 13:43:49 +11003206 unsigned long first_index, mask;
David Chinnerc8f5f122008-05-20 11:30:15 +10003207 unsigned long inodes_per_cluster;
Dave Chinner19429362016-05-18 14:09:46 +10003208 int cilist_size;
3209 struct xfs_inode **cilist;
3210 struct xfs_inode *cip;
David Chinnerbad55842008-03-06 13:43:49 +11003211 int nr_found;
3212 int clcount = 0;
3213 int bufwasdelwri;
3214 int i;
3215
Dave Chinner5017e972010-01-11 11:47:40 +00003216 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
David Chinnerbad55842008-03-06 13:43:49 +11003217
Jie Liu0f49efd2013-12-13 15:51:48 +11003218 inodes_per_cluster = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;
Dave Chinner19429362016-05-18 14:09:46 +10003219 cilist_size = inodes_per_cluster * sizeof(xfs_inode_t *);
3220 cilist = kmem_alloc(cilist_size, KM_MAYFAIL|KM_NOFS);
3221 if (!cilist)
Dave Chinner44b56e02010-01-11 11:47:43 +00003222 goto out_put;
David Chinnerbad55842008-03-06 13:43:49 +11003223
Jie Liu0f49efd2013-12-13 15:51:48 +11003224 mask = ~(((mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog)) - 1);
David Chinnerbad55842008-03-06 13:43:49 +11003225 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask;
Dave Chinner1a3e8f32010-12-17 17:29:43 +11003226 rcu_read_lock();
David Chinnerbad55842008-03-06 13:43:49 +11003227 /* really need a gang lookup range call here */
Dave Chinner19429362016-05-18 14:09:46 +10003228 nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void**)cilist,
David Chinnerc8f5f122008-05-20 11:30:15 +10003229 first_index, inodes_per_cluster);
David Chinnerbad55842008-03-06 13:43:49 +11003230 if (nr_found == 0)
3231 goto out_free;
3232
3233 for (i = 0; i < nr_found; i++) {
Dave Chinner19429362016-05-18 14:09:46 +10003234 cip = cilist[i];
3235 if (cip == ip)
David Chinnerbad55842008-03-06 13:43:49 +11003236 continue;
Dave Chinner1a3e8f32010-12-17 17:29:43 +11003237
3238 /*
 3239	 * Because this is an RCU-protected lookup, we could find a
3240 * recently freed or even reallocated inode during the lookup.
3241 * We need to check under the i_flags_lock for a valid inode
3242 * here. Skip it if it is not valid or the wrong inode.
3243 */
Dave Chinner19429362016-05-18 14:09:46 +10003244 spin_lock(&cip->i_flags_lock);
3245 if (!cip->i_ino ||
3246 __xfs_iflags_test(cip, XFS_ISTALE)) {
3247 spin_unlock(&cip->i_flags_lock);
Dave Chinner1a3e8f32010-12-17 17:29:43 +11003248 continue;
3249 }
Dave Chinner5a90e532016-05-18 14:09:13 +10003250
3251 /*
3252 * Once we fall off the end of the cluster, no point checking
3253 * any more inodes in the list because they will also all be
3254 * outside the cluster.
3255 */
Dave Chinner19429362016-05-18 14:09:46 +10003256 if ((XFS_INO_TO_AGINO(mp, cip->i_ino) & mask) != first_index) {
3257 spin_unlock(&cip->i_flags_lock);
Dave Chinner5a90e532016-05-18 14:09:13 +10003258 break;
3259 }
Dave Chinner19429362016-05-18 14:09:46 +10003260 spin_unlock(&cip->i_flags_lock);
Dave Chinner1a3e8f32010-12-17 17:29:43 +11003261
David Chinnerbad55842008-03-06 13:43:49 +11003262 /*
3263 * Do an un-protected check to see if the inode is dirty and
3264 * is a candidate for flushing. These checks will be repeated
3265 * later after the appropriate locks are acquired.
3266 */
Dave Chinner19429362016-05-18 14:09:46 +10003267 if (xfs_inode_clean(cip) && xfs_ipincount(cip) == 0)
David Chinnerbad55842008-03-06 13:43:49 +11003268 continue;
David Chinnerbad55842008-03-06 13:43:49 +11003269
3270 /*
3271 * Try to get locks. If any are unavailable or it is pinned,
3272 * then this inode cannot be flushed and is skipped.
3273 */
3274
Dave Chinner19429362016-05-18 14:09:46 +10003275 if (!xfs_ilock_nowait(cip, XFS_ILOCK_SHARED))
David Chinnerbad55842008-03-06 13:43:49 +11003276 continue;
Dave Chinner19429362016-05-18 14:09:46 +10003277 if (!xfs_iflock_nowait(cip)) {
3278 xfs_iunlock(cip, XFS_ILOCK_SHARED);
David Chinnerbad55842008-03-06 13:43:49 +11003279 continue;
3280 }
Dave Chinner19429362016-05-18 14:09:46 +10003281 if (xfs_ipincount(cip)) {
3282 xfs_ifunlock(cip);
3283 xfs_iunlock(cip, XFS_ILOCK_SHARED);
David Chinnerbad55842008-03-06 13:43:49 +11003284 continue;
3285 }
3286
Dave Chinner8a17d7d2016-05-18 14:09:12 +10003287
3288 /*
3289 * Check the inode number again, just to be certain we are not
3290 * racing with freeing in xfs_reclaim_inode(). See the comments
3291 * in that function for more information as to why the initial
3292 * check is not sufficient.
3293 */
Dave Chinner19429362016-05-18 14:09:46 +10003294 if (!cip->i_ino) {
3295 xfs_ifunlock(cip);
3296 xfs_iunlock(cip, XFS_ILOCK_SHARED);
David Chinnerbad55842008-03-06 13:43:49 +11003297 continue;
3298 }
3299
3300 /*
 3301	 * Arriving here means that this inode can be flushed.  First
3302 * re-check that it's dirty before flushing.
3303 */
Dave Chinner19429362016-05-18 14:09:46 +10003304 if (!xfs_inode_clean(cip)) {
David Chinner33540402008-03-06 13:43:59 +11003305 int error;
Dave Chinner19429362016-05-18 14:09:46 +10003306 error = xfs_iflush_int(cip, bp);
David Chinnerbad55842008-03-06 13:43:49 +11003307 if (error) {
Dave Chinner19429362016-05-18 14:09:46 +10003308 xfs_iunlock(cip, XFS_ILOCK_SHARED);
David Chinnerbad55842008-03-06 13:43:49 +11003309 goto cluster_corrupt_out;
3310 }
3311 clcount++;
3312 } else {
Dave Chinner19429362016-05-18 14:09:46 +10003313 xfs_ifunlock(cip);
David Chinnerbad55842008-03-06 13:43:49 +11003314 }
Dave Chinner19429362016-05-18 14:09:46 +10003315 xfs_iunlock(cip, XFS_ILOCK_SHARED);
David Chinnerbad55842008-03-06 13:43:49 +11003316 }
3317
3318 if (clcount) {
Bill O'Donnellff6d6af2015-10-12 18:21:22 +11003319 XFS_STATS_INC(mp, xs_icluster_flushcnt);
3320 XFS_STATS_ADD(mp, xs_icluster_flushinode, clcount);
David Chinnerbad55842008-03-06 13:43:49 +11003321 }
3322
3323out_free:
Dave Chinner1a3e8f32010-12-17 17:29:43 +11003324 rcu_read_unlock();
Dave Chinner19429362016-05-18 14:09:46 +10003325 kmem_free(cilist);
Dave Chinner44b56e02010-01-11 11:47:43 +00003326out_put:
3327 xfs_perag_put(pag);
David Chinnerbad55842008-03-06 13:43:49 +11003328 return 0;
3329
3330
3331cluster_corrupt_out:
3332 /*
3333 * Corruption detected in the clustering loop. Invalidate the
3334 * inode buffer and shut down the filesystem.
3335 */
Dave Chinner1a3e8f32010-12-17 17:29:43 +11003336 rcu_read_unlock();
David Chinnerbad55842008-03-06 13:43:49 +11003337 /*
Christoph Hellwig43ff2122012-04-23 15:58:39 +10003338 * Clean up the buffer. If it was delwri, just release it --
David Chinnerbad55842008-03-06 13:43:49 +11003339 * brelse can handle it with no problems. If not, shut down the
3340 * filesystem before releasing the buffer.
3341 */
Christoph Hellwig43ff2122012-04-23 15:58:39 +10003342 bufwasdelwri = (bp->b_flags & _XBF_DELWRI_Q);
David Chinnerbad55842008-03-06 13:43:49 +11003343 if (bufwasdelwri)
3344 xfs_buf_relse(bp);
3345
3346 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
3347
3348 if (!bufwasdelwri) {
3349 /*
3350 * Just like incore_relse: if we have b_iodone functions,
3351 * mark the buffer as an error and call them. Otherwise
3352 * mark it as stale and brelse.
3353 */
Christoph Hellwigcb669ca2011-07-13 13:43:49 +02003354 if (bp->b_iodone) {
Dave Chinnerb0388bf2016-02-10 15:01:11 +11003355 bp->b_flags &= ~XBF_DONE;
Christoph Hellwigc867cb62011-10-10 16:52:46 +00003356 xfs_buf_stale(bp);
Dave Chinner24513372014-06-25 14:58:08 +10003357 xfs_buf_ioerror(bp, -EIO);
Dave Chinnere8aaba92014-10-02 09:04:22 +10003358 xfs_buf_ioend(bp);
David Chinnerbad55842008-03-06 13:43:49 +11003359 } else {
Christoph Hellwigc867cb62011-10-10 16:52:46 +00003360 xfs_buf_stale(bp);
David Chinnerbad55842008-03-06 13:43:49 +11003361 xfs_buf_relse(bp);
3362 }
3363 }
3364
3365 /*
3366 * Unlocks the flush lock
3367 */
Dave Chinner19429362016-05-18 14:09:46 +10003368 xfs_iflush_abort(cip, false);
3369 kmem_free(cilist);
Dave Chinner44b56e02010-01-11 11:47:43 +00003370 xfs_perag_put(pag);
Dave Chinner24513372014-06-25 14:58:08 +10003371 return -EFSCORRUPTED;
David Chinnerbad55842008-03-06 13:43:49 +11003372}
3373
Linus Torvalds1da177e2005-04-16 15:20:36 -07003374/*
Christoph Hellwig4c468192012-04-23 15:58:36 +10003375 * Flush dirty inode metadata into the backing buffer.
3376 *
3377 * The caller must have the inode lock and the inode flush lock held. The
3378 * inode lock will still be held upon return to the caller, and the inode
3379 * flush lock will be released after the inode has reached the disk.
3380 *
3381 * The caller must write out the buffer returned in *bpp and release it.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003382 */
3383int
3384xfs_iflush(
Christoph Hellwig4c468192012-04-23 15:58:36 +10003385 struct xfs_inode *ip,
3386 struct xfs_buf **bpp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003387{
Christoph Hellwig4c468192012-04-23 15:58:36 +10003388 struct xfs_mount *mp = ip->i_mount;
Dave Chinnerb1438f42016-05-18 13:53:42 +10003389 struct xfs_buf *bp = NULL;
Christoph Hellwig4c468192012-04-23 15:58:36 +10003390 struct xfs_dinode *dip;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003391 int error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003392
Bill O'Donnellff6d6af2015-10-12 18:21:22 +11003393 XFS_STATS_INC(mp, xs_iflush_count);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003394
Christoph Hellwig579aa9c2008-04-22 17:34:00 +10003395 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
Christoph Hellwig474fce02011-12-18 20:00:09 +00003396 ASSERT(xfs_isiflocked(ip));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003397 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
Christoph Hellwig8096b1e2011-12-18 20:00:07 +00003398 ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003399
Christoph Hellwig4c468192012-04-23 15:58:36 +10003400 *bpp = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003401
Linus Torvalds1da177e2005-04-16 15:20:36 -07003402 xfs_iunpin_wait(ip);
3403
3404 /*
Dave Chinner4b6a4682010-01-11 11:45:21 +00003405 * For stale inodes we cannot rely on the backing buffer remaining
3406 * stale in cache for the remaining life of the stale inode and so
Christoph Hellwig475ee412012-07-03 12:21:22 -04003407 * xfs_imap_to_bp() below may give us a buffer that no longer contains
Dave Chinner4b6a4682010-01-11 11:45:21 +00003408 * inodes.  We have to check this after ensuring the inode is
3409 * unpinned so that it is safe to reclaim the stale inode after the
3410 * flush call.
3411 */
3412 if (xfs_iflags_test(ip, XFS_ISTALE)) {
3413 xfs_ifunlock(ip);
3414 return 0;
3415 }
3416
3417 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -07003418 * This may have been unpinned because the filesystem is shutting
3419 * down forcibly. If that's the case we must not write this inode
Christoph Hellwig32ce90a2012-04-23 15:58:32 +10003420 * to disk, because the log record didn't make it to disk.
3421 *
3422 * We also have to remove the log item from the AIL in this case,
3423 * as we wait for an empty AIL as part of the unmount process.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003424 */
3425 if (XFS_FORCED_SHUTDOWN(mp)) {
Dave Chinner24513372014-06-25 14:58:08 +10003426 error = -EIO;
Christoph Hellwig32ce90a2012-04-23 15:58:32 +10003427 goto abort_out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003428 }
3429
3430 /*
Dave Chinnerb1438f42016-05-18 13:53:42 +10003431 * Get the buffer containing the on-disk inode. We are doing a try-lock
3432 * operation here, so we may get an EAGAIN error. In that case, we
3433 * simply want to return with the inode still dirty.
3434 *
3435 * If we get any other error, we effectively have a corruption situation
3436 * and we cannot flush the inode, so we treat it the same as failing
3437 * xfs_iflush_int().
David Chinnera3f74ff2008-03-06 13:43:42 +11003438 */
Christoph Hellwig475ee412012-07-03 12:21:22 -04003439 error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &bp, XBF_TRYLOCK,
3440 0);
Dave Chinnerb1438f42016-05-18 13:53:42 +10003441 if (error == -EAGAIN) {
David Chinnera3f74ff2008-03-06 13:43:42 +11003442 xfs_ifunlock(ip);
3443 return error;
3444 }
Dave Chinnerb1438f42016-05-18 13:53:42 +10003445 if (error)
3446 goto corrupt_out;
David Chinnera3f74ff2008-03-06 13:43:42 +11003447
3448 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -07003449 * First flush out the inode that xfs_iflush was called with.
3450 */
3451 error = xfs_iflush_int(ip, bp);
David Chinnerbad55842008-03-06 13:43:49 +11003452 if (error)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003453 goto corrupt_out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003454
3455 /*
David Chinnera3f74ff2008-03-06 13:43:42 +11003456 * If the buffer is pinned then push on the log now so we won't
3457 * get stuck waiting in the write for too long.
3458 */
Chandra Seetharaman811e64c2011-07-22 23:40:27 +00003459 if (xfs_buf_ispinned(bp))
Christoph Hellwiga14a3482010-01-19 09:56:46 +00003460 xfs_log_force(mp, 0);
David Chinnera3f74ff2008-03-06 13:43:42 +11003461
3462 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -07003463	 * Inode clustering:
 3464	 * see if other inodes can be gathered into this write.
3465 */
David Chinnerbad55842008-03-06 13:43:49 +11003466 error = xfs_iflush_cluster(ip, bp);
3467 if (error)
3468 goto cluster_corrupt_out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003469
Christoph Hellwig4c468192012-04-23 15:58:36 +10003470 *bpp = bp;
3471 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003472
3473corrupt_out:
Dave Chinnerb1438f42016-05-18 13:53:42 +10003474 if (bp)
3475 xfs_buf_relse(bp);
Nathan Scott7d04a332006-06-09 14:58:38 +10003476 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003477cluster_corrupt_out:
Dave Chinner24513372014-06-25 14:58:08 +10003478 error = -EFSCORRUPTED;
Christoph Hellwig32ce90a2012-04-23 15:58:32 +10003479abort_out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003480 /*
3481 * Unlocks the flush lock
3482 */
Dave Chinner04913fd2012-04-23 15:58:41 +10003483 xfs_iflush_abort(ip, false);
Christoph Hellwig32ce90a2012-04-23 15:58:32 +10003484 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003485}
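/*
 * Illustrative sketch, compiled out: the caller contract documented above
 * xfs_iflush() -- enter with the inode lock and flush lock held, then write
 * out and release the returned buffer.  This mirrors the reclaim path; the
 * helper name is hypothetical and xfs_iflock()/xfs_bwrite() are assumed from
 * the usual XFS headers.
 */
#if 0
static int
xfs_iflush_sketch(
	struct xfs_inode	*ip)
{
	struct xfs_buf		*bp;
	int			error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_iflock(ip);		/* flush lock; released when buffer I/O completes */

	error = xfs_iflush(ip, &bp);
	if (!error) {
		error = xfs_bwrite(bp);
		xfs_buf_relse(bp);
	}
	/* on error, xfs_iflush() has already released the flush lock */
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
#endif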
3486
Linus Torvalds1da177e2005-04-16 15:20:36 -07003487STATIC int
3488xfs_iflush_int(
Christoph Hellwig93848a92013-04-03 16:11:17 +11003489 struct xfs_inode *ip,
3490 struct xfs_buf *bp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003491{
Christoph Hellwig93848a92013-04-03 16:11:17 +11003492 struct xfs_inode_log_item *iip = ip->i_itemp;
3493 struct xfs_dinode *dip;
3494 struct xfs_mount *mp = ip->i_mount;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003495
Christoph Hellwig579aa9c2008-04-22 17:34:00 +10003496 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
Christoph Hellwig474fce02011-12-18 20:00:09 +00003497 ASSERT(xfs_isiflocked(ip));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003498 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
Christoph Hellwig8096b1e2011-12-18 20:00:07 +00003499 ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
Christoph Hellwig93848a92013-04-03 16:11:17 +11003500 ASSERT(iip != NULL && iip->ili_fields != 0);
Dave Chinner263997a2014-05-20 07:46:40 +10003501 ASSERT(ip->i_d.di_version > 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003502
Linus Torvalds1da177e2005-04-16 15:20:36 -07003503 /* set *dip = inode's place in the buffer */
Christoph Hellwig88ee2df2015-06-22 09:44:29 +10003504 dip = xfs_buf_offset(bp, ip->i_imap.im_boffset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003505
Christoph Hellwig69ef9212011-07-08 14:36:05 +02003506 if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
Darrick J. Wong9e24cfd2017-06-20 17:54:47 -07003507 mp, XFS_ERRTAG_IFLUSH_1)) {
Dave Chinner6a19d932011-03-07 10:02:35 +11003508 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3509 "%s: Bad inode %Lu magic number 0x%x, ptr 0x%p",
3510 __func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003511 goto corrupt_out;
3512 }
Dave Chinnerc19b3b052016-02-09 16:54:58 +11003513 if (S_ISREG(VFS_I(ip)->i_mode)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003514 if (XFS_TEST_ERROR(
3515 (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
3516 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE),
Darrick J. Wong9e24cfd2017-06-20 17:54:47 -07003517 mp, XFS_ERRTAG_IFLUSH_3)) {
Dave Chinner6a19d932011-03-07 10:02:35 +11003518 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3519 "%s: Bad regular inode %Lu, ptr 0x%p",
3520 __func__, ip->i_ino, ip);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003521 goto corrupt_out;
3522 }
Dave Chinnerc19b3b052016-02-09 16:54:58 +11003523 } else if (S_ISDIR(VFS_I(ip)->i_mode)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003524 if (XFS_TEST_ERROR(
3525 (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
3526 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
3527 (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL),
Darrick J. Wong9e24cfd2017-06-20 17:54:47 -07003528 mp, XFS_ERRTAG_IFLUSH_4)) {
Dave Chinner6a19d932011-03-07 10:02:35 +11003529 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3530 "%s: Bad directory inode %Lu, ptr 0x%p",
3531 __func__, ip->i_ino, ip);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003532 goto corrupt_out;
3533 }
3534 }
3535 if (XFS_TEST_ERROR(ip->i_d.di_nextents + ip->i_d.di_anextents >
Darrick J. Wong9e24cfd2017-06-20 17:54:47 -07003536 ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5)) {
Dave Chinner6a19d932011-03-07 10:02:35 +11003537 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3538 "%s: detected corrupt incore inode %Lu, "
3539 "total extents = %d, nblocks = %Ld, ptr 0x%p",
3540 __func__, ip->i_ino,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003541 ip->i_d.di_nextents + ip->i_d.di_anextents,
Dave Chinner6a19d932011-03-07 10:02:35 +11003542 ip->i_d.di_nblocks, ip);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003543 goto corrupt_out;
3544 }
3545 if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize,
Darrick J. Wong9e24cfd2017-06-20 17:54:47 -07003546 mp, XFS_ERRTAG_IFLUSH_6)) {
Dave Chinner6a19d932011-03-07 10:02:35 +11003547 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3548 "%s: bad inode %Lu, forkoff 0x%x, ptr 0x%p",
3549 __func__, ip->i_ino, ip->i_d.di_forkoff, ip);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003550 goto corrupt_out;
3551 }
Dave Chinnere60896d2013-07-24 15:47:30 +10003552
Linus Torvalds1da177e2005-04-16 15:20:36 -07003553 /*
Dave Chinner263997a2014-05-20 07:46:40 +10003554	 * Inode item log recovery for v2 inodes is dependent on the
Dave Chinnere60896d2013-07-24 15:47:30 +10003555 * di_flushiter count for correct sequencing. We bump the flush
3556 * iteration count so we can detect flushes which postdate a log record
3557 * during recovery. This is redundant as we now log every change and
3558 * hence this can't happen but we need to still do it to ensure
3559 * backwards compatibility with old kernels that predate logging all
3560 * inode changes.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003561 */
Dave Chinnere60896d2013-07-24 15:47:30 +10003562 if (ip->i_d.di_version < 3)
3563 ip->i_d.di_flushiter++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003564
Darrick J. Wong005c5db2017-03-28 14:51:10 -07003565 /* Check the inline directory data. */
3566 if (S_ISDIR(VFS_I(ip)->i_mode) &&
3567 ip->i_d.di_format == XFS_DINODE_FMT_LOCAL &&
3568 xfs_dir2_sf_verify(ip))
3569 goto corrupt_out;
3570
Linus Torvalds1da177e2005-04-16 15:20:36 -07003571 /*
Dave Chinner39878482016-02-09 16:54:58 +11003572 * Copy the dirty parts of the inode into the on-disk inode. We always
3573 * copy out the core of the inode, because if the inode is dirty at all
3574 * the core must be.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003575 */
Dave Chinner93f958f2016-02-09 16:54:58 +11003576 xfs_inode_to_disk(ip, dip, iip->ili_item.li_lsn);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003577
3578 /* Wrap, we never let the log put out DI_MAX_FLUSH */
3579 if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
3580 ip->i_d.di_flushiter = 0;
3581
Darrick J. Wong005c5db2017-03-28 14:51:10 -07003582 xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
3583 if (XFS_IFORK_Q(ip))
3584 xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003585 xfs_inobp_check(mp, bp);
3586
3587 /*
Christoph Hellwigf5d8d5c2012-02-29 09:53:54 +00003588 * We've recorded everything logged in the inode, so we'd like to clear
3589 * the ili_fields bits so we don't log and flush things unnecessarily.
3590 * However, we can't stop logging all this information until the data
3591 * we've copied into the disk buffer is written to disk. If we did we
3592 * might overwrite the copy of the inode in the log with all the data
3593 * after re-logging only part of it, and in the face of a crash we
3594 * wouldn't have all the data we need to recover.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003595 *
Christoph Hellwigf5d8d5c2012-02-29 09:53:54 +00003596 * What we do is move the bits to the ili_last_fields field. When
3597 * logging the inode, these bits are moved back to the ili_fields field.
3598 * In the xfs_iflush_done() routine we clear ili_last_fields, since we
3599 * know that the information those bits represent is permanently on
3600 * disk. As long as the flush completes before the inode is logged
3601 * again, then both ili_fields and ili_last_fields will be cleared.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003602 *
Christoph Hellwigf5d8d5c2012-02-29 09:53:54 +00003603 * We can play with the ili_fields bits here, because the inode lock
3604 * must be held exclusively in order to set bits there and the flush
3605 * lock protects the ili_last_fields bits. Set ili_logged so the flush
3606 * done routine can tell whether or not to look in the AIL. Also, store
3607 * the current LSN of the inode so that we can tell whether the item has
3608 * moved in the AIL from xfs_iflush_done(). In order to read the lsn we
3609 * need the AIL lock, because it is a 64 bit value that cannot be read
3610 * atomically.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003611 */
Christoph Hellwig93848a92013-04-03 16:11:17 +11003612 iip->ili_last_fields = iip->ili_fields;
3613 iip->ili_fields = 0;
Dave Chinnerfc0561c2015-11-03 13:14:59 +11003614 iip->ili_fsync_fields = 0;
Christoph Hellwig93848a92013-04-03 16:11:17 +11003615 iip->ili_logged = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003616
Christoph Hellwig93848a92013-04-03 16:11:17 +11003617 xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
3618 &iip->ili_item.li_lsn);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003619
Christoph Hellwig93848a92013-04-03 16:11:17 +11003620 /*
3621 * Attach the function xfs_iflush_done to the inode's
3622 * buffer. This will remove the inode from the AIL
3623 * and unlock the inode's flush lock when the inode is
3624 * completely written to disk.
3625 */
3626 xfs_buf_attach_iodone(bp, xfs_iflush_done, &iip->ili_item);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003627
Christoph Hellwig93848a92013-04-03 16:11:17 +11003628 /* generate the checksum. */
3629 xfs_dinode_calc_crc(mp, dip);
3630
3631 ASSERT(bp->b_fspriv != NULL);
3632 ASSERT(bp->b_iodone != NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003633 return 0;
3634
3635corrupt_out:
Dave Chinner24513372014-06-25 14:58:08 +10003636 return -EFSCORRUPTED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003637}