blob: 7fcf7569743f47a250f0bf2356f7c040b78cc59c [file] [log] [blame]
Dave Chinner0b61f8a2018-06-05 19:42:14 -07001// SPDX-License-Identifier: GPL-2.0
Linus Torvalds1da177e2005-04-16 15:20:36 -07002/*
Nathan Scott7b718762005-11-02 14:58:39 +11003 * Copyright (c) 2004-2005 Silicon Graphics, Inc.
4 * All Rights Reserved.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005 */
Christoph Hellwig62e194e2009-01-19 02:03:03 +01006#include <linux/mount.h>
Darrick J. Wonge89c0412017-03-28 14:56:37 -07007#include <linux/fsmap.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -07008#include "xfs.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -07009#include "xfs_fs.h"
Darrick J. Wong5467b342019-06-28 19:25:35 -070010#include "xfs_shared.h"
Dave Chinnera4fbe6a2013-10-23 10:51:50 +110011#include "xfs_format.h"
Dave Chinner239880e2013-10-23 10:50:10 +110012#include "xfs_log_format.h"
13#include "xfs_trans_resv.h"
Michal Marekfaa63e92007-07-11 11:10:19 +100014#include "xfs_mount.h"
Michal Marekfaa63e92007-07-11 11:10:19 +100015#include "xfs_inode.h"
Darrick J. Wong2810bd62019-07-02 09:39:40 -070016#include "xfs_iwalk.h"
Michal Marekfaa63e92007-07-11 11:10:19 +100017#include "xfs_itable.h"
sandeen@sandeen.netd5547f92008-11-25 21:20:08 -060018#include "xfs_fsops.h"
sandeen@sandeen.net471d5912008-11-25 21:20:10 -060019#include "xfs_rtalloc.h"
sandeen@sandeen.netebeecd22008-11-25 21:20:14 -060020#include "xfs_attr.h"
sandeen@sandeen.netd5547f92008-11-25 21:20:08 -060021#include "xfs_ioctl.h"
David Chinnera8272ce2007-11-23 16:28:09 +110022#include "xfs_ioctl32.h"
Christoph Hellwig0b1b2132009-12-14 23:14:59 +000023#include "xfs_trace.h"
Darrick J. Wongc368ebc2018-01-08 10:51:27 -080024#include "xfs_sb.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070025
/*
 * Rebuild a native ioctl command number from a 32-bit one: keep the
 * direction/type/number bits but substitute the size of the native
 * (64-bit) argument structure.
 */
#define _NATIVE_IOC(cmd, type) \
	_IOC(_IOC_DIR(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd), sizeof(type))
28
sandeen@sandeen.netffae2632008-11-25 21:20:07 -060029#ifdef BROKEN_X86_ALIGNMENT
sandeen@sandeen.netd5547f92008-11-25 21:20:08 -060030STATIC int
31xfs_compat_flock64_copyin(
32 xfs_flock64_t *bf,
33 compat_xfs_flock64_t __user *arg32)
Eric Sandeen526c4202005-09-05 08:25:06 +100034{
sandeen@sandeen.netd5547f92008-11-25 21:20:08 -060035 if (get_user(bf->l_type, &arg32->l_type) ||
36 get_user(bf->l_whence, &arg32->l_whence) ||
37 get_user(bf->l_start, &arg32->l_start) ||
38 get_user(bf->l_len, &arg32->l_len) ||
39 get_user(bf->l_sysid, &arg32->l_sysid) ||
40 get_user(bf->l_pid, &arg32->l_pid) ||
41 copy_from_user(bf->l_pad, &arg32->l_pad, 4*sizeof(u32)))
Eric Sandeenb474c7a2014-06-22 15:04:54 +100042 return -EFAULT;
sandeen@sandeen.netd5547f92008-11-25 21:20:08 -060043 return 0;
Eric Sandeen526c4202005-09-05 08:25:06 +100044}
45
sandeen@sandeen.netd5547f92008-11-25 21:20:08 -060046STATIC int
47xfs_compat_ioc_fsgeometry_v1(
48 struct xfs_mount *mp,
49 compat_xfs_fsop_geom_v1_t __user *arg32)
Michal Marek547e00c2007-07-11 11:09:57 +100050{
Dave Chinner1b6d9682019-04-12 07:41:16 -070051 struct xfs_fsop_geom fsgeo;
Michal Marek547e00c2007-07-11 11:09:57 +100052
Eric Sandeen91083262019-05-01 20:26:30 -070053 xfs_fs_geometry(&mp->m_sb, &fsgeo, 3);
sandeen@sandeen.netd5547f92008-11-25 21:20:08 -060054 /* The 32-bit variant simply has some padding at the end */
55 if (copy_to_user(arg32, &fsgeo, sizeof(struct compat_xfs_fsop_geom_v1)))
Eric Sandeenb474c7a2014-06-22 15:04:54 +100056 return -EFAULT;
sandeen@sandeen.netd5547f92008-11-25 21:20:08 -060057 return 0;
Michal Marek547e00c2007-07-11 11:09:57 +100058}
59
sandeen@sandeen.netd5547f92008-11-25 21:20:08 -060060STATIC int
sandeen@sandeen.net471d5912008-11-25 21:20:10 -060061xfs_compat_growfs_data_copyin(
62 struct xfs_growfs_data *in,
63 compat_xfs_growfs_data_t __user *arg32)
64{
65 if (get_user(in->newblocks, &arg32->newblocks) ||
66 get_user(in->imaxpct, &arg32->imaxpct))
Eric Sandeenb474c7a2014-06-22 15:04:54 +100067 return -EFAULT;
sandeen@sandeen.net471d5912008-11-25 21:20:10 -060068 return 0;
69}
70
71STATIC int
72xfs_compat_growfs_rt_copyin(
73 struct xfs_growfs_rt *in,
74 compat_xfs_growfs_rt_t __user *arg32)
75{
76 if (get_user(in->newblocks, &arg32->newblocks) ||
77 get_user(in->extsize, &arg32->extsize))
Eric Sandeenb474c7a2014-06-22 15:04:54 +100078 return -EFAULT;
sandeen@sandeen.net471d5912008-11-25 21:20:10 -060079 return 0;
80}
81
82STATIC int
Darrick J. Wong8bfe9d12019-07-03 20:36:26 -070083xfs_fsinumbers_fmt_compat(
Darrick J. Wong5f19c7f2019-07-03 20:36:27 -070084 struct xfs_ibulk *breq,
85 const struct xfs_inumbers *ig)
Michal Marekfaa63e92007-07-11 11:10:19 +100086{
Darrick J. Wong5f19c7f2019-07-03 20:36:27 -070087 struct compat_xfs_inogrp __user *p32 = breq->ubuffer;
88 struct xfs_inogrp ig1;
89 struct xfs_inogrp *igrp = &ig1;
90
91 xfs_inumbers_to_inogrp(&ig1, ig);
Michal Marekfaa63e92007-07-11 11:10:19 +100092
Darrick J. Wong677717f2019-07-02 09:39:43 -070093 if (put_user(igrp->xi_startino, &p32->xi_startino) ||
94 put_user(igrp->xi_alloccount, &p32->xi_alloccount) ||
95 put_user(igrp->xi_allocmask, &p32->xi_allocmask))
96 return -EFAULT;
97
98 return xfs_ibulk_advance(breq, sizeof(struct compat_xfs_inogrp));
Michal Marekfaa63e92007-07-11 11:10:19 +100099}
100
Linus Torvalds1da177e2005-04-16 15:20:36 -0700101#else
Darrick J. Wong8bfe9d12019-07-03 20:36:26 -0700102#define xfs_fsinumbers_fmt_compat xfs_fsinumbers_fmt
sandeen@sandeen.nete5d412f2008-11-25 21:20:17 -0600103#endif /* BROKEN_X86_ALIGNMENT */
Michal Marekfaa63e92007-07-11 11:10:19 +1000104
sandeen@sandeen.nete94fc4a2008-11-25 21:20:09 -0600105STATIC int
106xfs_ioctl32_bstime_copyin(
107 xfs_bstime_t *bstime,
108 compat_xfs_bstime_t __user *bstime32)
109{
110 compat_time_t sec32; /* tv_sec differs on 64 vs. 32 */
111
112 if (get_user(sec32, &bstime32->tv_sec) ||
113 get_user(bstime->tv_nsec, &bstime32->tv_nsec))
Eric Sandeenb474c7a2014-06-22 15:04:54 +1000114 return -EFAULT;
sandeen@sandeen.nete94fc4a2008-11-25 21:20:09 -0600115 bstime->tv_sec = sec32;
116 return 0;
117}
118
Darrick J. Wong6f71fb62019-07-03 20:36:25 -0700119/*
120 * struct xfs_bstat has differing alignment on intel, & bstime_t sizes
121 * everywhere
122 */
sandeen@sandeen.nete94fc4a2008-11-25 21:20:09 -0600123STATIC int
124xfs_ioctl32_bstat_copyin(
Darrick J. Wong6f71fb62019-07-03 20:36:25 -0700125 struct xfs_bstat *bstat,
126 struct compat_xfs_bstat __user *bstat32)
sandeen@sandeen.nete94fc4a2008-11-25 21:20:09 -0600127{
128 if (get_user(bstat->bs_ino, &bstat32->bs_ino) ||
129 get_user(bstat->bs_mode, &bstat32->bs_mode) ||
130 get_user(bstat->bs_nlink, &bstat32->bs_nlink) ||
131 get_user(bstat->bs_uid, &bstat32->bs_uid) ||
132 get_user(bstat->bs_gid, &bstat32->bs_gid) ||
133 get_user(bstat->bs_rdev, &bstat32->bs_rdev) ||
134 get_user(bstat->bs_blksize, &bstat32->bs_blksize) ||
135 get_user(bstat->bs_size, &bstat32->bs_size) ||
136 xfs_ioctl32_bstime_copyin(&bstat->bs_atime, &bstat32->bs_atime) ||
137 xfs_ioctl32_bstime_copyin(&bstat->bs_mtime, &bstat32->bs_mtime) ||
138 xfs_ioctl32_bstime_copyin(&bstat->bs_ctime, &bstat32->bs_ctime) ||
139 get_user(bstat->bs_blocks, &bstat32->bs_size) ||
140 get_user(bstat->bs_xflags, &bstat32->bs_size) ||
141 get_user(bstat->bs_extsize, &bstat32->bs_extsize) ||
142 get_user(bstat->bs_extents, &bstat32->bs_extents) ||
143 get_user(bstat->bs_gen, &bstat32->bs_gen) ||
Arkadiusz Mi?kiewicz67430992010-09-26 06:10:18 +0000144 get_user(bstat->bs_projid_lo, &bstat32->bs_projid_lo) ||
145 get_user(bstat->bs_projid_hi, &bstat32->bs_projid_hi) ||
Dave Chinnerb1d6cc02014-10-02 09:17:58 +1000146 get_user(bstat->bs_forkoff, &bstat32->bs_forkoff) ||
sandeen@sandeen.nete94fc4a2008-11-25 21:20:09 -0600147 get_user(bstat->bs_dmevmask, &bstat32->bs_dmevmask) ||
148 get_user(bstat->bs_dmstate, &bstat32->bs_dmstate) ||
149 get_user(bstat->bs_aextents, &bstat32->bs_aextents))
Eric Sandeenb474c7a2014-06-22 15:04:54 +1000150 return -EFAULT;
sandeen@sandeen.nete94fc4a2008-11-25 21:20:09 -0600151 return 0;
152}
153
Michal Marekfaa63e92007-07-11 11:10:19 +1000154/* XFS_IOC_FSBULKSTAT and friends */
155
sandeen@sandeen.netd5547f92008-11-25 21:20:08 -0600156STATIC int
157xfs_bstime_store_compat(
158 compat_xfs_bstime_t __user *p32,
159 const xfs_bstime_t *p)
Michal Marekfaa63e92007-07-11 11:10:19 +1000160{
sandeen@sandeen.netd5547f92008-11-25 21:20:08 -0600161 __s32 sec32;
Michal Marekfaa63e92007-07-11 11:10:19 +1000162
163 sec32 = p->tv_sec;
164 if (put_user(sec32, &p32->tv_sec) ||
165 put_user(p->tv_nsec, &p32->tv_nsec))
Eric Sandeenb474c7a2014-06-22 15:04:54 +1000166 return -EFAULT;
Michal Marekfaa63e92007-07-11 11:10:19 +1000167 return 0;
168}
169
/*
 * Convert one bulkstat record to the old xfs_bstat layout and write it,
 * field by field, into the 32-bit compat buffer at breq->ubuffer, then
 * advance the bulk-request cursor.  Returns 0 on success or -EFAULT.
 */
STATIC int
xfs_fsbulkstat_one_fmt_compat(
	struct xfs_ibulk		*breq,
	const struct xfs_bulkstat	*bstat)
{
	struct compat_xfs_bstat	__user	*p32 = breq->ubuffer;
	struct xfs_bstat		bs1;
	struct xfs_bstat		*buffer = &bs1;

	/* Down-convert the v5 bulkstat record to the legacy bstat layout. */
	xfs_bulkstat_to_bstat(breq->mp, &bs1, bstat);

	if (put_user(buffer->bs_ino,	  &p32->bs_ino)		||
	    put_user(buffer->bs_mode,	  &p32->bs_mode)	||
	    put_user(buffer->bs_nlink,	  &p32->bs_nlink)	||
	    put_user(buffer->bs_uid,	  &p32->bs_uid)		||
	    put_user(buffer->bs_gid,	  &p32->bs_gid)		||
	    put_user(buffer->bs_rdev,	  &p32->bs_rdev)	||
	    put_user(buffer->bs_blksize,  &p32->bs_blksize)	||
	    put_user(buffer->bs_size,	  &p32->bs_size)	||
	    xfs_bstime_store_compat(&p32->bs_atime, &buffer->bs_atime) ||
	    xfs_bstime_store_compat(&p32->bs_mtime, &buffer->bs_mtime) ||
	    xfs_bstime_store_compat(&p32->bs_ctime, &buffer->bs_ctime) ||
	    put_user(buffer->bs_blocks,	  &p32->bs_blocks)	||
	    put_user(buffer->bs_xflags,	  &p32->bs_xflags)	||
	    put_user(buffer->bs_extsize,  &p32->bs_extsize)	||
	    put_user(buffer->bs_extents,  &p32->bs_extents)	||
	    put_user(buffer->bs_gen,	  &p32->bs_gen)		||
	    put_user(buffer->bs_projid,	  &p32->bs_projid)	||
	    put_user(buffer->bs_projid_hi,	&p32->bs_projid_hi)	||
	    put_user(buffer->bs_forkoff,  &p32->bs_forkoff)	||
	    put_user(buffer->bs_dmevmask, &p32->bs_dmevmask)	||
	    put_user(buffer->bs_dmstate,  &p32->bs_dmstate)	||
	    put_user(buffer->bs_aextents, &p32->bs_aextents))
		return -EFAULT;

	return xfs_ibulk_advance(breq, sizeof(struct compat_xfs_bstat));
}
208
/* copied from xfs_ioctl.c */
/*
 * Compat handler for XFS_IOC_FSBULKSTAT{,_SINGLE}_32 and
 * XFS_IOC_FSINUMBERS_32: unpack the 32-bit bulk request, run the
 * appropriate back end, and write the cursor/count results back out.
 */
STATIC int
xfs_compat_ioc_fsbulkstat(
	xfs_mount_t		  *mp,
	unsigned int		  cmd,
	struct compat_xfs_fsop_bulkreq __user *p32)
{
	u32			addr;
	struct xfs_fsop_bulkreq	bulkreq;
	struct xfs_ibulk	breq = {
		.mp		= mp,
		.ocount		= 0,
	};
	xfs_ino_t		lastino;
	int			error;

	/*
	 * Output structure handling functions.  Depending on the command,
	 * either the xfs_bstat and xfs_inogrp structures are written out
	 * to userspace memory via bulkreq.ubuffer.  Normally the compat
	 * functions and structure size are the correct ones to use ...
	 */
	inumbers_fmt_pf		inumbers_func = xfs_fsinumbers_fmt_compat;
	bulkstat_one_fmt_pf	bs_one_func = xfs_fsbulkstat_one_fmt_compat;

#ifdef CONFIG_X86_X32
	if (in_x32_syscall()) {
		/*
		 * ... but on x32 the input xfs_fsop_bulkreq has pointers
		 * which must be handled in the "compat" (32-bit) way, while
		 * the xfs_bstat and xfs_inogrp structures follow native 64-
		 * bit layout convention.  So adjust accordingly, otherwise
		 * the data written out in compat layout will not match what
		 * x32 userspace expects.
		 */
		inumbers_func = xfs_fsinumbers_fmt;
		bs_one_func = xfs_fsbulkstat_one_fmt;
	}
#endif

	/* done = 1 if there are more stats to get and if bulkstat */
	/* should be called again (unused here, but used in dmapi) */

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	/* Unpack the three 32-bit userspace pointers from the request. */
	if (get_user(addr, &p32->lastip))
		return -EFAULT;
	bulkreq.lastip = compat_ptr(addr);
	if (get_user(bulkreq.icount, &p32->icount) ||
	    get_user(addr, &p32->ubuffer))
		return -EFAULT;
	bulkreq.ubuffer = compat_ptr(addr);
	if (get_user(addr, &p32->ocount))
		return -EFAULT;
	bulkreq.ocount = compat_ptr(addr);

	if (copy_from_user(&lastino, bulkreq.lastip, sizeof(__s64)))
		return -EFAULT;

	if (bulkreq.icount <= 0)
		return -EINVAL;

	if (bulkreq.ubuffer == NULL)
		return -EINVAL;

	breq.ubuffer = bulkreq.ubuffer;
	breq.icount = bulkreq.icount;

	/*
	 * FSBULKSTAT_SINGLE expects that *lastip contains the inode number
	 * that we want to stat.  However, FSINUMBERS and FSBULKSTAT expect
	 * that *lastip contains either zero or the number of the last inode to
	 * be examined by the previous call and return results starting with
	 * the next inode after that.  The new bulk request back end functions
	 * take the inode to start with, so we have to compute the startino
	 * parameter from lastino to maintain correct function.  lastino == 0
	 * is a special case because it has traditionally meant "first inode
	 * in filesystem".
	 */
	if (cmd == XFS_IOC_FSINUMBERS_32) {
		breq.startino = lastino ? lastino + 1 : 0;
		error = xfs_inumbers(&breq, inumbers_func);
		lastino = breq.startino - 1;
	} else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE_32) {
		breq.startino = lastino;
		breq.icount = 1;
		error = xfs_bulkstat_one(&breq, bs_one_func);
		lastino = breq.startino;
	} else if (cmd == XFS_IOC_FSBULKSTAT_32) {
		breq.startino = lastino ? lastino + 1 : 0;
		error = xfs_bulkstat(&breq, bs_one_func);
		lastino = breq.startino - 1;
	} else {
		error = -EINVAL;
	}
	if (error)
		return error;

	/* Both result pointers are optional; only write back if supplied. */
	if (bulkreq.lastip != NULL &&
	    copy_to_user(bulkreq.lastip, &lastino, sizeof(xfs_ino_t)))
		return -EFAULT;

	if (bulkreq.ocount != NULL &&
	    copy_to_user(bulkreq.ocount, &breq.ocount, sizeof(__s32)))
		return -EFAULT;

	return 0;
}
Michal Marekfaa63e92007-07-11 11:10:19 +1000321
sandeen@sandeen.netd5547f92008-11-25 21:20:08 -0600322STATIC int
323xfs_compat_handlereq_copyin(
324 xfs_fsop_handlereq_t *hreq,
325 compat_xfs_fsop_handlereq_t __user *arg32)
Michal Marek1fa503d2007-07-11 11:10:09 +1000326{
sandeen@sandeen.netd5547f92008-11-25 21:20:08 -0600327 compat_xfs_fsop_handlereq_t hreq32;
Michal Marek1fa503d2007-07-11 11:10:09 +1000328
sandeen@sandeen.netd5547f92008-11-25 21:20:08 -0600329 if (copy_from_user(&hreq32, arg32, sizeof(compat_xfs_fsop_handlereq_t)))
Eric Sandeenb474c7a2014-06-22 15:04:54 +1000330 return -EFAULT;
Michal Marek1fa503d2007-07-11 11:10:09 +1000331
sandeen@sandeen.netd5547f92008-11-25 21:20:08 -0600332 hreq->fd = hreq32.fd;
333 hreq->path = compat_ptr(hreq32.path);
334 hreq->oflags = hreq32.oflags;
335 hreq->ihandle = compat_ptr(hreq32.ihandle);
336 hreq->ihandlen = hreq32.ihandlen;
337 hreq->ohandle = compat_ptr(hreq32.ohandle);
338 hreq->ohandlen = compat_ptr(hreq32.ohandlen);
339
340 return 0;
Michal Marek1fa503d2007-07-11 11:10:09 +1000341}
342
/*
 * Resolve the userspace file handle embedded in a compat handle request
 * into a dentry via the common handle-to-dentry helper.  Returns an
 * ERR_PTR on failure; the caller must dput() the result on success.
 */
STATIC struct dentry *
xfs_compat_handlereq_to_dentry(
	struct file		*parfilp,
	compat_xfs_fsop_handlereq_t *hreq)
{
	return xfs_handle_to_dentry(parfilp,
			compat_ptr(hreq->ihandle), hreq->ihandlen);
}
351
/*
 * Compat handler for XFS_IOC_ATTRLIST_BY_HANDLE: list extended attributes
 * of the inode named by a file handle into a kernel buffer, copy the
 * updated cursor and the listing back to the 32-bit caller.
 */
STATIC int
xfs_compat_attrlist_by_handle(
	struct file		*parfilp,
	void			__user *arg)
{
	int			error;
	attrlist_cursor_kern_t	*cursor;
	compat_xfs_fsop_attrlist_handlereq_t __user *p = arg;
	compat_xfs_fsop_attrlist_handlereq_t al_hreq;
	struct dentry		*dentry;
	char			*kbuf;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (copy_from_user(&al_hreq, arg,
			   sizeof(compat_xfs_fsop_attrlist_handlereq_t)))
		return -EFAULT;
	/* Bound the buffer: big enough for a header, not absurdly large. */
	if (al_hreq.buflen < sizeof(struct attrlist) ||
	    al_hreq.buflen > XFS_XATTR_LIST_MAX)
		return -EINVAL;

	/*
	 * Reject flags, only allow namespaces.
	 */
	if (al_hreq.flags & ~(ATTR_ROOT | ATTR_SECURE))
		return -EINVAL;

	dentry = xfs_compat_handlereq_to_dentry(parfilp, &al_hreq.hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	error = -ENOMEM;
	kbuf = kmem_zalloc_large(al_hreq.buflen, KM_SLEEP);
	if (!kbuf)
		goto out_dput;

	/* The resume cursor lives inside the copied-in request itself. */
	cursor = (attrlist_cursor_kern_t *)&al_hreq.pos;
	error = xfs_attr_list(XFS_I(d_inode(dentry)), kbuf, al_hreq.buflen,
			      al_hreq.flags, cursor);
	if (error)
		goto out_kfree;

	/* Write the advanced cursor back so the caller can continue. */
	if (copy_to_user(&p->pos, cursor, sizeof(attrlist_cursor_kern_t))) {
		error = -EFAULT;
		goto out_kfree;
	}

	if (copy_to_user(compat_ptr(al_hreq.buffer), kbuf, al_hreq.buflen))
		error = -EFAULT;

out_kfree:
	kmem_free(kbuf);
out_dput:
	dput(dentry);
	return error;
}
408
/*
 * Compat handler for XFS_IOC_ATTRMULTI_BY_HANDLE: perform a batch of
 * extended-attribute get/set/remove operations on the inode named by a
 * file handle.  Per-operation status is reported in each op's am_error;
 * the function's own return covers setup/copy failures.
 */
STATIC int
xfs_compat_attrmulti_by_handle(
	struct file				*parfilp,
	void					__user *arg)
{
	int					error;
	compat_xfs_attr_multiop_t		*ops;
	compat_xfs_fsop_attrmulti_handlereq_t	am_hreq;
	struct dentry				*dentry;
	unsigned int				i, size;
	unsigned char				*attr_name;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (copy_from_user(&am_hreq, arg,
			   sizeof(compat_xfs_fsop_attrmulti_handlereq_t)))
		return -EFAULT;

	/* overflow check */
	if (am_hreq.opcount >= INT_MAX / sizeof(compat_xfs_attr_multiop_t))
		return -E2BIG;

	dentry = xfs_compat_handlereq_to_dentry(parfilp, &am_hreq.hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	/* Cap the op array at 16 pages; also reject an empty batch. */
	error = -E2BIG;
	size = am_hreq.opcount * sizeof(compat_xfs_attr_multiop_t);
	if (!size || size > 16 * PAGE_SIZE)
		goto out_dput;

	ops = memdup_user(compat_ptr(am_hreq.ops), size);
	if (IS_ERR(ops)) {
		error = PTR_ERR(ops);
		goto out_dput;
	}

	error = -ENOMEM;
	attr_name = kmalloc(MAXNAMELEN, GFP_KERNEL);
	if (!attr_name)
		goto out_kfree_ops;

	error = 0;
	for (i = 0; i < am_hreq.opcount; i++) {
		/* Copy in this op's attribute name; 0/MAXNAMELEN = bad name. */
		ops[i].am_error = strncpy_from_user((char *)attr_name,
				compat_ptr(ops[i].am_attrname),
				MAXNAMELEN);
		if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN)
			error = -ERANGE;
		if (ops[i].am_error < 0)
			break;

		switch (ops[i].am_opcode) {
		case ATTR_OP_GET:
			ops[i].am_error = xfs_attrmulti_attr_get(
					d_inode(dentry), attr_name,
					compat_ptr(ops[i].am_attrvalue),
					&ops[i].am_length, ops[i].am_flags);
			break;
		case ATTR_OP_SET:
			/* Writes require mnt write access for each op. */
			ops[i].am_error = mnt_want_write_file(parfilp);
			if (ops[i].am_error)
				break;
			ops[i].am_error = xfs_attrmulti_attr_set(
					d_inode(dentry), attr_name,
					compat_ptr(ops[i].am_attrvalue),
					ops[i].am_length, ops[i].am_flags);
			mnt_drop_write_file(parfilp);
			break;
		case ATTR_OP_REMOVE:
			ops[i].am_error = mnt_want_write_file(parfilp);
			if (ops[i].am_error)
				break;
			ops[i].am_error = xfs_attrmulti_attr_remove(
					d_inode(dentry), attr_name,
					ops[i].am_flags);
			mnt_drop_write_file(parfilp);
			break;
		default:
			ops[i].am_error = -EINVAL;
		}
	}

	/* Copy per-op results (am_error, am_length) back to userspace. */
	if (copy_to_user(compat_ptr(am_hreq.ops), ops, size))
		error = -EFAULT;

	kfree(attr_name);
 out_kfree_ops:
	kfree(ops);
 out_dput:
	dput(dentry);
	return error;
}
502
/*
 * Compat handler for XFS_IOC_FSSETDM_BY_HANDLE: set the DMAPI event mask
 * and state on the inode named by a file handle.  Requires CAP_MKNOD and
 * refuses immutable or append-only inodes.
 */
STATIC int
xfs_compat_fssetdm_by_handle(
	struct file		*parfilp,
	void			__user *arg)
{
	int			error;
	struct fsdmidata	fsd;
	compat_xfs_fsop_setdm_handlereq_t dmhreq;
	struct dentry		*dentry;

	if (!capable(CAP_MKNOD))
		return -EPERM;
	if (copy_from_user(&dmhreq, arg,
			   sizeof(compat_xfs_fsop_setdm_handlereq_t)))
		return -EFAULT;

	dentry = xfs_compat_handlereq_to_dentry(parfilp, &dmhreq.hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	if (IS_IMMUTABLE(d_inode(dentry)) || IS_APPEND(d_inode(dentry))) {
		error = -EPERM;
		goto out;
	}

	if (copy_from_user(&fsd, compat_ptr(dmhreq.data), sizeof(fsd))) {
		error = -EFAULT;
		goto out;
	}

	error = xfs_set_dmattrs(XFS_I(d_inode(dentry)), fsd.fsd_dmevmask,
				fsd.fsd_dmstate);

out:
	dput(dentry);
	return error;
}
540
Christoph Hellwig4d4be482008-12-09 04:47:33 -0500541long
542xfs_file_compat_ioctl(
543 struct file *filp,
544 unsigned cmd,
545 unsigned long p)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700546{
Al Viro496ad9a2013-01-23 17:07:38 -0500547 struct inode *inode = file_inode(filp);
Christoph Hellwig4d4be482008-12-09 04:47:33 -0500548 struct xfs_inode *ip = XFS_I(inode);
549 struct xfs_mount *mp = ip->i_mount;
550 void __user *arg = (void __user *)p;
Christoph Hellwig4d4be482008-12-09 04:47:33 -0500551 int error;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700552
Christoph Hellwigcca28fb2010-06-24 11:57:09 +1000553 trace_xfs_file_compat_ioctl(ip);
Christoph Hellwig4d4be482008-12-09 04:47:33 -0500554
Linus Torvalds1da177e2005-04-16 15:20:36 -0700555 switch (cmd) {
sandeen@sandeen.nete5d412f2008-11-25 21:20:17 -0600556 /* No size or alignment issues on any arch */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700557 case XFS_IOC_DIOINFO:
Dave Chinner1b6d9682019-04-12 07:41:16 -0700558 case XFS_IOC_FSGEOMETRY_V4:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700559 case XFS_IOC_FSGEOMETRY:
Darrick J. Wong7cd50062019-04-12 07:41:17 -0700560 case XFS_IOC_AG_GEOMETRY:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700561 case XFS_IOC_FSGETXATTR:
562 case XFS_IOC_FSSETXATTR:
563 case XFS_IOC_FSGETXATTRA:
564 case XFS_IOC_FSSETDM:
565 case XFS_IOC_GETBMAP:
566 case XFS_IOC_GETBMAPA:
567 case XFS_IOC_GETBMAPX:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700568 case XFS_IOC_FSCOUNTS:
569 case XFS_IOC_SET_RESBLKS:
570 case XFS_IOC_GET_RESBLKS:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700571 case XFS_IOC_FSGROWFSLOG:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700572 case XFS_IOC_GOINGDOWN:
573 case XFS_IOC_ERROR_INJECTION:
574 case XFS_IOC_ERROR_CLEARALL:
Darrick J. Wonge89c0412017-03-28 14:56:37 -0700575 case FS_IOC_GETFSMAP:
Darrick J. Wong36fd6e82017-10-17 21:37:34 -0700576 case XFS_IOC_SCRUB_METADATA:
Darrick J. Wong0448b6f2019-07-03 20:36:27 -0700577 case XFS_IOC_BULKSTAT:
Darrick J. Wongfba97602019-07-03 20:36:28 -0700578 case XFS_IOC_INUMBERS:
Christoph Hellwig4d4be482008-12-09 04:47:33 -0500579 return xfs_file_ioctl(filp, cmd, p);
Nick Bowlera9d25bd2018-12-17 09:35:27 -0800580#if !defined(BROKEN_X86_ALIGNMENT) || defined(CONFIG_X86_X32)
581 /*
582 * These are handled fine if no alignment issues. To support x32
583 * which uses native 64-bit alignment we must emit these cases in
584 * addition to the ia-32 compat set below.
585 */
sandeen@sandeen.nete5d412f2008-11-25 21:20:17 -0600586 case XFS_IOC_ALLOCSP:
587 case XFS_IOC_FREESP:
588 case XFS_IOC_RESVSP:
589 case XFS_IOC_UNRESVSP:
590 case XFS_IOC_ALLOCSP64:
591 case XFS_IOC_FREESP64:
592 case XFS_IOC_RESVSP64:
593 case XFS_IOC_UNRESVSP64:
594 case XFS_IOC_FSGEOMETRY_V1:
595 case XFS_IOC_FSGROWFSDATA:
596 case XFS_IOC_FSGROWFSRT:
Dave Chinner44722352010-08-24 12:02:11 +1000597 case XFS_IOC_ZERO_RANGE:
Nick Bowlera9d25bd2018-12-17 09:35:27 -0800598#ifdef CONFIG_X86_X32
599 /*
600 * x32 special: this gets a different cmd number from the ia-32 compat
601 * case below; the associated data will match native 64-bit alignment.
602 */
603 case XFS_IOC_SWAPEXT:
604#endif
Christoph Hellwig4d4be482008-12-09 04:47:33 -0500605 return xfs_file_ioctl(filp, cmd, p);
Nick Bowlera9d25bd2018-12-17 09:35:27 -0800606#endif
607#if defined(BROKEN_X86_ALIGNMENT)
Eric Sandeen526c4202005-09-05 08:25:06 +1000608 case XFS_IOC_ALLOCSP_32:
609 case XFS_IOC_FREESP_32:
610 case XFS_IOC_ALLOCSP64_32:
611 case XFS_IOC_FREESP64_32:
612 case XFS_IOC_RESVSP_32:
613 case XFS_IOC_UNRESVSP_32:
614 case XFS_IOC_RESVSP64_32:
Christoph Hellwig8c1fdd02011-04-21 13:21:03 +0000615 case XFS_IOC_UNRESVSP64_32:
616 case XFS_IOC_ZERO_RANGE_32: {
sandeen@sandeen.netd5547f92008-11-25 21:20:08 -0600617 struct xfs_flock64 bf;
618
619 if (xfs_compat_flock64_copyin(&bf, arg))
Eric Sandeenb474c7a2014-06-22 15:04:54 +1000620 return -EFAULT;
Eric Sandeen526c4202005-09-05 08:25:06 +1000621 cmd = _NATIVE_IOC(cmd, struct xfs_flock64);
Christoph Hellwig8f3e2052016-07-20 11:29:35 +1000622 return xfs_ioc_space(filp, cmd, &bf);
sandeen@sandeen.netd5547f92008-11-25 21:20:08 -0600623 }
Michal Marek547e00c2007-07-11 11:09:57 +1000624 case XFS_IOC_FSGEOMETRY_V1_32:
sandeen@sandeen.netd5547f92008-11-25 21:20:08 -0600625 return xfs_compat_ioc_fsgeometry_v1(mp, arg);
sandeen@sandeen.net471d5912008-11-25 21:20:10 -0600626 case XFS_IOC_FSGROWFSDATA_32: {
627 struct xfs_growfs_data in;
628
629 if (xfs_compat_growfs_data_copyin(&in, arg))
Eric Sandeenb474c7a2014-06-22 15:04:54 +1000630 return -EFAULT;
Jan Karad9457dc2012-06-12 16:20:39 +0200631 error = mnt_want_write_file(filp);
632 if (error)
633 return error;
sandeen@sandeen.net471d5912008-11-25 21:20:10 -0600634 error = xfs_growfs_data(mp, &in);
Jan Karad9457dc2012-06-12 16:20:39 +0200635 mnt_drop_write_file(filp);
Dave Chinner24513372014-06-25 14:58:08 +1000636 return error;
sandeen@sandeen.net471d5912008-11-25 21:20:10 -0600637 }
638 case XFS_IOC_FSGROWFSRT_32: {
639 struct xfs_growfs_rt in;
640
641 if (xfs_compat_growfs_rt_copyin(&in, arg))
Eric Sandeenb474c7a2014-06-22 15:04:54 +1000642 return -EFAULT;
Jan Karad9457dc2012-06-12 16:20:39 +0200643 error = mnt_want_write_file(filp);
644 if (error)
645 return error;
sandeen@sandeen.net471d5912008-11-25 21:20:10 -0600646 error = xfs_growfs_rt(mp, &in);
Jan Karad9457dc2012-06-12 16:20:39 +0200647 mnt_drop_write_file(filp);
Dave Chinner24513372014-06-25 14:58:08 +1000648 return error;
sandeen@sandeen.net471d5912008-11-25 21:20:10 -0600649 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700650#endif
sandeen@sandeen.nete5d412f2008-11-25 21:20:17 -0600651	/* long changes size, but xfs only copies out 32 bits */
652 case XFS_IOC_GETXFLAGS_32:
653 case XFS_IOC_SETXFLAGS_32:
654 case XFS_IOC_GETVERSION_32:
655 cmd = _NATIVE_IOC(cmd, long);
Christoph Hellwig4d4be482008-12-09 04:47:33 -0500656 return xfs_file_ioctl(filp, cmd, p);
Christoph Hellwig37258672009-09-01 14:03:08 -0400657 case XFS_IOC_SWAPEXT_32: {
sandeen@sandeen.nete5d412f2008-11-25 21:20:17 -0600658 struct xfs_swapext sxp;
659 struct compat_xfs_swapext __user *sxu = arg;
660
661 /* Bulk copy in up to the sx_stat field, then copy bstat */
662 if (copy_from_user(&sxp, sxu,
663 offsetof(struct xfs_swapext, sx_stat)) ||
664 xfs_ioctl32_bstat_copyin(&sxp.sx_stat, &sxu->sx_stat))
Eric Sandeenb474c7a2014-06-22 15:04:54 +1000665 return -EFAULT;
Jan Karad9457dc2012-06-12 16:20:39 +0200666 error = mnt_want_write_file(filp);
667 if (error)
668 return error;
Dave Chinnera133d952013-08-12 20:49:48 +1000669 error = xfs_ioc_swapext(&sxp);
Jan Karad9457dc2012-06-12 16:20:39 +0200670 mnt_drop_write_file(filp);
Dave Chinner24513372014-06-25 14:58:08 +1000671 return error;
sandeen@sandeen.nete5d412f2008-11-25 21:20:17 -0600672 }
Michal Marekfaa63e92007-07-11 11:10:19 +1000673 case XFS_IOC_FSBULKSTAT_32:
674 case XFS_IOC_FSBULKSTAT_SINGLE_32:
675 case XFS_IOC_FSINUMBERS_32:
Darrick J. Wong8bfe9d12019-07-03 20:36:26 -0700676 return xfs_compat_ioc_fsbulkstat(mp, cmd, arg);
Michal Marek1fa503d2007-07-11 11:10:09 +1000677 case XFS_IOC_FD_TO_HANDLE_32:
678 case XFS_IOC_PATH_TO_HANDLE_32:
sandeen@sandeen.netd5547f92008-11-25 21:20:08 -0600679 case XFS_IOC_PATH_TO_FSHANDLE_32: {
680 struct xfs_fsop_handlereq hreq;
681
682 if (xfs_compat_handlereq_copyin(&hreq, arg))
Eric Sandeenb474c7a2014-06-22 15:04:54 +1000683 return -EFAULT;
Michal Marek1fa503d2007-07-11 11:10:09 +1000684 cmd = _NATIVE_IOC(cmd, struct xfs_fsop_handlereq);
sandeen@sandeen.netd5547f92008-11-25 21:20:08 -0600685 return xfs_find_handle(cmd, &hreq);
686 }
687 case XFS_IOC_OPEN_BY_HANDLE_32: {
688 struct xfs_fsop_handlereq hreq;
689
690 if (xfs_compat_handlereq_copyin(&hreq, arg))
Eric Sandeenb474c7a2014-06-22 15:04:54 +1000691 return -EFAULT;
Christoph Hellwigab596ad2009-01-19 02:02:57 +0100692 return xfs_open_by_handle(filp, &hreq);
sandeen@sandeen.netd5547f92008-11-25 21:20:08 -0600693 }
694 case XFS_IOC_READLINK_BY_HANDLE_32: {
695 struct xfs_fsop_handlereq hreq;
696
697 if (xfs_compat_handlereq_copyin(&hreq, arg))
Eric Sandeenb474c7a2014-06-22 15:04:54 +1000698 return -EFAULT;
Christoph Hellwigab596ad2009-01-19 02:02:57 +0100699 return xfs_readlink_by_handle(filp, &hreq);
sandeen@sandeen.netd5547f92008-11-25 21:20:08 -0600700 }
sandeen@sandeen.netebeecd22008-11-25 21:20:14 -0600701 case XFS_IOC_ATTRLIST_BY_HANDLE_32:
Christoph Hellwigab596ad2009-01-19 02:02:57 +0100702 return xfs_compat_attrlist_by_handle(filp, arg);
sandeen@sandeen.net28750972008-11-25 21:20:15 -0600703 case XFS_IOC_ATTRMULTI_BY_HANDLE_32:
Christoph Hellwigab596ad2009-01-19 02:02:57 +0100704 return xfs_compat_attrmulti_by_handle(filp, arg);
sandeen@sandeen.net710d62a2008-11-25 21:20:16 -0600705 case XFS_IOC_FSSETDM_BY_HANDLE_32:
Christoph Hellwigab596ad2009-01-19 02:02:57 +0100706 return xfs_compat_fssetdm_by_handle(filp, arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700707 default:
Eric Sandeenb474c7a2014-06-22 15:04:54 +1000708 return -ENOIOCTLCMD;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700709 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700710}