/*
 * Copyright (c) 2004-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include <linux/compat.h>
#include <linux/ioctl.h>
#include <asm/uaccess.h>
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_attr_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_vnode.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_itable.h"
#include "xfs_error.h"
#include "xfs_dfrag.h"
#include "xfs_vnodeops.h"
#include "xfs_fsops.h"
#include "xfs_alloc.h"
#include "xfs_rtalloc.h"
#include "xfs_attr.h"
#include "xfs_ioctl.h"
#include "xfs_ioctl32.h"

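/*
 * Rebuild a native ioctl command number from a compat one: keep the
 * direction, type and number bits but encode the size of the native
 * structure, so the request can be passed on to the regular handlers.
 */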
#define _NATIVE_IOC(cmd, type) \
	_IOC(_IOC_DIR(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd), sizeof(type))

#ifdef BROKEN_X86_ALIGNMENT
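/*
 * On x86, the 32-bit ABI aligns 64-bit members on 4-byte boundaries, so
 * the compat versions of these structures have a different size and
 * layout than their native counterparts and must be converted field by
 * field.
 */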
STATIC int
xfs_compat_flock64_copyin(
	xfs_flock64_t		*bf,
	compat_xfs_flock64_t	__user *arg32)
{
	if (get_user(bf->l_type,	&arg32->l_type) ||
	    get_user(bf->l_whence,	&arg32->l_whence) ||
	    get_user(bf->l_start,	&arg32->l_start) ||
	    get_user(bf->l_len,		&arg32->l_len) ||
	    get_user(bf->l_sysid,	&arg32->l_sysid) ||
	    get_user(bf->l_pid,		&arg32->l_pid) ||
	    copy_from_user(bf->l_pad,	&arg32->l_pad,	4*sizeof(u32)))
		return -XFS_ERROR(EFAULT);
	return 0;
}

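/*
 * XFS_IOC_FSGEOMETRY_V1_32: fetch the filesystem geometry and copy out
 * only as much as the 32-bit structure holds; the layouts match apart
 * from trailing padding.
 */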
STATIC int
xfs_compat_ioc_fsgeometry_v1(
	struct xfs_mount	  *mp,
	compat_xfs_fsop_geom_v1_t __user *arg32)
{
	xfs_fsop_geom_t		  fsgeo;
	int			  error;

	error = xfs_fs_geometry(mp, &fsgeo, 3);
	if (error)
		return -error;
	/* The 32-bit variant simply has some padding at the end */
	if (copy_to_user(arg32, &fsgeo, sizeof(struct compat_xfs_fsop_geom_v1)))
		return -XFS_ERROR(EFAULT);
	return 0;
}

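/* Copy in the 32-bit growfs argument structures field by field. */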
sandeen@sandeen.netd5547f92008-11-25 21:20:08 -060086STATIC int
sandeen@sandeen.net471d5912008-11-25 21:20:10 -060087xfs_compat_growfs_data_copyin(
88 struct xfs_growfs_data *in,
89 compat_xfs_growfs_data_t __user *arg32)
90{
91 if (get_user(in->newblocks, &arg32->newblocks) ||
92 get_user(in->imaxpct, &arg32->imaxpct))
93 return -XFS_ERROR(EFAULT);
94 return 0;
95}
96
STATIC int
xfs_compat_growfs_rt_copyin(
	struct xfs_growfs_rt	 *in,
	compat_xfs_growfs_rt_t	 __user *arg32)
{
	if (get_user(in->newblocks, &arg32->newblocks) ||
	    get_user(in->extsize, &arg32->extsize))
		return -XFS_ERROR(EFAULT);
	return 0;
}

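/*
 * Output formatter for XFS_IOC_FSINUMBERS_32: write the inode group
 * records into the user buffer using the packed 32-bit layout.
 */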
STATIC int
xfs_inumbers_fmt_compat(
	void			__user *ubuffer,
	const xfs_inogrp_t	*buffer,
	long			count,
	long			*written)
{
	compat_xfs_inogrp_t	__user *p32 = ubuffer;
	long			i;

	for (i = 0; i < count; i++) {
		if (put_user(buffer[i].xi_startino,   &p32[i].xi_startino) ||
		    put_user(buffer[i].xi_alloccount, &p32[i].xi_alloccount) ||
		    put_user(buffer[i].xi_allocmask,  &p32[i].xi_allocmask))
			return -XFS_ERROR(EFAULT);
	}
	*written = count * sizeof(*p32);
	return 0;
}

#else
#define xfs_inumbers_fmt_compat xfs_inumbers_fmt
#endif	/* BROKEN_X86_ALIGNMENT */

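/*
 * Copy a 32-bit bstime into the native xfs_bstime_t; tv_sec has to be
 * widened because compat_time_t is only 32 bits.
 */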
STATIC int
xfs_ioctl32_bstime_copyin(
	xfs_bstime_t		*bstime,
	compat_xfs_bstime_t	__user *bstime32)
{
	compat_time_t		sec32;	/* tv_sec differs on 64 vs. 32 */

	if (get_user(sec32, &bstime32->tv_sec) ||
	    get_user(bstime->tv_nsec, &bstime32->tv_nsec))
		return -XFS_ERROR(EFAULT);
	bstime->tv_sec = sec32;
	return 0;
}

/* xfs_bstat_t has differing alignment on intel, & bstime_t sizes everywhere */
STATIC int
xfs_ioctl32_bstat_copyin(
	xfs_bstat_t		*bstat,
	compat_xfs_bstat_t	__user *bstat32)
{
	if (get_user(bstat->bs_ino,	&bstat32->bs_ino)	||
	    get_user(bstat->bs_mode,	&bstat32->bs_mode)	||
	    get_user(bstat->bs_nlink,	&bstat32->bs_nlink)	||
	    get_user(bstat->bs_uid,	&bstat32->bs_uid)	||
	    get_user(bstat->bs_gid,	&bstat32->bs_gid)	||
	    get_user(bstat->bs_rdev,	&bstat32->bs_rdev)	||
	    get_user(bstat->bs_blksize,	&bstat32->bs_blksize)	||
	    get_user(bstat->bs_size,	&bstat32->bs_size)	||
	    xfs_ioctl32_bstime_copyin(&bstat->bs_atime, &bstat32->bs_atime) ||
	    xfs_ioctl32_bstime_copyin(&bstat->bs_mtime, &bstat32->bs_mtime) ||
	    xfs_ioctl32_bstime_copyin(&bstat->bs_ctime, &bstat32->bs_ctime) ||
	    get_user(bstat->bs_blocks,	&bstat32->bs_blocks)	||
	    get_user(bstat->bs_xflags,	&bstat32->bs_xflags)	||
	    get_user(bstat->bs_extsize,	&bstat32->bs_extsize)	||
	    get_user(bstat->bs_extents,	&bstat32->bs_extents)	||
	    get_user(bstat->bs_gen,	&bstat32->bs_gen)	||
	    get_user(bstat->bs_projid,	&bstat32->bs_projid)	||
	    get_user(bstat->bs_dmevmask, &bstat32->bs_dmevmask)	||
	    get_user(bstat->bs_dmstate,	&bstat32->bs_dmstate)	||
	    get_user(bstat->bs_aextents, &bstat32->bs_aextents))
		return -XFS_ERROR(EFAULT);
	return 0;
}

/* XFS_IOC_FSBULKSTAT and friends */

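/* Copy a native xfs_bstime_t out to the 32-bit layout, narrowing tv_sec. */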
STATIC int
xfs_bstime_store_compat(
	compat_xfs_bstime_t	__user *p32,
	const xfs_bstime_t	*p)
{
	__s32			sec32;

	sec32 = p->tv_sec;
	if (put_user(sec32, &p32->tv_sec) ||
	    put_user(p->tv_nsec, &p32->tv_nsec))
		return -XFS_ERROR(EFAULT);
	return 0;
}

/* Return 0 on success or positive error (to xfs_bulkstat()) */
STATIC int
xfs_bulkstat_one_fmt_compat(
	void			__user *ubuffer,
	int			ubsize,
	int			*ubused,
	const xfs_bstat_t	*buffer)
{
	compat_xfs_bstat_t	__user *p32 = ubuffer;

	if (ubsize < sizeof(*p32))
		return XFS_ERROR(ENOMEM);

	if (put_user(buffer->bs_ino,	  &p32->bs_ino)		||
	    put_user(buffer->bs_mode,	  &p32->bs_mode)	||
	    put_user(buffer->bs_nlink,	  &p32->bs_nlink)	||
	    put_user(buffer->bs_uid,	  &p32->bs_uid)		||
	    put_user(buffer->bs_gid,	  &p32->bs_gid)		||
	    put_user(buffer->bs_rdev,	  &p32->bs_rdev)	||
	    put_user(buffer->bs_blksize,  &p32->bs_blksize)	||
	    put_user(buffer->bs_size,	  &p32->bs_size)	||
	    xfs_bstime_store_compat(&p32->bs_atime, &buffer->bs_atime) ||
	    xfs_bstime_store_compat(&p32->bs_mtime, &buffer->bs_mtime) ||
	    xfs_bstime_store_compat(&p32->bs_ctime, &buffer->bs_ctime) ||
	    put_user(buffer->bs_blocks,	  &p32->bs_blocks)	||
	    put_user(buffer->bs_xflags,	  &p32->bs_xflags)	||
	    put_user(buffer->bs_extsize,  &p32->bs_extsize)	||
	    put_user(buffer->bs_extents,  &p32->bs_extents)	||
	    put_user(buffer->bs_gen,	  &p32->bs_gen)		||
	    put_user(buffer->bs_projid,	  &p32->bs_projid)	||
	    put_user(buffer->bs_dmevmask, &p32->bs_dmevmask)	||
	    put_user(buffer->bs_dmstate,  &p32->bs_dmstate)	||
	    put_user(buffer->bs_aextents, &p32->bs_aextents))
		return XFS_ERROR(EFAULT);
	if (ubused)
		*ubused = sizeof(*p32);
	return 0;
}

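/*
 * Wrapper with the bulkstat_one prototype expected by xfs_bulkstat();
 * it formats a single inode's stat data with the compat formatter above.
 */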
STATIC int
xfs_bulkstat_one_compat(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	ino,		/* inode number to get data for */
	void		__user *buffer,	/* buffer to place output in */
	int		ubsize,		/* size of buffer */
	void		*private_data,	/* my private data */
	xfs_daddr_t	bno,		/* starting bno of inode cluster */
	int		*ubused,	/* bytes used by me */
	void		*dibuff,	/* on-disk inode buffer */
	int		*stat)		/* BULKSTAT_RV_... */
{
	return xfs_bulkstat_one_int(mp, ino, buffer, ubsize,
				    xfs_bulkstat_one_fmt_compat, bno,
				    ubused, dibuff, stat);
}

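/*
 * XFS_IOC_FSBULKSTAT*_32 and XFS_IOC_FSINUMBERS_32: copy in the 32-bit
 * bulk request and run the native bulkstat/inumbers code with the
 * compat output formatters.
 */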
/* copied from xfs_ioctl.c */
STATIC int
xfs_compat_ioc_bulkstat(
	xfs_mount_t		  *mp,
	unsigned int		  cmd,
	compat_xfs_fsop_bulkreq_t __user *p32)
{
	u32			addr;
	xfs_fsop_bulkreq_t	bulkreq;
	int			count;	/* # of records returned */
	xfs_ino_t		inlast;	/* last inode number */
	int			done;
	int			error;

	/*
	 * done = 1 if there are more stats to get and if bulkstat
	 * should be called again (unused here, but used in dmapi)
	 */

	if (!capable(CAP_SYS_ADMIN))
		return -XFS_ERROR(EPERM);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -XFS_ERROR(EIO);

	if (get_user(addr, &p32->lastip))
		return -XFS_ERROR(EFAULT);
	bulkreq.lastip = compat_ptr(addr);
	if (get_user(bulkreq.icount, &p32->icount) ||
	    get_user(addr, &p32->ubuffer))
		return -XFS_ERROR(EFAULT);
	bulkreq.ubuffer = compat_ptr(addr);
	if (get_user(addr, &p32->ocount))
		return -XFS_ERROR(EFAULT);
	bulkreq.ocount = compat_ptr(addr);

	if (copy_from_user(&inlast, bulkreq.lastip, sizeof(__s64)))
		return -XFS_ERROR(EFAULT);

	if ((count = bulkreq.icount) <= 0)
		return -XFS_ERROR(EINVAL);

	if (bulkreq.ubuffer == NULL)
		return -XFS_ERROR(EINVAL);

	if (cmd == XFS_IOC_FSINUMBERS_32) {
		error = xfs_inumbers(mp, &inlast, &count,
				bulkreq.ubuffer, xfs_inumbers_fmt_compat);
	} else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE_32) {
		int res;

		error = xfs_bulkstat_one_compat(mp, inlast, bulkreq.ubuffer,
				sizeof(compat_xfs_bstat_t),
				NULL, 0, NULL, NULL, &res);
	} else if (cmd == XFS_IOC_FSBULKSTAT_32) {
		error = xfs_bulkstat(mp, &inlast, &count,
				xfs_bulkstat_one_compat, NULL,
				sizeof(compat_xfs_bstat_t), bulkreq.ubuffer,
				BULKSTAT_FG_QUICK, &done);
	} else
		error = XFS_ERROR(EINVAL);
	if (error)
		return -error;

	if (bulkreq.ocount != NULL) {
		if (copy_to_user(bulkreq.lastip, &inlast,
				 sizeof(xfs_ino_t)))
			return -XFS_ERROR(EFAULT);

		if (copy_to_user(bulkreq.ocount, &count, sizeof(count)))
			return -XFS_ERROR(EFAULT);
	}

	return 0;
}

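/*
 * Convert a compat handle request, which carries 32-bit user pointers,
 * into the native xfs_fsop_handlereq_t.
 */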
STATIC int
xfs_compat_handlereq_copyin(
	xfs_fsop_handlereq_t		*hreq,
	compat_xfs_fsop_handlereq_t	__user *arg32)
{
	compat_xfs_fsop_handlereq_t	hreq32;

	if (copy_from_user(&hreq32, arg32, sizeof(compat_xfs_fsop_handlereq_t)))
		return -XFS_ERROR(EFAULT);

	hreq->fd = hreq32.fd;
	hreq->path = compat_ptr(hreq32.path);
	hreq->oflags = hreq32.oflags;
	hreq->ihandle = compat_ptr(hreq32.ihandle);
	hreq->ihandlen = hreq32.ihandlen;
	hreq->ohandle = compat_ptr(hreq32.ohandle);
	hreq->ohandlen = compat_ptr(hreq32.ohandlen);

	return 0;
}

/*
 * Convert userspace handle data into inode.
 *
 * We use the fact that all the fsop_handlereq ioctl calls have a data
 * structure argument whose first component is always a xfs_fsop_handlereq_t,
 * so we can pass that sub structure into this handy, shared routine.
 *
 * If no error, caller must always iput the returned inode.
 */
STATIC int
xfs_vget_fsop_handlereq_compat(
	xfs_mount_t		*mp,
	struct inode		*parinode,	/* parent inode pointer */
	compat_xfs_fsop_handlereq_t *hreq,
	struct inode		**inode)
{
	void			__user *hanp;
	size_t			hlen;
	xfs_fid_t		*xfid;
	xfs_handle_t		*handlep;
	xfs_handle_t		handle;
	xfs_inode_t		*ip;
	xfs_ino_t		ino;
	__u32			igen;
	int			error;

	/*
	 * Only allow handle opens under a directory.
	 */
	if (!S_ISDIR(parinode->i_mode))
		return XFS_ERROR(ENOTDIR);

	hanp = compat_ptr(hreq->ihandle);
	hlen = hreq->ihandlen;
	handlep = &handle;

	if (hlen < sizeof(handlep->ha_fsid) || hlen > sizeof(*handlep))
		return XFS_ERROR(EINVAL);
	if (copy_from_user(handlep, hanp, hlen))
		return XFS_ERROR(EFAULT);
	if (hlen < sizeof(*handlep))
		memset(((char *)handlep) + hlen, 0, sizeof(*handlep) - hlen);
	if (hlen > sizeof(handlep->ha_fsid)) {
		if (handlep->ha_fid.fid_len !=
		    (hlen - sizeof(handlep->ha_fsid) -
			    sizeof(handlep->ha_fid.fid_len)) ||
		    handlep->ha_fid.fid_pad)
			return XFS_ERROR(EINVAL);
	}

	/*
	 * Crack the handle, obtain the inode # & generation #
	 */
	xfid = (struct xfs_fid *)&handlep->ha_fid;
	if (xfid->fid_len == sizeof(*xfid) - sizeof(xfid->fid_len)) {
		ino = xfid->fid_ino;
		igen = xfid->fid_gen;
	} else {
		return XFS_ERROR(EINVAL);
	}

	/*
	 * Get the XFS inode, building a Linux inode to go with it.
	 */
	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, 0);
	if (error)
		return error;
	if (ip == NULL)
		return XFS_ERROR(EIO);
	if (ip->i_d.di_gen != igen) {
		xfs_iput_new(ip, XFS_ILOCK_SHARED);
		return XFS_ERROR(ENOENT);
	}

	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	*inode = VFS_I(ip);
	return 0;
}

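/*
 * 32-bit counterpart of XFS_IOC_ATTRLIST_BY_HANDLE: resolve the handle
 * to an inode, list its extended attributes into a kernel buffer and
 * copy the result back to the caller.
 */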
STATIC int
xfs_compat_attrlist_by_handle(
	xfs_mount_t		*mp,
	void			__user *arg,
	struct inode		*parinode)
{
	int			error;
	attrlist_cursor_kern_t	*cursor;
	compat_xfs_fsop_attrlist_handlereq_t al_hreq;
	struct inode		*inode;
	char			*kbuf;

	if (!capable(CAP_SYS_ADMIN))
		return -XFS_ERROR(EPERM);
	if (copy_from_user(&al_hreq, arg,
			   sizeof(compat_xfs_fsop_attrlist_handlereq_t)))
		return -XFS_ERROR(EFAULT);
	if (al_hreq.buflen > XATTR_LIST_MAX)
		return -XFS_ERROR(EINVAL);

	/*
	 * Reject flags, only allow namespaces.
	 */
	if (al_hreq.flags & ~(ATTR_ROOT | ATTR_SECURE))
		return -XFS_ERROR(EINVAL);

	error = xfs_vget_fsop_handlereq_compat(mp, parinode, &al_hreq.hreq,
					       &inode);
	if (error)
		goto out;

	kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
	if (!kbuf) {
		error = XFS_ERROR(ENOMEM);
		goto out_vn_rele;
	}

	cursor = (attrlist_cursor_kern_t *)&al_hreq.pos;
	error = xfs_attr_list(XFS_I(inode), kbuf, al_hreq.buflen,
			      al_hreq.flags, cursor);
	if (error)
		goto out_kfree;

	if (copy_to_user(compat_ptr(al_hreq.buffer), kbuf, al_hreq.buflen))
		error = XFS_ERROR(EFAULT);

 out_kfree:
	kfree(kbuf);
 out_vn_rele:
	iput(inode);
 out:
	return -error;
}

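/*
 * 32-bit counterpart of XFS_IOC_ATTRMULTI_BY_HANDLE: copy in an array of
 * compat attr operations and apply each get/set/remove against the inode
 * referenced by the handle, recording per-operation errors.
 */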
STATIC int
xfs_compat_attrmulti_by_handle(
	xfs_mount_t				*mp,
	void					__user *arg,
	struct inode				*parinode)
{
	int					error;
	compat_xfs_attr_multiop_t		*ops;
	compat_xfs_fsop_attrmulti_handlereq_t	am_hreq;
	struct inode				*inode;
	unsigned int				i, size;
	char					*attr_name;

	if (!capable(CAP_SYS_ADMIN))
		return -XFS_ERROR(EPERM);
	if (copy_from_user(&am_hreq, arg,
			   sizeof(compat_xfs_fsop_attrmulti_handlereq_t)))
		return -XFS_ERROR(EFAULT);

	error = xfs_vget_fsop_handlereq_compat(mp, parinode, &am_hreq.hreq,
					       &inode);
	if (error)
		goto out;

	error = E2BIG;
	size = am_hreq.opcount * sizeof(compat_xfs_attr_multiop_t);
	if (!size || size > 16 * PAGE_SIZE)
		goto out_vn_rele;

	error = ENOMEM;
	ops = kmalloc(size, GFP_KERNEL);
	if (!ops)
		goto out_vn_rele;

	error = EFAULT;
	if (copy_from_user(ops, compat_ptr(am_hreq.ops), size))
		goto out_kfree_ops;

	error = ENOMEM;
	attr_name = kmalloc(MAXNAMELEN, GFP_KERNEL);
	if (!attr_name)
		goto out_kfree_ops;

	error = 0;
	for (i = 0; i < am_hreq.opcount; i++) {
		ops[i].am_error = strncpy_from_user(attr_name,
				compat_ptr(ops[i].am_attrname),
				MAXNAMELEN);
		if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN)
			error = -ERANGE;
		if (ops[i].am_error < 0)
			break;

		switch (ops[i].am_opcode) {
		case ATTR_OP_GET:
			ops[i].am_error = xfs_attrmulti_attr_get(inode,
					attr_name,
					compat_ptr(ops[i].am_attrvalue),
					&ops[i].am_length, ops[i].am_flags);
			break;
		case ATTR_OP_SET:
			ops[i].am_error = xfs_attrmulti_attr_set(inode,
					attr_name,
					compat_ptr(ops[i].am_attrvalue),
					ops[i].am_length, ops[i].am_flags);
			break;
		case ATTR_OP_REMOVE:
			ops[i].am_error = xfs_attrmulti_attr_remove(inode,
					attr_name, ops[i].am_flags);
			break;
		default:
			ops[i].am_error = EINVAL;
		}
	}

	if (copy_to_user(compat_ptr(am_hreq.ops), ops, size))
		error = XFS_ERROR(EFAULT);

	kfree(attr_name);
 out_kfree_ops:
	kfree(ops);
 out_vn_rele:
	iput(inode);
 out:
	return -error;
}

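/*
 * 32-bit counterpart of XFS_IOC_FSSETDM_BY_HANDLE: set the DMAPI event
 * mask and state on the inode referenced by the handle.
 */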
STATIC int
xfs_compat_fssetdm_by_handle(
	xfs_mount_t		*mp,
	void			__user *arg,
	struct inode		*parinode)
{
	int			error;
	struct fsdmidata	fsd;
	compat_xfs_fsop_setdm_handlereq_t dmhreq;
	struct inode		*inode;

	if (!capable(CAP_MKNOD))
		return -XFS_ERROR(EPERM);
	if (copy_from_user(&dmhreq, arg,
			   sizeof(compat_xfs_fsop_setdm_handlereq_t)))
		return -XFS_ERROR(EFAULT);

	error = xfs_vget_fsop_handlereq_compat(mp, parinode, &dmhreq.hreq,
					       &inode);
	if (error)
		return -error;

	if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) {
		error = -XFS_ERROR(EPERM);
		goto out;
	}

	if (copy_from_user(&fsd, compat_ptr(dmhreq.data), sizeof(fsd))) {
		error = -XFS_ERROR(EFAULT);
		goto out;
	}

	error = -xfs_set_dmattrs(XFS_I(inode), fsd.fsd_dmevmask,
				 fsd.fsd_dmstate);

out:
	iput(inode);
	return error;
}

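/*
 * Top-level compat ioctl handler.  Commands whose structures have the
 * same size and layout in both ABIs are passed straight to
 * xfs_file_ioctl(); everything else is translated here first.
 */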
long
xfs_file_compat_ioctl(
	struct file		*filp,
	unsigned		cmd,
	unsigned long		p)
{
	struct inode		*inode = filp->f_path.dentry->d_inode;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	void			__user *arg = (void __user *)p;
	int			ioflags = 0;
	int			error;

	if (filp->f_mode & FMODE_NOCMTIME)
		ioflags |= IO_INVIS;

	xfs_itrace_entry(ip);

	switch (cmd) {
	/* No size or alignment issues on any arch */
	case XFS_IOC_DIOINFO:
	case XFS_IOC_FSGEOMETRY:
	case XFS_IOC_FSGETXATTR:
	case XFS_IOC_FSSETXATTR:
	case XFS_IOC_FSGETXATTRA:
	case XFS_IOC_FSSETDM:
	case XFS_IOC_GETBMAP:
	case XFS_IOC_GETBMAPA:
	case XFS_IOC_GETBMAPX:
	case XFS_IOC_FSCOUNTS:
	case XFS_IOC_SET_RESBLKS:
	case XFS_IOC_GET_RESBLKS:
	case XFS_IOC_FSGROWFSLOG:
	case XFS_IOC_FREEZE:
	case XFS_IOC_THAW:
	case XFS_IOC_GOINGDOWN:
	case XFS_IOC_ERROR_INJECTION:
	case XFS_IOC_ERROR_CLEARALL:
		return xfs_file_ioctl(filp, cmd, p);
#ifndef BROKEN_X86_ALIGNMENT
	/* These are handled fine if no alignment issues */
	case XFS_IOC_ALLOCSP:
	case XFS_IOC_FREESP:
	case XFS_IOC_RESVSP:
	case XFS_IOC_UNRESVSP:
	case XFS_IOC_ALLOCSP64:
	case XFS_IOC_FREESP64:
	case XFS_IOC_RESVSP64:
	case XFS_IOC_UNRESVSP64:
	case XFS_IOC_FSGEOMETRY_V1:
	case XFS_IOC_FSGROWFSDATA:
	case XFS_IOC_FSGROWFSRT:
		return xfs_file_ioctl(filp, cmd, p);
#else
	case XFS_IOC_ALLOCSP_32:
	case XFS_IOC_FREESP_32:
	case XFS_IOC_ALLOCSP64_32:
	case XFS_IOC_FREESP64_32:
	case XFS_IOC_RESVSP_32:
	case XFS_IOC_UNRESVSP_32:
	case XFS_IOC_RESVSP64_32:
	case XFS_IOC_UNRESVSP64_32: {
		struct xfs_flock64	bf;

		if (xfs_compat_flock64_copyin(&bf, arg))
			return -XFS_ERROR(EFAULT);
		cmd = _NATIVE_IOC(cmd, struct xfs_flock64);
		return xfs_ioc_space(ip, inode, filp, ioflags, cmd, &bf);
	}
	case XFS_IOC_FSGEOMETRY_V1_32:
		return xfs_compat_ioc_fsgeometry_v1(mp, arg);
	case XFS_IOC_FSGROWFSDATA_32: {
		struct xfs_growfs_data	in;

		if (xfs_compat_growfs_data_copyin(&in, arg))
			return -XFS_ERROR(EFAULT);
		error = xfs_growfs_data(mp, &in);
		return -error;
	}
	case XFS_IOC_FSGROWFSRT_32: {
		struct xfs_growfs_rt	in;

		if (xfs_compat_growfs_rt_copyin(&in, arg))
			return -XFS_ERROR(EFAULT);
		error = xfs_growfs_rt(mp, &in);
		return -error;
	}
#endif
	/* long changes size, but xfs only copies out 32 bits */
	case XFS_IOC_GETXFLAGS_32:
	case XFS_IOC_SETXFLAGS_32:
	case XFS_IOC_GETVERSION_32:
		cmd = _NATIVE_IOC(cmd, long);
		return xfs_file_ioctl(filp, cmd, p);
	case XFS_IOC_SWAPEXT: {
		struct xfs_swapext	  sxp;
		struct compat_xfs_swapext __user *sxu = arg;

		/* Bulk copy in up to the sx_stat field, then copy bstat */
		if (copy_from_user(&sxp, sxu,
				   offsetof(struct xfs_swapext, sx_stat)) ||
		    xfs_ioctl32_bstat_copyin(&sxp.sx_stat, &sxu->sx_stat))
			return -XFS_ERROR(EFAULT);
		error = xfs_swapext(&sxp);
		return -error;
	}
	case XFS_IOC_FSBULKSTAT_32:
	case XFS_IOC_FSBULKSTAT_SINGLE_32:
	case XFS_IOC_FSINUMBERS_32:
		return xfs_compat_ioc_bulkstat(mp, cmd, arg);
	case XFS_IOC_FD_TO_HANDLE_32:
	case XFS_IOC_PATH_TO_HANDLE_32:
	case XFS_IOC_PATH_TO_FSHANDLE_32: {
		struct xfs_fsop_handlereq	hreq;

		if (xfs_compat_handlereq_copyin(&hreq, arg))
			return -XFS_ERROR(EFAULT);
		cmd = _NATIVE_IOC(cmd, struct xfs_fsop_handlereq);
		return xfs_find_handle(cmd, &hreq);
	}
	case XFS_IOC_OPEN_BY_HANDLE_32: {
		struct xfs_fsop_handlereq	hreq;

		if (xfs_compat_handlereq_copyin(&hreq, arg))
			return -XFS_ERROR(EFAULT);
		return xfs_open_by_handle(mp, &hreq, filp, inode);
	}
	case XFS_IOC_READLINK_BY_HANDLE_32: {
		struct xfs_fsop_handlereq	hreq;

		if (xfs_compat_handlereq_copyin(&hreq, arg))
			return -XFS_ERROR(EFAULT);
		return xfs_readlink_by_handle(mp, &hreq, inode);
	}
	case XFS_IOC_ATTRLIST_BY_HANDLE_32:
		return xfs_compat_attrlist_by_handle(mp, arg, inode);
	case XFS_IOC_ATTRMULTI_BY_HANDLE_32:
		return xfs_compat_attrmulti_by_handle(mp, arg, inode);
	case XFS_IOC_FSSETDM_BY_HANDLE_32:
		return xfs_compat_fssetdm_by_handle(mp, arg, inode);
	default:
		return -XFS_ERROR(ENOIOCTLCMD);
	}
}