/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mount.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/bit_spinlock.h>
#include <linux/security.h>
#include <linux/xattr.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/uuid.h>
#include <linux/btrfs.h>
#include <linux/uaccess.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "volumes.h"
#include "locking.h"
#include "inode-map.h"
#include "backref.h"
#include "rcu-string.h"
#include "send.h"
#include "dev-replace.h"
#include "props.h"
#include "sysfs.h"
#include "qgroup.h"
#include "tree-log.h"
#include "compression.h"

#ifdef CONFIG_64BIT
/* If we have a 32-bit userspace and 64-bit kernel, then the UAPI
 * structures are incorrect, as the timespec structure from userspace
 * is 4 bytes too small. We define these alternatives here to teach
 * the kernel about the 32-bit struct packing.
 */
struct btrfs_ioctl_timespec_32 {
	__u64 sec;
	__u32 nsec;
} __attribute__ ((__packed__));

struct btrfs_ioctl_received_subvol_args_32 {
	char	uuid[BTRFS_UUID_SIZE];	/* in */
	__u64	stransid;		/* in */
	__u64	rtransid;		/* out */
	struct btrfs_ioctl_timespec_32 stime; /* in */
	struct btrfs_ioctl_timespec_32 rtime; /* out */
	__u64	flags;			/* in */
	__u64	reserved[16];		/* in */
} __attribute__ ((__packed__));

#define BTRFS_IOC_SET_RECEIVED_SUBVOL_32 _IOWR(BTRFS_IOCTL_MAGIC, 37, \
				struct btrfs_ioctl_received_subvol_args_32)
#endif


static int btrfs_clone(struct inode *src, struct inode *inode,
		       u64 off, u64 olen, u64 olen_aligned, u64 destoff,
		       int no_time_update);
/* Mask out flags that are inappropriate for the given type of inode. */
static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags)
{
	if (S_ISDIR(mode))
		return flags;
	else if (S_ISREG(mode))
		return flags & ~FS_DIRSYNC_FL;
	else
		return flags & (FS_NODUMP_FL | FS_NOATIME_FL);
}

/*
 * Export inode flags to the format expected by the FS_IOC_GETFLAGS ioctl.
 */
static unsigned int btrfs_flags_to_ioctl(unsigned int flags)
{
	unsigned int iflags = 0;

	if (flags & BTRFS_INODE_SYNC)
		iflags |= FS_SYNC_FL;
	if (flags & BTRFS_INODE_IMMUTABLE)
		iflags |= FS_IMMUTABLE_FL;
	if (flags & BTRFS_INODE_APPEND)
		iflags |= FS_APPEND_FL;
	if (flags & BTRFS_INODE_NODUMP)
		iflags |= FS_NODUMP_FL;
	if (flags & BTRFS_INODE_NOATIME)
		iflags |= FS_NOATIME_FL;
	if (flags & BTRFS_INODE_DIRSYNC)
		iflags |= FS_DIRSYNC_FL;
	if (flags & BTRFS_INODE_NODATACOW)
		iflags |= FS_NOCOW_FL;

	if ((flags & BTRFS_INODE_COMPRESS) && !(flags & BTRFS_INODE_NOCOMPRESS))
		iflags |= FS_COMPR_FL;
	else if (flags & BTRFS_INODE_NOCOMPRESS)
		iflags |= FS_NOCOMP_FL;

	return iflags;
}

/*
 * Update inode->i_flags based on the btrfs internal flags.
 */
void btrfs_update_iflags(struct inode *inode)
{
	struct btrfs_inode *ip = BTRFS_I(inode);
	unsigned int new_fl = 0;

	if (ip->flags & BTRFS_INODE_SYNC)
		new_fl |= S_SYNC;
	if (ip->flags & BTRFS_INODE_IMMUTABLE)
		new_fl |= S_IMMUTABLE;
	if (ip->flags & BTRFS_INODE_APPEND)
		new_fl |= S_APPEND;
	if (ip->flags & BTRFS_INODE_NOATIME)
		new_fl |= S_NOATIME;
	if (ip->flags & BTRFS_INODE_DIRSYNC)
		new_fl |= S_DIRSYNC;

	set_mask_bits(&inode->i_flags,
		      S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME | S_DIRSYNC,
		      new_fl);
}

/*
 * Inherit flags from the parent inode.
 *
 * Currently only the compression flags and the cow flags are inherited.
 */
void btrfs_inherit_iflags(struct inode *inode, struct inode *dir)
{
	unsigned int flags;

	if (!dir)
		return;

	flags = BTRFS_I(dir)->flags;

	if (flags & BTRFS_INODE_NOCOMPRESS) {
		BTRFS_I(inode)->flags &= ~BTRFS_INODE_COMPRESS;
		BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
	} else if (flags & BTRFS_INODE_COMPRESS) {
		BTRFS_I(inode)->flags &= ~BTRFS_INODE_NOCOMPRESS;
		BTRFS_I(inode)->flags |= BTRFS_INODE_COMPRESS;
	}

	if (flags & BTRFS_INODE_NODATACOW) {
		BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
		if (S_ISREG(inode->i_mode))
			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
	}

	btrfs_update_iflags(inode);
}

static int btrfs_ioctl_getflags(struct file *file, void __user *arg)
{
	struct btrfs_inode *ip = BTRFS_I(file_inode(file));
	unsigned int flags = btrfs_flags_to_ioctl(ip->flags);

	if (copy_to_user(arg, &flags, sizeof(flags)))
		return -EFAULT;
	return 0;
}

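/*
 * Reject FS_*_FL bits that btrfs does not support and combinations that
 * contradict each other.
 */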
static int check_flags(unsigned int flags)
{
	if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \
		      FS_NOATIME_FL | FS_NODUMP_FL | \
		      FS_SYNC_FL | FS_DIRSYNC_FL | \
		      FS_NOCOMP_FL | FS_COMPR_FL |
		      FS_NOCOW_FL))
		return -EOPNOTSUPP;

	if ((flags & FS_NOCOMP_FL) && (flags & FS_COMPR_FL))
		return -EINVAL;

	return 0;
}

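/*
 * FS_IOC_SETFLAGS: apply the generic FS_*_FL flags to the inode, updating
 * the btrfs inode flags, the compression property and the on-disk inode
 * item in a new transaction. On failure the old flags are restored.
 */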
static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_inode *ip = BTRFS_I(inode);
	struct btrfs_root *root = ip->root;
	struct btrfs_trans_handle *trans;
	unsigned int flags, oldflags;
	int ret;
	u64 ip_oldflags;
	unsigned int i_oldflags;
	umode_t mode;

	if (!inode_owner_or_capable(inode))
		return -EPERM;

	if (btrfs_root_readonly(root))
		return -EROFS;

	if (copy_from_user(&flags, arg, sizeof(flags)))
		return -EFAULT;

	ret = check_flags(flags);
	if (ret)
		return ret;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	inode_lock(inode);

	ip_oldflags = ip->flags;
	i_oldflags = inode->i_flags;
	mode = inode->i_mode;

	flags = btrfs_mask_flags(inode->i_mode, flags);
	oldflags = btrfs_flags_to_ioctl(ip->flags);
	if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) {
		if (!capable(CAP_LINUX_IMMUTABLE)) {
			ret = -EPERM;
			goto out_unlock;
		}
	}

	if (flags & FS_SYNC_FL)
		ip->flags |= BTRFS_INODE_SYNC;
	else
		ip->flags &= ~BTRFS_INODE_SYNC;
	if (flags & FS_IMMUTABLE_FL)
		ip->flags |= BTRFS_INODE_IMMUTABLE;
	else
		ip->flags &= ~BTRFS_INODE_IMMUTABLE;
	if (flags & FS_APPEND_FL)
		ip->flags |= BTRFS_INODE_APPEND;
	else
		ip->flags &= ~BTRFS_INODE_APPEND;
	if (flags & FS_NODUMP_FL)
		ip->flags |= BTRFS_INODE_NODUMP;
	else
		ip->flags &= ~BTRFS_INODE_NODUMP;
	if (flags & FS_NOATIME_FL)
		ip->flags |= BTRFS_INODE_NOATIME;
	else
		ip->flags &= ~BTRFS_INODE_NOATIME;
	if (flags & FS_DIRSYNC_FL)
		ip->flags |= BTRFS_INODE_DIRSYNC;
	else
		ip->flags &= ~BTRFS_INODE_DIRSYNC;
	if (flags & FS_NOCOW_FL) {
		if (S_ISREG(mode)) {
			/*
			 * It's safe to turn csums off here, no extents exist.
			 * Otherwise we want the flag to reflect the real COW
			 * status of the file and will not set it.
			 */
			if (inode->i_size == 0)
				ip->flags |= BTRFS_INODE_NODATACOW
					   | BTRFS_INODE_NODATASUM;
		} else {
			ip->flags |= BTRFS_INODE_NODATACOW;
		}
	} else {
		/*
		 * Revert back under the same assumptions as above
		 */
		if (S_ISREG(mode)) {
			if (inode->i_size == 0)
				ip->flags &= ~(BTRFS_INODE_NODATACOW
					     | BTRFS_INODE_NODATASUM);
		} else {
			ip->flags &= ~BTRFS_INODE_NODATACOW;
		}
	}

	/*
	 * The COMPRESS flag can only be changed by users, while the NOCOMPRESS
	 * flag may be changed automatically if compression code won't make
	 * things smaller.
	 */
	if (flags & FS_NOCOMP_FL) {
		ip->flags &= ~BTRFS_INODE_COMPRESS;
		ip->flags |= BTRFS_INODE_NOCOMPRESS;

		ret = btrfs_set_prop(inode, "btrfs.compression", NULL, 0, 0);
		if (ret && ret != -ENODATA)
			goto out_drop;
	} else if (flags & FS_COMPR_FL) {
		const char *comp;

		ip->flags |= BTRFS_INODE_COMPRESS;
		ip->flags &= ~BTRFS_INODE_NOCOMPRESS;

		if (root->fs_info->compress_type == BTRFS_COMPRESS_LZO)
			comp = "lzo";
		else
			comp = "zlib";
		ret = btrfs_set_prop(inode, "btrfs.compression",
				     comp, strlen(comp), 0);
		if (ret)
			goto out_drop;

	} else {
		ret = btrfs_set_prop(inode, "btrfs.compression", NULL, 0, 0);
		if (ret && ret != -ENODATA)
			goto out_drop;
		ip->flags &= ~(BTRFS_INODE_COMPRESS | BTRFS_INODE_NOCOMPRESS);
	}

	trans = btrfs_start_transaction(root, 1);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_drop;
	}

	btrfs_update_iflags(inode);
	inode_inc_iversion(inode);
	inode->i_ctime = current_fs_time(inode->i_sb);
	ret = btrfs_update_inode(trans, root, inode);

	btrfs_end_transaction(trans, root);
 out_drop:
	if (ret) {
		ip->flags = ip_oldflags;
		inode->i_flags = i_oldflags;
	}

 out_unlock:
	inode_unlock(inode);
	mnt_drop_write_file(file);
	return ret;
}

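/* FS_IOC_GETVERSION: return the inode generation number to user space. */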
static int btrfs_ioctl_getversion(struct file *file, int __user *arg)
{
	struct inode *inode = file_inode(file);

	return put_user(inode->i_generation, arg);
}

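/*
 * FITRIM: discard unused space on all devices that support discard,
 * after clamping the requested range and minimum length to what the
 * filesystem and its devices can actually do.
 */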
static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(file_inode(file)->i_sb);
	struct btrfs_device *device;
	struct request_queue *q;
	struct fstrim_range range;
	u64 minlen = ULLONG_MAX;
	u64 num_devices = 0;
	u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &fs_info->fs_devices->devices,
				dev_list) {
		if (!device->bdev)
			continue;
		q = bdev_get_queue(device->bdev);
		if (blk_queue_discard(q)) {
			num_devices++;
			minlen = min((u64)q->limits.discard_granularity,
				     minlen);
		}
	}
	rcu_read_unlock();

	if (!num_devices)
		return -EOPNOTSUPP;
	if (copy_from_user(&range, arg, sizeof(range)))
		return -EFAULT;
	if (range.start > total_bytes ||
	    range.len < fs_info->sb->s_blocksize)
		return -EINVAL;

	range.len = min(range.len, total_bytes - range.start);
	range.minlen = max(range.minlen, minlen);
	ret = btrfs_trim_fs(fs_info->tree_root, &range);
	if (ret < 0)
		return ret;

	if (copy_to_user(arg, &range, sizeof(range)))
		return -EFAULT;

	return 0;
}

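/* Return 1 if the given UUID is all zeros, 0 otherwise. */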
int btrfs_is_empty_uuid(u8 *uuid)
{
	int i;

	for (i = 0; i < BTRFS_UUID_SIZE; i++) {
		if (uuid[i])
			return 0;
	}
	return 1;
}

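/*
 * Create a new, empty subvolume below @dir: allocate an object id, build
 * the root item and its single leaf, and wire the new root into the
 * directory with a dir item, root ref and UUID tree entry.
 */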
static noinline int create_subvol(struct inode *dir,
				  struct dentry *dentry,
				  char *name, int namelen,
				  u64 *async_transid,
				  struct btrfs_qgroup_inherit *inherit)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_key key;
	struct btrfs_root_item root_item;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_root *new_root;
	struct btrfs_block_rsv block_rsv;
	struct timespec cur_time = current_fs_time(dir->i_sb);
	struct inode *inode;
	int ret;
	int err;
	u64 objectid;
	u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID;
	u64 index = 0;
	u64 qgroup_reserved;
	uuid_le new_uuid;

	ret = btrfs_find_free_objectid(root->fs_info->tree_root, &objectid);
	if (ret)
		return ret;

	/*
	 * Don't create subvolume whose level is not zero. Or qgroup will be
	 * screwed up since it assumes the subvolume qgroup's level to be 0.
	 */
	if (btrfs_qgroup_level(objectid))
		return -ENOSPC;

	btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
	/*
	 * The same as the snapshot creation, please see the comment
	 * of create_snapshot().
	 */
	ret = btrfs_subvolume_reserve_metadata(root, &block_rsv,
					       8, &qgroup_reserved, false);
	if (ret)
		return ret;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		btrfs_subvolume_release_metadata(root, &block_rsv,
						 qgroup_reserved);
		return ret;
	}
	trans->block_rsv = &block_rsv;
	trans->bytes_reserved = block_rsv.size;

	ret = btrfs_qgroup_inherit(trans, root->fs_info, 0, objectid, inherit);
	if (ret)
		goto fail;

	leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0);
	if (IS_ERR(leaf)) {
		ret = PTR_ERR(leaf);
		goto fail;
	}

	memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
	btrfs_set_header_bytenr(leaf, leaf->start);
	btrfs_set_header_generation(leaf, trans->transid);
	btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(leaf, objectid);

	write_extent_buffer(leaf, root->fs_info->fsid, btrfs_header_fsid(),
			    BTRFS_FSID_SIZE);
	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
			    btrfs_header_chunk_tree_uuid(leaf),
			    BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	memset(&root_item, 0, sizeof(root_item));

	inode_item = &root_item.inode;
	btrfs_set_stack_inode_generation(inode_item, 1);
	btrfs_set_stack_inode_size(inode_item, 3);
	btrfs_set_stack_inode_nlink(inode_item, 1);
	btrfs_set_stack_inode_nbytes(inode_item, root->nodesize);
	btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);

	btrfs_set_root_flags(&root_item, 0);
	btrfs_set_root_limit(&root_item, 0);
	btrfs_set_stack_inode_flags(inode_item, BTRFS_INODE_ROOT_ITEM_INIT);

	btrfs_set_root_bytenr(&root_item, leaf->start);
	btrfs_set_root_generation(&root_item, trans->transid);
	btrfs_set_root_level(&root_item, 0);
	btrfs_set_root_refs(&root_item, 1);
	btrfs_set_root_used(&root_item, leaf->len);
	btrfs_set_root_last_snapshot(&root_item, 0);

	btrfs_set_root_generation_v2(&root_item,
			btrfs_root_generation(&root_item));
	uuid_le_gen(&new_uuid);
	memcpy(root_item.uuid, new_uuid.b, BTRFS_UUID_SIZE);
	btrfs_set_stack_timespec_sec(&root_item.otime, cur_time.tv_sec);
	btrfs_set_stack_timespec_nsec(&root_item.otime, cur_time.tv_nsec);
	root_item.ctime = root_item.otime;
	btrfs_set_root_ctransid(&root_item, trans->transid);
	btrfs_set_root_otransid(&root_item, trans->transid);

	btrfs_tree_unlock(leaf);
	free_extent_buffer(leaf);
	leaf = NULL;

	btrfs_set_root_dirid(&root_item, new_dirid);

	key.objectid = objectid;
	key.offset = 0;
	key.type = BTRFS_ROOT_ITEM_KEY;
	ret = btrfs_insert_root(trans, root->fs_info->tree_root, &key,
				&root_item);
	if (ret)
		goto fail;

	key.offset = (u64)-1;
	new_root = btrfs_read_fs_root_no_name(root->fs_info, &key);
	if (IS_ERR(new_root)) {
		ret = PTR_ERR(new_root);
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	btrfs_record_root_in_trans(trans, new_root);

	ret = btrfs_create_subvol_root(trans, new_root, root, new_dirid);
	if (ret) {
		/* We potentially lose an unused inode item here */
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	mutex_lock(&new_root->objectid_mutex);
	new_root->highest_objectid = new_dirid;
	mutex_unlock(&new_root->objectid_mutex);

	/*
	 * insert the directory item
	 */
	ret = btrfs_set_inode_index(dir, &index);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	ret = btrfs_insert_dir_item(trans, root,
				    name, namelen, dir, &key,
				    BTRFS_FT_DIR, index);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	btrfs_i_size_write(dir, dir->i_size + namelen * 2);
	ret = btrfs_update_inode(trans, root, dir);
	BUG_ON(ret);

	ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
				 objectid, root->root_key.objectid,
				 btrfs_ino(dir), index, name, namelen);
	BUG_ON(ret);

	ret = btrfs_uuid_tree_add(trans, root->fs_info->uuid_root,
				  root_item.uuid, BTRFS_UUID_KEY_SUBVOL,
				  objectid);
	if (ret)
		btrfs_abort_transaction(trans, root, ret);

fail:
	trans->block_rsv = NULL;
	trans->bytes_reserved = 0;
	btrfs_subvolume_release_metadata(root, &block_rsv, qgroup_reserved);

	if (async_transid) {
		*async_transid = trans->transid;
		err = btrfs_commit_transaction_async(trans, root, 1);
		if (err)
			err = btrfs_commit_transaction(trans, root);
	} else {
		err = btrfs_commit_transaction(trans, root);
	}
	if (err && !ret)
		ret = err;

	if (!ret) {
		inode = btrfs_lookup_dentry(dir, dentry);
		if (IS_ERR(inode))
			return PTR_ERR(inode);
		d_instantiate(dentry, inode);
	}
	return ret;
}

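/* Wait until no more writers are accounted in root->subv_writers. */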
static void btrfs_wait_for_no_snapshoting_writes(struct btrfs_root *root)
{
	s64 writers;
	DEFINE_WAIT(wait);

	do {
		prepare_to_wait(&root->subv_writers->wait, &wait,
				TASK_UNINTERRUPTIBLE);

		writers = percpu_counter_sum(&root->subv_writers->counter);
		if (writers)
			schedule();

		finish_wait(&root->subv_writers->wait, &wait);
	} while (writers);
}

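/*
 * Snapshot the given subvolume: flush dirty data, reserve metadata, queue a
 * btrfs_pending_snapshot on the current transaction and commit it, so the
 * snapshot is actually created at transaction commit time.
 */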
static int create_snapshot(struct btrfs_root *root, struct inode *dir,
			   struct dentry *dentry, char *name, int namelen,
			   u64 *async_transid, bool readonly,
			   struct btrfs_qgroup_inherit *inherit)
{
	struct inode *inode;
	struct btrfs_pending_snapshot *pending_snapshot;
	struct btrfs_trans_handle *trans;
	int ret;

	if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
		return -EINVAL;

	pending_snapshot = kzalloc(sizeof(*pending_snapshot), GFP_NOFS);
	if (!pending_snapshot)
		return -ENOMEM;

	pending_snapshot->root_item = kzalloc(sizeof(struct btrfs_root_item),
			GFP_NOFS);
	pending_snapshot->path = btrfs_alloc_path();
	if (!pending_snapshot->root_item || !pending_snapshot->path) {
		ret = -ENOMEM;
		goto free_pending;
	}

	atomic_inc(&root->will_be_snapshoted);
	smp_mb__after_atomic();
	btrfs_wait_for_no_snapshoting_writes(root);

	ret = btrfs_start_delalloc_inodes(root, 0);
	if (ret)
		goto dec_and_free;

	btrfs_wait_ordered_extents(root, -1);

	btrfs_init_block_rsv(&pending_snapshot->block_rsv,
			     BTRFS_BLOCK_RSV_TEMP);
	/*
	 * 1 - parent dir inode
	 * 2 - dir entries
	 * 1 - root item
	 * 2 - root ref/backref
	 * 1 - root of snapshot
	 * 1 - UUID item
	 */
	ret = btrfs_subvolume_reserve_metadata(BTRFS_I(dir)->root,
					&pending_snapshot->block_rsv, 8,
					&pending_snapshot->qgroup_reserved,
					false);
	if (ret)
		goto dec_and_free;

	pending_snapshot->dentry = dentry;
	pending_snapshot->root = root;
	pending_snapshot->readonly = readonly;
	pending_snapshot->dir = dir;
	pending_snapshot->inherit = inherit;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto fail;
	}

	spin_lock(&root->fs_info->trans_lock);
	list_add(&pending_snapshot->list,
		 &trans->transaction->pending_snapshots);
	spin_unlock(&root->fs_info->trans_lock);
	if (async_transid) {
		*async_transid = trans->transid;
		ret = btrfs_commit_transaction_async(trans,
				     root->fs_info->extent_root, 1);
		if (ret)
			ret = btrfs_commit_transaction(trans, root);
	} else {
		ret = btrfs_commit_transaction(trans,
					       root->fs_info->extent_root);
	}
	if (ret)
		goto fail;

	ret = pending_snapshot->error;
	if (ret)
		goto fail;

	ret = btrfs_orphan_cleanup(pending_snapshot->snap);
	if (ret)
		goto fail;

	inode = btrfs_lookup_dentry(d_inode(dentry->d_parent), dentry);
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		goto fail;
	}

	d_instantiate(dentry, inode);
	ret = 0;
fail:
	btrfs_subvolume_release_metadata(BTRFS_I(dir)->root,
					 &pending_snapshot->block_rsv,
					 pending_snapshot->qgroup_reserved);
dec_and_free:
	if (atomic_dec_and_test(&root->will_be_snapshoted))
		wake_up_atomic_t(&root->will_be_snapshoted);
free_pending:
	kfree(pending_snapshot->root_item);
	btrfs_free_path(pending_snapshot->path);
	kfree(pending_snapshot);

	return ret;
}

/* copy of may_delete in fs/namei.c
 * Check whether we can remove a link victim from directory dir, check
 * whether the type of victim is right.
 * 1. We can't do it if dir is read-only (done in permission())
 * 2. We should have write and exec permissions on dir
 * 3. We can't remove anything from append-only dir
 * 4. We can't do anything with immutable dir (done in permission())
 * 5. If the sticky bit on dir is set we should either
 *	a. be owner of dir, or
 *	b. be owner of victim, or
 *	c. have CAP_FOWNER capability
 * 6. If the victim is append-only or immutable we can't do anything with
 *    links pointing to it.
 * 7. If we were asked to remove a directory and victim isn't one - ENOTDIR.
 * 8. If we were asked to remove a non-directory and victim isn't one - EISDIR.
 * 9. We can't remove a root or mountpoint.
 * 10. We don't allow removal of NFS sillyrenamed files; it's handled by
 *     nfs_async_unlink().
 */

static int btrfs_may_delete(struct inode *dir, struct dentry *victim, int isdir)
{
	int error;

	if (d_really_is_negative(victim))
		return -ENOENT;

	BUG_ON(d_inode(victim->d_parent) != dir);
	audit_inode_child(dir, victim, AUDIT_TYPE_CHILD_DELETE);

	error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
	if (error)
		return error;
	if (IS_APPEND(dir))
		return -EPERM;
	if (check_sticky(dir, d_inode(victim)) || IS_APPEND(d_inode(victim)) ||
	    IS_IMMUTABLE(d_inode(victim)) || IS_SWAPFILE(d_inode(victim)))
		return -EPERM;
	if (isdir) {
		if (!d_is_dir(victim))
			return -ENOTDIR;
		if (IS_ROOT(victim))
			return -EBUSY;
	} else if (d_is_dir(victim))
		return -EISDIR;
	if (IS_DEADDIR(dir))
		return -ENOENT;
	if (victim->d_flags & DCACHE_NFSFS_RENAMED)
		return -EBUSY;
	return 0;
}

/* copy of may_create in fs/namei.c */
static inline int btrfs_may_create(struct inode *dir, struct dentry *child)
{
	if (d_really_is_positive(child))
		return -EEXIST;
	if (IS_DEADDIR(dir))
		return -ENOENT;
	return inode_permission(dir, MAY_WRITE | MAY_EXEC);
}

/*
 * Create a new subvolume below @parent. This is largely modeled after
 * sys_mkdirat and vfs_mkdir, but we only do a single component lookup
 * inside this filesystem so it's quite a bit simpler.
 */
static noinline int btrfs_mksubvol(struct path *parent,
				   char *name, int namelen,
				   struct btrfs_root *snap_src,
				   u64 *async_transid, bool readonly,
				   struct btrfs_qgroup_inherit *inherit)
{
	struct inode *dir = d_inode(parent->dentry);
	struct dentry *dentry;
	int error;

	error = mutex_lock_killable_nested(&dir->i_mutex, I_MUTEX_PARENT);
	if (error == -EINTR)
		return error;

	dentry = lookup_one_len(name, parent->dentry, namelen);
	error = PTR_ERR(dentry);
	if (IS_ERR(dentry))
		goto out_unlock;

	error = btrfs_may_create(dir, dentry);
	if (error)
		goto out_dput;

	/*
	 * even if this name doesn't exist, we may get hash collisions.
	 * check for them now when we can safely fail
	 */
	error = btrfs_check_dir_item_collision(BTRFS_I(dir)->root,
					       dir->i_ino, name,
					       namelen);
	if (error)
		goto out_dput;

	down_read(&BTRFS_I(dir)->root->fs_info->subvol_sem);

	if (btrfs_root_refs(&BTRFS_I(dir)->root->root_item) == 0)
		goto out_up_read;

	if (snap_src) {
		error = create_snapshot(snap_src, dir, dentry, name, namelen,
					async_transid, readonly, inherit);
	} else {
		error = create_subvol(dir, dentry, name, namelen,
				      async_transid, inherit);
	}
	if (!error)
		fsnotify_mkdir(dir, dentry);
out_up_read:
	up_read(&BTRFS_I(dir)->root->fs_info->subvol_sem);
out_dput:
	dput(dentry);
out_unlock:
	inode_unlock(dir);
	return error;
}

/*
 * When we're defragging a range, we don't want to kick it off again
 * if it is really just waiting for delalloc to send it down.
 * If we find a nice big extent or delalloc range for the bytes in the
 * file you want to defrag, we return 0 to let you know to skip this
 * part of the file
 */
static int check_defrag_in_cache(struct inode *inode, u64 offset, u32 thresh)
{
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_map *em = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	u64 end;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, offset, PAGE_SIZE);
	read_unlock(&em_tree->lock);

	if (em) {
		end = extent_map_end(em);
		free_extent_map(em);
		if (end - offset > thresh)
			return 0;
	}
	/* if we already have a nice delalloc here, just stop */
	thresh /= 2;
	end = count_range_bits(io_tree, &offset, offset + thresh,
			       thresh, EXTENT_DELALLOC, 1);
	if (end >= thresh)
		return 0;
	return 1;
}

/*
 * helper function to walk through a file and find extents
 * newer than a specific transid, and smaller than thresh.
 *
 * This is used by the defragging code to find new and small
 * extents
 */
static int find_new_extents(struct btrfs_root *root,
			    struct inode *inode, u64 newer_than,
			    u64 *off, u32 thresh)
{
	struct btrfs_path *path;
	struct btrfs_key min_key;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *extent;
	int type;
	int ret;
	u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	min_key.objectid = ino;
	min_key.type = BTRFS_EXTENT_DATA_KEY;
	min_key.offset = *off;

	while (1) {
		ret = btrfs_search_forward(root, &min_key, path, newer_than);
		if (ret != 0)
			goto none;
process_slot:
		if (min_key.objectid != ino)
			goto none;
		if (min_key.type != BTRFS_EXTENT_DATA_KEY)
			goto none;

		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_file_extent_item);

		type = btrfs_file_extent_type(leaf, extent);
		if (type == BTRFS_FILE_EXTENT_REG &&
		    btrfs_file_extent_num_bytes(leaf, extent) < thresh &&
		    check_defrag_in_cache(inode, min_key.offset, thresh)) {
			*off = min_key.offset;
			btrfs_free_path(path);
			return 0;
		}

		path->slots[0]++;
		if (path->slots[0] < btrfs_header_nritems(leaf)) {
			btrfs_item_key_to_cpu(leaf, &min_key, path->slots[0]);
			goto process_slot;
		}

		if (min_key.offset == (u64)-1)
			goto none;

		min_key.offset++;
		btrfs_release_path(path);
	}
none:
	btrfs_free_path(path);
	return -ENOENT;
}

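/*
 * Look up the extent map covering @start, reading the metadata in from
 * disk under the extent lock if it is not already cached.
 */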
static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start)
{
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_map *em;
	u64 len = PAGE_SIZE;

	/*
	 * hopefully we have this extent in the tree already, try without
	 * the full extent lock
	 */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	read_unlock(&em_tree->lock);

	if (!em) {
		struct extent_state *cached = NULL;
		u64 end = start + len - 1;

		/* get the big lock and read metadata off disk */
		lock_extent_bits(io_tree, start, end, &cached);
		em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
		unlock_extent_cached(io_tree, start, end, &cached, GFP_NOFS);

		if (IS_ERR(em))
			return NULL;
	}

	return em;
}

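/*
 * Check whether the extent following @em is a candidate for merging during
 * defrag: returns false if @em is the last extent in the file, if the next
 * extent is a hole or inline extent, or if both extents are already large
 * and physically contiguous.
 */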
static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em)
{
	struct extent_map *next;
	bool ret = true;

	/* this is the last extent */
	if (em->start + em->len >= i_size_read(inode))
		return false;

	next = defrag_lookup_extent(inode, em->start + em->len);
	if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE)
		ret = false;
	else if ((em->block_start + em->block_len == next->block_start) &&
		 (em->block_len > SZ_128K && next->block_len > SZ_128K))
		ret = false;

	free_extent_map(next);
	return ret;
}

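/*
 * Decide whether the extent at @start should be defragged. Returns 1 to
 * defrag it and 0 to skip it; *skip and *defrag_end are updated so the
 * caller knows how far to jump ahead or how far to keep defragging.
 */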
static int should_defrag_range(struct inode *inode, u64 start, u32 thresh,
			       u64 *last_len, u64 *skip, u64 *defrag_end,
			       int compress)
{
	struct extent_map *em;
	int ret = 1;
	bool next_mergeable = true;
	bool prev_mergeable = true;

	/*
	 * make sure that once we start defragging an extent, we keep on
	 * defragging it
	 */
	if (start < *defrag_end)
		return 1;

	*skip = 0;

	em = defrag_lookup_extent(inode, start);
	if (!em)
		return 0;

	/* this will cover holes, and inline extents */
	if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
		ret = 0;
		goto out;
	}

	if (!*defrag_end)
		prev_mergeable = false;

	next_mergeable = defrag_check_next_extent(inode, em);
	/*
	 * we hit a real extent, if it is big or the next extent is not a
	 * real extent, don't bother defragging it
	 */
	if (!compress && (*last_len == 0 || *last_len >= thresh) &&
	    (em->len >= thresh || (!next_mergeable && !prev_mergeable)))
		ret = 0;
out:
	/*
	 * last_len ends up being a counter of how many bytes we've defragged.
	 * every time we choose not to defrag an extent, we reset *last_len
	 * so that the next tiny extent will force a defrag.
	 *
	 * The end result of this is that tiny extents before a single big
	 * extent will force at least part of that big extent to be defragged.
	 */
	if (ret) {
		*defrag_end = extent_map_end(em);
	} else {
		*last_len = 0;
		*skip = extent_map_end(em);
		*defrag_end = 0;
	}

	free_extent_map(em);
	return ret;
}

/*
 * it doesn't do much good to defrag one or two pages
 * at a time. This pulls in a nice chunk of pages
 * to COW and defrag.
 *
 * It also makes sure the delalloc code has enough
 * dirty data to avoid making new small extents as part
 * of the defrag
 *
 * It's a good idea to start RA on this range
 * before calling this.
 */
static int cluster_pages_for_defrag(struct inode *inode,
				    struct page **pages,
				    unsigned long start_index,
				    unsigned long num_pages)
{
	unsigned long file_end;
	u64 isize = i_size_read(inode);
	u64 page_start;
	u64 page_end;
	u64 page_cnt;
	int ret;
	int i;
	int i_done;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	struct extent_io_tree *tree;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);

	file_end = (isize - 1) >> PAGE_SHIFT;
	if (!isize || start_index > file_end)
		return 0;

	page_cnt = min_t(u64, (u64)num_pages, (u64)file_end - start_index + 1);

	ret = btrfs_delalloc_reserve_space(inode,
			start_index << PAGE_SHIFT,
			page_cnt << PAGE_SHIFT);
	if (ret)
		return ret;
	i_done = 0;
	tree = &BTRFS_I(inode)->io_tree;

	/* step one, lock all the pages */
	for (i = 0; i < page_cnt; i++) {
		struct page *page;
again:
		page = find_or_create_page(inode->i_mapping,
					   start_index + i, mask);
		if (!page)
			break;

		page_start = page_offset(page);
		page_end = page_start + PAGE_SIZE - 1;
		while (1) {
			lock_extent_bits(tree, page_start, page_end,
					 &cached_state);
			ordered = btrfs_lookup_ordered_extent(inode,
							      page_start);
			unlock_extent_cached(tree, page_start, page_end,
					     &cached_state, GFP_NOFS);
			if (!ordered)
				break;

			unlock_page(page);
			btrfs_start_ordered_extent(inode, ordered, 1);
			btrfs_put_ordered_extent(ordered);
			lock_page(page);
			/*
			 * we unlocked the page above, so we need to check
			 * whether it was released or not.
			 */
			if (page->mapping != inode->i_mapping) {
				unlock_page(page);
				put_page(page);
				goto again;
			}
		}

		if (!PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			lock_page(page);
			if (!PageUptodate(page)) {
				unlock_page(page);
				put_page(page);
				ret = -EIO;
				break;
			}
		}

		if (page->mapping != inode->i_mapping) {
			unlock_page(page);
			put_page(page);
			goto again;
		}

		pages[i] = page;
		i_done++;
	}
	if (!i_done || ret)
		goto out;

	if (!(inode->i_sb->s_flags & MS_ACTIVE))
		goto out;

	/*
	 * so now we have a nice long stream of locked
	 * and up to date pages, let's wait on them
	 */
	for (i = 0; i < i_done; i++)
		wait_on_page_writeback(pages[i]);

	page_start = page_offset(pages[0]);
	page_end = page_offset(pages[i_done - 1]) + PAGE_SIZE;

	lock_extent_bits(&BTRFS_I(inode)->io_tree,
			 page_start, page_end - 1, &cached_state);
	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start,
			  page_end - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
			  EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 0, 0,
			  &cached_state, GFP_NOFS);

	if (i_done != page_cnt) {
		spin_lock(&BTRFS_I(inode)->lock);
		BTRFS_I(inode)->outstanding_extents++;
		spin_unlock(&BTRFS_I(inode)->lock);
		btrfs_delalloc_release_space(inode,
				start_index << PAGE_SHIFT,
				(page_cnt - i_done) << PAGE_SHIFT);
	}


	set_extent_defrag(&BTRFS_I(inode)->io_tree, page_start, page_end - 1,
			  &cached_state, GFP_NOFS);

	unlock_extent_cached(&BTRFS_I(inode)->io_tree,
			     page_start, page_end - 1, &cached_state,
			     GFP_NOFS);

	for (i = 0; i < i_done; i++) {
		clear_page_dirty_for_io(pages[i]);
		ClearPageChecked(pages[i]);
		set_page_extent_mapped(pages[i]);
		set_page_dirty(pages[i]);
		unlock_page(pages[i]);
		put_page(pages[i]);
	}
	return i_done;
out:
	for (i = 0; i < i_done; i++) {
		unlock_page(pages[i]);
		put_page(pages[i]);
	}
	btrfs_delalloc_release_space(inode,
			start_index << PAGE_SHIFT,
			page_cnt << PAGE_SHIFT);
	return ret;

}

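/*
 * Defragment the given file, or the portion of it described by @range:
 * cluster dirty pages together and rewrite them so the resulting extents
 * are larger and closer together, optionally compressing them.
 */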
int btrfs_defrag_file(struct inode *inode, struct file *file,
		      struct btrfs_ioctl_defrag_range_args *range,
		      u64 newer_than, unsigned long max_to_defrag)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct file_ra_state *ra = NULL;
	unsigned long last_index;
	u64 isize = i_size_read(inode);
	u64 last_len = 0;
	u64 skip = 0;
	u64 defrag_end = 0;
	u64 newer_off = range->start;
	unsigned long i;
	unsigned long ra_index = 0;
	int ret;
	int defrag_count = 0;
	int compress_type = BTRFS_COMPRESS_ZLIB;
	u32 extent_thresh = range->extent_thresh;
	unsigned long max_cluster = SZ_256K >> PAGE_SHIFT;
	unsigned long cluster = max_cluster;
	u64 new_align = ~((u64)SZ_128K - 1);
	struct page **pages = NULL;

	if (isize == 0)
		return 0;

	if (range->start >= isize)
		return -EINVAL;

	if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS) {
		if (range->compress_type > BTRFS_COMPRESS_TYPES)
			return -EINVAL;
		if (range->compress_type)
			compress_type = range->compress_type;
	}

	if (extent_thresh == 0)
		extent_thresh = SZ_256K;

	/*
	 * if we were not given a file, allocate a readahead
	 * context
	 */
	if (!file) {
		ra = kzalloc(sizeof(*ra), GFP_NOFS);
		if (!ra)
			return -ENOMEM;
		file_ra_state_init(ra, inode->i_mapping);
	} else {
		ra = &file->f_ra;
	}

	pages = kmalloc_array(max_cluster, sizeof(struct page *),
			GFP_NOFS);
	if (!pages) {
		ret = -ENOMEM;
		goto out_ra;
	}

	/* find the last page to defrag */
	if (range->start + range->len > range->start) {
		last_index = min_t(u64, isize - 1,
			 range->start + range->len - 1) >> PAGE_SHIFT;
	} else {
		last_index = (isize - 1) >> PAGE_SHIFT;
	}

	if (newer_than) {
		ret = find_new_extents(root, inode, newer_than,
				       &newer_off, SZ_64K);
		if (!ret) {
			range->start = newer_off;
			/*
			 * we always align our defrag to help keep
			 * the extents in the file evenly spaced
			 */
			i = (newer_off & new_align) >> PAGE_SHIFT;
		} else
			goto out_ra;
	} else {
		i = range->start >> PAGE_SHIFT;
	}
	if (!max_to_defrag)
chandan070034b2015-06-09 10:35:11 +05301341 max_to_defrag = last_index - i + 1;
Chris Mason4cb53002011-05-24 15:35:30 -04001342
Li Zefan2a0f7f52011-10-10 15:43:34 -04001343 /*
 1344	 * make writeback start from i, so the defrag range can be
1345 * written sequentially.
1346 */
1347 if (i < inode->i_mapping->writeback_index)
1348 inode->i_mapping->writeback_index = i;
1349
Chris Masonf7f43cc2011-10-11 11:41:40 -04001350 while (i <= last_index && defrag_count < max_to_defrag &&
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001351 (i < DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE))) {
Chris Mason4cb53002011-05-24 15:35:30 -04001352 /*
1353 * make sure we stop running if someone unmounts
1354 * the FS
1355 */
1356 if (!(inode->i_sb->s_flags & MS_ACTIVE))
1357 break;
1358
David Sterba210549e2013-02-09 23:38:06 +00001359 if (btrfs_defrag_cancelled(root->fs_info)) {
David Sterbaf14d1042015-10-08 11:37:06 +02001360 btrfs_debug(root->fs_info, "defrag_file cancelled");
David Sterba210549e2013-02-09 23:38:06 +00001361 ret = -EAGAIN;
1362 break;
1363 }
1364
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001365 if (!should_defrag_range(inode, (u64)i << PAGE_SHIFT,
Li Zefan6c282eb2012-06-11 16:03:35 +08001366 extent_thresh, &last_len, &skip,
Andrew Mahonea43a2112012-06-19 21:08:32 -04001367 &defrag_end, range->flags &
1368 BTRFS_DEFRAG_RANGE_COMPRESS)) {
Chris Mason940100a2010-03-10 10:52:59 -05001369 unsigned long next;
1370 /*
 1371			 * the should_defrag function tells us how much to skip;
1372 * bump our counter by the suggested amount
1373 */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001374 next = DIV_ROUND_UP(skip, PAGE_SIZE);
Chris Mason940100a2010-03-10 10:52:59 -05001375 i = max(i + 1, next);
1376 continue;
1377 }
Li Zefan008873e2011-09-02 15:57:07 +08001378
1379 if (!newer_than) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001380 cluster = (PAGE_ALIGN(defrag_end) >>
1381 PAGE_SHIFT) - i;
Li Zefan008873e2011-09-02 15:57:07 +08001382 cluster = min(cluster, max_cluster);
1383 } else {
1384 cluster = max_cluster;
1385 }
1386
Li Zefan008873e2011-09-02 15:57:07 +08001387 if (i + cluster > ra_index) {
1388 ra_index = max(i, ra_index);
1389 btrfs_force_ra(inode->i_mapping, ra, file, ra_index,
1390 cluster);
chandane4826a52015-06-09 17:38:32 +05301391 ra_index += cluster;
Li Zefan008873e2011-09-02 15:57:07 +08001392 }
Chris Mason940100a2010-03-10 10:52:59 -05001393
Al Viro59551022016-01-22 15:40:57 -05001394 inode_lock(inode);
Filipe David Borba Manana633085c2013-08-16 15:23:33 +01001395 if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)
1396 BTRFS_I(inode)->force_compress = compress_type;
Li Zefan008873e2011-09-02 15:57:07 +08001397 ret = cluster_pages_for_defrag(inode, pages, i, cluster);
Liu Boecb8bea2012-03-29 09:57:44 -04001398 if (ret < 0) {
Al Viro59551022016-01-22 15:40:57 -05001399 inode_unlock(inode);
Chris Mason4cb53002011-05-24 15:35:30 -04001400 goto out_ra;
Liu Boecb8bea2012-03-29 09:57:44 -04001401 }
Chris Mason940100a2010-03-10 10:52:59 -05001402
Chris Mason4cb53002011-05-24 15:35:30 -04001403 defrag_count += ret;
Namjae Jeond0e1d662012-12-11 16:00:21 -08001404 balance_dirty_pages_ratelimited(inode->i_mapping);
Al Viro59551022016-01-22 15:40:57 -05001405 inode_unlock(inode);
Chris Mason4cb53002011-05-24 15:35:30 -04001406
1407 if (newer_than) {
1408 if (newer_off == (u64)-1)
1409 break;
1410
Liu Boe1f041e2012-03-29 09:57:45 -04001411 if (ret > 0)
1412 i += ret;
1413
Chris Mason4cb53002011-05-24 15:35:30 -04001414 newer_off = max(newer_off + 1,
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001415 (u64)i << PAGE_SHIFT);
Chris Mason4cb53002011-05-24 15:35:30 -04001416
Byongho Leeee221842015-12-15 01:42:10 +09001417 ret = find_new_extents(root, inode, newer_than,
1418 &newer_off, SZ_64K);
Chris Mason4cb53002011-05-24 15:35:30 -04001419 if (!ret) {
1420 range->start = newer_off;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001421 i = (newer_off & new_align) >> PAGE_SHIFT;
Chris Mason4cb53002011-05-24 15:35:30 -04001422 } else {
1423 break;
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04001424 }
Chris Mason4cb53002011-05-24 15:35:30 -04001425 } else {
Li Zefan008873e2011-09-02 15:57:07 +08001426 if (ret > 0) {
Li Zefancbcc8322011-09-02 15:56:25 +08001427 i += ret;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001428 last_len += ret << PAGE_SHIFT;
Li Zefan008873e2011-09-02 15:57:07 +08001429 } else {
Li Zefancbcc8322011-09-02 15:56:25 +08001430 i++;
Li Zefan008873e2011-09-02 15:57:07 +08001431 last_len = 0;
1432 }
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04001433 }
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04001434 }
1435
Filipe Mananadec8ef92014-03-01 10:55:54 +00001436 if ((range->flags & BTRFS_DEFRAG_RANGE_START_IO)) {
Chris Mason1e701a32010-03-11 09:42:04 -05001437 filemap_flush(inode->i_mapping);
Filipe Mananadec8ef92014-03-01 10:55:54 +00001438 if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
1439 &BTRFS_I(inode)->runtime_flags))
1440 filemap_flush(inode->i_mapping);
1441 }
Chris Mason1e701a32010-03-11 09:42:04 -05001442
1443 if ((range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)) {
1444 /* the filemap_flush will queue IO into the worker threads, but
1445 * we have to make sure the IO is actually started and that
1446 * ordered extents get created before we return
1447 */
1448 atomic_inc(&root->fs_info->async_submit_draining);
1449 while (atomic_read(&root->fs_info->nr_async_submits) ||
1450 atomic_read(&root->fs_info->async_delalloc_pages)) {
1451 wait_event(root->fs_info->async_submit_wait,
1452 (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
1453 atomic_read(&root->fs_info->async_delalloc_pages) == 0));
1454 }
1455 atomic_dec(&root->fs_info->async_submit_draining);
Chris Mason1e701a32010-03-11 09:42:04 -05001456 }
1457
Li Zefan1a419d82010-10-25 15:12:50 +08001458 if (range->compress_type == BTRFS_COMPRESS_LZO) {
Mitch Harder2b0ce2c2012-07-24 11:58:43 -06001459 btrfs_set_fs_incompat(root->fs_info, COMPRESS_LZO);
Li Zefan1a419d82010-10-25 15:12:50 +08001460 }
1461
Diego Calleja60ccf822011-09-01 16:33:57 +02001462 ret = defrag_count;
Chris Mason940100a2010-03-10 10:52:59 -05001463
Chris Mason4cb53002011-05-24 15:35:30 -04001464out_ra:
Filipe David Borba Manana633085c2013-08-16 15:23:33 +01001465 if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS) {
Al Viro59551022016-01-22 15:40:57 -05001466 inode_lock(inode);
Filipe David Borba Manana633085c2013-08-16 15:23:33 +01001467 BTRFS_I(inode)->force_compress = BTRFS_COMPRESS_NONE;
Al Viro59551022016-01-22 15:40:57 -05001468 inode_unlock(inode);
Filipe David Borba Manana633085c2013-08-16 15:23:33 +01001469 }
Chris Mason4cb53002011-05-24 15:35:30 -04001470 if (!file)
1471 kfree(ra);
1472 kfree(pages);
Chris Mason940100a2010-03-10 10:52:59 -05001473 return ret;
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04001474}
1475
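/*
 * The handler below parses vol_args->name as "[devid:]size", where size
 * is either "max" or a byte count (with the usual memparse suffixes such
 * as k/m/g), optionally prefixed with '+' or '-' for a relative change.
 *
 * Illustrative userspace sketch only, not part of this file; it assumes
 * <sys/ioctl.h> plus <linux/btrfs.h>, and that mnt_fd is an open fd
 * somewhere inside the mounted filesystem:
 *
 *	struct btrfs_ioctl_vol_args args = { .fd = 0 };
 *
 *	strncpy(args.name, "1:max", BTRFS_PATH_NAME_MAX);
 *	if (ioctl(mnt_fd, BTRFS_IOC_RESIZE, &args) < 0)
 *		perror("BTRFS_IOC_RESIZE");
 */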
Miao Xie198605a2012-11-26 08:43:45 +00001476static noinline int btrfs_ioctl_resize(struct file *file,
Yan, Zheng76dda932009-09-21 16:00:26 -04001477 void __user *arg)
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04001478{
1479 u64 new_size;
1480 u64 old_size;
1481 u64 devid = 1;
Al Viro496ad9a2013-01-23 17:07:38 -05001482 struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04001483 struct btrfs_ioctl_vol_args *vol_args;
1484 struct btrfs_trans_handle *trans;
1485 struct btrfs_device *device = NULL;
1486 char *sizestr;
Gui Hecheng9a40f122014-03-31 18:03:25 +08001487 char *retptr;
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04001488 char *devstr = NULL;
1489 int ret = 0;
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04001490 int mod = 0;
1491
Chris Masone441d542009-01-05 16:57:23 -05001492 if (!capable(CAP_SYS_ADMIN))
1493 return -EPERM;
1494
Miao Xie198605a2012-11-26 08:43:45 +00001495 ret = mnt_want_write_file(file);
1496 if (ret)
1497 return ret;
1498
Stefan Behrens5ac00ad2012-11-05 17:54:08 +01001499 if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running,
1500 1)) {
Miao Xie97547672012-12-21 10:38:50 +00001501 mnt_drop_write_file(file);
Anand Jaine57138b2013-08-21 11:44:48 +08001502 return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
Ilya Dryomovc9e9f972012-01-16 22:04:47 +02001503 }
1504
Stefan Behrens5ac00ad2012-11-05 17:54:08 +01001505 mutex_lock(&root->fs_info->volume_mutex);
Li Zefandae7b662009-04-08 15:06:54 +08001506 vol_args = memdup_user(arg, sizeof(*vol_args));
Ilya Dryomovc9e9f972012-01-16 22:04:47 +02001507 if (IS_ERR(vol_args)) {
1508 ret = PTR_ERR(vol_args);
1509 goto out;
1510 }
Mark Fasheh5516e592008-07-24 12:20:14 -04001511
1512 vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04001513
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04001514 sizestr = vol_args->name;
1515 devstr = strchr(sizestr, ':');
1516 if (devstr) {
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04001517 sizestr = devstr + 1;
1518 *devstr = '\0';
1519 devstr = vol_args->name;
ZhangZhen58dfae62014-05-13 16:36:08 +08001520 ret = kstrtoull(devstr, 10, &devid);
1521 if (ret)
1522 goto out_free;
Miao Xiedfd79822012-12-21 09:21:30 +00001523 if (!devid) {
1524 ret = -EINVAL;
1525 goto out_free;
1526 }
Frank Holtonefe120a2013-12-20 11:37:06 -05001527 btrfs_info(root->fs_info, "resizing devid %llu", devid);
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04001528 }
Miao Xiedba60f32012-12-21 09:19:51 +00001529
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01001530 device = btrfs_find_device(root->fs_info, devid, NULL, NULL);
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04001531 if (!device) {
Frank Holtonefe120a2013-12-20 11:37:06 -05001532 btrfs_info(root->fs_info, "resizer unable to find device %llu",
Geert Uytterhoevenc1c9ff72013-08-20 13:20:07 +02001533 devid);
Miao Xiedfd79822012-12-21 09:21:30 +00001534 ret = -ENODEV;
Ilya Dryomovc9e9f972012-01-16 22:04:47 +02001535 goto out_free;
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04001536 }
Miao Xiedba60f32012-12-21 09:19:51 +00001537
1538 if (!device->writeable) {
Frank Holtonefe120a2013-12-20 11:37:06 -05001539 btrfs_info(root->fs_info,
1540 "resizer unable to apply on readonly device %llu",
Geert Uytterhoevenc1c9ff72013-08-20 13:20:07 +02001541 devid);
Miao Xiedfd79822012-12-21 09:21:30 +00001542 ret = -EPERM;
Liu Bo4e42ae12012-06-14 02:23:19 -06001543 goto out_free;
1544 }
1545
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04001546 if (!strcmp(sizestr, "max"))
1547 new_size = device->bdev->bd_inode->i_size;
1548 else {
1549 if (sizestr[0] == '-') {
1550 mod = -1;
1551 sizestr++;
1552 } else if (sizestr[0] == '+') {
1553 mod = 1;
1554 sizestr++;
1555 }
Gui Hecheng9a40f122014-03-31 18:03:25 +08001556 new_size = memparse(sizestr, &retptr);
1557 if (*retptr != '\0' || new_size == 0) {
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04001558 ret = -EINVAL;
Ilya Dryomovc9e9f972012-01-16 22:04:47 +02001559 goto out_free;
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04001560 }
1561 }
1562
Stefan Behrens63a212a2012-11-05 18:29:28 +01001563 if (device->is_tgtdev_for_dev_replace) {
Miao Xiedfd79822012-12-21 09:21:30 +00001564 ret = -EPERM;
Stefan Behrens63a212a2012-11-05 18:29:28 +01001565 goto out_free;
1566 }
1567
Miao Xie7cc8e582014-09-03 21:35:38 +08001568 old_size = btrfs_device_get_total_bytes(device);
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04001569
1570 if (mod < 0) {
1571 if (new_size > old_size) {
1572 ret = -EINVAL;
Ilya Dryomovc9e9f972012-01-16 22:04:47 +02001573 goto out_free;
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04001574 }
1575 new_size = old_size - new_size;
1576 } else if (mod > 0) {
Wenliang Faneb8052e2013-12-20 15:28:56 +08001577 if (new_size > ULLONG_MAX - old_size) {
Gui Hecheng902c68a2014-05-29 09:19:58 +08001578 ret = -ERANGE;
Wenliang Faneb8052e2013-12-20 15:28:56 +08001579 goto out_free;
1580 }
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04001581 new_size = old_size + new_size;
1582 }
1583
Byongho Leeee221842015-12-15 01:42:10 +09001584 if (new_size < SZ_256M) {
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04001585 ret = -EINVAL;
Ilya Dryomovc9e9f972012-01-16 22:04:47 +02001586 goto out_free;
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04001587 }
1588 if (new_size > device->bdev->bd_inode->i_size) {
1589 ret = -EFBIG;
Ilya Dryomovc9e9f972012-01-16 22:04:47 +02001590 goto out_free;
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04001591 }
1592
David Sterbab8b93ad2015-01-16 17:26:13 +01001593 new_size = div_u64(new_size, root->sectorsize);
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04001594 new_size *= root->sectorsize;
1595
David Sterbaecaeb142015-10-08 09:01:03 +02001596 btrfs_info_in_rcu(root->fs_info, "new size for %s is %llu",
Geert Uytterhoevenc1c9ff72013-08-20 13:20:07 +02001597 rcu_str_deref(device->name), new_size);
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04001598
1599 if (new_size > old_size) {
Yan, Zhenga22285a2010-05-16 10:48:46 -04001600 trans = btrfs_start_transaction(root, 0);
Tsutomu Itoh98d5dc12011-01-20 06:19:37 +00001601 if (IS_ERR(trans)) {
1602 ret = PTR_ERR(trans);
Ilya Dryomovc9e9f972012-01-16 22:04:47 +02001603 goto out_free;
Tsutomu Itoh98d5dc12011-01-20 06:19:37 +00001604 }
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04001605 ret = btrfs_grow_device(trans, device, new_size);
1606 btrfs_commit_transaction(trans, root);
Mike Fleetwoodece7d202011-11-18 18:55:01 +00001607 } else if (new_size < old_size) {
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04001608 ret = btrfs_shrink_device(device, new_size);
jeff.liu0253f402012-10-27 12:06:39 +00001609	} /* equal, nothing needs to be done */
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04001610
Ilya Dryomovc9e9f972012-01-16 22:04:47 +02001611out_free:
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04001612 kfree(vol_args);
Ilya Dryomovc9e9f972012-01-16 22:04:47 +02001613out:
1614 mutex_unlock(&root->fs_info->volume_mutex);
Stefan Behrens5ac00ad2012-11-05 17:54:08 +01001615 atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0);
Ilya Dryomov18f39c42013-01-20 15:57:57 +02001616 mnt_drop_write_file(file);
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04001617 return ret;
1618}
1619
Sage Weil72fd0322010-10-29 15:41:32 -04001620static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
Arne Jansen6f72c7e2011-09-14 15:58:21 +02001621 char *name, unsigned long fd, int subvol,
1622 u64 *transid, bool readonly,
Miao Xie8696c532013-02-07 06:02:44 +00001623 struct btrfs_qgroup_inherit *inherit)
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04001624{
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04001625 int namelen;
Chris Mason3de45862008-11-17 21:02:50 -05001626 int ret = 0;
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04001627
Liu Boa874a632012-06-29 03:58:46 -06001628 ret = mnt_want_write_file(file);
1629 if (ret)
1630 goto out;
1631
Sage Weil72fd0322010-10-29 15:41:32 -04001632 namelen = strlen(name);
1633 if (strchr(name, '/')) {
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04001634 ret = -EINVAL;
Liu Boa874a632012-06-29 03:58:46 -06001635 goto out_drop_write;
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04001636 }
1637
Chris Mason16780ca2012-02-20 22:14:55 -05001638 if (name[0] == '.' &&
1639 (namelen == 1 || (name[1] == '.' && namelen == 2))) {
1640 ret = -EEXIST;
Liu Boa874a632012-06-29 03:58:46 -06001641 goto out_drop_write;
Chris Mason16780ca2012-02-20 22:14:55 -05001642 }
1643
Chris Mason3de45862008-11-17 21:02:50 -05001644 if (subvol) {
Sage Weil72fd0322010-10-29 15:41:32 -04001645 ret = btrfs_mksubvol(&file->f_path, name, namelen,
Arne Jansen6f72c7e2011-09-14 15:58:21 +02001646 NULL, transid, readonly, inherit);
Christoph Hellwigcb8e7092008-10-09 13:39:39 -04001647 } else {
Al Viro2903ff02012-08-28 12:52:22 -04001648 struct fd src = fdget(fd);
Chris Mason3de45862008-11-17 21:02:50 -05001649 struct inode *src_inode;
Al Viro2903ff02012-08-28 12:52:22 -04001650 if (!src.file) {
Chris Mason3de45862008-11-17 21:02:50 -05001651 ret = -EINVAL;
Liu Boa874a632012-06-29 03:58:46 -06001652 goto out_drop_write;
Chris Mason3de45862008-11-17 21:02:50 -05001653 }
1654
Al Viro496ad9a2013-01-23 17:07:38 -05001655 src_inode = file_inode(src.file);
1656 if (src_inode->i_sb != file_inode(file)->i_sb) {
Josef Bacikc79b4712016-03-25 10:02:41 -04001657 btrfs_info(BTRFS_I(file_inode(file))->root->fs_info,
Frank Holtonefe120a2013-12-20 11:37:06 -05001658 "Snapshot src from another FS");
Kusanagi Kouichi23ad5b12014-01-30 16:32:02 +09001659 ret = -EXDEV;
David Sterbad0242062014-01-15 18:15:52 +01001660 } else if (!inode_owner_or_capable(src_inode)) {
1661 /*
1662 * Subvolume creation is not restricted, but snapshots
 1663			 * are limited to the caller's own subvolumes only
1664 */
1665 ret = -EPERM;
Al Viroecd18812012-08-26 21:20:24 -04001666 } else {
1667 ret = btrfs_mksubvol(&file->f_path, name, namelen,
1668 BTRFS_I(src_inode)->root,
1669 transid, readonly, inherit);
Chris Mason3de45862008-11-17 21:02:50 -05001670 }
Al Viro2903ff02012-08-28 12:52:22 -04001671 fdput(src);
Christoph Hellwigcb8e7092008-10-09 13:39:39 -04001672 }
Liu Boa874a632012-06-29 03:58:46 -06001673out_drop_write:
1674 mnt_drop_write_file(file);
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04001675out:
Sage Weil72fd0322010-10-29 15:41:32 -04001676 return ret;
1677}
1678
1679static noinline int btrfs_ioctl_snap_create(struct file *file,
Li Zefanfa0d2b92010-12-20 15:53:28 +08001680 void __user *arg, int subvol)
Sage Weil72fd0322010-10-29 15:41:32 -04001681{
Li Zefanfa0d2b92010-12-20 15:53:28 +08001682 struct btrfs_ioctl_vol_args *vol_args;
Sage Weil72fd0322010-10-29 15:41:32 -04001683 int ret;
1684
Li Zefanfa0d2b92010-12-20 15:53:28 +08001685 vol_args = memdup_user(arg, sizeof(*vol_args));
1686 if (IS_ERR(vol_args))
1687 return PTR_ERR(vol_args);
1688 vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
Sage Weil72fd0322010-10-29 15:41:32 -04001689
Li Zefanfa0d2b92010-12-20 15:53:28 +08001690 ret = btrfs_ioctl_snap_create_transid(file, vol_args->name,
Li Zefanb83cc962010-12-20 16:04:08 +08001691 vol_args->fd, subvol,
Arne Jansen6f72c7e2011-09-14 15:58:21 +02001692 NULL, false, NULL);
Li Zefanfdfb1e42010-12-10 06:41:56 +00001693
Li Zefanfa0d2b92010-12-20 15:53:28 +08001694 kfree(vol_args);
1695 return ret;
1696}
Li Zefanfdfb1e42010-12-10 06:41:56 +00001697
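/*
 * Illustrative userspace sketch only, not part of this file: creating a
 * read-only snapshot through the v2 interface handled below.  It assumes
 * <linux/btrfs.h>; src_fd (an fd on the source subvolume) and dst_dir_fd
 * (the directory that will hold the snapshot) stand in for descriptors
 * the caller already has open:
 *
 *	struct btrfs_ioctl_vol_args_v2 args;
 *
 *	memset(&args, 0, sizeof(args));
 *	args.fd = src_fd;
 *	args.flags = BTRFS_SUBVOL_RDONLY;
 *	strncpy(args.name, "snap1", BTRFS_SUBVOL_NAME_MAX);
 *	if (ioctl(dst_dir_fd, BTRFS_IOC_SNAP_CREATE_V2, &args) < 0)
 *		perror("BTRFS_IOC_SNAP_CREATE_V2");
 */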
Li Zefanfa0d2b92010-12-20 15:53:28 +08001698static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
1699 void __user *arg, int subvol)
1700{
1701 struct btrfs_ioctl_vol_args_v2 *vol_args;
1702 int ret;
1703 u64 transid = 0;
1704 u64 *ptr = NULL;
Li Zefanb83cc962010-12-20 16:04:08 +08001705 bool readonly = false;
Arne Jansen6f72c7e2011-09-14 15:58:21 +02001706 struct btrfs_qgroup_inherit *inherit = NULL;
Li Zefanfdfb1e42010-12-10 06:41:56 +00001707
Li Zefanfa0d2b92010-12-20 15:53:28 +08001708 vol_args = memdup_user(arg, sizeof(*vol_args));
1709 if (IS_ERR(vol_args))
1710 return PTR_ERR(vol_args);
1711 vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0';
Sage Weil75eaa0e2010-12-10 00:36:28 +00001712
Li Zefanb83cc962010-12-20 16:04:08 +08001713 if (vol_args->flags &
Arne Jansen6f72c7e2011-09-14 15:58:21 +02001714 ~(BTRFS_SUBVOL_CREATE_ASYNC | BTRFS_SUBVOL_RDONLY |
1715 BTRFS_SUBVOL_QGROUP_INHERIT)) {
Li Zefanb83cc962010-12-20 16:04:08 +08001716 ret = -EOPNOTSUPP;
Dan Carpenterc47ca322014-09-04 14:09:15 +03001717 goto free_args;
Sage Weil72fd0322010-10-29 15:41:32 -04001718 }
Li Zefanfa0d2b92010-12-20 15:53:28 +08001719
1720 if (vol_args->flags & BTRFS_SUBVOL_CREATE_ASYNC)
1721 ptr = &transid;
Li Zefanb83cc962010-12-20 16:04:08 +08001722 if (vol_args->flags & BTRFS_SUBVOL_RDONLY)
1723 readonly = true;
Arne Jansen6f72c7e2011-09-14 15:58:21 +02001724 if (vol_args->flags & BTRFS_SUBVOL_QGROUP_INHERIT) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001725 if (vol_args->size > PAGE_SIZE) {
Arne Jansen6f72c7e2011-09-14 15:58:21 +02001726 ret = -EINVAL;
Dan Carpenterc47ca322014-09-04 14:09:15 +03001727 goto free_args;
Arne Jansen6f72c7e2011-09-14 15:58:21 +02001728 }
1729 inherit = memdup_user(vol_args->qgroup_inherit, vol_args->size);
1730 if (IS_ERR(inherit)) {
1731 ret = PTR_ERR(inherit);
Dan Carpenterc47ca322014-09-04 14:09:15 +03001732 goto free_args;
Arne Jansen6f72c7e2011-09-14 15:58:21 +02001733 }
1734 }
Li Zefanfa0d2b92010-12-20 15:53:28 +08001735
1736 ret = btrfs_ioctl_snap_create_transid(file, vol_args->name,
Arne Jansen6f72c7e2011-09-14 15:58:21 +02001737 vol_args->fd, subvol, ptr,
Miao Xie8696c532013-02-07 06:02:44 +00001738 readonly, inherit);
Dan Carpenterc47ca322014-09-04 14:09:15 +03001739 if (ret)
1740 goto free_inherit;
Li Zefanfa0d2b92010-12-20 15:53:28 +08001741
Dan Carpenterc47ca322014-09-04 14:09:15 +03001742 if (ptr && copy_to_user(arg +
1743 offsetof(struct btrfs_ioctl_vol_args_v2,
1744 transid),
1745 ptr, sizeof(*ptr)))
Li Zefanfa0d2b92010-12-20 15:53:28 +08001746 ret = -EFAULT;
Dan Carpenterc47ca322014-09-04 14:09:15 +03001747
1748free_inherit:
Arne Jansen6f72c7e2011-09-14 15:58:21 +02001749 kfree(inherit);
Dan Carpenterc47ca322014-09-04 14:09:15 +03001750free_args:
1751 kfree(vol_args);
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04001752 return ret;
1753}
1754
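/*
 * Illustrative userspace sketch only, not part of this file: the two
 * handlers below back the read-only property of a subvolume.  It assumes
 * <linux/btrfs.h> and that subvol_fd is an open fd on the subvolume root:
 *
 *	__u64 flags;
 *
 *	if (ioctl(subvol_fd, BTRFS_IOC_SUBVOL_GETFLAGS, &flags) == 0) {
 *		flags |= BTRFS_SUBVOL_RDONLY;
 *		if (ioctl(subvol_fd, BTRFS_IOC_SUBVOL_SETFLAGS, &flags) < 0)
 *			perror("BTRFS_IOC_SUBVOL_SETFLAGS");
 *	}
 */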
Li Zefan0caa1022010-12-20 16:30:25 +08001755static noinline int btrfs_ioctl_subvol_getflags(struct file *file,
1756 void __user *arg)
1757{
Al Viro496ad9a2013-01-23 17:07:38 -05001758 struct inode *inode = file_inode(file);
Li Zefan0caa1022010-12-20 16:30:25 +08001759 struct btrfs_root *root = BTRFS_I(inode)->root;
1760 int ret = 0;
1761 u64 flags = 0;
1762
Li Zefan33345d012011-04-20 10:31:50 +08001763 if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID)
Li Zefan0caa1022010-12-20 16:30:25 +08001764 return -EINVAL;
1765
1766 down_read(&root->fs_info->subvol_sem);
1767 if (btrfs_root_readonly(root))
1768 flags |= BTRFS_SUBVOL_RDONLY;
1769 up_read(&root->fs_info->subvol_sem);
1770
1771 if (copy_to_user(arg, &flags, sizeof(flags)))
1772 ret = -EFAULT;
1773
1774 return ret;
1775}
1776
1777static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
1778 void __user *arg)
1779{
Al Viro496ad9a2013-01-23 17:07:38 -05001780 struct inode *inode = file_inode(file);
Li Zefan0caa1022010-12-20 16:30:25 +08001781 struct btrfs_root *root = BTRFS_I(inode)->root;
1782 struct btrfs_trans_handle *trans;
1783 u64 root_flags;
1784 u64 flags;
1785 int ret = 0;
1786
David Sterbabd60ea02014-01-16 15:50:22 +01001787 if (!inode_owner_or_capable(inode))
1788 return -EPERM;
1789
Liu Bob9ca0662012-06-29 03:58:49 -06001790 ret = mnt_want_write_file(file);
1791 if (ret)
1792 goto out;
Li Zefan0caa1022010-12-20 16:30:25 +08001793
Liu Bob9ca0662012-06-29 03:58:49 -06001794 if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) {
1795 ret = -EINVAL;
1796 goto out_drop_write;
1797 }
Li Zefan0caa1022010-12-20 16:30:25 +08001798
Liu Bob9ca0662012-06-29 03:58:49 -06001799 if (copy_from_user(&flags, arg, sizeof(flags))) {
1800 ret = -EFAULT;
1801 goto out_drop_write;
1802 }
Li Zefan0caa1022010-12-20 16:30:25 +08001803
Liu Bob9ca0662012-06-29 03:58:49 -06001804 if (flags & BTRFS_SUBVOL_CREATE_ASYNC) {
1805 ret = -EINVAL;
1806 goto out_drop_write;
1807 }
Li Zefan0caa1022010-12-20 16:30:25 +08001808
Liu Bob9ca0662012-06-29 03:58:49 -06001809 if (flags & ~BTRFS_SUBVOL_RDONLY) {
1810 ret = -EOPNOTSUPP;
1811 goto out_drop_write;
1812 }
Li Zefan0caa1022010-12-20 16:30:25 +08001813
1814 down_write(&root->fs_info->subvol_sem);
1815
1816 /* nothing to do */
1817 if (!!(flags & BTRFS_SUBVOL_RDONLY) == btrfs_root_readonly(root))
Liu Bob9ca0662012-06-29 03:58:49 -06001818 goto out_drop_sem;
Li Zefan0caa1022010-12-20 16:30:25 +08001819
1820 root_flags = btrfs_root_flags(&root->root_item);
David Sterba2c686532013-12-16 17:34:17 +01001821 if (flags & BTRFS_SUBVOL_RDONLY) {
Li Zefan0caa1022010-12-20 16:30:25 +08001822 btrfs_set_root_flags(&root->root_item,
1823 root_flags | BTRFS_ROOT_SUBVOL_RDONLY);
David Sterba2c686532013-12-16 17:34:17 +01001824 } else {
1825 /*
1826 * Block RO -> RW transition if this subvolume is involved in
1827 * send
1828 */
1829 spin_lock(&root->root_item_lock);
1830 if (root->send_in_progress == 0) {
1831 btrfs_set_root_flags(&root->root_item,
Li Zefan0caa1022010-12-20 16:30:25 +08001832 root_flags & ~BTRFS_ROOT_SUBVOL_RDONLY);
David Sterba2c686532013-12-16 17:34:17 +01001833 spin_unlock(&root->root_item_lock);
1834 } else {
1835 spin_unlock(&root->root_item_lock);
1836 btrfs_warn(root->fs_info,
1837 "Attempt to set subvolume %llu read-write during send",
1838 root->root_key.objectid);
1839 ret = -EPERM;
1840 goto out_drop_sem;
1841 }
1842 }
Li Zefan0caa1022010-12-20 16:30:25 +08001843
1844 trans = btrfs_start_transaction(root, 1);
1845 if (IS_ERR(trans)) {
1846 ret = PTR_ERR(trans);
1847 goto out_reset;
1848 }
1849
Li Zefanb4dc2b82011-02-16 06:06:34 +00001850 ret = btrfs_update_root(trans, root->fs_info->tree_root,
Li Zefan0caa1022010-12-20 16:30:25 +08001851 &root->root_key, &root->root_item);
1852
1853 btrfs_commit_transaction(trans, root);
1854out_reset:
1855 if (ret)
1856 btrfs_set_root_flags(&root->root_item, root_flags);
Liu Bob9ca0662012-06-29 03:58:49 -06001857out_drop_sem:
Li Zefan0caa1022010-12-20 16:30:25 +08001858 up_write(&root->fs_info->subvol_sem);
Liu Bob9ca0662012-06-29 03:58:49 -06001859out_drop_write:
1860 mnt_drop_write_file(file);
1861out:
Li Zefan0caa1022010-12-20 16:30:25 +08001862 return ret;
1863}
1864
Yan, Zheng76dda932009-09-21 16:00:26 -04001865/*
1866 * helper to check if the subvolume references other subvolumes
1867 */
1868static noinline int may_destroy_subvol(struct btrfs_root *root)
1869{
1870 struct btrfs_path *path;
Josef Bacik175a2b82013-08-12 15:36:44 -04001871 struct btrfs_dir_item *di;
Yan, Zheng76dda932009-09-21 16:00:26 -04001872 struct btrfs_key key;
Josef Bacik175a2b82013-08-12 15:36:44 -04001873 u64 dir_id;
Yan, Zheng76dda932009-09-21 16:00:26 -04001874 int ret;
1875
1876 path = btrfs_alloc_path();
1877 if (!path)
1878 return -ENOMEM;
1879
Josef Bacik175a2b82013-08-12 15:36:44 -04001880 /* Make sure this root isn't set as the default subvol */
1881 dir_id = btrfs_super_root_dir(root->fs_info->super_copy);
1882 di = btrfs_lookup_dir_item(NULL, root->fs_info->tree_root, path,
1883 dir_id, "default", 7, 0);
1884 if (di && !IS_ERR(di)) {
1885 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
1886 if (key.objectid == root->root_key.objectid) {
Guangyu Sun72de6b52014-03-11 11:24:18 -07001887 ret = -EPERM;
1888 btrfs_err(root->fs_info, "deleting default subvolume "
1889 "%llu is not allowed", key.objectid);
Josef Bacik175a2b82013-08-12 15:36:44 -04001890 goto out;
1891 }
1892 btrfs_release_path(path);
1893 }
1894
Yan, Zheng76dda932009-09-21 16:00:26 -04001895 key.objectid = root->root_key.objectid;
1896 key.type = BTRFS_ROOT_REF_KEY;
1897 key.offset = (u64)-1;
1898
1899 ret = btrfs_search_slot(NULL, root->fs_info->tree_root,
1900 &key, path, 0, 0);
1901 if (ret < 0)
1902 goto out;
1903 BUG_ON(ret == 0);
1904
1905 ret = 0;
1906 if (path->slots[0] > 0) {
1907 path->slots[0]--;
1908 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1909 if (key.objectid == root->root_key.objectid &&
1910 key.type == BTRFS_ROOT_REF_KEY)
1911 ret = -ENOTEMPTY;
1912 }
1913out:
1914 btrfs_free_path(path);
1915 return ret;
1916}
1917
Chris Masonac8e9812010-02-28 15:39:26 -05001918static noinline int key_in_sk(struct btrfs_key *key,
1919 struct btrfs_ioctl_search_key *sk)
1920{
Chris Masonabc6e132010-03-18 12:10:08 -04001921 struct btrfs_key test;
1922 int ret;
1923
1924 test.objectid = sk->min_objectid;
1925 test.type = sk->min_type;
1926 test.offset = sk->min_offset;
1927
1928 ret = btrfs_comp_cpu_keys(key, &test);
1929 if (ret < 0)
Chris Masonac8e9812010-02-28 15:39:26 -05001930 return 0;
Chris Masonabc6e132010-03-18 12:10:08 -04001931
1932 test.objectid = sk->max_objectid;
1933 test.type = sk->max_type;
1934 test.offset = sk->max_offset;
1935
1936 ret = btrfs_comp_cpu_keys(key, &test);
1937 if (ret > 0)
Chris Masonac8e9812010-02-28 15:39:26 -05001938 return 0;
1939 return 1;
1940}
1941
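/*
 * Copy every item of the current leaf that falls inside the range given
 * by 'sk' into the user buffer 'ubuf' as a btrfs_ioctl_search_header
 * followed by the raw item data, bumping *sk_offset and *num_found as it
 * goes, and advance 'key' past the last position examined.  The possible
 * return values are summarized at the end of the function.
 */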
1942static noinline int copy_to_sk(struct btrfs_root *root,
1943 struct btrfs_path *path,
1944 struct btrfs_key *key,
1945 struct btrfs_ioctl_search_key *sk,
Gerhard Heift9b6e8172014-01-30 16:24:00 +01001946 size_t *buf_size,
Gerhard Heiftba346b32014-01-30 16:24:02 +01001947 char __user *ubuf,
Chris Masonac8e9812010-02-28 15:39:26 -05001948 unsigned long *sk_offset,
1949 int *num_found)
1950{
1951 u64 found_transid;
1952 struct extent_buffer *leaf;
1953 struct btrfs_ioctl_search_header sh;
Naohiro Aotadd81d452015-06-30 11:25:43 +09001954 struct btrfs_key test;
Chris Masonac8e9812010-02-28 15:39:26 -05001955 unsigned long item_off;
1956 unsigned long item_len;
1957 int nritems;
1958 int i;
1959 int slot;
Chris Masonac8e9812010-02-28 15:39:26 -05001960 int ret = 0;
1961
1962 leaf = path->nodes[0];
1963 slot = path->slots[0];
1964 nritems = btrfs_header_nritems(leaf);
1965
1966 if (btrfs_header_generation(leaf) > sk->max_transid) {
1967 i = nritems;
1968 goto advance_key;
1969 }
1970 found_transid = btrfs_header_generation(leaf);
1971
1972 for (i = slot; i < nritems; i++) {
1973 item_off = btrfs_item_ptr_offset(leaf, i);
1974 item_len = btrfs_item_size_nr(leaf, i);
1975
Gabriel de Perthuis03b71c62013-05-06 17:40:18 +00001976 btrfs_item_key_to_cpu(leaf, key, i);
1977 if (!key_in_sk(key, sk))
1978 continue;
1979
Gerhard Heift9b6e8172014-01-30 16:24:00 +01001980 if (sizeof(sh) + item_len > *buf_size) {
Gerhard Heift8f5f6172014-01-30 16:23:59 +01001981 if (*num_found) {
1982 ret = 1;
1983 goto out;
1984 }
Chris Masonac8e9812010-02-28 15:39:26 -05001985
Gerhard Heift8f5f6172014-01-30 16:23:59 +01001986 /*
1987 * return one empty item back for v1, which does not
1988 * handle -EOVERFLOW
1989 */
1990
Gerhard Heift9b6e8172014-01-30 16:24:00 +01001991 *buf_size = sizeof(sh) + item_len;
Chris Masonac8e9812010-02-28 15:39:26 -05001992 item_len = 0;
Gerhard Heift8f5f6172014-01-30 16:23:59 +01001993 ret = -EOVERFLOW;
1994 }
Chris Masonac8e9812010-02-28 15:39:26 -05001995
Gerhard Heift9b6e8172014-01-30 16:24:00 +01001996 if (sizeof(sh) + item_len + *sk_offset > *buf_size) {
Chris Masonac8e9812010-02-28 15:39:26 -05001997 ret = 1;
Gerhard Heift25c9bc22014-01-30 16:23:57 +01001998 goto out;
Chris Masonac8e9812010-02-28 15:39:26 -05001999 }
2000
Chris Masonac8e9812010-02-28 15:39:26 -05002001 sh.objectid = key->objectid;
2002 sh.offset = key->offset;
2003 sh.type = key->type;
2004 sh.len = item_len;
2005 sh.transid = found_transid;
2006
2007 /* copy search result header */
Gerhard Heiftba346b32014-01-30 16:24:02 +01002008 if (copy_to_user(ubuf + *sk_offset, &sh, sizeof(sh))) {
2009 ret = -EFAULT;
2010 goto out;
2011 }
2012
Chris Masonac8e9812010-02-28 15:39:26 -05002013 *sk_offset += sizeof(sh);
2014
2015 if (item_len) {
Gerhard Heiftba346b32014-01-30 16:24:02 +01002016 char __user *up = ubuf + *sk_offset;
Chris Masonac8e9812010-02-28 15:39:26 -05002017 /* copy the item */
Gerhard Heiftba346b32014-01-30 16:24:02 +01002018 if (read_extent_buffer_to_user(leaf, up,
2019 item_off, item_len)) {
2020 ret = -EFAULT;
2021 goto out;
2022 }
2023
Chris Masonac8e9812010-02-28 15:39:26 -05002024 *sk_offset += item_len;
Chris Masonac8e9812010-02-28 15:39:26 -05002025 }
Hugo Millse2156862011-05-14 17:43:41 +00002026 (*num_found)++;
Chris Masonac8e9812010-02-28 15:39:26 -05002027
Gerhard Heift8f5f6172014-01-30 16:23:59 +01002028 if (ret) /* -EOVERFLOW from above */
2029 goto out;
2030
Gerhard Heift25c9bc22014-01-30 16:23:57 +01002031 if (*num_found >= sk->nr_items) {
2032 ret = 1;
2033 goto out;
2034 }
Chris Masonac8e9812010-02-28 15:39:26 -05002035 }
2036advance_key:
Chris Masonac8e9812010-02-28 15:39:26 -05002037 ret = 0;
Naohiro Aotadd81d452015-06-30 11:25:43 +09002038 test.objectid = sk->max_objectid;
2039 test.type = sk->max_type;
2040 test.offset = sk->max_offset;
2041 if (btrfs_comp_cpu_keys(key, &test) >= 0)
2042 ret = 1;
2043 else if (key->offset < (u64)-1)
Chris Masonabc6e132010-03-18 12:10:08 -04002044 key->offset++;
Naohiro Aotadd81d452015-06-30 11:25:43 +09002045 else if (key->type < (u8)-1) {
Chris Masonabc6e132010-03-18 12:10:08 -04002046 key->offset = 0;
2047 key->type++;
Naohiro Aotadd81d452015-06-30 11:25:43 +09002048 } else if (key->objectid < (u64)-1) {
Chris Masonabc6e132010-03-18 12:10:08 -04002049 key->offset = 0;
2050 key->type = 0;
2051 key->objectid++;
2052 } else
2053 ret = 1;
Gerhard Heift25c9bc22014-01-30 16:23:57 +01002054out:
Gerhard Heiftba346b32014-01-30 16:24:02 +01002055 /*
2056 * 0: all items from this leaf copied, continue with next
2057 * 1: * more items can be copied, but unused buffer is too small
2058 * * all items were found
 2059	 *    Either way, it stops the loop which iterates to the next
 2060	 *    leaf
 2061	 *  -EOVERFLOW: item was too large for buffer
2062 * -EFAULT: could not copy extent buffer back to userspace
2063 */
Chris Masonac8e9812010-02-28 15:39:26 -05002064 return ret;
2065}
2066
2067static noinline int search_ioctl(struct inode *inode,
Gerhard Heift12544442014-01-30 16:23:58 +01002068 struct btrfs_ioctl_search_key *sk,
Gerhard Heift9b6e8172014-01-30 16:24:00 +01002069 size_t *buf_size,
Gerhard Heiftba346b32014-01-30 16:24:02 +01002070 char __user *ubuf)
Chris Masonac8e9812010-02-28 15:39:26 -05002071{
2072 struct btrfs_root *root;
2073 struct btrfs_key key;
Chris Masonac8e9812010-02-28 15:39:26 -05002074 struct btrfs_path *path;
Chris Masonac8e9812010-02-28 15:39:26 -05002075 struct btrfs_fs_info *info = BTRFS_I(inode)->root->fs_info;
2076 int ret;
2077 int num_found = 0;
2078 unsigned long sk_offset = 0;
2079
Gerhard Heift9b6e8172014-01-30 16:24:00 +01002080 if (*buf_size < sizeof(struct btrfs_ioctl_search_header)) {
2081 *buf_size = sizeof(struct btrfs_ioctl_search_header);
Gerhard Heift12544442014-01-30 16:23:58 +01002082 return -EOVERFLOW;
Gerhard Heift9b6e8172014-01-30 16:24:00 +01002083 }
Gerhard Heift12544442014-01-30 16:23:58 +01002084
Chris Masonac8e9812010-02-28 15:39:26 -05002085 path = btrfs_alloc_path();
2086 if (!path)
2087 return -ENOMEM;
2088
2089 if (sk->tree_id == 0) {
2090 /* search the root of the inode that was passed */
2091 root = BTRFS_I(inode)->root;
2092 } else {
2093 key.objectid = sk->tree_id;
2094 key.type = BTRFS_ROOT_ITEM_KEY;
2095 key.offset = (u64)-1;
2096 root = btrfs_read_fs_root_no_name(info, &key);
2097 if (IS_ERR(root)) {
Chris Masonac8e9812010-02-28 15:39:26 -05002098 btrfs_free_path(path);
2099 return -ENOENT;
2100 }
2101 }
2102
2103 key.objectid = sk->min_objectid;
2104 key.type = sk->min_type;
2105 key.offset = sk->min_offset;
2106
Dulshani Gunawardhana67871252013-10-31 10:33:04 +05302107 while (1) {
Filipe David Borba Manana6174d3c2013-10-01 16:13:42 +01002108 ret = btrfs_search_forward(root, &key, path, sk->min_transid);
Chris Masonac8e9812010-02-28 15:39:26 -05002109 if (ret != 0) {
2110 if (ret > 0)
2111 ret = 0;
2112 goto err;
2113 }
Gerhard Heiftba346b32014-01-30 16:24:02 +01002114 ret = copy_to_sk(root, path, &key, sk, buf_size, ubuf,
Chris Masonac8e9812010-02-28 15:39:26 -05002115 &sk_offset, &num_found);
David Sterbab3b4aa72011-04-21 01:20:15 +02002116 btrfs_release_path(path);
Gerhard Heift25c9bc22014-01-30 16:23:57 +01002117 if (ret)
Chris Masonac8e9812010-02-28 15:39:26 -05002118 break;
2119
2120 }
Gerhard Heift8f5f6172014-01-30 16:23:59 +01002121 if (ret > 0)
2122 ret = 0;
Chris Masonac8e9812010-02-28 15:39:26 -05002123err:
2124 sk->nr_items = num_found;
2125 btrfs_free_path(path);
2126 return ret;
2127}
2128
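/*
 * Illustrative userspace sketch only, not part of this file: walking the
 * tree of tree roots through the v1 search ioctl handled below (which
 * requires CAP_SYS_ADMIN).  Constants are assumed to come from the btrfs
 * headers and mnt_fd to be an open fd inside the mounted filesystem:
 *
 *	struct btrfs_ioctl_search_args args;
 *	struct btrfs_ioctl_search_key *sk = &args.key;
 *	int ret;
 *
 *	memset(&args, 0, sizeof(args));
 *	sk->tree_id = BTRFS_ROOT_TREE_OBJECTID;
 *	sk->min_objectid = BTRFS_FIRST_FREE_OBJECTID;
 *	sk->max_objectid = (__u64)-1;
 *	sk->max_type = 255;
 *	sk->max_offset = (__u64)-1;
 *	sk->max_transid = (__u64)-1;
 *	sk->nr_items = 4096;
 *	ret = ioctl(mnt_fd, BTRFS_IOC_TREE_SEARCH, &args);
 *
 * On return args.key.nr_items holds how many btrfs_ioctl_search_header
 * records (each followed by its raw item data) were packed into args.buf;
 * the caller filters them by the type field of each header.
 */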
2129static noinline int btrfs_ioctl_tree_search(struct file *file,
2130 void __user *argp)
2131{
Gerhard Heiftba346b32014-01-30 16:24:02 +01002132 struct btrfs_ioctl_search_args __user *uargs;
2133 struct btrfs_ioctl_search_key sk;
Gerhard Heift9b6e8172014-01-30 16:24:00 +01002134 struct inode *inode;
2135 int ret;
2136 size_t buf_size;
Chris Masonac8e9812010-02-28 15:39:26 -05002137
2138 if (!capable(CAP_SYS_ADMIN))
2139 return -EPERM;
2140
Gerhard Heiftba346b32014-01-30 16:24:02 +01002141 uargs = (struct btrfs_ioctl_search_args __user *)argp;
Chris Masonac8e9812010-02-28 15:39:26 -05002142
Gerhard Heiftba346b32014-01-30 16:24:02 +01002143 if (copy_from_user(&sk, &uargs->key, sizeof(sk)))
2144 return -EFAULT;
2145
2146 buf_size = sizeof(uargs->buf);
Chris Masonac8e9812010-02-28 15:39:26 -05002147
Al Viro496ad9a2013-01-23 17:07:38 -05002148 inode = file_inode(file);
Gerhard Heiftba346b32014-01-30 16:24:02 +01002149 ret = search_ioctl(inode, &sk, &buf_size, uargs->buf);
Gerhard Heift8f5f6172014-01-30 16:23:59 +01002150
2151 /*
 2152	 * In the original implementation an overflow is handled by returning a
2153 * search header with a len of zero, so reset ret.
2154 */
2155 if (ret == -EOVERFLOW)
2156 ret = 0;
2157
Gerhard Heiftba346b32014-01-30 16:24:02 +01002158 if (ret == 0 && copy_to_user(&uargs->key, &sk, sizeof(sk)))
Chris Masonac8e9812010-02-28 15:39:26 -05002159 ret = -EFAULT;
Chris Masonac8e9812010-02-28 15:39:26 -05002160 return ret;
2161}
2162
Gerhard Heiftcc68a8a2014-01-30 16:24:03 +01002163static noinline int btrfs_ioctl_tree_search_v2(struct file *file,
2164 void __user *argp)
2165{
2166 struct btrfs_ioctl_search_args_v2 __user *uarg;
2167 struct btrfs_ioctl_search_args_v2 args;
2168 struct inode *inode;
2169 int ret;
2170 size_t buf_size;
Byongho Leeee221842015-12-15 01:42:10 +09002171 const size_t buf_limit = SZ_16M;
Gerhard Heiftcc68a8a2014-01-30 16:24:03 +01002172
2173 if (!capable(CAP_SYS_ADMIN))
2174 return -EPERM;
2175
2176 /* copy search header and buffer size */
2177 uarg = (struct btrfs_ioctl_search_args_v2 __user *)argp;
2178 if (copy_from_user(&args, uarg, sizeof(args)))
2179 return -EFAULT;
2180
2181 buf_size = args.buf_size;
2182
2183 if (buf_size < sizeof(struct btrfs_ioctl_search_header))
2184 return -EOVERFLOW;
2185
2186 /* limit result size to 16MB */
2187 if (buf_size > buf_limit)
2188 buf_size = buf_limit;
2189
2190 inode = file_inode(file);
2191 ret = search_ioctl(inode, &args.key, &buf_size,
2192 (char *)(&uarg->buf[0]));
2193 if (ret == 0 && copy_to_user(&uarg->key, &args.key, sizeof(args.key)))
2194 ret = -EFAULT;
2195 else if (ret == -EOVERFLOW &&
2196 copy_to_user(&uarg->buf_size, &buf_size, sizeof(buf_size)))
2197 ret = -EFAULT;
2198
Yan, Zheng76dda932009-09-21 16:00:26 -04002199 return ret;
2200}
2201
TARUISI Hiroaki98d377a2009-11-18 05:42:14 +00002202/*
Chris Masonac8e9812010-02-28 15:39:26 -05002203 * Search INODE_REFs to identify the path name of the 'dirid' directory
 2204 * in the 'tree_id' tree, and copy that path into 'name'.
2205 */
TARUISI Hiroaki98d377a2009-11-18 05:42:14 +00002206static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info,
2207 u64 tree_id, u64 dirid, char *name)
2208{
2209 struct btrfs_root *root;
2210 struct btrfs_key key;
Chris Masonac8e9812010-02-28 15:39:26 -05002211 char *ptr;
TARUISI Hiroaki98d377a2009-11-18 05:42:14 +00002212 int ret = -1;
2213 int slot;
2214 int len;
2215 int total_len = 0;
2216 struct btrfs_inode_ref *iref;
2217 struct extent_buffer *l;
2218 struct btrfs_path *path;
2219
2220 if (dirid == BTRFS_FIRST_FREE_OBJECTID) {
2221 name[0]='\0';
2222 return 0;
2223 }
2224
2225 path = btrfs_alloc_path();
2226 if (!path)
2227 return -ENOMEM;
2228
Chris Masonac8e9812010-02-28 15:39:26 -05002229 ptr = &name[BTRFS_INO_LOOKUP_PATH_MAX];
TARUISI Hiroaki98d377a2009-11-18 05:42:14 +00002230
2231 key.objectid = tree_id;
2232 key.type = BTRFS_ROOT_ITEM_KEY;
2233 key.offset = (u64)-1;
2234 root = btrfs_read_fs_root_no_name(info, &key);
2235 if (IS_ERR(root)) {
David Sterbaf14d1042015-10-08 11:37:06 +02002236 btrfs_err(info, "could not find root %llu", tree_id);
Chris Mason8ad6fca2010-03-18 12:23:10 -04002237 ret = -ENOENT;
2238 goto out;
TARUISI Hiroaki98d377a2009-11-18 05:42:14 +00002239 }
2240
2241 key.objectid = dirid;
2242 key.type = BTRFS_INODE_REF_KEY;
Chris Mason8ad6fca2010-03-18 12:23:10 -04002243 key.offset = (u64)-1;
TARUISI Hiroaki98d377a2009-11-18 05:42:14 +00002244
Dulshani Gunawardhana67871252013-10-31 10:33:04 +05302245 while (1) {
TARUISI Hiroaki98d377a2009-11-18 05:42:14 +00002246 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2247 if (ret < 0)
2248 goto out;
Filipe David Borba Manana18674c62013-08-14 03:00:21 +01002249 else if (ret > 0) {
2250 ret = btrfs_previous_item(root, path, dirid,
2251 BTRFS_INODE_REF_KEY);
2252 if (ret < 0)
2253 goto out;
2254 else if (ret > 0) {
2255 ret = -ENOENT;
2256 goto out;
2257 }
2258 }
TARUISI Hiroaki98d377a2009-11-18 05:42:14 +00002259
2260 l = path->nodes[0];
2261 slot = path->slots[0];
2262 btrfs_item_key_to_cpu(l, &key, slot);
2263
TARUISI Hiroaki98d377a2009-11-18 05:42:14 +00002264 iref = btrfs_item_ptr(l, slot, struct btrfs_inode_ref);
2265 len = btrfs_inode_ref_name_len(l, iref);
2266 ptr -= len + 1;
2267 total_len += len + 1;
Filipe David Borba Mananaa696cf32013-08-14 03:00:20 +01002268 if (ptr < name) {
2269 ret = -ENAMETOOLONG;
TARUISI Hiroaki98d377a2009-11-18 05:42:14 +00002270 goto out;
Filipe David Borba Mananaa696cf32013-08-14 03:00:20 +01002271 }
TARUISI Hiroaki98d377a2009-11-18 05:42:14 +00002272
2273 *(ptr + len) = '/';
Dulshani Gunawardhana67871252013-10-31 10:33:04 +05302274 read_extent_buffer(l, ptr, (unsigned long)(iref + 1), len);
TARUISI Hiroaki98d377a2009-11-18 05:42:14 +00002275
2276 if (key.offset == BTRFS_FIRST_FREE_OBJECTID)
2277 break;
2278
David Sterbab3b4aa72011-04-21 01:20:15 +02002279 btrfs_release_path(path);
TARUISI Hiroaki98d377a2009-11-18 05:42:14 +00002280 key.objectid = key.offset;
Chris Mason8ad6fca2010-03-18 12:23:10 -04002281 key.offset = (u64)-1;
TARUISI Hiroaki98d377a2009-11-18 05:42:14 +00002282 dirid = key.objectid;
TARUISI Hiroaki98d377a2009-11-18 05:42:14 +00002283 }
Li Zefan77906a502011-07-14 03:16:00 +00002284 memmove(name, ptr, total_len);
Dulshani Gunawardhana67871252013-10-31 10:33:04 +05302285 name[total_len] = '\0';
TARUISI Hiroaki98d377a2009-11-18 05:42:14 +00002286 ret = 0;
2287out:
2288 btrfs_free_path(path);
Chris Masonac8e9812010-02-28 15:39:26 -05002289 return ret;
2290}
2291
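/*
 * Illustrative userspace sketch only, not part of this file: the
 * unprivileged use of the lookup handler below, asking which subvolume
 * contains a given fd.  It assumes <linux/btrfs.h> and the usual libc
 * headers; BTRFS_FIRST_FREE_OBJECTID (256) comes from the btrfs tree
 * headers:
 *
 *	struct btrfs_ioctl_ino_lookup_args args;
 *
 *	memset(&args, 0, sizeof(args));
 *	args.treeid = 0;
 *	args.objectid = BTRFS_FIRST_FREE_OBJECTID;
 *	if (ioctl(fd, BTRFS_IOC_INO_LOOKUP, &args) == 0)
 *		printf("subvolume id %llu\n",
 *		       (unsigned long long)args.treeid);
 */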
2292static noinline int btrfs_ioctl_ino_lookup(struct file *file,
2293 void __user *argp)
2294{
2295 struct btrfs_ioctl_ino_lookup_args *args;
2296 struct inode *inode;
David Sterba01b810b2015-05-12 19:14:49 +02002297 int ret = 0;
Chris Masonac8e9812010-02-28 15:39:26 -05002298
Julia Lawall2354d082010-10-29 15:14:18 -04002299 args = memdup_user(argp, sizeof(*args));
2300 if (IS_ERR(args))
2301 return PTR_ERR(args);
Dan Carpenterc2b96922010-03-20 11:24:15 +00002302
Al Viro496ad9a2013-01-23 17:07:38 -05002303 inode = file_inode(file);
Chris Masonac8e9812010-02-28 15:39:26 -05002304
David Sterba01b810b2015-05-12 19:14:49 +02002305 /*
2306 * Unprivileged query to obtain the containing subvolume root id. The
2307 * path is reset so it's consistent with btrfs_search_path_in_tree.
2308 */
Chris Mason1b53ac42010-03-18 12:17:05 -04002309 if (args->treeid == 0)
2310 args->treeid = BTRFS_I(inode)->root->root_key.objectid;
2311
David Sterba01b810b2015-05-12 19:14:49 +02002312 if (args->objectid == BTRFS_FIRST_FREE_OBJECTID) {
2313 args->name[0] = 0;
2314 goto out;
2315 }
2316
2317 if (!capable(CAP_SYS_ADMIN)) {
2318 ret = -EPERM;
2319 goto out;
2320 }
2321
Chris Masonac8e9812010-02-28 15:39:26 -05002322 ret = btrfs_search_path_in_tree(BTRFS_I(inode)->root->fs_info,
2323 args->treeid, args->objectid,
2324 args->name);
2325
David Sterba01b810b2015-05-12 19:14:49 +02002326out:
Chris Masonac8e9812010-02-28 15:39:26 -05002327 if (ret == 0 && copy_to_user(argp, args, sizeof(*args)))
2328 ret = -EFAULT;
2329
2330 kfree(args);
TARUISI Hiroaki98d377a2009-11-18 05:42:14 +00002331 return ret;
2332}
2333
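/*
 * Illustrative userspace sketch only, not part of this file: deleting a
 * snapshot or subvolume by name through the handler below.  The ioctl is
 * issued on the parent directory that contains the subvolume; it assumes
 * <linux/btrfs.h> and an already open parent_fd:
 *
 *	struct btrfs_ioctl_vol_args args = { .fd = 0 };
 *
 *	strncpy(args.name, "snap1", BTRFS_PATH_NAME_MAX);
 *	if (ioctl(parent_fd, BTRFS_IOC_SNAP_DESTROY, &args) < 0)
 *		perror("BTRFS_IOC_SNAP_DESTROY");
 */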
Yan, Zheng76dda932009-09-21 16:00:26 -04002334static noinline int btrfs_ioctl_snap_destroy(struct file *file,
2335 void __user *arg)
2336{
Al Viro54563d42013-09-01 15:57:51 -04002337 struct dentry *parent = file->f_path.dentry;
Yan, Zheng76dda932009-09-21 16:00:26 -04002338 struct dentry *dentry;
David Howells2b0143b2015-03-17 22:25:59 +00002339 struct inode *dir = d_inode(parent);
Yan, Zheng76dda932009-09-21 16:00:26 -04002340 struct inode *inode;
2341 struct btrfs_root *root = BTRFS_I(dir)->root;
2342 struct btrfs_root *dest = NULL;
2343 struct btrfs_ioctl_vol_args *vol_args;
2344 struct btrfs_trans_handle *trans;
Miao Xiec58aaad2013-02-28 10:05:36 +00002345 struct btrfs_block_rsv block_rsv;
David Sterba521e0542014-04-15 16:41:44 +02002346 u64 root_flags;
Miao Xiec58aaad2013-02-28 10:05:36 +00002347 u64 qgroup_reserved;
Yan, Zheng76dda932009-09-21 16:00:26 -04002348 int namelen;
2349 int ret;
2350 int err = 0;
2351
Yan, Zheng76dda932009-09-21 16:00:26 -04002352 vol_args = memdup_user(arg, sizeof(*vol_args));
2353 if (IS_ERR(vol_args))
2354 return PTR_ERR(vol_args);
2355
2356 vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
2357 namelen = strlen(vol_args->name);
2358 if (strchr(vol_args->name, '/') ||
2359 strncmp(vol_args->name, "..", namelen) == 0) {
2360 err = -EINVAL;
2361 goto out;
2362 }
2363
Al Viroa561be72011-11-23 11:57:51 -05002364 err = mnt_want_write_file(file);
Yan, Zheng76dda932009-09-21 16:00:26 -04002365 if (err)
2366 goto out;
2367
David Sterba521e0542014-04-15 16:41:44 +02002368
David Sterba5c50c9b2013-03-22 18:12:51 +00002369 err = mutex_lock_killable_nested(&dir->i_mutex, I_MUTEX_PARENT);
2370 if (err == -EINTR)
David Sterbae43f9982013-12-06 17:51:32 +01002371 goto out_drop_write;
Yan, Zheng76dda932009-09-21 16:00:26 -04002372 dentry = lookup_one_len(vol_args->name, parent, namelen);
2373 if (IS_ERR(dentry)) {
2374 err = PTR_ERR(dentry);
2375 goto out_unlock_dir;
2376 }
2377
David Howells2b0143b2015-03-17 22:25:59 +00002378 if (d_really_is_negative(dentry)) {
Yan, Zheng76dda932009-09-21 16:00:26 -04002379 err = -ENOENT;
2380 goto out_dput;
2381 }
2382
David Howells2b0143b2015-03-17 22:25:59 +00002383 inode = d_inode(dentry);
Sage Weil4260f7c2010-10-29 15:46:43 -04002384 dest = BTRFS_I(inode)->root;
Dulshani Gunawardhana67871252013-10-31 10:33:04 +05302385 if (!capable(CAP_SYS_ADMIN)) {
Sage Weil4260f7c2010-10-29 15:46:43 -04002386 /*
2387 * Regular user. Only allow this with a special mount
2388 * option, when the user has write+exec access to the
2389 * subvol root, and when rmdir(2) would have been
2390 * allowed.
2391 *
 2392		 * Note that this is _not_ a check that the subvol is
2393 * empty or doesn't contain data that we wouldn't
2394 * otherwise be able to delete.
2395 *
2396 * Users who want to delete empty subvols should try
2397 * rmdir(2).
2398 */
2399 err = -EPERM;
2400 if (!btrfs_test_opt(root, USER_SUBVOL_RM_ALLOWED))
2401 goto out_dput;
2402
2403 /*
2404 * Do not allow deletion if the parent dir is the same
2405 * as the dir to be deleted. That means the ioctl
2406 * must be called on the dentry referencing the root
2407 * of the subvol, not a random directory contained
2408 * within it.
2409 */
2410 err = -EINVAL;
2411 if (root == dest)
2412 goto out_dput;
2413
2414 err = inode_permission(inode, MAY_WRITE | MAY_EXEC);
2415 if (err)
2416 goto out_dput;
Sage Weil4260f7c2010-10-29 15:46:43 -04002417 }
2418
Miao Xie5c39da52012-10-22 11:39:53 +00002419 /* check if subvolume may be deleted by a user */
2420 err = btrfs_may_delete(dir, dentry, 1);
2421 if (err)
2422 goto out_dput;
2423
Li Zefan33345d012011-04-20 10:31:50 +08002424 if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) {
Yan, Zheng76dda932009-09-21 16:00:26 -04002425 err = -EINVAL;
2426 goto out_dput;
2427 }
2428
Al Viro59551022016-01-22 15:40:57 -05002429 inode_lock(inode);
David Sterba521e0542014-04-15 16:41:44 +02002430
2431 /*
 2432	 * Don't allow deleting a subvolume with send in progress. This is
2433 * inside the i_mutex so the error handling that has to drop the bit
2434 * again is not run concurrently.
2435 */
2436 spin_lock(&dest->root_item_lock);
Filipe Mananac55bfa62014-05-25 03:55:44 +01002437 root_flags = btrfs_root_flags(&dest->root_item);
2438 if (dest->send_in_progress == 0) {
2439 btrfs_set_root_flags(&dest->root_item,
David Sterba521e0542014-04-15 16:41:44 +02002440 root_flags | BTRFS_ROOT_SUBVOL_DEAD);
2441 spin_unlock(&dest->root_item_lock);
2442 } else {
2443 spin_unlock(&dest->root_item_lock);
2444 btrfs_warn(root->fs_info,
2445 "Attempt to delete subvolume %llu during send",
Filipe Mananac55bfa62014-05-25 03:55:44 +01002446 dest->root_key.objectid);
David Sterba521e0542014-04-15 16:41:44 +02002447 err = -EPERM;
Omar Sandoval909e26d2015-04-10 14:20:40 -07002448 goto out_unlock_inode;
David Sterba521e0542014-04-15 16:41:44 +02002449 }
2450
Yan, Zheng76dda932009-09-21 16:00:26 -04002451 down_write(&root->fs_info->subvol_sem);
2452
2453 err = may_destroy_subvol(dest);
2454 if (err)
2455 goto out_up_write;
2456
Miao Xiec58aaad2013-02-28 10:05:36 +00002457 btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
2458 /*
2459 * One for dir inode, two for dir entries, two for root
2460 * ref/backref.
2461 */
2462 err = btrfs_subvolume_reserve_metadata(root, &block_rsv,
Jeff Mahoneyee3441b2013-07-09 16:37:21 -04002463 5, &qgroup_reserved, true);
Miao Xiec58aaad2013-02-28 10:05:36 +00002464 if (err)
2465 goto out_up_write;
2466
Yan, Zhenga22285a2010-05-16 10:48:46 -04002467 trans = btrfs_start_transaction(root, 0);
2468 if (IS_ERR(trans)) {
2469 err = PTR_ERR(trans);
Miao Xiec58aaad2013-02-28 10:05:36 +00002470 goto out_release;
Yan, Zhenga22285a2010-05-16 10:48:46 -04002471 }
Miao Xiec58aaad2013-02-28 10:05:36 +00002472 trans->block_rsv = &block_rsv;
2473 trans->bytes_reserved = block_rsv.size;
Yan, Zhenga22285a2010-05-16 10:48:46 -04002474
Filipe Manana2be63d52016-02-12 11:34:23 +00002475 btrfs_record_snapshot_destroy(trans, dir);
2476
Yan, Zheng76dda932009-09-21 16:00:26 -04002477 ret = btrfs_unlink_subvol(trans, root, dir,
2478 dest->root_key.objectid,
2479 dentry->d_name.name,
2480 dentry->d_name.len);
Jeff Mahoney79787ea2012-03-12 16:03:00 +01002481 if (ret) {
2482 err = ret;
2483 btrfs_abort_transaction(trans, root, ret);
2484 goto out_end_trans;
2485 }
Yan, Zheng76dda932009-09-21 16:00:26 -04002486
2487 btrfs_record_root_in_trans(trans, dest);
2488
2489 memset(&dest->root_item.drop_progress, 0,
2490 sizeof(dest->root_item.drop_progress));
2491 dest->root_item.drop_level = 0;
2492 btrfs_set_root_refs(&dest->root_item, 0);
2493
Miao Xie27cdeb72014-04-02 19:51:05 +08002494 if (!test_and_set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &dest->state)) {
Yan, Zhengd68fc572010-05-16 10:49:58 -04002495 ret = btrfs_insert_orphan_item(trans,
2496 root->fs_info->tree_root,
2497 dest->root_key.objectid);
Jeff Mahoney79787ea2012-03-12 16:03:00 +01002498 if (ret) {
2499 btrfs_abort_transaction(trans, root, ret);
2500 err = ret;
2501 goto out_end_trans;
2502 }
Yan, Zhengd68fc572010-05-16 10:49:58 -04002503 }
Stefan Behrensdd5f9612013-08-15 17:11:20 +02002504
2505 ret = btrfs_uuid_tree_rem(trans, root->fs_info->uuid_root,
2506 dest->root_item.uuid, BTRFS_UUID_KEY_SUBVOL,
2507 dest->root_key.objectid);
2508 if (ret && ret != -ENOENT) {
2509 btrfs_abort_transaction(trans, root, ret);
2510 err = ret;
2511 goto out_end_trans;
2512 }
2513 if (!btrfs_is_empty_uuid(dest->root_item.received_uuid)) {
2514 ret = btrfs_uuid_tree_rem(trans, root->fs_info->uuid_root,
2515 dest->root_item.received_uuid,
2516 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
2517 dest->root_key.objectid);
2518 if (ret && ret != -ENOENT) {
2519 btrfs_abort_transaction(trans, root, ret);
2520 err = ret;
2521 goto out_end_trans;
2522 }
2523 }
2524
Jeff Mahoney79787ea2012-03-12 16:03:00 +01002525out_end_trans:
Miao Xiec58aaad2013-02-28 10:05:36 +00002526 trans->block_rsv = NULL;
2527 trans->bytes_reserved = 0;
Sage Weil531cb132010-10-29 15:41:32 -04002528 ret = btrfs_end_transaction(trans, root);
Jeff Mahoney79787ea2012-03-12 16:03:00 +01002529 if (ret && !err)
2530 err = ret;
Yan, Zheng76dda932009-09-21 16:00:26 -04002531 inode->i_flags |= S_DEAD;
Miao Xiec58aaad2013-02-28 10:05:36 +00002532out_release:
2533 btrfs_subvolume_release_metadata(root, &block_rsv, qgroup_reserved);
Yan, Zheng76dda932009-09-21 16:00:26 -04002534out_up_write:
2535 up_write(&root->fs_info->subvol_sem);
David Sterba521e0542014-04-15 16:41:44 +02002536 if (err) {
2537 spin_lock(&dest->root_item_lock);
Filipe Mananac55bfa62014-05-25 03:55:44 +01002538 root_flags = btrfs_root_flags(&dest->root_item);
2539 btrfs_set_root_flags(&dest->root_item,
David Sterba521e0542014-04-15 16:41:44 +02002540 root_flags & ~BTRFS_ROOT_SUBVOL_DEAD);
2541 spin_unlock(&dest->root_item_lock);
2542 }
Omar Sandoval909e26d2015-04-10 14:20:40 -07002543out_unlock_inode:
Al Viro59551022016-01-22 15:40:57 -05002544 inode_unlock(inode);
Yan, Zheng76dda932009-09-21 16:00:26 -04002545 if (!err) {
Omar Sandoval64ad6c42015-06-02 17:31:00 -07002546 d_invalidate(dentry);
Yan, Zheng76dda932009-09-21 16:00:26 -04002547 btrfs_invalidate_inodes(dest);
2548 d_delete(dentry);
David Sterba61155aa2014-04-15 16:42:03 +02002549 ASSERT(dest->send_in_progress == 0);
Liu Bofa6ac872013-02-20 14:10:23 +00002550
2551 /* the last ref */
David Sterba57cdc8d2014-02-05 02:37:48 +01002552 if (dest->ino_cache_inode) {
2553 iput(dest->ino_cache_inode);
2554 dest->ino_cache_inode = NULL;
Liu Bofa6ac872013-02-20 14:10:23 +00002555 }
Yan, Zheng76dda932009-09-21 16:00:26 -04002556 }
2557out_dput:
2558 dput(dentry);
2559out_unlock_dir:
Al Viro59551022016-01-22 15:40:57 -05002560 inode_unlock(dir);
David Sterbae43f9982013-12-06 17:51:32 +01002561out_drop_write:
Al Viro2a79f172011-12-09 08:06:57 -05002562 mnt_drop_write_file(file);
Yan, Zheng76dda932009-09-21 16:00:26 -04002563out:
2564 kfree(vol_args);
2565 return err;
2566}
2567
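/*
 * Illustrative userspace sketch only, not part of this file: requesting
 * a whole-file defrag with compression via the range interface handled
 * below.  It assumes <linux/btrfs.h> (the compress type value may have to
 * come from the btrfs-progs headers instead) and file_fd open for writing
 * on a regular file:
 *
 *	struct btrfs_ioctl_defrag_range_args range;
 *
 *	memset(&range, 0, sizeof(range));
 *	range.len = (__u64)-1;
 *	range.flags = BTRFS_DEFRAG_RANGE_COMPRESS;
 *	range.compress_type = BTRFS_COMPRESS_LZO;
 *	if (ioctl(file_fd, BTRFS_IOC_DEFRAG_RANGE, &range) < 0)
 *		perror("BTRFS_IOC_DEFRAG_RANGE");
 *
 * When BTRFS_DEFRAG_RANGE_COMPRESS is set the handler itself forces
 * BTRFS_DEFRAG_RANGE_START_IO and an extent_thresh of (u32)-1.
 */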
Chris Mason1e701a32010-03-11 09:42:04 -05002568static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04002569{
Al Viro496ad9a2013-01-23 17:07:38 -05002570 struct inode *inode = file_inode(file);
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04002571 struct btrfs_root *root = BTRFS_I(inode)->root;
Chris Mason1e701a32010-03-11 09:42:04 -05002572 struct btrfs_ioctl_defrag_range_args *range;
Yan Zhengc146afa2008-11-12 14:34:12 -05002573 int ret;
2574
Ilya Dryomov25122d12013-01-20 15:57:57 +02002575 ret = mnt_want_write_file(file);
2576 if (ret)
2577 return ret;
Li Zefanb83cc962010-12-20 16:04:08 +08002578
Ilya Dryomov25122d12013-01-20 15:57:57 +02002579 if (btrfs_root_readonly(root)) {
2580 ret = -EROFS;
2581 goto out;
Stefan Behrens5ac00ad2012-11-05 17:54:08 +01002582 }
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04002583
2584 switch (inode->i_mode & S_IFMT) {
2585 case S_IFDIR:
Chris Masone441d542009-01-05 16:57:23 -05002586 if (!capable(CAP_SYS_ADMIN)) {
2587 ret = -EPERM;
2588 goto out;
2589 }
Eric Sandeende78b512013-01-31 18:21:12 +00002590 ret = btrfs_defrag_root(root);
Yan, Zheng8929ecfa2010-05-16 10:49:58 -04002591 if (ret)
2592 goto out;
Eric Sandeende78b512013-01-31 18:21:12 +00002593 ret = btrfs_defrag_root(root->fs_info->extent_root);
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04002594 break;
2595 case S_IFREG:
Chris Masone441d542009-01-05 16:57:23 -05002596 if (!(file->f_mode & FMODE_WRITE)) {
2597 ret = -EINVAL;
2598 goto out;
2599 }
Chris Mason1e701a32010-03-11 09:42:04 -05002600
2601 range = kzalloc(sizeof(*range), GFP_KERNEL);
2602 if (!range) {
2603 ret = -ENOMEM;
2604 goto out;
2605 }
2606
2607 if (argp) {
2608 if (copy_from_user(range, argp,
2609 sizeof(*range))) {
2610 ret = -EFAULT;
2611 kfree(range);
Dan Carpenter683be162010-03-20 11:24:48 +00002612 goto out;
Chris Mason1e701a32010-03-11 09:42:04 -05002613 }
2614 /* compression requires us to start the IO */
2615 if ((range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)) {
2616 range->flags |= BTRFS_DEFRAG_RANGE_START_IO;
2617 range->extent_thresh = (u32)-1;
2618 }
2619 } else {
2620 /* the rest are all set to zero by kzalloc */
2621 range->len = (u64)-1;
2622 }
Al Viro496ad9a2013-01-23 17:07:38 -05002623 ret = btrfs_defrag_file(file_inode(file), file,
Chris Mason4cb53002011-05-24 15:35:30 -04002624 range, 0, 0);
2625 if (ret > 0)
2626 ret = 0;
Chris Mason1e701a32010-03-11 09:42:04 -05002627 kfree(range);
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04002628 break;
Yan, Zheng8929ecfa2010-05-16 10:49:58 -04002629 default:
2630 ret = -EINVAL;
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04002631 }
Chris Masone441d542009-01-05 16:57:23 -05002632out:
Ilya Dryomov25122d12013-01-20 15:57:57 +02002633 mnt_drop_write_file(file);
Chris Masone441d542009-01-05 16:57:23 -05002634 return ret;
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04002635}
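/*
 * Example (illustration only, not part of the kernel sources): a hedged
 * userspace sketch of the BTRFS_IOC_DEFRAG_RANGE call served by the handler
 * above.  A zeroed argument struct with len = (u64)-1 mirrors the defaults
 * the handler applies when no argument pointer is passed, and an
 * extent_thresh of 0 lets the kernel choose its default threshold.  The
 * file name is hypothetical.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/btrfs.h>

static int defrag_whole_file(const char *path)
{
	struct btrfs_ioctl_defrag_range_args range;
	int fd, ret;

	fd = open(path, O_RDWR);
	if (fd < 0)
		return -1;

	memset(&range, 0, sizeof(range));
	range.len = (__u64)-1;	/* defragment up to the end of the file */

	ret = ioctl(fd, BTRFS_IOC_DEFRAG_RANGE, &range);
	close(fd);
	return ret;
}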
2636
Christoph Hellwigb2950862008-12-02 09:54:17 -05002637static long btrfs_ioctl_add_dev(struct btrfs_root *root, void __user *arg)
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04002638{
2639 struct btrfs_ioctl_vol_args *vol_args;
2640 int ret;
2641
Chris Masone441d542009-01-05 16:57:23 -05002642 if (!capable(CAP_SYS_ADMIN))
2643 return -EPERM;
2644
Stefan Behrens5ac00ad2012-11-05 17:54:08 +01002645 if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running,
2646 1)) {
Anand Jaine57138b2013-08-21 11:44:48 +08002647 return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
Ilya Dryomovc9e9f972012-01-16 22:04:47 +02002648 }
2649
Stefan Behrens5ac00ad2012-11-05 17:54:08 +01002650 mutex_lock(&root->fs_info->volume_mutex);
Li Zefandae7b662009-04-08 15:06:54 +08002651 vol_args = memdup_user(arg, sizeof(*vol_args));
Ilya Dryomovc9e9f972012-01-16 22:04:47 +02002652 if (IS_ERR(vol_args)) {
2653 ret = PTR_ERR(vol_args);
2654 goto out;
2655 }
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04002656
Mark Fasheh5516e592008-07-24 12:20:14 -04002657 vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04002658 ret = btrfs_init_new_device(root, vol_args->name);
2659
Anand Jain43d20762014-07-01 00:58:56 +08002660 if (!ret)
		btrfs_info(root->fs_info, "disk added %s", vol_args->name);
2662
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04002663 kfree(vol_args);
Ilya Dryomovc9e9f972012-01-16 22:04:47 +02002664out:
2665 mutex_unlock(&root->fs_info->volume_mutex);
Stefan Behrens5ac00ad2012-11-05 17:54:08 +01002666 atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0);
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04002667 return ret;
2668}
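/*
 * Example (illustration only, not part of the kernel sources): adding a
 * device from userspace through BTRFS_IOC_ADD_DEV as handled above.  The
 * ioctl is issued on a descriptor within the mounted filesystem (usually the
 * mount point) and requires CAP_SYS_ADMIN; the paths are hypothetical.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/btrfs.h>

static int add_device(const char *mount_point, const char *device_path)
{
	struct btrfs_ioctl_vol_args args;
	int fd, ret;

	fd = open(mount_point, O_RDONLY | O_DIRECTORY);
	if (fd < 0)
		return -1;

	memset(&args, 0, sizeof(args));
	strncpy(args.name, device_path, BTRFS_PATH_NAME_MAX);

	ret = ioctl(fd, BTRFS_IOC_ADD_DEV, &args);
	close(fd);
	return ret;
}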
2669
Anand Jain6b526ed2016-02-13 10:01:39 +08002670static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
2671{
2672 struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
2673 struct btrfs_ioctl_vol_args_v2 *vol_args;
2674 int ret;
2675
2676 if (!capable(CAP_SYS_ADMIN))
2677 return -EPERM;
2678
2679 ret = mnt_want_write_file(file);
2680 if (ret)
2681 return ret;
2682
2683 vol_args = memdup_user(arg, sizeof(*vol_args));
2684 if (IS_ERR(vol_args)) {
2685 ret = PTR_ERR(vol_args);
2686 goto err_drop;
2687 }
2688
	/* Check for compatibility: reject unknown flags */
	if (vol_args->flags & ~BTRFS_VOL_ARG_V2_FLAGS) {
		ret = -ENOTTY;
		goto out;
	}
2692
2693 if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running,
2694 1)) {
2695 ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
2696 goto out;
2697 }
2698
2699 mutex_lock(&root->fs_info->volume_mutex);
2700 if (vol_args->flags & BTRFS_DEVICE_BY_ID) {
2701 ret = btrfs_rm_device(root, NULL, vol_args->devid);
2702 } else {
2703 vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0';
2704 ret = btrfs_rm_device(root, vol_args->name, 0);
2705 }
2706 mutex_unlock(&root->fs_info->volume_mutex);
2707 atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0);
2708
2709 if (!ret) {
2710 if (vol_args->flags & BTRFS_DEVICE_BY_ID)
2711 btrfs_info(root->fs_info, "device deleted: id %llu",
2712 vol_args->devid);
2713 else
2714 btrfs_info(root->fs_info, "device deleted: %s",
2715 vol_args->name);
2716 }
2717out:
2718 kfree(vol_args);
2719err_drop:
2720 mnt_drop_write_file(file);
2721 return ret;
2722}
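/*
 * Example (illustration only, not part of the kernel sources): deleting a
 * device by numeric id through BTRFS_IOC_RM_DEV_V2 as handled above.  The
 * flag and structure member names (BTRFS_DEVICE_BY_ID, the devid member of
 * struct btrfs_ioctl_vol_args_v2) are assumed to be exported by
 * <linux/btrfs.h> exactly as referenced by the handler; the mount point is
 * hypothetical.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/btrfs.h>

static int delete_device_by_id(const char *mount_point, __u64 devid)
{
	struct btrfs_ioctl_vol_args_v2 args;
	int fd, ret;

	fd = open(mount_point, O_RDONLY | O_DIRECTORY);
	if (fd < 0)
		return -1;

	memset(&args, 0, sizeof(args));
	args.flags = BTRFS_DEVICE_BY_ID;
	args.devid = devid;

	ret = ioctl(fd, BTRFS_IOC_RM_DEV_V2, &args);
	close(fd);
	return ret;
}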
2723
Miao Xieda249272012-11-26 08:44:50 +00002724static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04002725{
Al Viro496ad9a2013-01-23 17:07:38 -05002726 struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04002727 struct btrfs_ioctl_vol_args *vol_args;
2728 int ret;
2729
Chris Masone441d542009-01-05 16:57:23 -05002730 if (!capable(CAP_SYS_ADMIN))
2731 return -EPERM;
2732
Miao Xieda249272012-11-26 08:44:50 +00002733 ret = mnt_want_write_file(file);
2734 if (ret)
2735 return ret;
Yan Zhengc146afa2008-11-12 14:34:12 -05002736
Li Zefandae7b662009-04-08 15:06:54 +08002737 vol_args = memdup_user(arg, sizeof(*vol_args));
Ilya Dryomovc9e9f972012-01-16 22:04:47 +02002738 if (IS_ERR(vol_args)) {
2739 ret = PTR_ERR(vol_args);
Dan Carpenterc47ca322014-09-04 14:09:15 +03002740 goto err_drop;
Ilya Dryomovc9e9f972012-01-16 22:04:47 +02002741 }
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04002742
Mark Fasheh5516e592008-07-24 12:20:14 -04002743 vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04002744
Anand Jain183860f2013-05-17 10:52:45 +00002745 if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running,
2746 1)) {
2747 ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
2748 goto out;
2749 }
2750
2751 mutex_lock(&root->fs_info->volume_mutex);
Anand Jain6b526ed2016-02-13 10:01:39 +08002752 ret = btrfs_rm_device(root, vol_args->name, 0);
Ilya Dryomovc9e9f972012-01-16 22:04:47 +02002753 mutex_unlock(&root->fs_info->volume_mutex);
Stefan Behrens5ac00ad2012-11-05 17:54:08 +01002754 atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0);
Anand Jain183860f2013-05-17 10:52:45 +00002755
Anand Jainec95d492014-07-01 00:58:57 +08002756 if (!ret)
		btrfs_info(root->fs_info, "disk deleted %s", vol_args->name);
2758
Anand Jain183860f2013-05-17 10:52:45 +00002759out:
2760 kfree(vol_args);
Dan Carpenterc47ca322014-09-04 14:09:15 +03002761err_drop:
Ilya Dryomov4ac20c72013-01-20 15:57:57 +02002762 mnt_drop_write_file(file);
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04002763 return ret;
2764}
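/*
 * Example (illustration only, not part of the kernel sources): the older
 * BTRFS_IOC_RM_DEV path handled above takes a device path in vol_args->name;
 * the special string "missing" asks btrfs_rm_device() to drop a device that
 * is no longer present.  Same pattern as the add-device sketch earlier, with
 * a hypothetical mount point.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/btrfs.h>

static int delete_device_by_path(const char *mount_point, const char *device)
{
	struct btrfs_ioctl_vol_args args;
	int fd, ret;

	fd = open(mount_point, O_RDONLY | O_DIRECTORY);
	if (fd < 0)
		return -1;

	memset(&args, 0, sizeof(args));
	strncpy(args.name, device, BTRFS_PATH_NAME_MAX);	/* or "missing" */

	ret = ioctl(fd, BTRFS_IOC_RM_DEV, &args);
	close(fd);
	return ret;
}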
2765
Jan Schmidt475f6382011-03-11 15:41:01 +01002766static long btrfs_ioctl_fs_info(struct btrfs_root *root, void __user *arg)
2767{
Li Zefan027ed2f2011-06-08 08:27:56 +00002768 struct btrfs_ioctl_fs_info_args *fi_args;
Jan Schmidt475f6382011-03-11 15:41:01 +01002769 struct btrfs_device *device;
Jan Schmidt475f6382011-03-11 15:41:01 +01002770 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
Li Zefan027ed2f2011-06-08 08:27:56 +00002771 int ret = 0;
Jan Schmidt475f6382011-03-11 15:41:01 +01002772
Li Zefan027ed2f2011-06-08 08:27:56 +00002773 fi_args = kzalloc(sizeof(*fi_args), GFP_KERNEL);
2774 if (!fi_args)
2775 return -ENOMEM;
2776
Filipe David Borba Mananaf7171752013-08-12 20:56:58 +01002777 mutex_lock(&fs_devices->device_list_mutex);
Li Zefan027ed2f2011-06-08 08:27:56 +00002778 fi_args->num_devices = fs_devices->num_devices;
2779 memcpy(&fi_args->fsid, root->fs_info->fsid, sizeof(fi_args->fsid));
Jan Schmidt475f6382011-03-11 15:41:01 +01002780
Byongho Leed7641a42015-09-01 23:10:57 +09002781 list_for_each_entry(device, &fs_devices->devices, dev_list) {
Li Zefan027ed2f2011-06-08 08:27:56 +00002782 if (device->devid > fi_args->max_id)
2783 fi_args->max_id = device->devid;
Jan Schmidt475f6382011-03-11 15:41:01 +01002784 }
2785 mutex_unlock(&fs_devices->device_list_mutex);
2786
David Sterba80a773f2014-05-07 18:17:06 +02002787 fi_args->nodesize = root->fs_info->super_copy->nodesize;
2788 fi_args->sectorsize = root->fs_info->super_copy->sectorsize;
2789 fi_args->clone_alignment = root->fs_info->super_copy->sectorsize;
2790
Li Zefan027ed2f2011-06-08 08:27:56 +00002791 if (copy_to_user(arg, fi_args, sizeof(*fi_args)))
2792 ret = -EFAULT;
Jan Schmidt475f6382011-03-11 15:41:01 +01002793
Li Zefan027ed2f2011-06-08 08:27:56 +00002794 kfree(fi_args);
2795 return ret;
Jan Schmidt475f6382011-03-11 15:41:01 +01002796}
2797
2798static long btrfs_ioctl_dev_info(struct btrfs_root *root, void __user *arg)
2799{
2800 struct btrfs_ioctl_dev_info_args *di_args;
2801 struct btrfs_device *dev;
2802 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
2803 int ret = 0;
2804 char *s_uuid = NULL;
Jan Schmidt475f6382011-03-11 15:41:01 +01002805
Jan Schmidt475f6382011-03-11 15:41:01 +01002806 di_args = memdup_user(arg, sizeof(*di_args));
2807 if (IS_ERR(di_args))
2808 return PTR_ERR(di_args);
2809
Stefan Behrensdd5f9612013-08-15 17:11:20 +02002810 if (!btrfs_is_empty_uuid(di_args->uuid))
Jan Schmidt475f6382011-03-11 15:41:01 +01002811 s_uuid = di_args->uuid;
2812
2813 mutex_lock(&fs_devices->device_list_mutex);
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01002814 dev = btrfs_find_device(root->fs_info, di_args->devid, s_uuid, NULL);
Jan Schmidt475f6382011-03-11 15:41:01 +01002815
2816 if (!dev) {
2817 ret = -ENODEV;
2818 goto out;
2819 }
2820
2821 di_args->devid = dev->devid;
Miao Xie7cc8e582014-09-03 21:35:38 +08002822 di_args->bytes_used = btrfs_device_get_bytes_used(dev);
2823 di_args->total_bytes = btrfs_device_get_total_bytes(dev);
Jan Schmidt475f6382011-03-11 15:41:01 +01002824 memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid));
Jim Meyeringa27202f2012-04-26 18:36:56 +02002825 if (dev->name) {
Josef Bacik606686e2012-06-04 14:03:51 -04002826 struct rcu_string *name;
2827
2828 rcu_read_lock();
2829 name = rcu_dereference(dev->name);
2830 strncpy(di_args->path, name->str, sizeof(di_args->path));
2831 rcu_read_unlock();
Jim Meyeringa27202f2012-04-26 18:36:56 +02002832 di_args->path[sizeof(di_args->path) - 1] = 0;
2833 } else {
Stefan Behrens99ba55a2012-03-19 16:17:22 +01002834 di_args->path[0] = '\0';
Jim Meyeringa27202f2012-04-26 18:36:56 +02002835 }
Jan Schmidt475f6382011-03-11 15:41:01 +01002836
2837out:
David Sterba55793c02013-04-26 15:20:23 +00002838 mutex_unlock(&fs_devices->device_list_mutex);
Jan Schmidt475f6382011-03-11 15:41:01 +01002839 if (ret == 0 && copy_to_user(arg, di_args, sizeof(*di_args)))
2840 ret = -EFAULT;
2841
2842 kfree(di_args);
2843 return ret;
2844}
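/*
 * Example (illustration only, not part of the kernel sources): how userspace
 * typically combines the two ioctls above, first asking BTRFS_IOC_FS_INFO
 * for the number of devices and the highest device id, then probing each id
 * with BTRFS_IOC_DEV_INFO and skipping unused ids (ENODEV).  The output
 * format and the mount point descriptor are hypothetical.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/btrfs.h>

static int print_devices(int mnt_fd)
{
	struct btrfs_ioctl_fs_info_args fi;
	struct btrfs_ioctl_dev_info_args di;
	__u64 id;

	memset(&fi, 0, sizeof(fi));
	if (ioctl(mnt_fd, BTRFS_IOC_FS_INFO, &fi))
		return -1;

	printf("%llu device(s), nodesize %u, sectorsize %u\n",
	       (unsigned long long)fi.num_devices, fi.nodesize, fi.sectorsize);

	for (id = 0; id <= fi.max_id; id++) {
		memset(&di, 0, sizeof(di));
		di.devid = id;	/* uuid left zeroed: match by id only */
		if (ioctl(mnt_fd, BTRFS_IOC_DEV_INFO, &di)) {
			if (errno == ENODEV)
				continue;	/* unused device id */
			return -1;
		}
		printf("  devid %llu: %s, %llu/%llu bytes used\n",
		       (unsigned long long)di.devid, (const char *)di.path,
		       (unsigned long long)di.bytes_used,
		       (unsigned long long)di.total_bytes);
	}
	return 0;
}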
2845
Mark Fashehf4414602015-06-30 14:42:05 -07002846static struct page *extent_same_get_page(struct inode *inode, pgoff_t index)
Mark Fasheh416161d2013-08-06 11:42:51 -07002847{
2848 struct page *page;
Mark Fasheh416161d2013-08-06 11:42:51 -07002849
Mark Fasheh416161d2013-08-06 11:42:51 -07002850 page = grab_cache_page(inode->i_mapping, index);
2851 if (!page)
Filipe Manana31314002016-01-27 18:37:47 +00002852 return ERR_PTR(-ENOMEM);
Mark Fasheh416161d2013-08-06 11:42:51 -07002853
2854 if (!PageUptodate(page)) {
Filipe Manana31314002016-01-27 18:37:47 +00002855 int ret;
2856
2857 ret = btrfs_readpage(NULL, page);
		if (ret) {
			put_page(page);
			return ERR_PTR(ret);
		}
Mark Fasheh416161d2013-08-06 11:42:51 -07002860 lock_page(page);
2861 if (!PageUptodate(page)) {
2862 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002863 put_page(page);
Filipe Manana31314002016-01-27 18:37:47 +00002864 return ERR_PTR(-EIO);
2865 }
2866 if (page->mapping != inode->i_mapping) {
2867 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002868 put_page(page);
Filipe Manana31314002016-01-27 18:37:47 +00002869 return ERR_PTR(-EAGAIN);
Mark Fasheh416161d2013-08-06 11:42:51 -07002870 }
2871 }
Mark Fasheh416161d2013-08-06 11:42:51 -07002872
2873 return page;
2874}
2875
Mark Fashehf4414602015-06-30 14:42:05 -07002876static int gather_extent_pages(struct inode *inode, struct page **pages,
2877 int num_pages, u64 off)
2878{
2879 int i;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002880 pgoff_t index = off >> PAGE_SHIFT;
Mark Fashehf4414602015-06-30 14:42:05 -07002881
2882 for (i = 0; i < num_pages; i++) {
Filipe Manana31314002016-01-27 18:37:47 +00002883again:
Mark Fashehf4414602015-06-30 14:42:05 -07002884 pages[i] = extent_same_get_page(inode, index + i);
Filipe Manana31314002016-01-27 18:37:47 +00002885 if (IS_ERR(pages[i])) {
2886 int err = PTR_ERR(pages[i]);
2887
2888 if (err == -EAGAIN)
2889 goto again;
2890 pages[i] = NULL;
2891 return err;
2892 }
Mark Fashehf4414602015-06-30 14:42:05 -07002893 }
2894 return 0;
2895}
2896
Filipe Mananae0bd70c2016-01-27 10:20:58 +00002897static int lock_extent_range(struct inode *inode, u64 off, u64 len,
2898 bool retry_range_locking)
Mark Fasheh77fe20dc2013-08-06 11:42:48 -07002899{
Filipe Mananae0bd70c2016-01-27 10:20:58 +00002900 /*
2901 * Do any pending delalloc/csum calculations on inode, one way or
2902 * another, and lock file content.
2903 * The locking order is:
2904 *
2905 * 1) pages
2906 * 2) range in the inode's io tree
2907 */
Mark Fasheh77fe20dc2013-08-06 11:42:48 -07002908 while (1) {
2909 struct btrfs_ordered_extent *ordered;
2910 lock_extent(&BTRFS_I(inode)->io_tree, off, off + len - 1);
2911 ordered = btrfs_lookup_first_ordered_extent(inode,
2912 off + len - 1);
Filipe Mananaff5df9b2014-05-30 17:56:24 +01002913 if ((!ordered ||
2914 ordered->file_offset + ordered->len <= off ||
2915 ordered->file_offset >= off + len) &&
Mark Fasheh77fe20dc2013-08-06 11:42:48 -07002916 !test_range_bit(&BTRFS_I(inode)->io_tree, off,
Filipe Mananaff5df9b2014-05-30 17:56:24 +01002917 off + len - 1, EXTENT_DELALLOC, 0, NULL)) {
2918 if (ordered)
2919 btrfs_put_ordered_extent(ordered);
Mark Fasheh77fe20dc2013-08-06 11:42:48 -07002920 break;
Filipe Mananaff5df9b2014-05-30 17:56:24 +01002921 }
Mark Fasheh77fe20dc2013-08-06 11:42:48 -07002922 unlock_extent(&BTRFS_I(inode)->io_tree, off, off + len - 1);
2923 if (ordered)
2924 btrfs_put_ordered_extent(ordered);
Filipe Mananae0bd70c2016-01-27 10:20:58 +00002925 if (!retry_range_locking)
2926 return -EAGAIN;
Mark Fasheh77fe20dc2013-08-06 11:42:48 -07002927 btrfs_wait_ordered_range(inode, off, len);
2928 }
Filipe Mananae0bd70c2016-01-27 10:20:58 +00002929 return 0;
Mark Fasheh77fe20dc2013-08-06 11:42:48 -07002930}
2931
Mark Fashehf4414602015-06-30 14:42:05 -07002932static void btrfs_double_inode_unlock(struct inode *inode1, struct inode *inode2)
Mark Fasheh416161d2013-08-06 11:42:51 -07002933{
Al Viro59551022016-01-22 15:40:57 -05002934 inode_unlock(inode1);
2935 inode_unlock(inode2);
Mark Fasheh416161d2013-08-06 11:42:51 -07002936}
2937
Mark Fashehf4414602015-06-30 14:42:05 -07002938static void btrfs_double_inode_lock(struct inode *inode1, struct inode *inode2)
2939{
2940 if (inode1 < inode2)
2941 swap(inode1, inode2);
2942
Al Viro59551022016-01-22 15:40:57 -05002943 inode_lock_nested(inode1, I_MUTEX_PARENT);
2944 inode_lock_nested(inode2, I_MUTEX_CHILD);
Mark Fashehf4414602015-06-30 14:42:05 -07002945}
2946
2947static void btrfs_double_extent_unlock(struct inode *inode1, u64 loff1,
2948 struct inode *inode2, u64 loff2, u64 len)
2949{
2950 unlock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1);
2951 unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);
2952}
2953
Filipe Mananae0bd70c2016-01-27 10:20:58 +00002954static int btrfs_double_extent_lock(struct inode *inode1, u64 loff1,
2955 struct inode *inode2, u64 loff2, u64 len,
2956 bool retry_range_locking)
Mark Fasheh416161d2013-08-06 11:42:51 -07002957{
Filipe Mananae0bd70c2016-01-27 10:20:58 +00002958 int ret;
2959
Mark Fasheh416161d2013-08-06 11:42:51 -07002960 if (inode1 < inode2) {
2961 swap(inode1, inode2);
2962 swap(loff1, loff2);
2963 }
Filipe Mananae0bd70c2016-01-27 10:20:58 +00002964 ret = lock_extent_range(inode1, loff1, len, retry_range_locking);
2965 if (ret)
2966 return ret;
2967 ret = lock_extent_range(inode2, loff2, len, retry_range_locking);
2968 if (ret)
2969 unlock_extent(&BTRFS_I(inode1)->io_tree, loff1,
2970 loff1 + len - 1);
2971 return ret;
Mark Fashehf4414602015-06-30 14:42:05 -07002972}
2973
2974struct cmp_pages {
2975 int num_pages;
2976 struct page **src_pages;
2977 struct page **dst_pages;
2978};
2979
2980static void btrfs_cmp_data_free(struct cmp_pages *cmp)
2981{
2982 int i;
2983 struct page *pg;
2984
2985 for (i = 0; i < cmp->num_pages; i++) {
2986 pg = cmp->src_pages[i];
Filipe Mananae0bd70c2016-01-27 10:20:58 +00002987 if (pg) {
2988 unlock_page(pg);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002989 put_page(pg);
Filipe Mananae0bd70c2016-01-27 10:20:58 +00002990 }
Mark Fashehf4414602015-06-30 14:42:05 -07002991 pg = cmp->dst_pages[i];
Filipe Mananae0bd70c2016-01-27 10:20:58 +00002992 if (pg) {
2993 unlock_page(pg);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002994 put_page(pg);
Filipe Mananae0bd70c2016-01-27 10:20:58 +00002995 }
Mark Fasheh416161d2013-08-06 11:42:51 -07002996 }
Mark Fashehf4414602015-06-30 14:42:05 -07002997 kfree(cmp->src_pages);
2998 kfree(cmp->dst_pages);
2999}
3000
3001static int btrfs_cmp_data_prepare(struct inode *src, u64 loff,
3002 struct inode *dst, u64 dst_loff,
3003 u64 len, struct cmp_pages *cmp)
3004{
3005 int ret;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003006 int num_pages = PAGE_ALIGN(len) >> PAGE_SHIFT;
Mark Fashehf4414602015-06-30 14:42:05 -07003007 struct page **src_pgarr, **dst_pgarr;
3008
3009 /*
3010 * We must gather up all the pages before we initiate our
3011 * extent locking. We use an array for the page pointers. Size
3012 * of the array is bounded by len, which is in turn bounded by
3013 * BTRFS_MAX_DEDUPE_LEN.
3014 */
David Sterba66722f72016-02-11 15:01:38 +01003015 src_pgarr = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
3016 dst_pgarr = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
Mark Fashehf4414602015-06-30 14:42:05 -07003017 if (!src_pgarr || !dst_pgarr) {
3018 kfree(src_pgarr);
3019 kfree(dst_pgarr);
3020 return -ENOMEM;
3021 }
3022 cmp->num_pages = num_pages;
3023 cmp->src_pages = src_pgarr;
3024 cmp->dst_pages = dst_pgarr;
3025
3026 ret = gather_extent_pages(src, cmp->src_pages, cmp->num_pages, loff);
3027 if (ret)
3028 goto out;
3029
3030 ret = gather_extent_pages(dst, cmp->dst_pages, cmp->num_pages, dst_loff);
3031
3032out:
3033 if (ret)
3034 btrfs_cmp_data_free(cmp);
	return ret;
Mark Fasheh416161d2013-08-06 11:42:51 -07003036}
3037
3038static int btrfs_cmp_data(struct inode *src, u64 loff, struct inode *dst,
Mark Fashehf4414602015-06-30 14:42:05 -07003039 u64 dst_loff, u64 len, struct cmp_pages *cmp)
Mark Fasheh416161d2013-08-06 11:42:51 -07003040{
3041 int ret = 0;
Mark Fashehf4414602015-06-30 14:42:05 -07003042 int i;
Mark Fasheh416161d2013-08-06 11:42:51 -07003043 struct page *src_page, *dst_page;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003044 unsigned int cmp_len = PAGE_SIZE;
Mark Fasheh416161d2013-08-06 11:42:51 -07003045 void *addr, *dst_addr;
3046
Mark Fashehf4414602015-06-30 14:42:05 -07003047 i = 0;
Mark Fasheh416161d2013-08-06 11:42:51 -07003048 while (len) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003049 if (len < PAGE_SIZE)
Mark Fasheh416161d2013-08-06 11:42:51 -07003050 cmp_len = len;
3051
Mark Fashehf4414602015-06-30 14:42:05 -07003052 BUG_ON(i >= cmp->num_pages);
3053
3054 src_page = cmp->src_pages[i];
3055 dst_page = cmp->dst_pages[i];
Filipe Mananae0bd70c2016-01-27 10:20:58 +00003056 ASSERT(PageLocked(src_page));
3057 ASSERT(PageLocked(dst_page));
Mark Fashehf4414602015-06-30 14:42:05 -07003058
Mark Fasheh416161d2013-08-06 11:42:51 -07003059 addr = kmap_atomic(src_page);
3060 dst_addr = kmap_atomic(dst_page);
3061
3062 flush_dcache_page(src_page);
3063 flush_dcache_page(dst_page);
3064
3065 if (memcmp(addr, dst_addr, cmp_len))
Darrick J. Wong2b3909f2015-12-19 00:56:05 -08003066 ret = -EBADE;
Mark Fasheh416161d2013-08-06 11:42:51 -07003067
3068 kunmap_atomic(addr);
3069 kunmap_atomic(dst_addr);
Mark Fasheh416161d2013-08-06 11:42:51 -07003070
3071 if (ret)
3072 break;
3073
Mark Fasheh416161d2013-08-06 11:42:51 -07003074 len -= cmp_len;
Mark Fashehf4414602015-06-30 14:42:05 -07003075 i++;
Mark Fasheh416161d2013-08-06 11:42:51 -07003076 }
3077
3078 return ret;
3079}
3080
Mark Fashehe1d227a2015-06-08 15:05:25 -07003081static int extent_same_check_offsets(struct inode *inode, u64 off, u64 *plen,
3082 u64 olen)
Mark Fasheh416161d2013-08-06 11:42:51 -07003083{
Mark Fashehe1d227a2015-06-08 15:05:25 -07003084 u64 len = *plen;
Mark Fasheh416161d2013-08-06 11:42:51 -07003085 u64 bs = BTRFS_I(inode)->root->fs_info->sb->s_blocksize;
3086
Mark Fashehe1d227a2015-06-08 15:05:25 -07003087 if (off + olen > inode->i_size || off + olen < off)
Mark Fasheh416161d2013-08-06 11:42:51 -07003088 return -EINVAL;
Mark Fashehe1d227a2015-06-08 15:05:25 -07003089
3090 /* if we extend to eof, continue to block boundary */
3091 if (off + len == inode->i_size)
3092 *plen = len = ALIGN(inode->i_size, bs) - off;
3093
Mark Fasheh416161d2013-08-06 11:42:51 -07003094 /* Check that we are block aligned - btrfs_clone() requires this */
3095 if (!IS_ALIGNED(off, bs) || !IS_ALIGNED(off + len, bs))
3096 return -EINVAL;
3097
3098 return 0;
3099}
3100
Mark Fashehe1d227a2015-06-08 15:05:25 -07003101static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
Mark Fasheh416161d2013-08-06 11:42:51 -07003102 struct inode *dst, u64 dst_loff)
3103{
3104 int ret;
Mark Fashehe1d227a2015-06-08 15:05:25 -07003105 u64 len = olen;
Mark Fashehf4414602015-06-30 14:42:05 -07003106 struct cmp_pages cmp;
Mark Fasheh0efa9f42015-06-30 14:42:07 -07003107 int same_inode = 0;
3108 u64 same_lock_start = 0;
3109 u64 same_lock_len = 0;
Mark Fasheh416161d2013-08-06 11:42:51 -07003110
Mark Fasheh416161d2013-08-06 11:42:51 -07003111 if (src == dst)
Mark Fasheh0efa9f42015-06-30 14:42:07 -07003112 same_inode = 1;
Mark Fasheh416161d2013-08-06 11:42:51 -07003113
Filipe Manana113e8282015-03-30 18:26:47 +01003114 if (len == 0)
3115 return 0;
3116
Mark Fasheh0efa9f42015-06-30 14:42:07 -07003117 if (same_inode) {
Al Viro59551022016-01-22 15:40:57 -05003118 inode_lock(src);
Mark Fasheh416161d2013-08-06 11:42:51 -07003119
Mark Fasheh0efa9f42015-06-30 14:42:07 -07003120 ret = extent_same_check_offsets(src, loff, &len, olen);
3121 if (ret)
3122 goto out_unlock;
Filipe Mananaf4dfe682016-02-12 14:44:00 +00003123 ret = extent_same_check_offsets(src, dst_loff, &len, olen);
3124 if (ret)
3125 goto out_unlock;
Mark Fasheh416161d2013-08-06 11:42:51 -07003126
Mark Fasheh0efa9f42015-06-30 14:42:07 -07003127 /*
3128 * Single inode case wants the same checks, except we
3129 * don't want our length pushed out past i_size as
3130 * comparing that data range makes no sense.
3131 *
3132 * extent_same_check_offsets() will do this for an
3133 * unaligned length at i_size, so catch it here and
3134 * reject the request.
3135 *
3136 * This effectively means we require aligned extents
3137 * for the single-inode case, whereas the other cases
3138 * allow an unaligned length so long as it ends at
3139 * i_size.
3140 */
3141 if (len != olen) {
3142 ret = -EINVAL;
3143 goto out_unlock;
3144 }
3145
3146 /* Check for overlapping ranges */
3147 if (dst_loff + len > loff && dst_loff < loff + len) {
3148 ret = -EINVAL;
3149 goto out_unlock;
3150 }
3151
3152 same_lock_start = min_t(u64, loff, dst_loff);
3153 same_lock_len = max_t(u64, loff, dst_loff) + len - same_lock_start;
3154 } else {
3155 btrfs_double_inode_lock(src, dst);
3156
3157 ret = extent_same_check_offsets(src, loff, &len, olen);
3158 if (ret)
3159 goto out_unlock;
3160
3161 ret = extent_same_check_offsets(dst, dst_loff, &len, olen);
3162 if (ret)
3163 goto out_unlock;
3164 }
Mark Fasheh416161d2013-08-06 11:42:51 -07003165
3166 /* don't make the dst file partly checksummed */
3167 if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) !=
3168 (BTRFS_I(dst)->flags & BTRFS_INODE_NODATASUM)) {
3169 ret = -EINVAL;
3170 goto out_unlock;
3171 }
3172
Filipe Mananae0bd70c2016-01-27 10:20:58 +00003173again:
Mark Fashehf4414602015-06-30 14:42:05 -07003174 ret = btrfs_cmp_data_prepare(src, loff, dst, dst_loff, olen, &cmp);
3175 if (ret)
3176 goto out_unlock;
3177
Mark Fasheh0efa9f42015-06-30 14:42:07 -07003178 if (same_inode)
Filipe Mananae0bd70c2016-01-27 10:20:58 +00003179 ret = lock_extent_range(src, same_lock_start, same_lock_len,
3180 false);
Mark Fasheh0efa9f42015-06-30 14:42:07 -07003181 else
Filipe Mananae0bd70c2016-01-27 10:20:58 +00003182 ret = btrfs_double_extent_lock(src, loff, dst, dst_loff, len,
3183 false);
3184 /*
3185 * If one of the inodes has dirty pages in the respective range or
	 * ordered extents, we need to flush delalloc and wait for all ordered
3187 * extents in the range. We must unlock the pages and the ranges in the
3188 * io trees to avoid deadlocks when flushing delalloc (requires locking
3189 * pages) and when waiting for ordered extents to complete (they require
3190 * range locking).
3191 */
3192 if (ret == -EAGAIN) {
3193 /*
3194 * Ranges in the io trees already unlocked. Now unlock all
3195 * pages before waiting for all IO to complete.
3196 */
3197 btrfs_cmp_data_free(&cmp);
3198 if (same_inode) {
3199 btrfs_wait_ordered_range(src, same_lock_start,
3200 same_lock_len);
3201 } else {
3202 btrfs_wait_ordered_range(src, loff, len);
3203 btrfs_wait_ordered_range(dst, dst_loff, len);
3204 }
3205 goto again;
3206 }
3207 ASSERT(ret == 0);
3208 if (WARN_ON(ret)) {
3209 /* ranges in the io trees already unlocked */
3210 btrfs_cmp_data_free(&cmp);
3211 return ret;
3212 }
Mark Fashehf4414602015-06-30 14:42:05 -07003213
Mark Fasheh207910d2015-06-30 14:42:04 -07003214 /* pass original length for comparison so we stay within i_size */
Mark Fashehf4414602015-06-30 14:42:05 -07003215 ret = btrfs_cmp_data(src, loff, dst, dst_loff, olen, &cmp);
Mark Fasheh416161d2013-08-06 11:42:51 -07003216 if (ret == 0)
Mark Fasheh1c919a52015-06-30 14:42:08 -07003217 ret = btrfs_clone(src, dst, loff, olen, len, dst_loff, 1);
Mark Fasheh416161d2013-08-06 11:42:51 -07003218
Mark Fasheh0efa9f42015-06-30 14:42:07 -07003219 if (same_inode)
3220 unlock_extent(&BTRFS_I(src)->io_tree, same_lock_start,
3221 same_lock_start + same_lock_len - 1);
3222 else
3223 btrfs_double_extent_unlock(src, loff, dst, dst_loff, len);
Mark Fashehf4414602015-06-30 14:42:05 -07003224
3225 btrfs_cmp_data_free(&cmp);
Mark Fasheh416161d2013-08-06 11:42:51 -07003226out_unlock:
Mark Fasheh0efa9f42015-06-30 14:42:07 -07003227 if (same_inode)
Al Viro59551022016-01-22 15:40:57 -05003228 inode_unlock(src);
Mark Fasheh0efa9f42015-06-30 14:42:07 -07003229 else
3230 btrfs_double_inode_unlock(src, dst);
Mark Fasheh416161d2013-08-06 11:42:51 -07003231
3232 return ret;
3233}
3234
Byongho Leeee221842015-12-15 01:42:10 +09003235#define BTRFS_MAX_DEDUPE_LEN SZ_16M
Mark Fasheh416161d2013-08-06 11:42:51 -07003236
Darrick J. Wong2b3909f2015-12-19 00:56:05 -08003237ssize_t btrfs_dedupe_file_range(struct file *src_file, u64 loff, u64 olen,
3238 struct file *dst_file, u64 dst_loff)
Mark Fasheh416161d2013-08-06 11:42:51 -07003239{
Darrick J. Wong2b3909f2015-12-19 00:56:05 -08003240 struct inode *src = file_inode(src_file);
3241 struct inode *dst = file_inode(dst_file);
Mark Fasheh416161d2013-08-06 11:42:51 -07003242 u64 bs = BTRFS_I(src)->root->fs_info->sb->s_blocksize;
Darrick J. Wong2b3909f2015-12-19 00:56:05 -08003243 ssize_t res;
Mark Fasheh416161d2013-08-06 11:42:51 -07003244
Darrick J. Wong2b3909f2015-12-19 00:56:05 -08003245 if (olen > BTRFS_MAX_DEDUPE_LEN)
3246 olen = BTRFS_MAX_DEDUPE_LEN;
Mark Fasheh416161d2013-08-06 11:42:51 -07003247
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003248 if (WARN_ON_ONCE(bs < PAGE_SIZE)) {
Mark Fasheh416161d2013-08-06 11:42:51 -07003249 /*
3250 * Btrfs does not support blocksize < page_size. As a
3251 * result, btrfs_cmp_data() won't correctly handle
3252 * this situation without an update.
3253 */
Darrick J. Wong2b3909f2015-12-19 00:56:05 -08003254 return -EINVAL;
Mark Fasheh416161d2013-08-06 11:42:51 -07003255 }
3256
Darrick J. Wong2b3909f2015-12-19 00:56:05 -08003257 res = btrfs_extent_same(src, loff, olen, dst, dst_loff);
3258 if (res)
3259 return res;
3260 return olen;
Mark Fasheh416161d2013-08-06 11:42:51 -07003261}
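/*
 * Example (illustration only, not part of the kernel sources): a hedged
 * sketch of reaching btrfs_dedupe_file_range() from userspace via the VFS
 * FIDEDUPERANGE ioctl (BTRFS_IOC_FILE_EXTENT_SAME is assumed to map to the
 * same request in the UAPI headers).  Offsets and length must be block
 * aligned, and as enforced above at most BTRFS_MAX_DEDUPE_LEN (16MiB) is
 * processed per call.  The struct and status names are assumed to come from
 * <linux/fs.h>.
 */
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

static int dedupe_one_range(int src_fd, __u64 src_off,
			    int dst_fd, __u64 dst_off, __u64 len)
{
	struct file_dedupe_range *range;
	int ret;

	range = calloc(1, sizeof(*range) + sizeof(range->info[0]));
	if (!range)
		return -1;

	range->src_offset = src_off;
	range->src_length = len;
	range->dest_count = 1;
	range->info[0].dest_fd = dst_fd;
	range->info[0].dest_offset = dst_off;

	ret = ioctl(src_fd, FIDEDUPERANGE, range);
	if (!ret && range->info[0].status == FILE_DEDUPE_RANGE_DIFFERS)
		ret = 1;	/* contents differed, nothing was deduped */

	free(range);
	return ret;
}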
3262
Filipe Mananaf82a9902014-06-01 01:50:28 +01003263static int clone_finish_inode_update(struct btrfs_trans_handle *trans,
3264 struct inode *inode,
3265 u64 endoff,
3266 const u64 destoff,
Mark Fasheh1c919a52015-06-30 14:42:08 -07003267 const u64 olen,
3268 int no_time_update)
Filipe Mananaf82a9902014-06-01 01:50:28 +01003269{
3270 struct btrfs_root *root = BTRFS_I(inode)->root;
3271 int ret;
3272
3273 inode_inc_iversion(inode);
Mark Fasheh1c919a52015-06-30 14:42:08 -07003274 if (!no_time_update)
Deepa Dinamani04b285f2016-02-06 23:57:21 -08003275 inode->i_mtime = inode->i_ctime = current_fs_time(inode->i_sb);
Filipe Mananaf82a9902014-06-01 01:50:28 +01003276 /*
3277 * We round up to the block size at eof when determining which
3278 * extents to clone above, but shouldn't round up the file size.
3279 */
3280 if (endoff > destoff + olen)
3281 endoff = destoff + olen;
3282 if (endoff > inode->i_size)
3283 btrfs_i_size_write(inode, endoff);
3284
3285 ret = btrfs_update_inode(trans, root, inode);
3286 if (ret) {
3287 btrfs_abort_transaction(trans, root, ret);
3288 btrfs_end_transaction(trans, root);
3289 goto out;
3290 }
3291 ret = btrfs_end_transaction(trans, root);
3292out:
3293 return ret;
3294}
3295
Filipe Manana7ffbb592014-06-09 03:48:05 +01003296static void clone_update_extent_map(struct inode *inode,
3297 const struct btrfs_trans_handle *trans,
3298 const struct btrfs_path *path,
Filipe Manana7ffbb592014-06-09 03:48:05 +01003299 const u64 hole_offset,
3300 const u64 hole_len)
3301{
3302 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
3303 struct extent_map *em;
3304 int ret;
3305
3306 em = alloc_extent_map();
3307 if (!em) {
3308 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
3309 &BTRFS_I(inode)->runtime_flags);
3310 return;
3311 }
3312
Filipe Manana14f59792014-06-29 21:45:40 +01003313 if (path) {
3314 struct btrfs_file_extent_item *fi;
3315
3316 fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
3317 struct btrfs_file_extent_item);
Filipe Manana7ffbb592014-06-09 03:48:05 +01003318 btrfs_extent_item_to_extent_map(inode, path, fi, false, em);
3319 em->generation = -1;
3320 if (btrfs_file_extent_type(path->nodes[0], fi) ==
3321 BTRFS_FILE_EXTENT_INLINE)
3322 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
3323 &BTRFS_I(inode)->runtime_flags);
3324 } else {
3325 em->start = hole_offset;
3326 em->len = hole_len;
3327 em->ram_bytes = em->len;
3328 em->orig_start = hole_offset;
3329 em->block_start = EXTENT_MAP_HOLE;
3330 em->block_len = 0;
3331 em->orig_block_len = 0;
3332 em->compress_type = BTRFS_COMPRESS_NONE;
3333 em->generation = trans->transid;
3334 }
3335
3336 while (1) {
3337 write_lock(&em_tree->lock);
3338 ret = add_extent_mapping(em_tree, em, 1);
3339 write_unlock(&em_tree->lock);
3340 if (ret != -EEXIST) {
3341 free_extent_map(em);
3342 break;
3343 }
3344 btrfs_drop_extent_cache(inode, em->start,
3345 em->start + em->len - 1, 0);
3346 }
3347
David Sterbaee39b432014-09-30 01:33:33 +02003348 if (ret)
Filipe Manana7ffbb592014-06-09 03:48:05 +01003349 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
3350 &BTRFS_I(inode)->runtime_flags);
3351}
3352
Filipe Manana8039d872015-10-13 15:15:00 +01003353/*
3354 * Make sure we do not end up inserting an inline extent into a file that has
3355 * already other (non-inline) extents. If a file has an inline extent it can
3356 * not have any other extents and the (single) inline extent must start at the
3357 * file offset 0. Failing to respect these rules will lead to file corruption,
3358 * resulting in EIO errors on read/write operations, hitting BUG_ON's in mm, etc
3359 *
3360 * We can have extents that have been already written to disk or we can have
3361 * dirty ranges still in delalloc, in which case the extent maps and items are
3362 * created only when we run delalloc, and the delalloc ranges might fall outside
3363 * the range we are currently locking in the inode's io tree. So we check the
3364 * inode's i_size because of that (i_size updates are done while holding the
3365 * i_mutex, which we are holding here).
3366 * We also check to see if the inode has a size not greater than "datal" but has
3367 * extents beyond it, due to an fallocate with FALLOC_FL_KEEP_SIZE (and we are
3368 * protected against such concurrent fallocate calls by the i_mutex).
3369 *
3370 * If the file has no extents but a size greater than datal, do not allow the
 * copy because we would need to turn the inline extent into a non-inline one (even
3372 * with NO_HOLES enabled). If we find our destination inode only has one inline
3373 * extent, just overwrite it with the source inline extent if its size is less
3374 * than the source extent's size, or we could copy the source inline extent's
 * data into the destination inode's inline extent if the latter is greater than
3376 * the former.
3377 */
3378static int clone_copy_inline_extent(struct inode *src,
3379 struct inode *dst,
3380 struct btrfs_trans_handle *trans,
3381 struct btrfs_path *path,
3382 struct btrfs_key *new_key,
3383 const u64 drop_start,
3384 const u64 datal,
3385 const u64 skip,
3386 const u64 size,
3387 char *inline_data)
3388{
3389 struct btrfs_root *root = BTRFS_I(dst)->root;
3390 const u64 aligned_end = ALIGN(new_key->offset + datal,
3391 root->sectorsize);
3392 int ret;
3393 struct btrfs_key key;
3394
3395 if (new_key->offset > 0)
3396 return -EOPNOTSUPP;
3397
3398 key.objectid = btrfs_ino(dst);
3399 key.type = BTRFS_EXTENT_DATA_KEY;
3400 key.offset = 0;
3401 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3402 if (ret < 0) {
3403 return ret;
3404 } else if (ret > 0) {
3405 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
3406 ret = btrfs_next_leaf(root, path);
3407 if (ret < 0)
3408 return ret;
3409 else if (ret > 0)
3410 goto copy_inline_extent;
3411 }
3412 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
3413 if (key.objectid == btrfs_ino(dst) &&
3414 key.type == BTRFS_EXTENT_DATA_KEY) {
3415 ASSERT(key.offset > 0);
3416 return -EOPNOTSUPP;
3417 }
3418 } else if (i_size_read(dst) <= datal) {
3419 struct btrfs_file_extent_item *ei;
3420 u64 ext_len;
3421
3422 /*
3423 * If the file size is <= datal, make sure there are no other
		 * extents following (can happen due to a fallocate call with
3425 * the flag FALLOC_FL_KEEP_SIZE).
3426 */
3427 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
3428 struct btrfs_file_extent_item);
3429 /*
3430 * If it's an inline extent, it can not have other extents
3431 * following it.
3432 */
3433 if (btrfs_file_extent_type(path->nodes[0], ei) ==
3434 BTRFS_FILE_EXTENT_INLINE)
3435 goto copy_inline_extent;
3436
3437 ext_len = btrfs_file_extent_num_bytes(path->nodes[0], ei);
3438 if (ext_len > aligned_end)
3439 return -EOPNOTSUPP;
3440
3441 ret = btrfs_next_item(root, path);
3442 if (ret < 0) {
3443 return ret;
3444 } else if (ret == 0) {
3445 btrfs_item_key_to_cpu(path->nodes[0], &key,
3446 path->slots[0]);
3447 if (key.objectid == btrfs_ino(dst) &&
3448 key.type == BTRFS_EXTENT_DATA_KEY)
3449 return -EOPNOTSUPP;
3450 }
3451 }
3452
3453copy_inline_extent:
3454 /*
3455 * We have no extent items, or we have an extent at offset 0 which may
3456 * or may not be inlined. All these cases are dealt the same way.
3457 */
3458 if (i_size_read(dst) > datal) {
3459 /*
3460 * If the destination inode has an inline extent...
3461 * This would require copying the data from the source inline
3462 * extent into the beginning of the destination's inline extent.
3463 * But this is really complex, both extents can be compressed
3464 * or just one of them, which would require decompressing and
3465 * re-compressing data (which could increase the new compressed
3466 * size, not allowing the compressed data to fit anymore in an
3467 * inline extent).
3468 * So just don't support this case for now (it should be rare,
3469 * we are not really saving space when cloning inline extents).
3470 */
3471 return -EOPNOTSUPP;
3472 }
3473
3474 btrfs_release_path(path);
3475 ret = btrfs_drop_extents(trans, root, dst, drop_start, aligned_end, 1);
3476 if (ret)
3477 return ret;
3478 ret = btrfs_insert_empty_item(trans, root, path, new_key, size);
3479 if (ret)
3480 return ret;
3481
3482 if (skip) {
3483 const u32 start = btrfs_file_extent_calc_inline_size(0);
3484
3485 memmove(inline_data + start, inline_data + start + skip, datal);
3486 }
3487
3488 write_extent_buffer(path->nodes[0], inline_data,
3489 btrfs_item_ptr_offset(path->nodes[0],
3490 path->slots[0]),
3491 size);
3492 inode_add_bytes(dst, datal);
3493
3494 return 0;
3495}
3496
Mark Fasheh32b7c682013-08-06 11:42:49 -07003497/**
 * btrfs_clone() - clone a range from one inode's file data to another
3499 *
3500 * @src: Inode to clone from
3501 * @inode: Inode to clone to
3502 * @off: Offset within source to start clone from
3503 * @olen: Original length, passed by user, of range to clone
Mark Fasheh1c919a52015-06-30 14:42:08 -07003504 * @olen_aligned: Block-aligned value of olen
Mark Fasheh32b7c682013-08-06 11:42:49 -07003505 * @destoff: Offset within @inode to start clone
 * @no_time_update: If set, do not update mtime/ctime on the target inode
Mark Fasheh32b7c682013-08-06 11:42:49 -07003507 */
3508static int btrfs_clone(struct inode *src, struct inode *inode,
Filipe Mananaf82a9902014-06-01 01:50:28 +01003509 const u64 off, const u64 olen, const u64 olen_aligned,
Mark Fasheh1c919a52015-06-30 14:42:08 -07003510 const u64 destoff, int no_time_update)
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04003511{
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04003512 struct btrfs_root *root = BTRFS_I(inode)->root;
Mark Fasheh32b7c682013-08-06 11:42:49 -07003513 struct btrfs_path *path = NULL;
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04003514 struct extent_buffer *leaf;
Mark Fasheh32b7c682013-08-06 11:42:49 -07003515 struct btrfs_trans_handle *trans;
3516 char *buf = NULL;
Yan Zhengae01a0a2008-08-04 23:23:47 -04003517 struct btrfs_key key;
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04003518 u32 nritems;
3519 int slot;
Yan Zhengae01a0a2008-08-04 23:23:47 -04003520 int ret;
Filipe Mananaf82a9902014-06-01 01:50:28 +01003521 const u64 len = olen_aligned;
Filipe Mananaf82a9902014-06-01 01:50:28 +01003522 u64 last_dest_end = destoff;
Yan Zhengae01a0a2008-08-04 23:23:47 -04003523
3524 ret = -ENOMEM;
David Sterba707e8a02014-06-04 19:22:26 +02003525 buf = vmalloc(root->nodesize);
Yan Zhengae01a0a2008-08-04 23:23:47 -04003526 if (!buf)
Mark Fasheh32b7c682013-08-06 11:42:49 -07003527 return ret;
Yan Zhengae01a0a2008-08-04 23:23:47 -04003528
3529 path = btrfs_alloc_path();
3530 if (!path) {
3531 vfree(buf);
Mark Fasheh32b7c682013-08-06 11:42:49 -07003532 return ret;
Yan Zhengae01a0a2008-08-04 23:23:47 -04003533 }
Mark Fasheh32b7c682013-08-06 11:42:49 -07003534
David Sterbae4058b52015-11-27 16:31:35 +01003535 path->reada = READA_FORWARD;
Sage Weilc5c9cd42008-11-12 14:32:25 -05003536 /* clone data */
Li Zefan33345d012011-04-20 10:31:50 +08003537 key.objectid = btrfs_ino(src);
Yan Zhengae01a0a2008-08-04 23:23:47 -04003538 key.type = BTRFS_EXTENT_DATA_KEY;
Filipe Manana2c463822014-05-31 02:31:05 +01003539 key.offset = off;
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04003540
3541 while (1) {
Chris Masonde249e62015-04-11 05:09:06 -07003542 u64 next_key_min_offset = key.offset + 1;
Filipe Mananadf858e72015-03-31 14:56:46 +01003543
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04003544 /*
3545 * note the key will change type as we walk through the
3546 * tree.
3547 */
Filipe David Borba Mananae4355f32014-01-13 19:35:01 +00003548 path->leave_spinning = 1;
David Sterba362a20c2011-08-01 18:11:57 +02003549 ret = btrfs_search_slot(NULL, BTRFS_I(src)->root, &key, path,
3550 0, 0);
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04003551 if (ret < 0)
3552 goto out;
Filipe Manana2c463822014-05-31 02:31:05 +01003553 /*
3554 * First search, if no extent item that starts at offset off was
3555 * found but the previous item is an extent item, it's possible
3556 * it might overlap our target range, therefore process it.
3557 */
3558 if (key.offset == off && ret > 0 && path->slots[0] > 0) {
3559 btrfs_item_key_to_cpu(path->nodes[0], &key,
3560 path->slots[0] - 1);
3561 if (key.type == BTRFS_EXTENT_DATA_KEY)
3562 path->slots[0]--;
3563 }
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04003564
Yan Zhengae01a0a2008-08-04 23:23:47 -04003565 nritems = btrfs_header_nritems(path->nodes[0]);
Filipe David Borba Mananae4355f32014-01-13 19:35:01 +00003566process_slot:
Yan Zhengae01a0a2008-08-04 23:23:47 -04003567 if (path->slots[0] >= nritems) {
David Sterba362a20c2011-08-01 18:11:57 +02003568 ret = btrfs_next_leaf(BTRFS_I(src)->root, path);
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04003569 if (ret < 0)
3570 goto out;
3571 if (ret > 0)
3572 break;
Yan Zhengae01a0a2008-08-04 23:23:47 -04003573 nritems = btrfs_header_nritems(path->nodes[0]);
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04003574 }
3575 leaf = path->nodes[0];
3576 slot = path->slots[0];
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04003577
Yan Zhengae01a0a2008-08-04 23:23:47 -04003578 btrfs_item_key_to_cpu(leaf, &key, slot);
David Sterba962a2982014-06-04 18:41:45 +02003579 if (key.type > BTRFS_EXTENT_DATA_KEY ||
Li Zefan33345d012011-04-20 10:31:50 +08003580 key.objectid != btrfs_ino(src))
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04003581 break;
3582
David Sterba962a2982014-06-04 18:41:45 +02003583 if (key.type == BTRFS_EXTENT_DATA_KEY) {
Sage Weilc5c9cd42008-11-12 14:32:25 -05003584 struct btrfs_file_extent_item *extent;
3585 int type;
Zheng Yan31840ae2008-09-23 13:14:14 -04003586 u32 size;
3587 struct btrfs_key new_key;
Sage Weilc5c9cd42008-11-12 14:32:25 -05003588 u64 disko = 0, diskl = 0;
3589 u64 datao = 0, datal = 0;
3590 u8 comp;
Filipe Mananaf82a9902014-06-01 01:50:28 +01003591 u64 drop_start;
Zheng Yan31840ae2008-09-23 13:14:14 -04003592
Sage Weilc5c9cd42008-11-12 14:32:25 -05003593 extent = btrfs_item_ptr(leaf, slot,
3594 struct btrfs_file_extent_item);
3595 comp = btrfs_file_extent_compression(leaf, extent);
3596 type = btrfs_file_extent_type(leaf, extent);
Chris Masonc8a894d2009-06-27 21:07:03 -04003597 if (type == BTRFS_FILE_EXTENT_REG ||
3598 type == BTRFS_FILE_EXTENT_PREALLOC) {
Chris Masond3977122009-01-05 21:25:51 -05003599 disko = btrfs_file_extent_disk_bytenr(leaf,
3600 extent);
3601 diskl = btrfs_file_extent_disk_num_bytes(leaf,
3602 extent);
Sage Weilc5c9cd42008-11-12 14:32:25 -05003603 datao = btrfs_file_extent_offset(leaf, extent);
Chris Masond3977122009-01-05 21:25:51 -05003604 datal = btrfs_file_extent_num_bytes(leaf,
3605 extent);
Sage Weilc5c9cd42008-11-12 14:32:25 -05003606 } else if (type == BTRFS_FILE_EXTENT_INLINE) {
3607 /* take upper bound, may be compressed */
3608 datal = btrfs_file_extent_ram_bytes(leaf,
3609 extent);
3610 }
Zheng Yan31840ae2008-09-23 13:14:14 -04003611
Filipe Manana2c463822014-05-31 02:31:05 +01003612 /*
3613 * The first search might have left us at an extent
			 * item that ends before our target range's start, which can
			 * happen if we have holes and the NO_HOLES feature enabled.
3616 */
3617 if (key.offset + datal <= off) {
Filipe David Borba Mananae4355f32014-01-13 19:35:01 +00003618 path->slots[0]++;
3619 goto process_slot;
Filipe Manana2c463822014-05-31 02:31:05 +01003620 } else if (key.offset >= off + len) {
3621 break;
Filipe David Borba Mananae4355f32014-01-13 19:35:01 +00003622 }
Filipe Mananadf858e72015-03-31 14:56:46 +01003623 next_key_min_offset = key.offset + datal;
Filipe David Borba Mananae4355f32014-01-13 19:35:01 +00003624 size = btrfs_item_size_nr(leaf, slot);
3625 read_extent_buffer(leaf, buf,
3626 btrfs_item_ptr_offset(leaf, slot),
3627 size);
3628
3629 btrfs_release_path(path);
3630 path->leave_spinning = 0;
Sage Weilc5c9cd42008-11-12 14:32:25 -05003631
Zheng Yan31840ae2008-09-23 13:14:14 -04003632 memcpy(&new_key, &key, sizeof(new_key));
Li Zefan33345d012011-04-20 10:31:50 +08003633 new_key.objectid = btrfs_ino(inode);
Li Zefan4d728ec2011-01-26 14:10:43 +08003634 if (off <= key.offset)
3635 new_key.offset = key.offset + destoff - off;
3636 else
3637 new_key.offset = destoff;
Sage Weilc5c9cd42008-11-12 14:32:25 -05003638
Sage Weilb6f34092011-09-20 14:48:51 -04003639 /*
Filipe Mananaf82a9902014-06-01 01:50:28 +01003640 * Deal with a hole that doesn't have an extent item
3641 * that represents it (NO_HOLES feature enabled).
3642 * This hole is either in the middle of the cloning
3643 * range or at the beginning (fully overlaps it or
3644 * partially overlaps it).
3645 */
3646 if (new_key.offset != last_dest_end)
3647 drop_start = last_dest_end;
3648 else
3649 drop_start = new_key.offset;
3650
3651 /*
Sage Weilb6f34092011-09-20 14:48:51 -04003652 * 1 - adjusting old extent (we may have to split it)
3653 * 1 - add new extent
3654 * 1 - inode update
3655 */
3656 trans = btrfs_start_transaction(root, 3);
Yan, Zhenga22285a2010-05-16 10:48:46 -04003657 if (IS_ERR(trans)) {
3658 ret = PTR_ERR(trans);
3659 goto out;
3660 }
3661
Chris Masonc8a894d2009-06-27 21:07:03 -04003662 if (type == BTRFS_FILE_EXTENT_REG ||
3663 type == BTRFS_FILE_EXTENT_PREALLOC) {
Li Zefand72c0842011-09-11 10:52:25 -04003664 /*
3665 * a | --- range to clone ---| b
3666 * | ------------- extent ------------- |
3667 */
3668
Antonio Ospite93915582014-06-04 14:03:48 +02003669 /* subtract range b */
Li Zefand72c0842011-09-11 10:52:25 -04003670 if (key.offset + datal > off + len)
3671 datal = off + len - key.offset;
3672
Antonio Ospite93915582014-06-04 14:03:48 +02003673 /* subtract range a */
Yan, Zhenga22285a2010-05-16 10:48:46 -04003674 if (off > key.offset) {
3675 datao += off - key.offset;
3676 datal -= off - key.offset;
3677 }
3678
Josef Bacik5dc562c2012-08-17 13:14:17 -04003679 ret = btrfs_drop_extents(trans, root, inode,
Filipe Mananaf82a9902014-06-01 01:50:28 +01003680 drop_start,
Yan, Zhenga22285a2010-05-16 10:48:46 -04003681 new_key.offset + datal,
Josef Bacik26714852012-08-29 12:24:27 -04003682 1);
Jeff Mahoney79787ea2012-03-12 16:03:00 +01003683 if (ret) {
David Sterba3f9e3df2014-04-15 18:50:17 +02003684 if (ret != -EOPNOTSUPP)
Liu Bo00fdf132014-03-10 18:56:07 +08003685 btrfs_abort_transaction(trans,
3686 root, ret);
Jeff Mahoney79787ea2012-03-12 16:03:00 +01003687 btrfs_end_transaction(trans, root);
3688 goto out;
3689 }
Yan, Zhenga22285a2010-05-16 10:48:46 -04003690
Sage Weilc5c9cd42008-11-12 14:32:25 -05003691 ret = btrfs_insert_empty_item(trans, root, path,
3692 &new_key, size);
Jeff Mahoney79787ea2012-03-12 16:03:00 +01003693 if (ret) {
3694 btrfs_abort_transaction(trans, root,
3695 ret);
3696 btrfs_end_transaction(trans, root);
3697 goto out;
3698 }
Sage Weilc5c9cd42008-11-12 14:32:25 -05003699
3700 leaf = path->nodes[0];
3701 slot = path->slots[0];
3702 write_extent_buffer(leaf, buf,
3703 btrfs_item_ptr_offset(leaf, slot),
3704 size);
3705
3706 extent = btrfs_item_ptr(leaf, slot,
3707 struct btrfs_file_extent_item);
Sage Weilc5c9cd42008-11-12 14:32:25 -05003708
Sage Weilc5c9cd42008-11-12 14:32:25 -05003709 /* disko == 0 means it's a hole */
3710 if (!disko)
3711 datao = 0;
Sage Weilc5c9cd42008-11-12 14:32:25 -05003712
3713 btrfs_set_file_extent_offset(leaf, extent,
3714 datao);
3715 btrfs_set_file_extent_num_bytes(leaf, extent,
3716 datal);
Josef Bacikfcebe452014-05-13 17:30:47 -07003717
Sage Weilc5c9cd42008-11-12 14:32:25 -05003718 if (disko) {
3719 inode_add_bytes(inode, datal);
3720 ret = btrfs_inc_extent_ref(trans, root,
Yan Zheng5d4f98a2009-06-10 10:45:14 -04003721 disko, diskl, 0,
3722 root->root_key.objectid,
Li Zefan33345d012011-04-20 10:31:50 +08003723 btrfs_ino(inode),
Filipe Mananab06c4bf2015-10-23 07:52:54 +01003724 new_key.offset - datao);
Jeff Mahoney79787ea2012-03-12 16:03:00 +01003725 if (ret) {
3726 btrfs_abort_transaction(trans,
3727 root,
3728 ret);
3729 btrfs_end_transaction(trans,
3730 root);
3731 goto out;
3732
3733 }
Sage Weilc5c9cd42008-11-12 14:32:25 -05003734 }
3735 } else if (type == BTRFS_FILE_EXTENT_INLINE) {
3736 u64 skip = 0;
3737 u64 trim = 0;
Filipe Mananaed958762015-07-14 16:09:39 +01003738
Sage Weilc5c9cd42008-11-12 14:32:25 -05003739 if (off > key.offset) {
3740 skip = off - key.offset;
3741 new_key.offset += skip;
3742 }
Chris Masond3977122009-01-05 21:25:51 -05003743
Liu Boaa42ffd2012-09-18 03:52:23 -06003744 if (key.offset + datal > off + len)
3745 trim = key.offset + datal - (off + len);
Chris Masond3977122009-01-05 21:25:51 -05003746
Sage Weilc5c9cd42008-11-12 14:32:25 -05003747 if (comp && (skip || trim)) {
Sage Weilc5c9cd42008-11-12 14:32:25 -05003748 ret = -EINVAL;
Yan, Zhenga22285a2010-05-16 10:48:46 -04003749 btrfs_end_transaction(trans, root);
Sage Weilc5c9cd42008-11-12 14:32:25 -05003750 goto out;
3751 }
3752 size -= skip + trim;
3753 datal -= skip + trim;
Yan, Zhenga22285a2010-05-16 10:48:46 -04003754
Filipe Manana8039d872015-10-13 15:15:00 +01003755 ret = clone_copy_inline_extent(src, inode,
3756 trans, path,
3757 &new_key,
3758 drop_start,
3759 datal,
3760 skip, size, buf);
Jeff Mahoney79787ea2012-03-12 16:03:00 +01003761 if (ret) {
David Sterba3f9e3df2014-04-15 18:50:17 +02003762 if (ret != -EOPNOTSUPP)
Chris Mason3a29bc02014-04-07 07:10:40 -07003763 btrfs_abort_transaction(trans,
Filipe Manana8039d872015-10-13 15:15:00 +01003764 root,
3765 ret);
Jeff Mahoney79787ea2012-03-12 16:03:00 +01003766 btrfs_end_transaction(trans, root);
3767 goto out;
3768 }
Sage Weilc5c9cd42008-11-12 14:32:25 -05003769 leaf = path->nodes[0];
3770 slot = path->slots[0];
Sage Weilc5c9cd42008-11-12 14:32:25 -05003771 }
3772
Filipe Manana7ffbb592014-06-09 03:48:05 +01003773 /* If we have an implicit hole (NO_HOLES feature). */
3774 if (drop_start < new_key.offset)
3775 clone_update_extent_map(inode, trans,
Filipe Manana14f59792014-06-29 21:45:40 +01003776 NULL, drop_start,
Filipe Manana7ffbb592014-06-09 03:48:05 +01003777 new_key.offset - drop_start);
3778
Filipe Manana14f59792014-06-29 21:45:40 +01003779 clone_update_extent_map(inode, trans, path, 0, 0);
Filipe Manana7ffbb592014-06-09 03:48:05 +01003780
Sage Weilc5c9cd42008-11-12 14:32:25 -05003781 btrfs_mark_buffer_dirty(leaf);
David Sterbab3b4aa72011-04-21 01:20:15 +02003782 btrfs_release_path(path);
Sage Weilc5c9cd42008-11-12 14:32:25 -05003783
Filipe Manana62e23902014-08-08 02:47:06 +01003784 last_dest_end = ALIGN(new_key.offset + datal,
3785 root->sectorsize);
Filipe Mananaf82a9902014-06-01 01:50:28 +01003786 ret = clone_finish_inode_update(trans, inode,
3787 last_dest_end,
Mark Fasheh1c919a52015-06-30 14:42:08 -07003788 destoff, olen,
3789 no_time_update);
Filipe Mananaf82a9902014-06-01 01:50:28 +01003790 if (ret)
Jeff Mahoney79787ea2012-03-12 16:03:00 +01003791 goto out;
Filipe Manana2c463822014-05-31 02:31:05 +01003792 if (new_key.offset + datal >= destoff + len)
3793 break;
Yan, Zhenga22285a2010-05-16 10:48:46 -04003794 }
David Sterbab3b4aa72011-04-21 01:20:15 +02003795 btrfs_release_path(path);
Filipe Mananadf858e72015-03-31 14:56:46 +01003796 key.offset = next_key_min_offset;
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04003797 }
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04003798 ret = 0;
Mark Fasheh32b7c682013-08-06 11:42:49 -07003799
Filipe Mananaf82a9902014-06-01 01:50:28 +01003800 if (last_dest_end < destoff + len) {
3801 /*
3802 * We have an implicit hole (NO_HOLES feature is enabled) that
3803 * fully or partially overlaps our cloning range at its end.
3804 */
3805 btrfs_release_path(path);
3806
3807 /*
3808 * 1 - remove extent(s)
3809 * 1 - inode update
3810 */
3811 trans = btrfs_start_transaction(root, 2);
3812 if (IS_ERR(trans)) {
3813 ret = PTR_ERR(trans);
3814 goto out;
3815 }
3816 ret = btrfs_drop_extents(trans, root, inode,
3817 last_dest_end, destoff + len, 1);
3818 if (ret) {
3819 if (ret != -EOPNOTSUPP)
3820 btrfs_abort_transaction(trans, root, ret);
3821 btrfs_end_transaction(trans, root);
3822 goto out;
3823 }
Filipe Manana14f59792014-06-29 21:45:40 +01003824 clone_update_extent_map(inode, trans, NULL, last_dest_end,
3825 destoff + len - last_dest_end);
Filipe Mananaf82a9902014-06-01 01:50:28 +01003826 ret = clone_finish_inode_update(trans, inode, destoff + len,
Mark Fasheh1c919a52015-06-30 14:42:08 -07003827 destoff, olen, no_time_update);
Filipe Mananaf82a9902014-06-01 01:50:28 +01003828 }
3829
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04003830out:
Mark Fasheh32b7c682013-08-06 11:42:49 -07003831 btrfs_free_path(path);
3832 vfree(buf);
3833 return ret;
3834}
3835
Zach Brown3db11b22015-11-10 16:53:32 -05003836static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
3837 u64 off, u64 olen, u64 destoff)
Mark Fasheh32b7c682013-08-06 11:42:49 -07003838{
Al Viro54563d42013-09-01 15:57:51 -04003839 struct inode *inode = file_inode(file);
Zach Brown3db11b22015-11-10 16:53:32 -05003840 struct inode *src = file_inode(file_src);
Mark Fasheh32b7c682013-08-06 11:42:49 -07003841 struct btrfs_root *root = BTRFS_I(inode)->root;
Mark Fasheh32b7c682013-08-06 11:42:49 -07003842 int ret;
3843 u64 len = olen;
3844 u64 bs = root->fs_info->sb->s_blocksize;
Zach Brown3db11b22015-11-10 16:53:32 -05003845 int same_inode = src == inode;
Mark Fasheh32b7c682013-08-06 11:42:49 -07003846
3847 /*
3848 * TODO:
3849 * - split compressed inline extents. annoying: we need to
3850 * decompress into destination's address_space (the file offset
3851 * may change, so source mapping won't do), then recompress (or
3852 * otherwise reinsert) a subrange.
Liu Bo00fdf132014-03-10 18:56:07 +08003853 *
3854 * - split destination inode's inline extents. The inline extents can
3855 * be either compressed or non-compressed.
Mark Fasheh32b7c682013-08-06 11:42:49 -07003856 */
3857
Mark Fasheh32b7c682013-08-06 11:42:49 -07003858 if (btrfs_root_readonly(root))
3859 return -EROFS;
3860
Zach Brown3db11b22015-11-10 16:53:32 -05003861 if (file_src->f_path.mnt != file->f_path.mnt ||
3862 src->i_sb != inode->i_sb)
3863 return -EXDEV;
Mark Fasheh32b7c682013-08-06 11:42:49 -07003864
3865 /* don't make the dst file partly checksummed */
3866 if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) !=
3867 (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM))
Zach Brown3db11b22015-11-10 16:53:32 -05003868 return -EINVAL;
Mark Fasheh32b7c682013-08-06 11:42:49 -07003869
Mark Fasheh32b7c682013-08-06 11:42:49 -07003870 if (S_ISDIR(src->i_mode) || S_ISDIR(inode->i_mode))
Zach Brown3db11b22015-11-10 16:53:32 -05003871 return -EISDIR;
Mark Fasheh32b7c682013-08-06 11:42:49 -07003872
3873 if (!same_inode) {
Mark Fasheh293a8482015-06-30 14:42:06 -07003874 btrfs_double_inode_lock(src, inode);
Mark Fasheh32b7c682013-08-06 11:42:49 -07003875 } else {
Al Viro59551022016-01-22 15:40:57 -05003876 inode_lock(src);
Mark Fasheh32b7c682013-08-06 11:42:49 -07003877 }
3878
3879 /* determine range to clone */
3880 ret = -EINVAL;
3881 if (off + len > src->i_size || off + len < off)
3882 goto out_unlock;
3883 if (len == 0)
3884 olen = len = src->i_size - off;
3885 /* if we extend to eof, continue to block boundary */
3886 if (off + len == src->i_size)
3887 len = ALIGN(src->i_size, bs) - off;
3888
Filipe Mananaccccf3d62015-03-30 18:23:59 +01003889 if (len == 0) {
3890 ret = 0;
3891 goto out_unlock;
3892 }
3893
Mark Fasheh32b7c682013-08-06 11:42:49 -07003894 /* verify the end result is block aligned */
3895 if (!IS_ALIGNED(off, bs) || !IS_ALIGNED(off + len, bs) ||
3896 !IS_ALIGNED(destoff, bs))
3897 goto out_unlock;
3898
3899 	/* reject ranges that overlap within the same file */
3900 if (same_inode) {
3901 if (destoff + len > off && destoff < off + len)
3902 goto out_unlock;
3903 }
3904
3905 if (destoff > inode->i_size) {
3906 ret = btrfs_cont_expand(inode, inode->i_size, destoff);
3907 if (ret)
3908 goto out_unlock;
3909 }
3910
Filipe Mananac125b8b2014-05-23 05:03:34 +01003911 /*
3912 * Lock the target range too. Right after we replace the file extent
3913 * items in the fs tree (which now point to the cloned data), we might
3914 * have a worker replace them with extent items relative to a write
3915 	 * operation that was issued before this clone operation (i.e. it can
3916 	 * race with inode.c:btrfs_finish_ordered_io).
3917 */
3918 if (same_inode) {
3919 u64 lock_start = min_t(u64, off, destoff);
3920 u64 lock_len = max_t(u64, off, destoff) + len - lock_start;
Mark Fasheh32b7c682013-08-06 11:42:49 -07003921
Filipe Mananae0bd70c2016-01-27 10:20:58 +00003922 ret = lock_extent_range(src, lock_start, lock_len, true);
Filipe Mananac125b8b2014-05-23 05:03:34 +01003923 } else {
Filipe Mananae0bd70c2016-01-27 10:20:58 +00003924 ret = btrfs_double_extent_lock(src, off, inode, destoff, len,
3925 true);
3926 }
3927 ASSERT(ret == 0);
3928 if (WARN_ON(ret)) {
3929 /* ranges in the io trees already unlocked */
3930 goto out_unlock;
Filipe Mananac125b8b2014-05-23 05:03:34 +01003931 }
Mark Fasheh32b7c682013-08-06 11:42:49 -07003932
Mark Fasheh1c919a52015-06-30 14:42:08 -07003933 ret = btrfs_clone(src, inode, off, olen, len, destoff, 0);
Mark Fasheh32b7c682013-08-06 11:42:49 -07003934
Filipe Mananac125b8b2014-05-23 05:03:34 +01003935 if (same_inode) {
3936 u64 lock_start = min_t(u64, off, destoff);
3937 u64 lock_end = max_t(u64, off, destoff) + len - 1;
3938
3939 unlock_extent(&BTRFS_I(src)->io_tree, lock_start, lock_end);
3940 } else {
Mark Fasheh293a8482015-06-30 14:42:06 -07003941 btrfs_double_extent_unlock(src, off, inode, destoff, len);
Filipe Mananac125b8b2014-05-23 05:03:34 +01003942 }
3943 /*
3944 * Truncate page cache pages so that future reads will see the cloned
3945 * data immediately and not the previous data.
3946 */
Chandan Rajendra65bfa652016-01-21 15:56:04 +05303947 truncate_inode_pages_range(&inode->i_data,
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003948 round_down(destoff, PAGE_SIZE),
3949 round_up(destoff + len, PAGE_SIZE) - 1);
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04003950out_unlock:
Mark Fasheh293a8482015-06-30 14:42:06 -07003951 if (!same_inode)
3952 btrfs_double_inode_unlock(src, inode);
3953 else
Al Viro59551022016-01-22 15:40:57 -05003954 inode_unlock(src);
Zach Brown3db11b22015-11-10 16:53:32 -05003955 return ret;
3956}
3957
3958ssize_t btrfs_copy_file_range(struct file *file_in, loff_t pos_in,
3959 struct file *file_out, loff_t pos_out,
3960 size_t len, unsigned int flags)
3961{
3962 ssize_t ret;
3963
3964 ret = btrfs_clone_files(file_out, file_in, pos_in, len, pos_out);
3965 if (ret == 0)
3966 ret = len;
3967 return ret;
3968}
3969
Christoph Hellwig04b38d62015-12-03 12:59:50 +01003970int btrfs_clone_file_range(struct file *src_file, loff_t off,
3971 struct file *dst_file, loff_t destoff, u64 len)
Zach Brown3db11b22015-11-10 16:53:32 -05003972{
Christoph Hellwig04b38d62015-12-03 12:59:50 +01003973 return btrfs_clone_files(dst_file, src_file, off, len, destoff);
Sage Weilc5c9cd42008-11-12 14:32:25 -05003974}
3975
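/*
 * Userspace sketch (illustrative, not part of this file): the VFS hooks
 * above are reached through generic entry points such as the
 * copy_file_range(2) syscall, which lands in btrfs_copy_file_range().
 * Offsets and length must satisfy the block-alignment checks done in
 * btrfs_clone_files() unless the copy extends to EOF.
 *
 *	loff_t off_in = 0, off_out = 0;
 *	ssize_t n = copy_file_range(src_fd, &off_in, dst_fd, &off_out,
 *				    len, 0);
 *	if (n < 0)
 *		perror("copy_file_range");
 */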
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04003976/*
3977 * there are many ways the trans_start and trans_end ioctls can lead
3978 * to deadlocks. They should only be used by applications that
3979 * basically own the machine, and have a very in depth understanding
3980 * of all the possible deadlocks and enospc problems.
3981 */
Christoph Hellwigb2950862008-12-02 09:54:17 -05003982static long btrfs_ioctl_trans_start(struct file *file)
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04003983{
Al Viro496ad9a2013-01-23 17:07:38 -05003984 struct inode *inode = file_inode(file);
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04003985 struct btrfs_root *root = BTRFS_I(inode)->root;
3986 struct btrfs_trans_handle *trans;
Sage Weil1ab86ae2009-09-29 18:38:44 -04003987 int ret;
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04003988
Sage Weil1ab86ae2009-09-29 18:38:44 -04003989 ret = -EPERM;
Christoph Hellwigdf5b5522008-06-11 21:53:58 -04003990 if (!capable(CAP_SYS_ADMIN))
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04003991 goto out;
Sage Weil1ab86ae2009-09-29 18:38:44 -04003992
3993 ret = -EINPROGRESS;
3994 if (file->private_data)
3995 goto out;
Sage Weil9ca9ee02008-08-04 10:41:27 -04003996
Li Zefanb83cc962010-12-20 16:04:08 +08003997 ret = -EROFS;
3998 if (btrfs_root_readonly(root))
3999 goto out;
4000
Al Viroa561be72011-11-23 11:57:51 -05004001 ret = mnt_want_write_file(file);
Yan Zhengc146afa2008-11-12 14:34:12 -05004002 if (ret)
4003 goto out;
4004
Josef Bacika4abeea2011-04-11 17:25:13 -04004005 atomic_inc(&root->fs_info->open_ioctl_trans);
Sage Weil9ca9ee02008-08-04 10:41:27 -04004006
Sage Weil1ab86ae2009-09-29 18:38:44 -04004007 ret = -ENOMEM;
Josef Bacik7a7eaa42011-04-13 12:54:33 -04004008 trans = btrfs_start_ioctl_transaction(root);
Tsutomu Itohabd30bb2011-01-24 00:57:10 +00004009 if (IS_ERR(trans))
Sage Weil1ab86ae2009-09-29 18:38:44 -04004010 goto out_drop;
4011
4012 file->private_data = trans;
4013 return 0;
4014
4015out_drop:
Josef Bacika4abeea2011-04-11 17:25:13 -04004016 atomic_dec(&root->fs_info->open_ioctl_trans);
Al Viro2a79f172011-12-09 08:06:57 -05004017 mnt_drop_write_file(file);
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04004018out:
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04004019 return ret;
4020}
4021
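/*
 * Userspace sketch (illustrative): how an application would hold a
 * transaction open via the handler above.  The handle is stashed in
 * file->private_data and released again by btrfs_ioctl_trans_end()
 * below.  Ioctl names are assumed to come from linux/btrfs.h.
 *
 *	int fd = open("/mnt/somefile", O_RDWR);
 *	ioctl(fd, BTRFS_IOC_TRANS_START, 0);
 *	... do the work that needs the open transaction ...
 *	ioctl(fd, BTRFS_IOC_TRANS_END, 0);
 */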
Josef Bacik6ef5ed02009-12-11 21:11:29 +00004022static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
4023{
Al Viro496ad9a2013-01-23 17:07:38 -05004024 struct inode *inode = file_inode(file);
Josef Bacik6ef5ed02009-12-11 21:11:29 +00004025 struct btrfs_root *root = BTRFS_I(inode)->root;
4026 struct btrfs_root *new_root;
4027 struct btrfs_dir_item *di;
4028 struct btrfs_trans_handle *trans;
4029 struct btrfs_path *path;
4030 struct btrfs_key location;
4031 struct btrfs_disk_key disk_key;
Josef Bacik6ef5ed02009-12-11 21:11:29 +00004032 u64 objectid = 0;
4033 u64 dir_id;
Miao Xie3c04ce02012-11-26 08:43:07 +00004034 int ret;
Josef Bacik6ef5ed02009-12-11 21:11:29 +00004035
4036 if (!capable(CAP_SYS_ADMIN))
4037 return -EPERM;
4038
Miao Xie3c04ce02012-11-26 08:43:07 +00004039 ret = mnt_want_write_file(file);
4040 if (ret)
4041 return ret;
4042
4043 if (copy_from_user(&objectid, argp, sizeof(objectid))) {
4044 ret = -EFAULT;
4045 goto out;
4046 }
Josef Bacik6ef5ed02009-12-11 21:11:29 +00004047
4048 if (!objectid)
chandan1cecf572013-09-13 19:34:10 +05304049 objectid = BTRFS_FS_TREE_OBJECTID;
Josef Bacik6ef5ed02009-12-11 21:11:29 +00004050
4051 location.objectid = objectid;
4052 location.type = BTRFS_ROOT_ITEM_KEY;
4053 location.offset = (u64)-1;
4054
4055 new_root = btrfs_read_fs_root_no_name(root->fs_info, &location);
Miao Xie3c04ce02012-11-26 08:43:07 +00004056 if (IS_ERR(new_root)) {
4057 ret = PTR_ERR(new_root);
4058 goto out;
4059 }
Josef Bacik6ef5ed02009-12-11 21:11:29 +00004060
Josef Bacik6ef5ed02009-12-11 21:11:29 +00004061 path = btrfs_alloc_path();
Miao Xie3c04ce02012-11-26 08:43:07 +00004062 if (!path) {
4063 ret = -ENOMEM;
4064 goto out;
4065 }
Josef Bacik6ef5ed02009-12-11 21:11:29 +00004066 path->leave_spinning = 1;
4067
4068 trans = btrfs_start_transaction(root, 1);
Tsutomu Itoh98d5dc12011-01-20 06:19:37 +00004069 if (IS_ERR(trans)) {
Josef Bacik6ef5ed02009-12-11 21:11:29 +00004070 btrfs_free_path(path);
Miao Xie3c04ce02012-11-26 08:43:07 +00004071 ret = PTR_ERR(trans);
4072 goto out;
Josef Bacik6ef5ed02009-12-11 21:11:29 +00004073 }
4074
David Sterba6c417612011-04-13 15:41:04 +02004075 dir_id = btrfs_super_root_dir(root->fs_info->super_copy);
Josef Bacik6ef5ed02009-12-11 21:11:29 +00004076 di = btrfs_lookup_dir_item(trans, root->fs_info->tree_root, path,
4077 dir_id, "default", 7, 1);
Dan Carpentercf1e99a2010-05-29 09:47:24 +00004078 if (IS_ERR_OR_NULL(di)) {
Josef Bacik6ef5ed02009-12-11 21:11:29 +00004079 btrfs_free_path(path);
4080 btrfs_end_transaction(trans, root);
Frank Holtonefe120a2013-12-20 11:37:06 -05004081 		btrfs_err(new_root->fs_info, "Umm, you don't have the default dir "
4082 			   "item, this isn't going to work");
Miao Xie3c04ce02012-11-26 08:43:07 +00004083 ret = -ENOENT;
4084 goto out;
Josef Bacik6ef5ed02009-12-11 21:11:29 +00004085 }
4086
4087 btrfs_cpu_key_to_disk(&disk_key, &new_root->root_key);
4088 btrfs_set_dir_item_key(path->nodes[0], di, &disk_key);
4089 btrfs_mark_buffer_dirty(path->nodes[0]);
4090 btrfs_free_path(path);
4091
Mitch Harder2b0ce2c2012-07-24 11:58:43 -06004092 btrfs_set_fs_incompat(root->fs_info, DEFAULT_SUBVOL);
Josef Bacik6ef5ed02009-12-11 21:11:29 +00004093 btrfs_end_transaction(trans, root);
Miao Xie3c04ce02012-11-26 08:43:07 +00004094out:
4095 mnt_drop_write_file(file);
4096 return ret;
Josef Bacik6ef5ed02009-12-11 21:11:29 +00004097}
4098
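/*
 * Userspace sketch (illustrative): making subvolume id 257 the default
 * mount target via the handler above.  Passing 0 falls back to
 * BTRFS_FS_TREE_OBJECTID.  BTRFS_IOC_DEFAULT_SUBVOL is assumed to come
 * from linux/btrfs.h.
 *
 *	__u64 objectid = 257;
 *	ioctl(fd, BTRFS_IOC_DEFAULT_SUBVOL, &objectid);
 */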
Stefan Behrens5af3e8c2012-08-01 18:56:49 +02004099void btrfs_get_block_group_info(struct list_head *groups_list,
4100 struct btrfs_ioctl_space_info *space)
Josef Bacikbf5fc0932010-09-29 11:22:36 -04004101{
4102 struct btrfs_block_group_cache *block_group;
4103
4104 space->total_bytes = 0;
4105 space->used_bytes = 0;
4106 space->flags = 0;
4107 list_for_each_entry(block_group, groups_list, list) {
4108 space->flags = block_group->flags;
4109 space->total_bytes += block_group->key.offset;
4110 space->used_bytes +=
4111 btrfs_block_group_used(&block_group->item);
4112 }
4113}
4114
Eric Sandeen48a3b632013-04-25 20:41:01 +00004115static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
Josef Bacik1406e432010-01-13 18:19:06 +00004116{
4117 struct btrfs_ioctl_space_args space_args;
4118 struct btrfs_ioctl_space_info space;
4119 struct btrfs_ioctl_space_info *dest;
Chris Mason7fde62b2010-03-16 15:40:10 -04004120 struct btrfs_ioctl_space_info *dest_orig;
Daniel J Blueman13f26962011-04-11 15:56:31 +00004121 struct btrfs_ioctl_space_info __user *user_dest;
Josef Bacik1406e432010-01-13 18:19:06 +00004122 struct btrfs_space_info *info;
Josef Bacikbf5fc0932010-09-29 11:22:36 -04004123 u64 types[] = {BTRFS_BLOCK_GROUP_DATA,
4124 BTRFS_BLOCK_GROUP_SYSTEM,
4125 BTRFS_BLOCK_GROUP_METADATA,
4126 BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA};
4127 int num_types = 4;
Chris Mason7fde62b2010-03-16 15:40:10 -04004128 int alloc_size;
Josef Bacik1406e432010-01-13 18:19:06 +00004129 int ret = 0;
Dan Rosenberg51788b12011-02-14 16:04:23 -05004130 u64 slot_count = 0;
Josef Bacikbf5fc0932010-09-29 11:22:36 -04004131 int i, c;
Josef Bacik1406e432010-01-13 18:19:06 +00004132
4133 if (copy_from_user(&space_args,
4134 (struct btrfs_ioctl_space_args __user *)arg,
4135 sizeof(space_args)))
4136 return -EFAULT;
4137
Josef Bacikbf5fc0932010-09-29 11:22:36 -04004138 for (i = 0; i < num_types; i++) {
4139 struct btrfs_space_info *tmp;
4140
4141 info = NULL;
4142 rcu_read_lock();
4143 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
4144 list) {
4145 if (tmp->flags == types[i]) {
4146 info = tmp;
4147 break;
4148 }
4149 }
4150 rcu_read_unlock();
4151
4152 if (!info)
4153 continue;
4154
4155 down_read(&info->groups_sem);
4156 for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
4157 if (!list_empty(&info->block_groups[c]))
4158 slot_count++;
4159 }
4160 up_read(&info->groups_sem);
4161 }
Josef Bacik1406e432010-01-13 18:19:06 +00004162
David Sterba36523e952014-02-07 14:34:12 +01004163 /*
4164 * Global block reserve, exported as a space_info
4165 */
4166 slot_count++;
4167
Chris Mason7fde62b2010-03-16 15:40:10 -04004168 /* space_slots == 0 means they are asking for a count */
4169 if (space_args.space_slots == 0) {
4170 space_args.total_spaces = slot_count;
4171 goto out;
4172 }
Josef Bacikbf5fc0932010-09-29 11:22:36 -04004173
Dan Rosenberg51788b12011-02-14 16:04:23 -05004174 slot_count = min_t(u64, space_args.space_slots, slot_count);
Josef Bacikbf5fc0932010-09-29 11:22:36 -04004175
Chris Mason7fde62b2010-03-16 15:40:10 -04004176 alloc_size = sizeof(*dest) * slot_count;
Josef Bacikbf5fc0932010-09-29 11:22:36 -04004177
Chris Mason7fde62b2010-03-16 15:40:10 -04004178 /* we generally have at most 6 or so space infos, one for each raid
4179 * level. So, a whole page should be more than enough for everyone
4180 */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004181 if (alloc_size > PAGE_SIZE)
Chris Mason7fde62b2010-03-16 15:40:10 -04004182 return -ENOMEM;
4183
4184 space_args.total_spaces = 0;
David Sterba8d2db782015-11-04 15:38:29 +01004185 dest = kmalloc(alloc_size, GFP_KERNEL);
Chris Mason7fde62b2010-03-16 15:40:10 -04004186 if (!dest)
4187 return -ENOMEM;
4188 dest_orig = dest;
4189
4190 /* now we have a buffer to copy into */
Josef Bacikbf5fc0932010-09-29 11:22:36 -04004191 for (i = 0; i < num_types; i++) {
4192 struct btrfs_space_info *tmp;
Chris Mason7fde62b2010-03-16 15:40:10 -04004193
Dan Rosenberg51788b12011-02-14 16:04:23 -05004194 if (!slot_count)
4195 break;
4196
Josef Bacikbf5fc0932010-09-29 11:22:36 -04004197 info = NULL;
4198 rcu_read_lock();
4199 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
4200 list) {
4201 if (tmp->flags == types[i]) {
4202 info = tmp;
4203 break;
4204 }
4205 }
4206 rcu_read_unlock();
Chris Mason7fde62b2010-03-16 15:40:10 -04004207
Josef Bacikbf5fc0932010-09-29 11:22:36 -04004208 if (!info)
4209 continue;
4210 down_read(&info->groups_sem);
4211 for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
4212 if (!list_empty(&info->block_groups[c])) {
Stefan Behrens5af3e8c2012-08-01 18:56:49 +02004213 btrfs_get_block_group_info(
4214 &info->block_groups[c], &space);
Josef Bacikbf5fc0932010-09-29 11:22:36 -04004215 memcpy(dest, &space, sizeof(space));
4216 dest++;
4217 space_args.total_spaces++;
Dan Rosenberg51788b12011-02-14 16:04:23 -05004218 slot_count--;
Josef Bacikbf5fc0932010-09-29 11:22:36 -04004219 }
Dan Rosenberg51788b12011-02-14 16:04:23 -05004220 if (!slot_count)
4221 break;
Josef Bacikbf5fc0932010-09-29 11:22:36 -04004222 }
4223 up_read(&info->groups_sem);
Josef Bacik1406e432010-01-13 18:19:06 +00004224 }
Josef Bacik1406e432010-01-13 18:19:06 +00004225
David Sterba36523e952014-02-07 14:34:12 +01004226 /*
4227 * Add global block reserve
4228 */
4229 if (slot_count) {
4230 struct btrfs_block_rsv *block_rsv = &root->fs_info->global_block_rsv;
4231
4232 spin_lock(&block_rsv->lock);
4233 space.total_bytes = block_rsv->size;
4234 space.used_bytes = block_rsv->size - block_rsv->reserved;
4235 spin_unlock(&block_rsv->lock);
4236 space.flags = BTRFS_SPACE_INFO_GLOBAL_RSV;
4237 memcpy(dest, &space, sizeof(space));
4238 space_args.total_spaces++;
4239 }
4240
Daniel J Blueman2eec6c82012-04-26 00:37:14 +08004241 user_dest = (struct btrfs_ioctl_space_info __user *)
Chris Mason7fde62b2010-03-16 15:40:10 -04004242 (arg + sizeof(struct btrfs_ioctl_space_args));
4243
4244 if (copy_to_user(user_dest, dest_orig, alloc_size))
4245 ret = -EFAULT;
4246
4247 kfree(dest_orig);
4248out:
4249 if (ret == 0 && copy_to_user(arg, &space_args, sizeof(space_args)))
Josef Bacik1406e432010-01-13 18:19:06 +00004250 ret = -EFAULT;
4251
4252 return ret;
4253}
4254
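/*
 * Userspace sketch (illustrative): the two-call pattern the handler
 * above expects.  A first call with space_slots == 0 only reports how
 * many slots exist; the caller then allocates room for that many
 * btrfs_ioctl_space_info entries (copied right after the header, see
 * the user_dest computation above) and repeats the call.  Struct and
 * ioctl names are assumed to come from linux/btrfs.h.
 *
 *	struct btrfs_ioctl_space_args count = { .space_slots = 0 };
 *	ioctl(fd, BTRFS_IOC_SPACE_INFO, &count);
 *
 *	size_t sz = sizeof(count) + count.total_spaces *
 *		    sizeof(struct btrfs_ioctl_space_info);
 *	struct btrfs_ioctl_space_args *args = calloc(1, sz);
 *	args->space_slots = count.total_spaces;
 *	ioctl(fd, BTRFS_IOC_SPACE_INFO, args);
 */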
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04004255/*
4256 * there are many ways the trans_start and trans_end ioctls can lead
4257 * to deadlocks. They should only be used by applications that
4258 * basically own the machine, and have a very in depth understanding
4259 * of all the possible deadlocks and enospc problems.
4260 */
4261long btrfs_ioctl_trans_end(struct file *file)
4262{
Al Viro496ad9a2013-01-23 17:07:38 -05004263 struct inode *inode = file_inode(file);
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04004264 struct btrfs_root *root = BTRFS_I(inode)->root;
4265 struct btrfs_trans_handle *trans;
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04004266
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04004267 trans = file->private_data;
Sage Weil1ab86ae2009-09-29 18:38:44 -04004268 if (!trans)
4269 return -EINVAL;
Christoph Hellwigb2141072008-09-05 16:43:31 -04004270 file->private_data = NULL;
Sage Weil9ca9ee02008-08-04 10:41:27 -04004271
Sage Weil1ab86ae2009-09-29 18:38:44 -04004272 btrfs_end_transaction(trans, root);
4273
Josef Bacika4abeea2011-04-11 17:25:13 -04004274 atomic_dec(&root->fs_info->open_ioctl_trans);
Sage Weil9ca9ee02008-08-04 10:41:27 -04004275
Al Viro2a79f172011-12-09 08:06:57 -05004276 mnt_drop_write_file(file);
Sage Weil1ab86ae2009-09-29 18:38:44 -04004277 return 0;
Christoph Hellwigf46b5a62008-06-11 21:53:53 -04004278}
4279
Miao Xie9a8c28b2012-11-26 08:40:43 +00004280static noinline long btrfs_ioctl_start_sync(struct btrfs_root *root,
4281 void __user *argp)
Sage Weil46204592010-10-29 15:41:32 -04004282{
Sage Weil46204592010-10-29 15:41:32 -04004283 struct btrfs_trans_handle *trans;
4284 u64 transid;
Tsutomu Itohdb5b4932011-03-23 08:14:16 +00004285 int ret;
Sage Weil46204592010-10-29 15:41:32 -04004286
Miao Xied4edf392013-02-20 09:17:06 +00004287 trans = btrfs_attach_transaction_barrier(root);
Miao Xieff7c1d32012-11-26 08:41:29 +00004288 if (IS_ERR(trans)) {
4289 if (PTR_ERR(trans) != -ENOENT)
4290 return PTR_ERR(trans);
4291
4292 /* No running transaction, don't bother */
4293 transid = root->fs_info->last_trans_committed;
4294 goto out;
4295 }
Sage Weil46204592010-10-29 15:41:32 -04004296 transid = trans->transid;
Tsutomu Itohdb5b4932011-03-23 08:14:16 +00004297 ret = btrfs_commit_transaction_async(trans, root, 0);
Tsutomu Itoh8b2b2d32011-04-04 01:52:13 +00004298 if (ret) {
4299 btrfs_end_transaction(trans, root);
Tsutomu Itohdb5b4932011-03-23 08:14:16 +00004300 return ret;
Tsutomu Itoh8b2b2d32011-04-04 01:52:13 +00004301 }
Miao Xieff7c1d32012-11-26 08:41:29 +00004302out:
Sage Weil46204592010-10-29 15:41:32 -04004303 if (argp)
4304 if (copy_to_user(argp, &transid, sizeof(transid)))
4305 return -EFAULT;
4306 return 0;
4307}
4308
Miao Xie9a8c28b2012-11-26 08:40:43 +00004309static noinline long btrfs_ioctl_wait_sync(struct btrfs_root *root,
4310 void __user *argp)
Sage Weil46204592010-10-29 15:41:32 -04004311{
Sage Weil46204592010-10-29 15:41:32 -04004312 u64 transid;
4313
4314 if (argp) {
4315 if (copy_from_user(&transid, argp, sizeof(transid)))
4316 return -EFAULT;
4317 } else {
4318 transid = 0; /* current trans */
4319 }
4320 return btrfs_wait_for_commit(root, transid);
4321}
4322
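/*
 * Userspace sketch (illustrative): pairing the two sync ioctls above.
 * The first call returns the transid of the running (or last committed)
 * transaction, the second blocks until that transaction has committed.
 * Ioctl names are assumed to come from linux/btrfs.h.
 *
 *	__u64 transid;
 *	ioctl(fd, BTRFS_IOC_START_SYNC, &transid);
 *	ioctl(fd, BTRFS_IOC_WAIT_SYNC, &transid);
 */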
Miao Xieb8e95482012-11-26 08:48:01 +00004323static long btrfs_ioctl_scrub(struct file *file, void __user *arg)
Jan Schmidt475f6382011-03-11 15:41:01 +01004324{
Al Viro496ad9a2013-01-23 17:07:38 -05004325 struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
Jan Schmidt475f6382011-03-11 15:41:01 +01004326 struct btrfs_ioctl_scrub_args *sa;
Miao Xieb8e95482012-11-26 08:48:01 +00004327 int ret;
Jan Schmidt475f6382011-03-11 15:41:01 +01004328
4329 if (!capable(CAP_SYS_ADMIN))
4330 return -EPERM;
4331
4332 sa = memdup_user(arg, sizeof(*sa));
4333 if (IS_ERR(sa))
4334 return PTR_ERR(sa);
4335
Miao Xieb8e95482012-11-26 08:48:01 +00004336 if (!(sa->flags & BTRFS_SCRUB_READONLY)) {
4337 ret = mnt_want_write_file(file);
4338 if (ret)
4339 goto out;
4340 }
4341
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01004342 ret = btrfs_scrub_dev(root->fs_info, sa->devid, sa->start, sa->end,
Stefan Behrens63a212a2012-11-05 18:29:28 +01004343 &sa->progress, sa->flags & BTRFS_SCRUB_READONLY,
4344 0);
Jan Schmidt475f6382011-03-11 15:41:01 +01004345
4346 if (copy_to_user(arg, sa, sizeof(*sa)))
4347 ret = -EFAULT;
4348
Miao Xieb8e95482012-11-26 08:48:01 +00004349 if (!(sa->flags & BTRFS_SCRUB_READONLY))
4350 mnt_drop_write_file(file);
4351out:
Jan Schmidt475f6382011-03-11 15:41:01 +01004352 kfree(sa);
4353 return ret;
4354}
4355
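/*
 * Userspace sketch (illustrative): scrubbing a single device with the
 * handler above; the updated progress counters are copied back into the
 * same struct.  Struct and ioctl names are assumed to come from
 * linux/btrfs.h.
 *
 *	struct btrfs_ioctl_scrub_args sa = {
 *		.devid = 1,
 *		.start = 0,
 *		.end   = (__u64)-1,
 *	};
 *	ioctl(fd, BTRFS_IOC_SCRUB, &sa);
 */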
4356static long btrfs_ioctl_scrub_cancel(struct btrfs_root *root, void __user *arg)
4357{
4358 if (!capable(CAP_SYS_ADMIN))
4359 return -EPERM;
4360
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01004361 return btrfs_scrub_cancel(root->fs_info);
Jan Schmidt475f6382011-03-11 15:41:01 +01004362}
4363
4364static long btrfs_ioctl_scrub_progress(struct btrfs_root *root,
4365 void __user *arg)
4366{
4367 struct btrfs_ioctl_scrub_args *sa;
4368 int ret;
4369
4370 if (!capable(CAP_SYS_ADMIN))
4371 return -EPERM;
4372
4373 sa = memdup_user(arg, sizeof(*sa));
4374 if (IS_ERR(sa))
4375 return PTR_ERR(sa);
4376
4377 ret = btrfs_scrub_progress(root, sa->devid, &sa->progress);
4378
4379 if (copy_to_user(arg, sa, sizeof(*sa)))
4380 ret = -EFAULT;
4381
4382 kfree(sa);
4383 return ret;
4384}
4385
Stefan Behrensc11d2c22012-05-25 16:06:09 +02004386static long btrfs_ioctl_get_dev_stats(struct btrfs_root *root,
David Sterbab27f7c02012-06-22 06:30:39 -06004387 void __user *arg)
Stefan Behrensc11d2c22012-05-25 16:06:09 +02004388{
4389 struct btrfs_ioctl_get_dev_stats *sa;
4390 int ret;
4391
Stefan Behrensc11d2c22012-05-25 16:06:09 +02004392 sa = memdup_user(arg, sizeof(*sa));
4393 if (IS_ERR(sa))
4394 return PTR_ERR(sa);
4395
David Sterbab27f7c02012-06-22 06:30:39 -06004396 if ((sa->flags & BTRFS_DEV_STATS_RESET) && !capable(CAP_SYS_ADMIN)) {
4397 kfree(sa);
4398 return -EPERM;
4399 }
4400
4401 ret = btrfs_get_dev_stats(root, sa);
Stefan Behrensc11d2c22012-05-25 16:06:09 +02004402
4403 if (copy_to_user(arg, sa, sizeof(*sa)))
4404 ret = -EFAULT;
4405
4406 kfree(sa);
4407 return ret;
4408}
4409
Stefan Behrens3f6bcfb2012-11-06 15:08:53 +01004410static long btrfs_ioctl_dev_replace(struct btrfs_root *root, void __user *arg)
4411{
4412 struct btrfs_ioctl_dev_replace_args *p;
4413 int ret;
4414
4415 if (!capable(CAP_SYS_ADMIN))
4416 return -EPERM;
4417
4418 p = memdup_user(arg, sizeof(*p));
4419 if (IS_ERR(p))
4420 return PTR_ERR(p);
4421
4422 switch (p->cmd) {
4423 case BTRFS_IOCTL_DEV_REPLACE_CMD_START:
Ilya Dryomovadfa97c2013-10-10 20:39:28 +03004424 if (root->fs_info->sb->s_flags & MS_RDONLY) {
4425 ret = -EROFS;
4426 goto out;
4427 }
Stefan Behrens3f6bcfb2012-11-06 15:08:53 +01004428 if (atomic_xchg(
4429 &root->fs_info->mutually_exclusive_operation_running,
4430 1)) {
Anand Jaine57138b2013-08-21 11:44:48 +08004431 ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
Stefan Behrens3f6bcfb2012-11-06 15:08:53 +01004432 } else {
4433 ret = btrfs_dev_replace_start(root, p);
4434 atomic_set(
4435 &root->fs_info->mutually_exclusive_operation_running,
4436 0);
4437 }
4438 break;
4439 case BTRFS_IOCTL_DEV_REPLACE_CMD_STATUS:
4440 btrfs_dev_replace_status(root->fs_info, p);
4441 ret = 0;
4442 break;
4443 case BTRFS_IOCTL_DEV_REPLACE_CMD_CANCEL:
4444 ret = btrfs_dev_replace_cancel(root->fs_info, p);
4445 break;
4446 default:
4447 ret = -EINVAL;
4448 break;
4449 }
4450
4451 if (copy_to_user(arg, p, sizeof(*p)))
4452 ret = -EFAULT;
Ilya Dryomovadfa97c2013-10-10 20:39:28 +03004453out:
Stefan Behrens3f6bcfb2012-11-06 15:08:53 +01004454 kfree(p);
4455 return ret;
4456}
4457
Jan Schmidtd7728c92011-07-07 16:48:38 +02004458static long btrfs_ioctl_ino_to_path(struct btrfs_root *root, void __user *arg)
4459{
4460 int ret = 0;
4461 int i;
Chris Mason740c3d22011-11-02 15:48:34 -04004462 u64 rel_ptr;
Jan Schmidtd7728c92011-07-07 16:48:38 +02004463 int size;
Chris Mason806468f2011-11-06 03:07:10 -05004464 struct btrfs_ioctl_ino_path_args *ipa = NULL;
Jan Schmidtd7728c92011-07-07 16:48:38 +02004465 struct inode_fs_paths *ipath = NULL;
4466 struct btrfs_path *path;
4467
Kusanagi Kouichi82b22ac2013-01-28 11:33:31 +00004468 if (!capable(CAP_DAC_READ_SEARCH))
Jan Schmidtd7728c92011-07-07 16:48:38 +02004469 return -EPERM;
4470
4471 path = btrfs_alloc_path();
4472 if (!path) {
4473 ret = -ENOMEM;
4474 goto out;
4475 }
4476
4477 ipa = memdup_user(arg, sizeof(*ipa));
4478 if (IS_ERR(ipa)) {
4479 ret = PTR_ERR(ipa);
4480 ipa = NULL;
4481 goto out;
4482 }
4483
4484 size = min_t(u32, ipa->size, 4096);
4485 ipath = init_ipath(size, root, path);
4486 if (IS_ERR(ipath)) {
4487 ret = PTR_ERR(ipath);
4488 ipath = NULL;
4489 goto out;
4490 }
4491
4492 ret = paths_from_inode(ipa->inum, ipath);
4493 if (ret < 0)
4494 goto out;
4495
4496 for (i = 0; i < ipath->fspath->elem_cnt; ++i) {
Jeff Mahoney745c4d82011-11-20 07:31:57 -05004497 rel_ptr = ipath->fspath->val[i] -
4498 (u64)(unsigned long)ipath->fspath->val;
Chris Mason740c3d22011-11-02 15:48:34 -04004499 ipath->fspath->val[i] = rel_ptr;
Jan Schmidtd7728c92011-07-07 16:48:38 +02004500 }
4501
Jeff Mahoney745c4d82011-11-20 07:31:57 -05004502 ret = copy_to_user((void *)(unsigned long)ipa->fspath,
4503 (void *)(unsigned long)ipath->fspath, size);
Jan Schmidtd7728c92011-07-07 16:48:38 +02004504 if (ret) {
4505 ret = -EFAULT;
4506 goto out;
4507 }
4508
4509out:
4510 btrfs_free_path(path);
4511 free_ipath(ipath);
4512 kfree(ipa);
4513
4514 return ret;
4515}
4516
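/*
 * Userspace sketch (illustrative): resolving an inode number to paths
 * with the handler above.  After the call, fspath->val[i] is the offset
 * of the i-th path string relative to the start of the val[] array (see
 * the rel_ptr conversion above).  Struct and ioctl names are assumed to
 * come from linux/btrfs.h.
 *
 *	char buf[4096];
 *	struct btrfs_data_container *fspath = (void *)buf;
 *	struct btrfs_ioctl_ino_path_args ipa = {
 *		.inum   = ino,
 *		.size   = sizeof(buf),
 *		.fspath = (__u64)(uintptr_t)buf,
 *	};
 *	ioctl(fd, BTRFS_IOC_INO_PATHS, &ipa);
 *	for (int i = 0; i < fspath->elem_cnt; i++)
 *		printf("%s\n", (char *)fspath->val + fspath->val[i]);
 */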
4517static int build_ino_list(u64 inum, u64 offset, u64 root, void *ctx)
4518{
4519 struct btrfs_data_container *inodes = ctx;
4520 const size_t c = 3 * sizeof(u64);
4521
4522 if (inodes->bytes_left >= c) {
4523 inodes->bytes_left -= c;
4524 inodes->val[inodes->elem_cnt] = inum;
4525 inodes->val[inodes->elem_cnt + 1] = offset;
4526 inodes->val[inodes->elem_cnt + 2] = root;
4527 inodes->elem_cnt += 3;
4528 } else {
4529 inodes->bytes_missing += c - inodes->bytes_left;
4530 inodes->bytes_left = 0;
4531 inodes->elem_missed += 3;
4532 }
4533
4534 return 0;
4535}
4536
4537static long btrfs_ioctl_logical_to_ino(struct btrfs_root *root,
4538 void __user *arg)
4539{
4540 int ret = 0;
4541 int size;
Jan Schmidtd7728c92011-07-07 16:48:38 +02004542 struct btrfs_ioctl_logical_ino_args *loi;
4543 struct btrfs_data_container *inodes = NULL;
4544 struct btrfs_path *path = NULL;
Jan Schmidtd7728c92011-07-07 16:48:38 +02004545
4546 if (!capable(CAP_SYS_ADMIN))
4547 return -EPERM;
4548
4549 loi = memdup_user(arg, sizeof(*loi));
4550 if (IS_ERR(loi)) {
4551 ret = PTR_ERR(loi);
4552 loi = NULL;
4553 goto out;
4554 }
4555
4556 path = btrfs_alloc_path();
4557 if (!path) {
4558 ret = -ENOMEM;
4559 goto out;
4560 }
4561
Byongho Leeee221842015-12-15 01:42:10 +09004562 size = min_t(u32, loi->size, SZ_64K);
Jan Schmidtd7728c92011-07-07 16:48:38 +02004563 inodes = init_data_container(size);
4564 if (IS_ERR(inodes)) {
4565 ret = PTR_ERR(inodes);
4566 inodes = NULL;
4567 goto out;
4568 }
4569
Liu Bodf031f02012-09-07 20:01:29 -06004570 ret = iterate_inodes_from_logical(loi->logical, root->fs_info, path,
4571 build_ino_list, inodes);
4572 if (ret == -EINVAL)
Jan Schmidtd7728c92011-07-07 16:48:38 +02004573 ret = -ENOENT;
4574 if (ret < 0)
4575 goto out;
4576
Jeff Mahoney745c4d82011-11-20 07:31:57 -05004577 ret = copy_to_user((void *)(unsigned long)loi->inodes,
4578 (void *)(unsigned long)inodes, size);
Jan Schmidtd7728c92011-07-07 16:48:38 +02004579 if (ret)
4580 ret = -EFAULT;
4581
4582out:
4583 btrfs_free_path(path);
Liu Bo425d17a2012-09-07 20:01:30 -06004584 vfree(inodes);
Jan Schmidtd7728c92011-07-07 16:48:38 +02004585 kfree(loi);
4586
4587 return ret;
4588}
4589
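/*
 * Userspace sketch (illustrative): mapping a logical byte address back
 * to (inode, file offset, root) triples.  build_ino_list() above packs
 * three u64s per hit, so elem_cnt advances in steps of three.  Struct
 * and ioctl names are assumed to come from linux/btrfs.h.
 *
 *	struct btrfs_data_container *inodes = (void *)buf;
 *	struct btrfs_ioctl_logical_ino_args loi = {
 *		.logical = logical,
 *		.size    = sizeof(buf),
 *		.inodes  = (__u64)(uintptr_t)buf,
 *	};
 *	ioctl(fd, BTRFS_IOC_LOGICAL_INO, &loi);
 *	for (int i = 0; i < inodes->elem_cnt; i += 3)
 *		printf("ino %llu off %llu root %llu\n",
 *		       inodes->val[i], inodes->val[i + 1], inodes->val[i + 2]);
 */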
Ilya Dryomov19a39dc2012-01-16 22:04:49 +02004590void update_ioctl_balance_args(struct btrfs_fs_info *fs_info, int lock,
Ilya Dryomovc9e9f972012-01-16 22:04:47 +02004591 struct btrfs_ioctl_balance_args *bargs)
4592{
4593 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
4594
4595 bargs->flags = bctl->flags;
4596
Ilya Dryomov837d5b62012-01-16 22:04:49 +02004597 if (atomic_read(&fs_info->balance_running))
4598 bargs->state |= BTRFS_BALANCE_STATE_RUNNING;
4599 if (atomic_read(&fs_info->balance_pause_req))
4600 bargs->state |= BTRFS_BALANCE_STATE_PAUSE_REQ;
Ilya Dryomova7e99c62012-01-16 22:04:49 +02004601 if (atomic_read(&fs_info->balance_cancel_req))
4602 bargs->state |= BTRFS_BALANCE_STATE_CANCEL_REQ;
Ilya Dryomov837d5b62012-01-16 22:04:49 +02004603
Ilya Dryomovc9e9f972012-01-16 22:04:47 +02004604 memcpy(&bargs->data, &bctl->data, sizeof(bargs->data));
4605 memcpy(&bargs->meta, &bctl->meta, sizeof(bargs->meta));
4606 memcpy(&bargs->sys, &bctl->sys, sizeof(bargs->sys));
Ilya Dryomov19a39dc2012-01-16 22:04:49 +02004607
4608 if (lock) {
4609 spin_lock(&fs_info->balance_lock);
4610 memcpy(&bargs->stat, &bctl->stat, sizeof(bargs->stat));
4611 spin_unlock(&fs_info->balance_lock);
4612 } else {
4613 memcpy(&bargs->stat, &bctl->stat, sizeof(bargs->stat));
4614 }
Ilya Dryomovc9e9f972012-01-16 22:04:47 +02004615}
4616
Liu Bo9ba1f6e2012-05-11 18:11:26 +08004617static long btrfs_ioctl_balance(struct file *file, void __user *arg)
Ilya Dryomovc9e9f972012-01-16 22:04:47 +02004618{
Al Viro496ad9a2013-01-23 17:07:38 -05004619 struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
Ilya Dryomovc9e9f972012-01-16 22:04:47 +02004620 struct btrfs_fs_info *fs_info = root->fs_info;
4621 struct btrfs_ioctl_balance_args *bargs;
4622 struct btrfs_balance_control *bctl;
Ilya Dryomoved0fb782013-01-20 15:57:57 +02004623 bool need_unlock; /* for mut. excl. ops lock */
Ilya Dryomovc9e9f972012-01-16 22:04:47 +02004624 int ret;
4625
4626 if (!capable(CAP_SYS_ADMIN))
4627 return -EPERM;
4628
Liu Boe54bfa32012-06-29 03:58:48 -06004629 ret = mnt_want_write_file(file);
Liu Bo9ba1f6e2012-05-11 18:11:26 +08004630 if (ret)
4631 return ret;
4632
Ilya Dryomoved0fb782013-01-20 15:57:57 +02004633again:
4634 if (!atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1)) {
4635 mutex_lock(&fs_info->volume_mutex);
4636 mutex_lock(&fs_info->balance_mutex);
4637 need_unlock = true;
4638 goto locked;
4639 }
4640
4641 /*
4642 	 * mut. excl. ops lock is locked. Three possibilities:
4643 * (1) some other op is running
4644 * (2) balance is running
4645 * (3) balance is paused -- special case (think resume)
4646 */
Ilya Dryomovc9e9f972012-01-16 22:04:47 +02004647 mutex_lock(&fs_info->balance_mutex);
Ilya Dryomoved0fb782013-01-20 15:57:57 +02004648 if (fs_info->balance_ctl) {
4649 /* this is either (2) or (3) */
4650 if (!atomic_read(&fs_info->balance_running)) {
4651 mutex_unlock(&fs_info->balance_mutex);
4652 if (!mutex_trylock(&fs_info->volume_mutex))
4653 goto again;
4654 mutex_lock(&fs_info->balance_mutex);
4655
4656 if (fs_info->balance_ctl &&
4657 !atomic_read(&fs_info->balance_running)) {
4658 /* this is (3) */
4659 need_unlock = false;
4660 goto locked;
4661 }
4662
4663 mutex_unlock(&fs_info->balance_mutex);
4664 mutex_unlock(&fs_info->volume_mutex);
4665 goto again;
4666 } else {
4667 /* this is (2) */
4668 mutex_unlock(&fs_info->balance_mutex);
4669 ret = -EINPROGRESS;
4670 goto out;
4671 }
4672 } else {
4673 /* this is (1) */
4674 mutex_unlock(&fs_info->balance_mutex);
Anand Jaine57138b2013-08-21 11:44:48 +08004675 ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
Ilya Dryomoved0fb782013-01-20 15:57:57 +02004676 goto out;
4677 }
4678
4679locked:
4680 BUG_ON(!atomic_read(&fs_info->mutually_exclusive_operation_running));
Ilya Dryomovc9e9f972012-01-16 22:04:47 +02004681
4682 if (arg) {
4683 bargs = memdup_user(arg, sizeof(*bargs));
4684 if (IS_ERR(bargs)) {
4685 ret = PTR_ERR(bargs);
Ilya Dryomoved0fb782013-01-20 15:57:57 +02004686 goto out_unlock;
Ilya Dryomovc9e9f972012-01-16 22:04:47 +02004687 }
Ilya Dryomovde322262012-01-16 22:04:49 +02004688
4689 if (bargs->flags & BTRFS_BALANCE_RESUME) {
4690 if (!fs_info->balance_ctl) {
4691 ret = -ENOTCONN;
4692 goto out_bargs;
4693 }
4694
4695 bctl = fs_info->balance_ctl;
4696 spin_lock(&fs_info->balance_lock);
4697 bctl->flags |= BTRFS_BALANCE_RESUME;
4698 spin_unlock(&fs_info->balance_lock);
4699
4700 goto do_balance;
4701 }
Ilya Dryomovc9e9f972012-01-16 22:04:47 +02004702 } else {
4703 bargs = NULL;
4704 }
4705
Ilya Dryomoved0fb782013-01-20 15:57:57 +02004706 if (fs_info->balance_ctl) {
Ilya Dryomov837d5b62012-01-16 22:04:49 +02004707 ret = -EINPROGRESS;
4708 goto out_bargs;
4709 }
4710
David Sterba8d2db782015-11-04 15:38:29 +01004711 bctl = kzalloc(sizeof(*bctl), GFP_KERNEL);
Ilya Dryomovc9e9f972012-01-16 22:04:47 +02004712 if (!bctl) {
4713 ret = -ENOMEM;
4714 goto out_bargs;
4715 }
4716
4717 bctl->fs_info = fs_info;
4718 if (arg) {
4719 memcpy(&bctl->data, &bargs->data, sizeof(bctl->data));
4720 memcpy(&bctl->meta, &bargs->meta, sizeof(bctl->meta));
4721 memcpy(&bctl->sys, &bargs->sys, sizeof(bctl->sys));
4722
4723 bctl->flags = bargs->flags;
Ilya Dryomovf43ffb62012-01-16 22:04:47 +02004724 } else {
4725 /* balance everything - no filters */
4726 bctl->flags |= BTRFS_BALANCE_TYPE_MASK;
Ilya Dryomovc9e9f972012-01-16 22:04:47 +02004727 }
4728
David Sterba8eb93452015-10-12 16:55:54 +02004729 if (bctl->flags & ~(BTRFS_BALANCE_ARGS_MASK | BTRFS_BALANCE_TYPE_MASK)) {
4730 ret = -EINVAL;
Christian Engelmayer0f89abf2015-10-21 00:50:06 +02004731 goto out_bctl;
David Sterba8eb93452015-10-12 16:55:54 +02004732 }
4733
Ilya Dryomovde322262012-01-16 22:04:49 +02004734do_balance:
Ilya Dryomovc9e9f972012-01-16 22:04:47 +02004735 /*
Ilya Dryomoved0fb782013-01-20 15:57:57 +02004736 * Ownership of bctl and mutually_exclusive_operation_running
4737 	 * goes to btrfs_balance. bctl is freed in __cancel_balance,
4738 * or, if restriper was paused all the way until unmount, in
4739 * free_fs_info. mutually_exclusive_operation_running is
4740 * cleared in __cancel_balance.
Ilya Dryomovc9e9f972012-01-16 22:04:47 +02004741 */
Ilya Dryomoved0fb782013-01-20 15:57:57 +02004742 need_unlock = false;
4743
4744 ret = btrfs_balance(bctl, bargs);
Christian Engelmayer0f89abf2015-10-21 00:50:06 +02004745 bctl = NULL;
Ilya Dryomoved0fb782013-01-20 15:57:57 +02004746
Ilya Dryomovc9e9f972012-01-16 22:04:47 +02004747 if (arg) {
4748 if (copy_to_user(arg, bargs, sizeof(*bargs)))
4749 ret = -EFAULT;
4750 }
4751
Christian Engelmayer0f89abf2015-10-21 00:50:06 +02004752out_bctl:
4753 kfree(bctl);
Ilya Dryomovc9e9f972012-01-16 22:04:47 +02004754out_bargs:
4755 kfree(bargs);
Ilya Dryomoved0fb782013-01-20 15:57:57 +02004756out_unlock:
Ilya Dryomovc9e9f972012-01-16 22:04:47 +02004757 mutex_unlock(&fs_info->balance_mutex);
4758 mutex_unlock(&fs_info->volume_mutex);
Ilya Dryomoved0fb782013-01-20 15:57:57 +02004759 if (need_unlock)
4760 atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
4761out:
Liu Boe54bfa32012-06-29 03:58:48 -06004762 mnt_drop_write_file(file);
Ilya Dryomovc9e9f972012-01-16 22:04:47 +02004763 return ret;
4764}
4765
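/*
 * Userspace sketch (illustrative): resuming a previously paused balance,
 * which takes the BTRFS_BALANCE_RESUME branch above (an -ENOTCONN return
 * means there is no paused balance to resume).  Struct and ioctl names
 * are assumed to come from linux/btrfs.h.
 *
 *	struct btrfs_ioctl_balance_args bargs = {
 *		.flags = BTRFS_BALANCE_RESUME,
 *	};
 *	ioctl(fd, BTRFS_IOC_BALANCE_V2, &bargs);
 */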
Ilya Dryomov837d5b62012-01-16 22:04:49 +02004766static long btrfs_ioctl_balance_ctl(struct btrfs_root *root, int cmd)
4767{
4768 if (!capable(CAP_SYS_ADMIN))
4769 return -EPERM;
4770
4771 switch (cmd) {
4772 case BTRFS_BALANCE_CTL_PAUSE:
4773 return btrfs_pause_balance(root->fs_info);
Ilya Dryomova7e99c62012-01-16 22:04:49 +02004774 case BTRFS_BALANCE_CTL_CANCEL:
4775 return btrfs_cancel_balance(root->fs_info);
Ilya Dryomov837d5b62012-01-16 22:04:49 +02004776 }
4777
4778 return -EINVAL;
4779}
4780
Ilya Dryomov19a39dc2012-01-16 22:04:49 +02004781static long btrfs_ioctl_balance_progress(struct btrfs_root *root,
4782 void __user *arg)
4783{
4784 struct btrfs_fs_info *fs_info = root->fs_info;
4785 struct btrfs_ioctl_balance_args *bargs;
4786 int ret = 0;
4787
4788 if (!capable(CAP_SYS_ADMIN))
4789 return -EPERM;
4790
4791 mutex_lock(&fs_info->balance_mutex);
4792 if (!fs_info->balance_ctl) {
4793 ret = -ENOTCONN;
4794 goto out;
4795 }
4796
David Sterba8d2db782015-11-04 15:38:29 +01004797 bargs = kzalloc(sizeof(*bargs), GFP_KERNEL);
Ilya Dryomov19a39dc2012-01-16 22:04:49 +02004798 if (!bargs) {
4799 ret = -ENOMEM;
4800 goto out;
4801 }
4802
4803 update_ioctl_balance_args(fs_info, 1, bargs);
4804
4805 if (copy_to_user(arg, bargs, sizeof(*bargs)))
4806 ret = -EFAULT;
4807
4808 kfree(bargs);
4809out:
4810 mutex_unlock(&fs_info->balance_mutex);
4811 return ret;
4812}
4813
Miao Xie905b0dd2012-11-26 08:50:11 +00004814static long btrfs_ioctl_quota_ctl(struct file *file, void __user *arg)
Arne Jansen5d13a372011-09-14 15:53:51 +02004815{
Al Viro496ad9a2013-01-23 17:07:38 -05004816 struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
Arne Jansen5d13a372011-09-14 15:53:51 +02004817 struct btrfs_ioctl_quota_ctl_args *sa;
4818 struct btrfs_trans_handle *trans = NULL;
4819 int ret;
4820 int err;
4821
4822 if (!capable(CAP_SYS_ADMIN))
4823 return -EPERM;
4824
Miao Xie905b0dd2012-11-26 08:50:11 +00004825 ret = mnt_want_write_file(file);
4826 if (ret)
4827 return ret;
Arne Jansen5d13a372011-09-14 15:53:51 +02004828
4829 sa = memdup_user(arg, sizeof(*sa));
Miao Xie905b0dd2012-11-26 08:50:11 +00004830 if (IS_ERR(sa)) {
4831 ret = PTR_ERR(sa);
4832 goto drop_write;
4833 }
Arne Jansen5d13a372011-09-14 15:53:51 +02004834
Wang Shilong7708f022013-04-07 10:24:57 +00004835 down_write(&root->fs_info->subvol_sem);
Jan Schmidt2f232032013-04-25 16:04:51 +00004836 trans = btrfs_start_transaction(root->fs_info->tree_root, 2);
4837 if (IS_ERR(trans)) {
4838 ret = PTR_ERR(trans);
4839 goto out;
Arne Jansen5d13a372011-09-14 15:53:51 +02004840 }
4841
4842 switch (sa->cmd) {
4843 case BTRFS_QUOTA_CTL_ENABLE:
4844 ret = btrfs_quota_enable(trans, root->fs_info);
4845 break;
4846 case BTRFS_QUOTA_CTL_DISABLE:
4847 ret = btrfs_quota_disable(trans, root->fs_info);
4848 break;
Arne Jansen5d13a372011-09-14 15:53:51 +02004849 default:
4850 ret = -EINVAL;
4851 break;
4852 }
4853
Jan Schmidt2f232032013-04-25 16:04:51 +00004854 err = btrfs_commit_transaction(trans, root->fs_info->tree_root);
4855 if (err && !ret)
4856 ret = err;
Arne Jansen5d13a372011-09-14 15:53:51 +02004857out:
4858 kfree(sa);
Wang Shilong7708f022013-04-07 10:24:57 +00004859 up_write(&root->fs_info->subvol_sem);
Miao Xie905b0dd2012-11-26 08:50:11 +00004860drop_write:
4861 mnt_drop_write_file(file);
Arne Jansen5d13a372011-09-14 15:53:51 +02004862 return ret;
4863}
4864
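/*
 * Userspace sketch (illustrative): enabling quotas, which takes the
 * BTRFS_QUOTA_CTL_ENABLE branch above.  Struct and ioctl names are
 * assumed to come from linux/btrfs.h.
 *
 *	struct btrfs_ioctl_quota_ctl_args qc = {
 *		.cmd = BTRFS_QUOTA_CTL_ENABLE,
 *	};
 *	ioctl(fd, BTRFS_IOC_QUOTA_CTL, &qc);
 */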
Miao Xie905b0dd2012-11-26 08:50:11 +00004865static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg)
Arne Jansen5d13a372011-09-14 15:53:51 +02004866{
Al Viro496ad9a2013-01-23 17:07:38 -05004867 struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
Arne Jansen5d13a372011-09-14 15:53:51 +02004868 struct btrfs_ioctl_qgroup_assign_args *sa;
4869 struct btrfs_trans_handle *trans;
4870 int ret;
4871 int err;
4872
4873 if (!capable(CAP_SYS_ADMIN))
4874 return -EPERM;
4875
Miao Xie905b0dd2012-11-26 08:50:11 +00004876 ret = mnt_want_write_file(file);
4877 if (ret)
4878 return ret;
Arne Jansen5d13a372011-09-14 15:53:51 +02004879
4880 sa = memdup_user(arg, sizeof(*sa));
Miao Xie905b0dd2012-11-26 08:50:11 +00004881 if (IS_ERR(sa)) {
4882 ret = PTR_ERR(sa);
4883 goto drop_write;
4884 }
Arne Jansen5d13a372011-09-14 15:53:51 +02004885
4886 trans = btrfs_join_transaction(root);
4887 if (IS_ERR(trans)) {
4888 ret = PTR_ERR(trans);
4889 goto out;
4890 }
4891
4892 /* FIXME: check if the IDs really exist */
4893 if (sa->assign) {
4894 ret = btrfs_add_qgroup_relation(trans, root->fs_info,
4895 sa->src, sa->dst);
4896 } else {
4897 ret = btrfs_del_qgroup_relation(trans, root->fs_info,
4898 sa->src, sa->dst);
4899 }
4900
Qu Wenruoe082f562015-02-27 16:24:28 +08004901 /* update qgroup status and info */
4902 err = btrfs_run_qgroups(trans, root->fs_info);
4903 if (err < 0)
Anand Jaina4553fe2015-09-25 14:43:01 +08004904 		btrfs_std_error(root->fs_info, err,
Qu Wenruoe082f562015-02-27 16:24:28 +08004905 			"failed to update qgroup status and info");
Arne Jansen5d13a372011-09-14 15:53:51 +02004906 err = btrfs_end_transaction(trans, root);
4907 if (err && !ret)
4908 ret = err;
4909
4910out:
4911 kfree(sa);
Miao Xie905b0dd2012-11-26 08:50:11 +00004912drop_write:
4913 mnt_drop_write_file(file);
Arne Jansen5d13a372011-09-14 15:53:51 +02004914 return ret;
4915}
4916
Miao Xie905b0dd2012-11-26 08:50:11 +00004917static long btrfs_ioctl_qgroup_create(struct file *file, void __user *arg)
Arne Jansen5d13a372011-09-14 15:53:51 +02004918{
Al Viro496ad9a2013-01-23 17:07:38 -05004919 struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
Arne Jansen5d13a372011-09-14 15:53:51 +02004920 struct btrfs_ioctl_qgroup_create_args *sa;
4921 struct btrfs_trans_handle *trans;
4922 int ret;
4923 int err;
4924
4925 if (!capable(CAP_SYS_ADMIN))
4926 return -EPERM;
4927
Miao Xie905b0dd2012-11-26 08:50:11 +00004928 ret = mnt_want_write_file(file);
4929 if (ret)
4930 return ret;
Arne Jansen5d13a372011-09-14 15:53:51 +02004931
4932 sa = memdup_user(arg, sizeof(*sa));
Miao Xie905b0dd2012-11-26 08:50:11 +00004933 if (IS_ERR(sa)) {
4934 ret = PTR_ERR(sa);
4935 goto drop_write;
4936 }
Arne Jansen5d13a372011-09-14 15:53:51 +02004937
Miao Xied86e56c2012-11-15 11:35:41 +00004938 if (!sa->qgroupid) {
4939 ret = -EINVAL;
4940 goto out;
4941 }
4942
Arne Jansen5d13a372011-09-14 15:53:51 +02004943 trans = btrfs_join_transaction(root);
4944 if (IS_ERR(trans)) {
4945 ret = PTR_ERR(trans);
4946 goto out;
4947 }
4948
4949 /* FIXME: check if the IDs really exist */
4950 if (sa->create) {
Dongsheng Yang4087cf22015-01-18 10:59:23 -05004951 ret = btrfs_create_qgroup(trans, root->fs_info, sa->qgroupid);
Arne Jansen5d13a372011-09-14 15:53:51 +02004952 } else {
4953 ret = btrfs_remove_qgroup(trans, root->fs_info, sa->qgroupid);
4954 }
4955
4956 err = btrfs_end_transaction(trans, root);
4957 if (err && !ret)
4958 ret = err;
4959
4960out:
4961 kfree(sa);
Miao Xie905b0dd2012-11-26 08:50:11 +00004962drop_write:
4963 mnt_drop_write_file(file);
Arne Jansen5d13a372011-09-14 15:53:51 +02004964 return ret;
4965}
4966
Miao Xie905b0dd2012-11-26 08:50:11 +00004967static long btrfs_ioctl_qgroup_limit(struct file *file, void __user *arg)
Arne Jansen5d13a372011-09-14 15:53:51 +02004968{
Al Viro496ad9a2013-01-23 17:07:38 -05004969 struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
Arne Jansen5d13a372011-09-14 15:53:51 +02004970 struct btrfs_ioctl_qgroup_limit_args *sa;
4971 struct btrfs_trans_handle *trans;
4972 int ret;
4973 int err;
4974 u64 qgroupid;
4975
4976 if (!capable(CAP_SYS_ADMIN))
4977 return -EPERM;
4978
Miao Xie905b0dd2012-11-26 08:50:11 +00004979 ret = mnt_want_write_file(file);
4980 if (ret)
4981 return ret;
Arne Jansen5d13a372011-09-14 15:53:51 +02004982
4983 sa = memdup_user(arg, sizeof(*sa));
Miao Xie905b0dd2012-11-26 08:50:11 +00004984 if (IS_ERR(sa)) {
4985 ret = PTR_ERR(sa);
4986 goto drop_write;
4987 }
Arne Jansen5d13a372011-09-14 15:53:51 +02004988
4989 trans = btrfs_join_transaction(root);
4990 if (IS_ERR(trans)) {
4991 ret = PTR_ERR(trans);
4992 goto out;
4993 }
4994
4995 qgroupid = sa->qgroupid;
4996 if (!qgroupid) {
4997 /* take the current subvol as qgroup */
4998 qgroupid = root->root_key.objectid;
4999 }
5000
5001 /* FIXME: check if the IDs really exist */
5002 ret = btrfs_limit_qgroup(trans, root->fs_info, qgroupid, &sa->lim);
5003
5004 err = btrfs_end_transaction(trans, root);
5005 if (err && !ret)
5006 ret = err;
5007
5008out:
5009 kfree(sa);
Miao Xie905b0dd2012-11-26 08:50:11 +00005010drop_write:
5011 mnt_drop_write_file(file);
Arne Jansen5d13a372011-09-14 15:53:51 +02005012 return ret;
5013}
5014
Jan Schmidt2f232032013-04-25 16:04:51 +00005015static long btrfs_ioctl_quota_rescan(struct file *file, void __user *arg)
5016{
Al Viro6d0379e2013-06-16 19:32:35 +04005017 struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
Jan Schmidt2f232032013-04-25 16:04:51 +00005018 struct btrfs_ioctl_quota_rescan_args *qsa;
5019 int ret;
5020
5021 if (!capable(CAP_SYS_ADMIN))
5022 return -EPERM;
5023
5024 ret = mnt_want_write_file(file);
5025 if (ret)
5026 return ret;
5027
5028 qsa = memdup_user(arg, sizeof(*qsa));
5029 if (IS_ERR(qsa)) {
5030 ret = PTR_ERR(qsa);
5031 goto drop_write;
5032 }
5033
5034 if (qsa->flags) {
5035 ret = -EINVAL;
5036 goto out;
5037 }
5038
5039 ret = btrfs_qgroup_rescan(root->fs_info);
5040
5041out:
5042 kfree(qsa);
5043drop_write:
5044 mnt_drop_write_file(file);
5045 return ret;
5046}
5047
5048static long btrfs_ioctl_quota_rescan_status(struct file *file, void __user *arg)
5049{
Al Viro6d0379e2013-06-16 19:32:35 +04005050 struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
Jan Schmidt2f232032013-04-25 16:04:51 +00005051 struct btrfs_ioctl_quota_rescan_args *qsa;
5052 int ret = 0;
5053
5054 if (!capable(CAP_SYS_ADMIN))
5055 return -EPERM;
5056
David Sterba8d2db782015-11-04 15:38:29 +01005057 qsa = kzalloc(sizeof(*qsa), GFP_KERNEL);
Jan Schmidt2f232032013-04-25 16:04:51 +00005058 if (!qsa)
5059 return -ENOMEM;
5060
5061 if (root->fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
5062 qsa->flags = 1;
5063 qsa->progress = root->fs_info->qgroup_rescan_progress.objectid;
5064 }
5065
5066 if (copy_to_user(arg, qsa, sizeof(*qsa)))
5067 ret = -EFAULT;
5068
5069 kfree(qsa);
5070 return ret;
5071}
5072
Jan Schmidt57254b6e2013-05-06 19:14:17 +00005073static long btrfs_ioctl_quota_rescan_wait(struct file *file, void __user *arg)
5074{
Al Viro54563d42013-09-01 15:57:51 -04005075 struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
Jan Schmidt57254b6e2013-05-06 19:14:17 +00005076
5077 if (!capable(CAP_SYS_ADMIN))
5078 return -EPERM;
5079
5080 return btrfs_qgroup_wait_for_completion(root->fs_info);
5081}
5082
Hugo Millsabccd002014-01-30 20:17:00 +00005083static long _btrfs_ioctl_set_received_subvol(struct file *file,
5084 struct btrfs_ioctl_received_subvol_args *sa)
Alexander Block8ea05e32012-07-25 17:35:53 +02005085{
Al Viro496ad9a2013-01-23 17:07:38 -05005086 struct inode *inode = file_inode(file);
Alexander Block8ea05e32012-07-25 17:35:53 +02005087 struct btrfs_root *root = BTRFS_I(inode)->root;
5088 struct btrfs_root_item *root_item = &root->root_item;
5089 struct btrfs_trans_handle *trans;
Deepa Dinamani04b285f2016-02-06 23:57:21 -08005090 struct timespec ct = current_fs_time(inode->i_sb);
Alexander Block8ea05e32012-07-25 17:35:53 +02005091 int ret = 0;
Stefan Behrensdd5f9612013-08-15 17:11:20 +02005092 int received_uuid_changed;
Alexander Block8ea05e32012-07-25 17:35:53 +02005093
David Sterbabd60ea02014-01-16 15:50:22 +01005094 if (!inode_owner_or_capable(inode))
5095 return -EPERM;
5096
Alexander Block8ea05e32012-07-25 17:35:53 +02005097 ret = mnt_want_write_file(file);
5098 if (ret < 0)
5099 return ret;
5100
5101 down_write(&root->fs_info->subvol_sem);
5102
5103 if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) {
5104 ret = -EINVAL;
5105 goto out;
5106 }
5107
5108 if (btrfs_root_readonly(root)) {
5109 ret = -EROFS;
5110 goto out;
5111 }
5112
Stefan Behrensdd5f9612013-08-15 17:11:20 +02005113 /*
5114 * 1 - root item
5115 * 2 - uuid items (received uuid + subvol uuid)
5116 */
5117 trans = btrfs_start_transaction(root, 3);
Alexander Block8ea05e32012-07-25 17:35:53 +02005118 if (IS_ERR(trans)) {
5119 ret = PTR_ERR(trans);
5120 trans = NULL;
5121 goto out;
5122 }
5123
5124 sa->rtransid = trans->transid;
5125 sa->rtime.sec = ct.tv_sec;
5126 sa->rtime.nsec = ct.tv_nsec;
5127
Stefan Behrensdd5f9612013-08-15 17:11:20 +02005128 received_uuid_changed = memcmp(root_item->received_uuid, sa->uuid,
5129 BTRFS_UUID_SIZE);
5130 if (received_uuid_changed &&
5131 !btrfs_is_empty_uuid(root_item->received_uuid))
5132 btrfs_uuid_tree_rem(trans, root->fs_info->uuid_root,
5133 root_item->received_uuid,
5134 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
5135 root->root_key.objectid);
Alexander Block8ea05e32012-07-25 17:35:53 +02005136 memcpy(root_item->received_uuid, sa->uuid, BTRFS_UUID_SIZE);
5137 btrfs_set_root_stransid(root_item, sa->stransid);
5138 btrfs_set_root_rtransid(root_item, sa->rtransid);
Qu Wenruo3cae2102013-07-16 11:19:18 +08005139 btrfs_set_stack_timespec_sec(&root_item->stime, sa->stime.sec);
5140 btrfs_set_stack_timespec_nsec(&root_item->stime, sa->stime.nsec);
5141 btrfs_set_stack_timespec_sec(&root_item->rtime, sa->rtime.sec);
5142 btrfs_set_stack_timespec_nsec(&root_item->rtime, sa->rtime.nsec);
Alexander Block8ea05e32012-07-25 17:35:53 +02005143
5144 ret = btrfs_update_root(trans, root->fs_info->tree_root,
5145 &root->root_key, &root->root_item);
5146 if (ret < 0) {
5147 btrfs_end_transaction(trans, root);
Alexander Block8ea05e32012-07-25 17:35:53 +02005148 goto out;
Stefan Behrensdd5f9612013-08-15 17:11:20 +02005149 }
5150 if (received_uuid_changed && !btrfs_is_empty_uuid(sa->uuid)) {
5151 ret = btrfs_uuid_tree_add(trans, root->fs_info->uuid_root,
5152 sa->uuid,
5153 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
5154 root->root_key.objectid);
5155 if (ret < 0 && ret != -EEXIST) {
5156 btrfs_abort_transaction(trans, root, ret);
Alexander Block8ea05e32012-07-25 17:35:53 +02005157 goto out;
Stefan Behrensdd5f9612013-08-15 17:11:20 +02005158 }
5159 }
5160 ret = btrfs_commit_transaction(trans, root);
5161 if (ret < 0) {
5162 btrfs_abort_transaction(trans, root, ret);
5163 goto out;
Alexander Block8ea05e32012-07-25 17:35:53 +02005164 }
5165
Hugo Millsabccd002014-01-30 20:17:00 +00005166out:
5167 up_write(&root->fs_info->subvol_sem);
5168 mnt_drop_write_file(file);
5169 return ret;
5170}
5171
5172#ifdef CONFIG_64BIT
5173static long btrfs_ioctl_set_received_subvol_32(struct file *file,
5174 void __user *arg)
5175{
5176 struct btrfs_ioctl_received_subvol_args_32 *args32 = NULL;
5177 struct btrfs_ioctl_received_subvol_args *args64 = NULL;
5178 int ret = 0;
5179
5180 args32 = memdup_user(arg, sizeof(*args32));
5181 if (IS_ERR(args32)) {
5182 ret = PTR_ERR(args32);
5183 args32 = NULL;
5184 goto out;
5185 }
5186
David Sterba8d2db782015-11-04 15:38:29 +01005187 args64 = kmalloc(sizeof(*args64), GFP_KERNEL);
Dan Carpenter84dbeb82014-03-28 11:06:00 +03005188 if (!args64) {
5189 ret = -ENOMEM;
Hugo Millsabccd002014-01-30 20:17:00 +00005190 goto out;
5191 }
5192
5193 memcpy(args64->uuid, args32->uuid, BTRFS_UUID_SIZE);
5194 args64->stransid = args32->stransid;
5195 args64->rtransid = args32->rtransid;
5196 args64->stime.sec = args32->stime.sec;
5197 args64->stime.nsec = args32->stime.nsec;
5198 args64->rtime.sec = args32->rtime.sec;
5199 args64->rtime.nsec = args32->rtime.nsec;
5200 args64->flags = args32->flags;
5201
5202 ret = _btrfs_ioctl_set_received_subvol(file, args64);
5203 if (ret)
5204 goto out;
5205
5206 memcpy(args32->uuid, args64->uuid, BTRFS_UUID_SIZE);
5207 args32->stransid = args64->stransid;
5208 args32->rtransid = args64->rtransid;
5209 args32->stime.sec = args64->stime.sec;
5210 args32->stime.nsec = args64->stime.nsec;
5211 args32->rtime.sec = args64->rtime.sec;
5212 args32->rtime.nsec = args64->rtime.nsec;
5213 args32->flags = args64->flags;
5214
5215 ret = copy_to_user(arg, args32, sizeof(*args32));
5216 if (ret)
5217 ret = -EFAULT;
5218
5219out:
5220 kfree(args32);
5221 kfree(args64);
5222 return ret;
5223}
5224#endif
5225
5226static long btrfs_ioctl_set_received_subvol(struct file *file,
5227 void __user *arg)
5228{
5229 struct btrfs_ioctl_received_subvol_args *sa = NULL;
5230 int ret = 0;
5231
5232 sa = memdup_user(arg, sizeof(*sa));
5233 if (IS_ERR(sa)) {
5234 ret = PTR_ERR(sa);
5235 sa = NULL;
5236 goto out;
5237 }
5238
5239 ret = _btrfs_ioctl_set_received_subvol(file, sa);
5240
5241 if (ret)
5242 goto out;
5243
Alexander Block8ea05e32012-07-25 17:35:53 +02005244 ret = copy_to_user(arg, sa, sizeof(*sa));
5245 if (ret)
5246 ret = -EFAULT;
5247
5248out:
5249 kfree(sa);
Alexander Block8ea05e32012-07-25 17:35:53 +02005250 return ret;
5251}
5252
jeff.liu867ab662013-01-05 02:48:01 +00005253static int btrfs_ioctl_get_fslabel(struct file *file, void __user *arg)
5254{
Al Viro6d0379e2013-06-16 19:32:35 +04005255 struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
Anand Jaina1b83ac2013-07-19 17:39:32 +08005256 size_t len;
jeff.liu867ab662013-01-05 02:48:01 +00005257 int ret;
Anand Jaina1b83ac2013-07-19 17:39:32 +08005258 char label[BTRFS_LABEL_SIZE];
5259
5260 spin_lock(&root->fs_info->super_lock);
5261 memcpy(label, root->fs_info->super_copy->label, BTRFS_LABEL_SIZE);
5262 spin_unlock(&root->fs_info->super_lock);
5263
5264 len = strnlen(label, BTRFS_LABEL_SIZE);
jeff.liu867ab662013-01-05 02:48:01 +00005265
5266 if (len == BTRFS_LABEL_SIZE) {
Frank Holtonefe120a2013-12-20 11:37:06 -05005267 btrfs_warn(root->fs_info,
5268 			   "label is too long, returning the first %zu bytes", --len);
jeff.liu867ab662013-01-05 02:48:01 +00005269 }
5270
jeff.liu867ab662013-01-05 02:48:01 +00005271 ret = copy_to_user(arg, label, len);
jeff.liu867ab662013-01-05 02:48:01 +00005272
5273 return ret ? -EFAULT : 0;
5274}
5275
jeff.liua8bfd4a2013-01-05 02:48:08 +00005276static int btrfs_ioctl_set_fslabel(struct file *file, void __user *arg)
5277{
Al Viro6d0379e2013-06-16 19:32:35 +04005278 struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
jeff.liua8bfd4a2013-01-05 02:48:08 +00005279 struct btrfs_super_block *super_block = root->fs_info->super_copy;
5280 struct btrfs_trans_handle *trans;
5281 char label[BTRFS_LABEL_SIZE];
5282 int ret;
5283
5284 if (!capable(CAP_SYS_ADMIN))
5285 return -EPERM;
5286
5287 if (copy_from_user(label, arg, sizeof(label)))
5288 return -EFAULT;
5289
5290 if (strnlen(label, BTRFS_LABEL_SIZE) == BTRFS_LABEL_SIZE) {
Frank Holtonefe120a2013-12-20 11:37:06 -05005291 btrfs_err(root->fs_info, "unable to set label with more than %d bytes",
jeff.liua8bfd4a2013-01-05 02:48:08 +00005292 BTRFS_LABEL_SIZE - 1);
5293 return -EINVAL;
5294 }
5295
5296 ret = mnt_want_write_file(file);
5297 if (ret)
5298 return ret;
5299
jeff.liua8bfd4a2013-01-05 02:48:08 +00005300 trans = btrfs_start_transaction(root, 0);
5301 if (IS_ERR(trans)) {
5302 ret = PTR_ERR(trans);
5303 goto out_unlock;
5304 }
5305
Anand Jaina1b83ac2013-07-19 17:39:32 +08005306 spin_lock(&root->fs_info->super_lock);
jeff.liua8bfd4a2013-01-05 02:48:08 +00005307 strcpy(super_block->label, label);
Anand Jaina1b83ac2013-07-19 17:39:32 +08005308 spin_unlock(&root->fs_info->super_lock);
Jeff Mahoneyd0270ac2014-02-07 14:33:57 +01005309 ret = btrfs_commit_transaction(trans, root);
jeff.liua8bfd4a2013-01-05 02:48:08 +00005310
5311out_unlock:
jeff.liua8bfd4a2013-01-05 02:48:08 +00005312 mnt_drop_write_file(file);
5313 return ret;
5314}
5315
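/*
 * Userspace sketch (illustrative): setting and reading back the
 * filesystem label handled above.  The label is a NUL-terminated string
 * of at most BTRFS_LABEL_SIZE - 1 characters.  Ioctl names are assumed
 * to come from linux/btrfs.h.
 *
 *	char label[BTRFS_LABEL_SIZE] = "backup-pool";
 *	ioctl(fd, BTRFS_IOC_SET_FSLABEL, label);
 *	ioctl(fd, BTRFS_IOC_GET_FSLABEL, label);
 */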
Jeff Mahoney2eaa0552013-11-15 15:33:55 -05005316#define INIT_FEATURE_FLAGS(suffix) \
5317 { .compat_flags = BTRFS_FEATURE_COMPAT_##suffix, \
5318 .compat_ro_flags = BTRFS_FEATURE_COMPAT_RO_##suffix, \
5319 .incompat_flags = BTRFS_FEATURE_INCOMPAT_##suffix }
5320
David Sterbad5131b62016-02-17 15:26:27 +01005321int btrfs_ioctl_get_supported_features(void __user *arg)
Jeff Mahoney2eaa0552013-11-15 15:33:55 -05005322{
David Sterba4d4ab6d2015-11-19 11:42:31 +01005323 static const struct btrfs_ioctl_feature_flags features[3] = {
Jeff Mahoney2eaa0552013-11-15 15:33:55 -05005324 INIT_FEATURE_FLAGS(SUPP),
5325 INIT_FEATURE_FLAGS(SAFE_SET),
5326 INIT_FEATURE_FLAGS(SAFE_CLEAR)
5327 };
5328
5329 if (copy_to_user(arg, &features, sizeof(features)))
5330 return -EFAULT;
5331
5332 return 0;
5333}
5334
5335static int btrfs_ioctl_get_features(struct file *file, void __user *arg)
5336{
5337 struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
5338 struct btrfs_super_block *super_block = root->fs_info->super_copy;
5339 struct btrfs_ioctl_feature_flags features;
5340
5341 features.compat_flags = btrfs_super_compat_flags(super_block);
5342 features.compat_ro_flags = btrfs_super_compat_ro_flags(super_block);
5343 features.incompat_flags = btrfs_super_incompat_flags(super_block);
5344
5345 if (copy_to_user(arg, &features, sizeof(features)))
5346 return -EFAULT;
5347
5348 return 0;
5349}
5350
Jeff Mahoney3b02a682013-11-01 13:07:02 -04005351static int check_feature_bits(struct btrfs_root *root,
5352 enum btrfs_feature_set set,
Jeff Mahoney2eaa0552013-11-15 15:33:55 -05005353 u64 change_mask, u64 flags, u64 supported_flags,
5354 u64 safe_set, u64 safe_clear)
5355{
Jeff Mahoney3b02a682013-11-01 13:07:02 -04005356 const char *type = btrfs_feature_set_names[set];
5357 char *names;
Jeff Mahoney2eaa0552013-11-15 15:33:55 -05005358 u64 disallowed, unsupported;
5359 u64 set_mask = flags & change_mask;
5360 u64 clear_mask = ~flags & change_mask;
5361
5362 unsupported = set_mask & ~supported_flags;
5363 if (unsupported) {
Jeff Mahoney3b02a682013-11-01 13:07:02 -04005364 names = btrfs_printable_features(set, unsupported);
5365 if (names) {
5366 btrfs_warn(root->fs_info,
5367 "this kernel does not support the %s feature bit%s",
5368 names, strchr(names, ',') ? "s" : "");
5369 kfree(names);
5370 } else
5371 btrfs_warn(root->fs_info,
Jeff Mahoney2eaa0552013-11-15 15:33:55 -05005372 "this kernel does not support %s bits 0x%llx",
5373 type, unsupported);
5374 return -EOPNOTSUPP;
5375 }
5376
5377 disallowed = set_mask & ~safe_set;
5378 if (disallowed) {
Jeff Mahoney3b02a682013-11-01 13:07:02 -04005379 names = btrfs_printable_features(set, disallowed);
5380 if (names) {
5381 btrfs_warn(root->fs_info,
5382 "can't set the %s feature bit%s while mounted",
5383 names, strchr(names, ',') ? "s" : "");
5384 kfree(names);
5385 } else
5386 btrfs_warn(root->fs_info,
Jeff Mahoney2eaa0552013-11-15 15:33:55 -05005387 "can't set %s bits 0x%llx while mounted",
5388 type, disallowed);
5389 return -EPERM;
5390 }
5391
5392 disallowed = clear_mask & ~safe_clear;
5393 if (disallowed) {
Jeff Mahoney3b02a682013-11-01 13:07:02 -04005394 names = btrfs_printable_features(set, disallowed);
5395 if (names) {
5396 btrfs_warn(root->fs_info,
5397 "can't clear the %s feature bit%s while mounted",
5398 names, strchr(names, ',') ? "s" : "");
5399 kfree(names);
5400 } else
5401 btrfs_warn(root->fs_info,
Jeff Mahoney2eaa0552013-11-15 15:33:55 -05005402 "can't clear %s bits 0x%llx while mounted",
5403 type, disallowed);
5404 return -EPERM;
5405 }
5406
5407 return 0;
5408}
5409
#define check_feature(root, change_mask, flags, mask_base)             \
check_feature_bits(root, FEAT_##mask_base, change_mask, flags,         \
                   BTRFS_FEATURE_ ## mask_base ## _SUPP,               \
                   BTRFS_FEATURE_ ## mask_base ## _SAFE_SET,           \
                   BTRFS_FEATURE_ ## mask_base ## _SAFE_CLEAR)

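/*
 * Usage sketch (illustrative userspace code, not part of this file): the
 * ioctl below takes two btrfs_ioctl_feature_flags structs; the first is the
 * mask of bits to change, the second gives their new values.  Assuming the
 * BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF define is visible to the caller,
 * setting that bit online could look roughly like:
 *
 *      struct btrfs_ioctl_feature_flags flags[2] = { { 0 }, { 0 } };
 *
 *      flags[0].incompat_flags = BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF;
 *      flags[1].incompat_flags = BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF;
 *      ioctl(fd, BTRFS_IOC_SET_FEATURES, &flags);
 */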
static int btrfs_ioctl_set_features(struct file *file, void __user *arg)
{
        struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
        struct btrfs_super_block *super_block = root->fs_info->super_copy;
        struct btrfs_ioctl_feature_flags flags[2];
        struct btrfs_trans_handle *trans;
        u64 newflags;
        int ret;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        if (copy_from_user(flags, arg, sizeof(flags)))
                return -EFAULT;

        /* Nothing to do */
        if (!flags[0].compat_flags && !flags[0].compat_ro_flags &&
            !flags[0].incompat_flags)
                return 0;

        ret = check_feature(root, flags[0].compat_flags,
                            flags[1].compat_flags, COMPAT);
        if (ret)
                return ret;

        ret = check_feature(root, flags[0].compat_ro_flags,
                            flags[1].compat_ro_flags, COMPAT_RO);
        if (ret)
                return ret;

        ret = check_feature(root, flags[0].incompat_flags,
                            flags[1].incompat_flags, INCOMPAT);
        if (ret)
                return ret;

        trans = btrfs_start_transaction(root, 0);
        if (IS_ERR(trans))
                return PTR_ERR(trans);

        spin_lock(&root->fs_info->super_lock);
        newflags = btrfs_super_compat_flags(super_block);
        newflags |= flags[0].compat_flags & flags[1].compat_flags;
        newflags &= ~(flags[0].compat_flags & ~flags[1].compat_flags);
        btrfs_set_super_compat_flags(super_block, newflags);

        newflags = btrfs_super_compat_ro_flags(super_block);
        newflags |= flags[0].compat_ro_flags & flags[1].compat_ro_flags;
        newflags &= ~(flags[0].compat_ro_flags & ~flags[1].compat_ro_flags);
        btrfs_set_super_compat_ro_flags(super_block, newflags);

        newflags = btrfs_super_incompat_flags(super_block);
        newflags |= flags[0].incompat_flags & flags[1].incompat_flags;
        newflags &= ~(flags[0].incompat_flags & ~flags[1].incompat_flags);
        btrfs_set_super_incompat_flags(super_block, newflags);
        spin_unlock(&root->fs_info->super_lock);

        return btrfs_commit_transaction(trans, root);
}

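/*
 * Top-level ioctl dispatcher for btrfs: decode the command number and hand
 * off to the matching helper above.  Unknown commands fall through to
 * -ENOTTY.
 */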
long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
        void __user *argp = (void __user *)arg;

        switch (cmd) {
        case FS_IOC_GETFLAGS:
                return btrfs_ioctl_getflags(file, argp);
        case FS_IOC_SETFLAGS:
                return btrfs_ioctl_setflags(file, argp);
        case FS_IOC_GETVERSION:
                return btrfs_ioctl_getversion(file, argp);
        case FITRIM:
                return btrfs_ioctl_fitrim(file, argp);
        case BTRFS_IOC_SNAP_CREATE:
                return btrfs_ioctl_snap_create(file, argp, 0);
        case BTRFS_IOC_SNAP_CREATE_V2:
                return btrfs_ioctl_snap_create_v2(file, argp, 0);
        case BTRFS_IOC_SUBVOL_CREATE:
                return btrfs_ioctl_snap_create(file, argp, 1);
        case BTRFS_IOC_SUBVOL_CREATE_V2:
                return btrfs_ioctl_snap_create_v2(file, argp, 1);
        case BTRFS_IOC_SNAP_DESTROY:
                return btrfs_ioctl_snap_destroy(file, argp);
        case BTRFS_IOC_SUBVOL_GETFLAGS:
                return btrfs_ioctl_subvol_getflags(file, argp);
        case BTRFS_IOC_SUBVOL_SETFLAGS:
                return btrfs_ioctl_subvol_setflags(file, argp);
        case BTRFS_IOC_DEFAULT_SUBVOL:
                return btrfs_ioctl_default_subvol(file, argp);
        case BTRFS_IOC_DEFRAG:
                return btrfs_ioctl_defrag(file, NULL);
        case BTRFS_IOC_DEFRAG_RANGE:
                return btrfs_ioctl_defrag(file, argp);
        case BTRFS_IOC_RESIZE:
                return btrfs_ioctl_resize(file, argp);
        case BTRFS_IOC_ADD_DEV:
                return btrfs_ioctl_add_dev(root, argp);
        case BTRFS_IOC_RM_DEV:
                return btrfs_ioctl_rm_dev(file, argp);
        case BTRFS_IOC_RM_DEV_V2:
                return btrfs_ioctl_rm_dev_v2(file, argp);
        case BTRFS_IOC_FS_INFO:
                return btrfs_ioctl_fs_info(root, argp);
        case BTRFS_IOC_DEV_INFO:
                return btrfs_ioctl_dev_info(root, argp);
        case BTRFS_IOC_BALANCE:
                return btrfs_ioctl_balance(file, NULL);
        case BTRFS_IOC_TRANS_START:
                return btrfs_ioctl_trans_start(file);
        case BTRFS_IOC_TRANS_END:
                return btrfs_ioctl_trans_end(file);
        case BTRFS_IOC_TREE_SEARCH:
                return btrfs_ioctl_tree_search(file, argp);
        case BTRFS_IOC_TREE_SEARCH_V2:
                return btrfs_ioctl_tree_search_v2(file, argp);
        case BTRFS_IOC_INO_LOOKUP:
                return btrfs_ioctl_ino_lookup(file, argp);
        case BTRFS_IOC_INO_PATHS:
                return btrfs_ioctl_ino_to_path(root, argp);
        case BTRFS_IOC_LOGICAL_INO:
                return btrfs_ioctl_logical_to_ino(root, argp);
        case BTRFS_IOC_SPACE_INFO:
                return btrfs_ioctl_space_info(root, argp);
        case BTRFS_IOC_SYNC: {
                int ret;

                ret = btrfs_start_delalloc_roots(root->fs_info, 0, -1);
                if (ret)
                        return ret;
                ret = btrfs_sync_fs(file_inode(file)->i_sb, 1);
                /*
                 * The transaction thread may want to do more work, namely it
                 * pokes the cleaner kthread that will start processing
                 * uncleaned subvols.
                 */
                wake_up_process(root->fs_info->transaction_kthread);
                return ret;
        }
        case BTRFS_IOC_START_SYNC:
                return btrfs_ioctl_start_sync(root, argp);
        case BTRFS_IOC_WAIT_SYNC:
                return btrfs_ioctl_wait_sync(root, argp);
        case BTRFS_IOC_SCRUB:
                return btrfs_ioctl_scrub(file, argp);
        case BTRFS_IOC_SCRUB_CANCEL:
                return btrfs_ioctl_scrub_cancel(root, argp);
        case BTRFS_IOC_SCRUB_PROGRESS:
                return btrfs_ioctl_scrub_progress(root, argp);
        case BTRFS_IOC_BALANCE_V2:
                return btrfs_ioctl_balance(file, argp);
        case BTRFS_IOC_BALANCE_CTL:
                return btrfs_ioctl_balance_ctl(root, arg);
        case BTRFS_IOC_BALANCE_PROGRESS:
                return btrfs_ioctl_balance_progress(root, argp);
        case BTRFS_IOC_SET_RECEIVED_SUBVOL:
                return btrfs_ioctl_set_received_subvol(file, argp);
#ifdef CONFIG_64BIT
        case BTRFS_IOC_SET_RECEIVED_SUBVOL_32:
                return btrfs_ioctl_set_received_subvol_32(file, argp);
#endif
        case BTRFS_IOC_SEND:
                return btrfs_ioctl_send(file, argp);
        case BTRFS_IOC_GET_DEV_STATS:
                return btrfs_ioctl_get_dev_stats(root, argp);
        case BTRFS_IOC_QUOTA_CTL:
                return btrfs_ioctl_quota_ctl(file, argp);
        case BTRFS_IOC_QGROUP_ASSIGN:
                return btrfs_ioctl_qgroup_assign(file, argp);
        case BTRFS_IOC_QGROUP_CREATE:
                return btrfs_ioctl_qgroup_create(file, argp);
        case BTRFS_IOC_QGROUP_LIMIT:
                return btrfs_ioctl_qgroup_limit(file, argp);
        case BTRFS_IOC_QUOTA_RESCAN:
                return btrfs_ioctl_quota_rescan(file, argp);
        case BTRFS_IOC_QUOTA_RESCAN_STATUS:
                return btrfs_ioctl_quota_rescan_status(file, argp);
        case BTRFS_IOC_QUOTA_RESCAN_WAIT:
                return btrfs_ioctl_quota_rescan_wait(file, argp);
        case BTRFS_IOC_DEV_REPLACE:
                return btrfs_ioctl_dev_replace(root, argp);
        case BTRFS_IOC_GET_FSLABEL:
                return btrfs_ioctl_get_fslabel(file, argp);
        case BTRFS_IOC_SET_FSLABEL:
                return btrfs_ioctl_set_fslabel(file, argp);
        case BTRFS_IOC_GET_SUPPORTED_FEATURES:
                return btrfs_ioctl_get_supported_features(argp);
        case BTRFS_IOC_GET_FEATURES:
                return btrfs_ioctl_get_features(file, argp);
        case BTRFS_IOC_SET_FEATURES:
                return btrfs_ioctl_set_features(file, argp);
        }

        return -ENOTTY;
}