blob: a5fae484d4e104986a3d8f16df38ac621cb7f511 [file] [log] [blame]
Alexander Block31db9f72012-07-25 23:19:24 +02001/*
2 * Copyright (C) 2012 Alexander Block. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
17 */
18
19#include <linux/bsearch.h>
20#include <linux/fs.h>
21#include <linux/file.h>
22#include <linux/sort.h>
23#include <linux/mount.h>
24#include <linux/xattr.h>
25#include <linux/posix_acl_xattr.h>
26#include <linux/radix-tree.h>
27#include <linux/crc32c.h>
Stephen Rothwella1857eb2012-07-27 10:11:13 +100028#include <linux/vmalloc.h>
Alexander Block31db9f72012-07-25 23:19:24 +020029
30#include "send.h"
31#include "backref.h"
32#include "locking.h"
33#include "disk-io.h"
34#include "btrfs_inode.h"
35#include "transaction.h"
36
/* Runtime verbosity switch for the debug printks below; 0 silences them. */
static int g_verbose = 0;

/*
 * Print only when verbose mode is on.  NOTE(review): bare `if` without
 * do { } while (0) — an `else` after a call site would bind here; kept
 * as-is since this is a doc-only pass.
 */
#define verbose_printk(...) if (g_verbose) printk(__VA_ARGS__)
40
/*
 * A fs_path is a helper to dynamically build path names with unknown size.
 * It reallocates the internal buffer on demand.
 * It allows fast adding of path elements on the right side (normal path) and
 * fast adding to the left side (reversed path). A reversed path can also be
 * unreversed if needed.
 */
struct fs_path {
	union {
		struct {
			char *start;	/* first byte of the path string */
			char *end;	/* points at the terminating NUL */
			char *prepared;	/* destination for the next component,
					 * set by fs_path_prepare_for_add() */

			char *buf;	/* current storage (inline or heap) */
			int buf_len;	/* capacity of buf in bytes */
			int reversed:1;	/* path is built right-to-left */
			int virtual_mem:1; /* buf was vmalloc'ed, not kmalloc'ed */
			char inline_buf[]; /* small-path storage in the struct */
		};
		/* pad to one page so inline_buf gets the whole remainder */
		char pad[PAGE_SIZE];
	};
};
/* bytes of inline_buf available before a heap buffer is needed */
#define FS_PATH_INLINE_SIZE \
	(sizeof(struct fs_path) - offsetof(struct fs_path, inline_buf))
66
67
/*
 * One allowed clone source root; the array of these is reused (reset and
 * refilled) for each extent that is checked for clone candidates.
 */
struct clone_root {
	struct btrfs_root *root;	/* candidate root to clone from */
	u64 ino;			/* best source inode found so far */
	u64 offset;			/* file offset of that extent */

	u64 found_refs;			/* backrefs found in this root */
};

/*
 * Size limits for the inode-name cache in send_ctx.  Presumably cleaning
 * trims the cache back once it passes CLEAN_SIZE — the cache users are not
 * in view here; confirm against the name_cache code.
 */
#define SEND_CTX_MAX_NAME_CACHE_SIZE 128
#define SEND_CTX_NAME_CACHE_CLEAN_SIZE (SEND_CTX_MAX_NAME_CACHE_SIZE * 2)
79
/*
 * Global context of a single send operation: output stream state, the
 * command serialization buffer, the roots being diffed and the state of
 * the inode currently being processed.
 */
struct send_ctx {
	struct file *send_filp;	/* file the send stream is written to */
	loff_t send_off;	/* current write offset in send_filp */
	char *send_buf;		/* buffer one command is assembled in */
	u32 send_size;		/* bytes used in send_buf */
	u32 send_max_size;	/* capacity of send_buf */
	u64 total_send_size;	/* stats: total bytes written so far */
	u64 cmd_send_size[BTRFS_SEND_C_MAX + 1]; /* stats: bytes per command */

	struct vfsmount *mnt;

	struct btrfs_root *send_root;	/* snapshot being sent */
	struct btrfs_root *parent_root;	/* base snapshot for incremental send */
	struct clone_roots *clone_roots;
	int clone_roots_cnt;

	/* current state of the compare_tree call */
	struct btrfs_path *left_path;
	struct btrfs_path *right_path;
	struct btrfs_key *cmp_key;

	/*
	 * Info about the currently processed inode. In case of deleted
	 * inodes, these are the values from the deleted inode.
	 */
	u64 cur_ino;
	u64 cur_inode_gen;
	int cur_inode_new;
	int cur_inode_new_gen;
	int cur_inode_deleted;
	u64 cur_inode_size;
	u64 cur_inode_mode;

	u64 send_progress;	/* inode number progress marker */

	struct list_head new_refs;	/* refs added for the current inode */
	struct list_head deleted_refs;	/* refs removed for the current inode */

	/* cache of resolved inode names — see name_cache_entry */
	struct radix_tree_root name_cache;
	struct list_head name_cache_list;
	int name_cache_size;

	struct file *cur_inode_filp;	/* open file for reading data extents */
	char *read_buf;			/* scratch buffer for file data */
};
125
/*
 * Cached result of resolving (ino, gen) to a name under a parent
 * directory.  Entries hang off sctx->name_cache / name_cache_list.
 */
struct name_cache_entry {
	struct list_head list;		/* presumably links radix-tree bucket
					 * collisions — confirm against users */
	struct list_head use_list;	/* presumably LRU for cache cleaning —
					 * confirm against users */
	u64 ino;
	u64 gen;
	u64 parent_ino;
	u64 parent_gen;
	int ret;		/* cached return code of the resolution */
	int need_later_update;
	int name_len;
	char name[];		/* flexible array holding the name itself */
};
138
139static void fs_path_reset(struct fs_path *p)
140{
141 if (p->reversed) {
142 p->start = p->buf + p->buf_len - 1;
143 p->end = p->start;
144 *p->start = 0;
145 } else {
146 p->start = p->buf;
147 p->end = p->start;
148 *p->start = 0;
149 }
150}
151
/*
 * Allocate an fs_path set up to use its inline buffer.  Returns NULL on
 * allocation failure.  Free with fs_path_free().  The sctx argument is
 * unused here.
 */
static struct fs_path *fs_path_alloc(struct send_ctx *sctx)
{
	struct fs_path *p;

	p = kmalloc(sizeof(*p), GFP_NOFS);
	if (!p)
		return NULL;
	p->reversed = 0;
	p->virtual_mem = 0;
	p->buf = p->inline_buf;
	p->buf_len = FS_PATH_INLINE_SIZE;
	fs_path_reset(p);
	return p;
}
166
/*
 * Allocate an fs_path in reversed mode (components are prepended, used
 * when walking a path from leaf to root).  Returns NULL on failure.
 */
static struct fs_path *fs_path_alloc_reversed(struct send_ctx *sctx)
{
	struct fs_path *p;

	p = fs_path_alloc(sctx);
	if (!p)
		return NULL;
	p->reversed = 1;
	fs_path_reset(p);	/* re-anchor the string at the buffer end */
	return p;
}
178
179static void fs_path_free(struct send_ctx *sctx, struct fs_path *p)
180{
181 if (!p)
182 return;
183 if (p->buf != p->inline_buf) {
184 if (p->virtual_mem)
185 vfree(p->buf);
186 else
187 kfree(p->buf);
188 }
189 kfree(p);
190}
191
192static int fs_path_len(struct fs_path *p)
193{
194 return p->end - p->start;
195}
196
/*
 * Ensure the path buffer can hold at least len characters plus the
 * terminating NUL, growing it (kmalloc first, vmalloc as fallback) if
 * needed.  For reversed paths the existing string is relocated to the end
 * of the new buffer.  Returns 0 or -ENOMEM.
 */
static int fs_path_ensure_buf(struct fs_path *p, int len)
{
	char *tmp_buf;
	int path_len;
	int old_buf_len;

	/* account for the terminating NUL */
	len++;

	if (p->buf_len >= len)
		return 0;

	path_len = p->end - p->start;
	old_buf_len = p->buf_len;
	len = PAGE_ALIGN(len);

	if (p->buf == p->inline_buf) {
		/* first spill out of the inline buffer */
		tmp_buf = kmalloc(len, GFP_NOFS);
		if (!tmp_buf) {
			tmp_buf = vmalloc(len);
			if (!tmp_buf)
				return -ENOMEM;
			p->virtual_mem = 1;
		}
		memcpy(tmp_buf, p->buf, p->buf_len);
		p->buf = tmp_buf;
		p->buf_len = len;
	} else {
		if (p->virtual_mem) {
			/* vmalloc has no realloc; copy by hand */
			tmp_buf = vmalloc(len);
			if (!tmp_buf)
				return -ENOMEM;
			memcpy(tmp_buf, p->buf, p->buf_len);
			vfree(p->buf);
		} else {
			tmp_buf = krealloc(p->buf, len, GFP_NOFS);
			if (!tmp_buf) {
				/* fall back to vmalloc; old buf still valid */
				tmp_buf = vmalloc(len);
				if (!tmp_buf)
					return -ENOMEM;
				memcpy(tmp_buf, p->buf, p->buf_len);
				kfree(p->buf);
				p->virtual_mem = 1;
			}
		}
		p->buf = tmp_buf;
		p->buf_len = len;
	}
	if (p->reversed) {
		/*
		 * The string (path_len chars + NUL) sat at the end of the
		 * old buffer; move it to the end of the grown buffer.
		 */
		tmp_buf = p->buf + old_buf_len - path_len - 1;
		p->end = p->buf + p->buf_len - 1;
		p->start = p->end - path_len;
		memmove(p->start, tmp_buf, path_len + 1);
	} else {
		p->start = p->buf;
		p->end = p->start + path_len;
	}
	return 0;
}
255
/*
 * Reserve room for a name_len-character component (plus a '/' separator
 * if the path is non-empty) and point p->prepared at the spot the caller
 * must copy the name into.  For reversed paths the component is reserved
 * on the left, otherwise on the right.  Returns 0 or a negative errno.
 */
static int fs_path_prepare_for_add(struct fs_path *p, int name_len)
{
	int ret;
	int new_len;

	new_len = p->end - p->start + name_len;
	if (p->start != p->end)
		new_len++;	/* room for the '/' separator */
	ret = fs_path_ensure_buf(p, new_len);
	if (ret < 0)
		goto out;

	if (p->reversed) {
		if (p->start != p->end)
			*--p->start = '/';
		p->start -= name_len;
		p->prepared = p->start;
	} else {
		if (p->start != p->end)
			*p->end++ = '/';
		p->prepared = p->end;
		p->end += name_len;
		*p->end = 0;
	}

out:
	return ret;
}
284
285static int fs_path_add(struct fs_path *p, const char *name, int name_len)
286{
287 int ret;
288
289 ret = fs_path_prepare_for_add(p, name_len);
290 if (ret < 0)
291 goto out;
292 memcpy(p->prepared, name, name_len);
293 p->prepared = NULL;
294
295out:
296 return ret;
297}
298
299static int fs_path_add_path(struct fs_path *p, struct fs_path *p2)
300{
301 int ret;
302
303 ret = fs_path_prepare_for_add(p, p2->end - p2->start);
304 if (ret < 0)
305 goto out;
306 memcpy(p->prepared, p2->start, p2->end - p2->start);
307 p->prepared = NULL;
308
309out:
310 return ret;
311}
312
/*
 * Add a component whose characters are read directly out of an extent
 * buffer at offset off.  Returns 0 or a negative errno.
 */
static int fs_path_add_from_extent_buffer(struct fs_path *p,
					  struct extent_buffer *eb,
					  unsigned long off, int len)
{
	int ret;

	ret = fs_path_prepare_for_add(p, len);
	if (ret < 0)
		goto out;

	read_extent_buffer(eb, p->prepared, off, len);
	p->prepared = NULL;

out:
	return ret;
}
329
/*
 * Strip the last component (and its '/') from a normal path by scanning
 * backwards to the previous '/' and terminating there.  If there is no
 * '/', the whole path is emptied.  Only valid for non-reversed paths.
 */
static void fs_path_remove(struct fs_path *p)
{
	BUG_ON(p->reversed);
	while (p->start != p->end && *p->end != '/')
		p->end--;
	*p->end = 0;
}
337
338static int fs_path_copy(struct fs_path *p, struct fs_path *from)
339{
340 int ret;
341
342 p->reversed = from->reversed;
343 fs_path_reset(p);
344
345 ret = fs_path_add_path(p, from);
346
347 return ret;
348}
349
350
/*
 * Turn a reversed path into a normal one by sliding the string (with its
 * NUL) from the end of the buffer to the front.  memmove is required:
 * source and destination can overlap.  No-op for normal paths.
 */
static void fs_path_unreverse(struct fs_path *p)
{
	char *tmp;
	int len;

	if (!p->reversed)
		return;

	tmp = p->start;
	len = p->end - p->start;
	p->start = p->buf;
	p->end = p->start + len;
	memmove(p->start, tmp, len + 1);
	p->reversed = 0;
}
366
367static struct btrfs_path *alloc_path_for_send(void)
368{
369 struct btrfs_path *path;
370
371 path = btrfs_alloc_path();
372 if (!path)
373 return NULL;
374 path->search_commit_root = 1;
375 path->skip_locking = 1;
376 return path;
377}
378
/*
 * Write len bytes of a kernel buffer to the send file at sctx->send_off,
 * looping over short writes.  set_fs(KERNEL_DS) lets vfs_write accept a
 * kernel-space buffer.  Returns 0, or a negative errno; a zero-length
 * write is reported as -EIO.
 */
static int write_buf(struct send_ctx *sctx, const void *buf, u32 len)
{
	int ret;
	mm_segment_t old_fs;
	u32 pos = 0;

	old_fs = get_fs();
	set_fs(KERNEL_DS);

	while (pos < len) {
		ret = vfs_write(sctx->send_filp, (char *)buf + pos, len - pos,
				&sctx->send_off);
		/* TODO handle that correctly */
		/*if (ret == -ERESTARTSYS) {
			continue;
		}*/
		if (ret < 0)
			goto out;
		if (ret == 0) {
			ret = -EIO;
			goto out;
		}
		pos += ret;
	}

	ret = 0;

out:
	/* always restore the original address limit */
	set_fs(old_fs);
	return ret;
}
410
/*
 * Append one type-length-value attribute (little-endian header followed
 * by len raw bytes) to the command being built in send_buf.  Returns
 * -EOVERFLOW if the command buffer has no room left.
 */
static int tlv_put(struct send_ctx *sctx, u16 attr, const void *data, int len)
{
	struct btrfs_tlv_header *hdr;
	int total_len = sizeof(*hdr) + len;
	int left = sctx->send_max_size - sctx->send_size;

	if (unlikely(left < total_len))
		return -EOVERFLOW;

	hdr = (struct btrfs_tlv_header *) (sctx->send_buf + sctx->send_size);
	hdr->tlv_type = cpu_to_le16(attr);
	hdr->tlv_len = cpu_to_le16(len);
	memcpy(hdr + 1, data, len);	/* payload follows the header */
	sctx->send_size += total_len;

	return 0;
}
428
/*
 * Integer TLV helpers for widths no command currently uses; compiled out
 * to avoid unused-function warnings but kept for future commands.
 */
#if 0
static int tlv_put_u8(struct send_ctx *sctx, u16 attr, u8 value)
{
	return tlv_put(sctx, attr, &value, sizeof(value));
}

static int tlv_put_u16(struct send_ctx *sctx, u16 attr, u16 value)
{
	__le16 tmp = cpu_to_le16(value);
	return tlv_put(sctx, attr, &tmp, sizeof(tmp));
}

static int tlv_put_u32(struct send_ctx *sctx, u16 attr, u32 value)
{
	__le32 tmp = cpu_to_le32(value);
	return tlv_put(sctx, attr, &tmp, sizeof(tmp));
}
#endif
447
448static int tlv_put_u64(struct send_ctx *sctx, u16 attr, u64 value)
449{
450 __le64 tmp = cpu_to_le64(value);
451 return tlv_put(sctx, attr, &tmp, sizeof(tmp));
452}
453
454static int tlv_put_string(struct send_ctx *sctx, u16 attr,
455 const char *str, int len)
456{
457 if (len == -1)
458 len = strlen(str);
459 return tlv_put(sctx, attr, str, len);
460}
461
/* Append a raw BTRFS_UUID_SIZE-byte UUID attribute. */
static int tlv_put_uuid(struct send_ctx *sctx, u16 attr,
			const u8 *uuid)
{
	return tlv_put(sctx, attr, uuid, BTRFS_UUID_SIZE);
}
467
/*
 * In-memory timespec serializer; unused (the extent-buffer variant below
 * is used instead) and compiled out.
 */
#if 0
static int tlv_put_timespec(struct send_ctx *sctx, u16 attr,
			    struct timespec *ts)
{
	struct btrfs_timespec bts;
	bts.sec = cpu_to_le64(ts->tv_sec);
	bts.nsec = cpu_to_le32(ts->tv_nsec);
	return tlv_put(sctx, attr, &bts, sizeof(bts));
}
#endif
478
/*
 * Append a btrfs_timespec attribute copied straight out of an extent
 * buffer (already in on-disk little-endian layout).
 */
static int tlv_put_btrfs_timespec(struct send_ctx *sctx, u16 attr,
				  struct extent_buffer *eb,
				  struct btrfs_timespec *ts)
{
	struct btrfs_timespec bts;
	read_extent_buffer(eb, &bts, (unsigned long)ts, sizeof(bts));
	return tlv_put(sctx, attr, &bts, sizeof(bts));
}
487
488
/*
 * Convenience wrappers around the tlv_put_* helpers.  They rely on an
 * `int ret` variable and a `tlv_put_failure:` label in the calling
 * function and jump there on error.
 */
#define TLV_PUT(sctx, attrtype, attrlen, data) \
	do { \
		ret = tlv_put(sctx, attrtype, attrlen, data); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)

#define TLV_PUT_INT(sctx, attrtype, bits, value) \
	do { \
		ret = tlv_put_u##bits(sctx, attrtype, value); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)

#define TLV_PUT_U8(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 8, data)
#define TLV_PUT_U16(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 16, data)
#define TLV_PUT_U32(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 32, data)
#define TLV_PUT_U64(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 64, data)
#define TLV_PUT_STRING(sctx, attrtype, str, len) \
	do { \
		ret = tlv_put_string(sctx, attrtype, str, len); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_PATH(sctx, attrtype, p) \
	do { \
		ret = tlv_put_string(sctx, attrtype, p->start, \
				     p->end - p->start); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while(0)
#define TLV_PUT_UUID(sctx, attrtype, uuid) \
	do { \
		ret = tlv_put_uuid(sctx, attrtype, uuid); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_TIMESPEC(sctx, attrtype, ts) \
	do { \
		ret = tlv_put_timespec(sctx, attrtype, ts); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_BTRFS_TIMESPEC(sctx, attrtype, eb, ts) \
	do { \
		ret = tlv_put_btrfs_timespec(sctx, attrtype, eb, ts); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
538
/*
 * Write the stream header (magic string + version) that starts every
 * send stream.  Returns 0 or a negative errno from write_buf().
 */
static int send_header(struct send_ctx *sctx)
{
	struct btrfs_stream_header hdr;

	strcpy(hdr.magic, BTRFS_SEND_STREAM_MAGIC);
	hdr.version = cpu_to_le32(BTRFS_SEND_STREAM_VERSION);

	return write_buf(sctx, &hdr, sizeof(hdr));
}
548
/*
 * For each command/item we want to send to userspace, we call this function.
 * It reserves space for the command header at the start of send_buf and
 * records the command id; len and crc are filled in later by send_cmd().
 */
static int begin_cmd(struct send_ctx *sctx, int cmd)
{
	struct btrfs_cmd_header *hdr;

	if (!sctx->send_buf) {
		WARN_ON(1);
		return -EINVAL;
	}

	/* a previous command must have been flushed by send_cmd() */
	BUG_ON(sctx->send_size);

	sctx->send_size += sizeof(*hdr);
	hdr = (struct btrfs_cmd_header *)sctx->send_buf;
	hdr->cmd = cpu_to_le16(cmd);

	return 0;
}
569
/*
 * Finish the command in send_buf: fill in the payload length, compute the
 * crc32c over the whole command with the crc field zeroed, write it to
 * the stream, update statistics and reset the buffer for the next
 * command.
 */
static int send_cmd(struct send_ctx *sctx)
{
	int ret;
	struct btrfs_cmd_header *hdr;
	u32 crc;

	hdr = (struct btrfs_cmd_header *)sctx->send_buf;
	hdr->len = cpu_to_le32(sctx->send_size - sizeof(*hdr));
	hdr->crc = 0;	/* crc is computed with this field zeroed */

	crc = crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size);
	hdr->crc = cpu_to_le32(crc);

	ret = write_buf(sctx, sctx->send_buf, sctx->send_size);

	sctx->total_send_size += sctx->send_size;
	sctx->cmd_send_size[le16_to_cpu(hdr->cmd)] += sctx->send_size;
	sctx->send_size = 0;

	return ret;
}
591
/*
 * Sends a move instruction to user space: rename `from` to `to`.
 * Returns 0 or a negative errno.
 */
static int send_rename(struct send_ctx *sctx,
		       struct fs_path *from, struct fs_path *to)
{
	int ret;

verbose_printk("btrfs: send_rename %s -> %s\n", from->start, to->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_RENAME);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, from);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_TO, to);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}
615
/*
 * Sends a link instruction to user space: create hardlink `path`
 * pointing at existing `lnk`.  Returns 0 or a negative errno.
 */
static int send_link(struct send_ctx *sctx,
		     struct fs_path *path, struct fs_path *lnk)
{
	int ret;

verbose_printk("btrfs: send_link %s -> %s\n", path->start, lnk->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_LINK);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, lnk);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}
639
/*
 * Sends an unlink instruction to user space for `path`.
 * Returns 0 or a negative errno.
 */
static int send_unlink(struct send_ctx *sctx, struct fs_path *path)
{
	int ret;

verbose_printk("btrfs: send_unlink %s\n", path->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_UNLINK);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}
661
/*
 * Sends a rmdir instruction to user space for `path`.
 * Returns 0 or a negative errno.
 */
static int send_rmdir(struct send_ctx *sctx, struct fs_path *path)
{
	int ret;

verbose_printk("btrfs: send_rmdir %s\n", path->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_RMDIR);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}
683
/*
 * Helper function to retrieve some fields from an inode item.  Any of the
 * output pointers may be NULL to skip that field.  Returns 0, -ENOENT if
 * the inode item does not exist in `root`, or another negative errno.
 */
static int get_inode_info(struct btrfs_root *root,
			  u64 ino, u64 *size, u64 *gen,
			  u64 *mode, u64 *uid, u64 *gid,
			  u64 *rdev)
{
	int ret;
	struct btrfs_inode_item *ii;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = ino;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret) {
		/* ret > 0: exact key not found */
		ret = -ENOENT;
		goto out;
	}

	ii = btrfs_item_ptr(path->nodes[0], path->slots[0],
			struct btrfs_inode_item);
	if (size)
		*size = btrfs_inode_size(path->nodes[0], ii);
	if (gen)
		*gen = btrfs_inode_generation(path->nodes[0], ii);
	if (mode)
		*mode = btrfs_inode_mode(path->nodes[0], ii);
	if (uid)
		*uid = btrfs_inode_uid(path->nodes[0], ii);
	if (gid)
		*gid = btrfs_inode_gid(path->nodes[0], ii);
	if (rdev)
		*rdev = btrfs_inode_rdev(path->nodes[0], ii);

out:
	btrfs_free_path(path);
	return ret;
}
731
/*
 * Callback invoked for each name found in an inode ref item.  `num` is
 * the running entry index, `dir` the parent directory inode and `index`
 * the directory index of the entry; `p` holds the name (or full path
 * when resolving).
 */
typedef int (*iterate_inode_ref_t)(int num, u64 dir, int index,
				   struct fs_path *p,
				   void *ctx);

/*
 * Helper function to iterate the entries in ONE btrfs_inode_ref.
 * The iterate callback may return a non zero value to stop iteration. This can
 * be a negative value for error codes or 1 to simply stop it.
 *
 * path must point to the INODE_REF when called.
 *
 * If `resolve` is set, each name is expanded to a full path from the root
 * via btrfs_iref_to_path(); otherwise only the bare name is passed to the
 * callback.
 */
static int iterate_inode_ref(struct send_ctx *sctx,
			     struct btrfs_root *root, struct btrfs_path *path,
			     struct btrfs_key *found_key, int resolve,
			     iterate_inode_ref_t iterate, void *ctx)
{
	struct extent_buffer *eb;
	struct btrfs_item *item;
	struct btrfs_inode_ref *iref;
	struct btrfs_path *tmp_path;
	struct fs_path *p;
	u32 cur;
	u32 len;
	u32 total;
	int slot;
	u32 name_len;
	char *start;
	int ret = 0;
	int num;
	int index;

	/* reversed: btrfs_iref_to_path fills the buffer from the end */
	p = fs_path_alloc_reversed(sctx);
	if (!p)
		return -ENOMEM;

	tmp_path = alloc_path_for_send();
	if (!tmp_path) {
		fs_path_free(sctx, p);
		return -ENOMEM;
	}

	eb = path->nodes[0];
	slot = path->slots[0];
	item = btrfs_item_nr(eb, slot);
	iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
	cur = 0;
	len = 0;
	total = btrfs_item_size(eb, item);

	num = 0;
	/* walk the packed btrfs_inode_ref entries inside the item */
	while (cur < total) {
		fs_path_reset(p);

		name_len = btrfs_inode_ref_name_len(eb, iref);
		index = btrfs_inode_ref_index(eb, iref);
		if (resolve) {
			start = btrfs_iref_to_path(root, tmp_path, iref, eb,
						found_key->offset, p->buf,
						p->buf_len);
			if (IS_ERR(start)) {
				ret = PTR_ERR(start);
				goto out;
			}
			if (start < p->buf) {
				/* overflow , try again with larger buffer */
				ret = fs_path_ensure_buf(p,
						p->buf_len + p->buf - start);
				if (ret < 0)
					goto out;
				start = btrfs_iref_to_path(root, tmp_path, iref,
						eb, found_key->offset, p->buf,
						p->buf_len);
				if (IS_ERR(start)) {
					ret = PTR_ERR(start);
					goto out;
				}
				BUG_ON(start < p->buf);
			}
			p->start = start;
		} else {
			/* just the bare name, copied out of the leaf */
			ret = fs_path_add_from_extent_buffer(p, eb,
					(unsigned long)(iref + 1), name_len);
			if (ret < 0)
				goto out;
		}


		/* advance to the next packed ref entry */
		len = sizeof(*iref) + name_len;
		iref = (struct btrfs_inode_ref *)((char *)iref + len);
		cur += len;

		ret = iterate(num, found_key->offset, index, p, ctx);
		if (ret)
			goto out;

		num++;
	}

out:
	btrfs_free_path(tmp_path);
	fs_path_free(sctx, p);
	return ret;
}
835
/*
 * Callback invoked for each entry found in a dir item.  name/data point
 * into a scratch buffer that is reused between invocations; copy them if
 * they must outlive the callback.
 */
typedef int (*iterate_dir_item_t)(int num, struct btrfs_key *di_key,
				  const char *name, int name_len,
				  const char *data, int data_len,
				  u8 type, void *ctx);

/*
 * Helper function to iterate the entries in ONE btrfs_dir_item.
 * The iterate callback may return a non zero value to stop iteration. This can
 * be a negative value for error codes or 1 to simply stop it.
 *
 * path must point to the dir item when called.
 */
static int iterate_dir_item(struct send_ctx *sctx,
			    struct btrfs_root *root, struct btrfs_path *path,
			    struct btrfs_key *found_key,
			    iterate_dir_item_t iterate, void *ctx)
{
	int ret = 0;
	struct extent_buffer *eb;
	struct btrfs_item *item;
	struct btrfs_dir_item *di;
	struct btrfs_path *tmp_path = NULL;
	struct btrfs_key di_key;
	char *buf = NULL;
	char *buf2 = NULL;
	int buf_len;
	int buf_virtual = 0;	/* buf was vmalloc'ed, not kmalloc'ed */
	u32 name_len;
	u32 data_len;
	u32 cur;
	u32 len;
	u32 total;
	int slot;
	int num;
	u8 type;

	buf_len = PAGE_SIZE;
	buf = kmalloc(buf_len, GFP_NOFS);
	if (!buf) {
		ret = -ENOMEM;
		goto out;
	}

	tmp_path = alloc_path_for_send();
	if (!tmp_path) {
		ret = -ENOMEM;
		goto out;
	}

	eb = path->nodes[0];
	slot = path->slots[0];
	item = btrfs_item_nr(eb, slot);
	di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
	cur = 0;
	len = 0;
	total = btrfs_item_size(eb, item);

	num = 0;
	/* walk the packed btrfs_dir_item entries inside the item */
	while (cur < total) {
		name_len = btrfs_dir_name_len(eb, di);
		data_len = btrfs_dir_data_len(eb, di);
		type = btrfs_dir_type(eb, di);
		btrfs_dir_item_key_to_cpu(eb, di, &di_key);

		/*
		 * Grow the scratch buffer if this entry doesn't fit.  Old
		 * contents need not be preserved: buf is refilled below.
		 * NOTE(review): name_len + data_len is u32 arithmetic on
		 * on-disk values; confirm item sizes are validated upstream
		 * so this cannot wrap.
		 */
		if (name_len + data_len > buf_len) {
			buf_len = PAGE_ALIGN(name_len + data_len);
			if (buf_virtual) {
				buf2 = vmalloc(buf_len);
				if (!buf2) {
					ret = -ENOMEM;
					goto out;
				}
				vfree(buf);
			} else {
				buf2 = krealloc(buf, buf_len, GFP_NOFS);
				if (!buf2) {
					/* fall back to vmalloc */
					buf2 = vmalloc(buf_len);
					if (!buf2) {
						ret = -ENOMEM;
						goto out;
					}
					kfree(buf);
					buf_virtual = 1;
				}
			}

			buf = buf2;
			buf2 = NULL;
		}

		/* name immediately followed by data in the leaf */
		read_extent_buffer(eb, buf, (unsigned long)(di + 1),
				name_len + data_len);

		len = sizeof(*di) + name_len + data_len;
		di = (struct btrfs_dir_item *)((char *)di + len);
		cur += len;

		ret = iterate(num, &di_key, buf, name_len, buf + name_len,
				data_len, type, ctx);
		if (ret < 0)
			goto out;
		if (ret) {
			/* callback asked to stop early; not an error */
			ret = 0;
			goto out;
		}

		num++;
	}

out:
	btrfs_free_path(tmp_path);
	if (buf_virtual)
		vfree(buf);
	else
		kfree(buf);
	return ret;
}
953
954static int __copy_first_ref(int num, u64 dir, int index,
955 struct fs_path *p, void *ctx)
956{
957 int ret;
958 struct fs_path *pt = ctx;
959
960 ret = fs_path_copy(pt, p);
961 if (ret < 0)
962 return ret;
963
964 /* we want the first only */
965 return 1;
966}
967
/*
 * Retrieve the first path of an inode. If an inode has more then one
 * ref/hardlink, this is ignored.
 *
 * Returns 0 on success, 1 when the inode has no INODE_REF at all,
 * -ENOENT when the first found key belongs to a different inode, or
 * another negative errno.
 */
static int get_inode_path(struct send_ctx *sctx, struct btrfs_root *root,
			  u64 ino, struct fs_path *path)
{
	int ret;
	struct btrfs_key key, found_key;
	struct btrfs_path *p;

	p = alloc_path_for_send();
	if (!p)
		return -ENOMEM;

	fs_path_reset(path);

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = 0;

	ret = btrfs_search_slot_for_read(root, &key, p, 1, 0);
	if (ret < 0)
		goto out;
	if (ret) {
		/* no item at or after the key: no ref exists */
		ret = 1;
		goto out;
	}
	btrfs_item_key_to_cpu(p->nodes[0], &found_key, p->slots[0]);
	if (found_key.objectid != ino ||
	    found_key.type != BTRFS_INODE_REF_KEY) {
		ret = -ENOENT;
		goto out;
	}

	/* resolve the first name to a full path into `path` */
	ret = iterate_inode_ref(sctx, root, p, &found_key, 1,
			__copy_first_ref, path);
	if (ret < 0)
		goto out;
	ret = 0;

out:
	btrfs_free_path(p);
	return ret;
}
1013
/* Context passed to __iterate_backrefs while resolving extent backrefs. */
struct backref_ctx {
	struct send_ctx *sctx;

	/* number of total found references */
	u64 found;

	/*
	 * used for clones found in send_root. clones found behind cur_objectid
	 * and cur_offset are not considered as allowed clones.
	 */
	u64 cur_objectid;
	u64 cur_offset;

	/* may be truncated in case it's the last extent in a file */
	u64 extent_len;

	/* Just to check for bugs in backref resolving */
	int found_in_send_root;
};
1033
/*
 * bsearch comparator for the clone_roots array, ordered by root objectid.
 * The search key is the root objectid itself smuggled through the pointer
 * value (the caller passes (void *)root).
 *
 * NOTE(review): on 32-bit kernels casting a u64 objectid to void* and
 * back truncates it — fixing that needs a matching change at the bsearch
 * call site in __iterate_backrefs (pass &root instead); flagged, not
 * changed here.
 */
static int __clone_root_cmp_bsearch(const void *key, const void *elt)
{
	u64 root = (u64)key;
	struct clone_root *cr = (struct clone_root *)elt;

	if (root < cr->root->objectid)
		return -1;
	if (root > cr->root->objectid)
		return 1;
	return 0;
}
1045
1046static int __clone_root_cmp_sort(const void *e1, const void *e2)
1047{
1048 struct clone_root *cr1 = (struct clone_root *)e1;
1049 struct clone_root *cr2 = (struct clone_root *)e2;
1050
1051 if (cr1->root->objectid < cr2->root->objectid)
1052 return -1;
1053 if (cr1->root->objectid > cr2->root->objectid)
1054 return 1;
1055 return 0;
1056}
1057
/*
 * Called for every backref that is found for the current extent.
 * Records the best clone source (lowest inode per root) in the matching
 * clone_root entry and counts total usable references in bctx->found.
 */
static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_)
{
	struct backref_ctx *bctx = ctx_;
	struct clone_root *found;
	int ret;
	u64 i_size;

	/*
	 * First check if the root is in the list of accepted clone sources.
	 * NOTE(review): the (void *)root cast truncates the u64 id on
	 * 32-bit — see __clone_root_cmp_bsearch.
	 */
	found = bsearch((void *)root, bctx->sctx->clone_roots,
			bctx->sctx->clone_roots_cnt,
			sizeof(struct clone_root),
			__clone_root_cmp_bsearch);
	if (!found)
		return 0;

	/* sanity flag: the extent must be visible from send_root itself */
	if (found->root == bctx->sctx->send_root &&
	    ino == bctx->cur_objectid &&
	    offset == bctx->cur_offset) {
		bctx->found_in_send_root = 1;
	}

	/*
	 * There are inodes that have extents that lie behind it's i_size. Don't
	 * accept clones from these extents.
	 */
	ret = get_inode_info(found->root, ino, &i_size, NULL, NULL, NULL, NULL,
			NULL);
	if (ret < 0)
		return ret;

	if (offset + bctx->extent_len > i_size)
		return 0;

	/*
	 * Make sure we don't consider clones from send_root that are
	 * behind the current inode/offset.
	 */
	if (found->root == bctx->sctx->send_root) {
		/*
		 * TODO for the moment we don't accept clones from the inode
		 * that is currently send. We may change this when
		 * BTRFS_IOC_CLONE_RANGE supports cloning from and to the same
		 * file.
		 */
		if (ino >= bctx->cur_objectid)
			return 0;
		/*if (ino > ctx->cur_objectid)
			return 0;
		if (offset + ctx->extent_len > ctx->cur_offset)
			return 0;*/

		bctx->found++;
		found->found_refs++;
		found->ino = ino;
		found->offset = offset;
		return 0;
	}

	bctx->found++;
	found->found_refs++;
	if (ino < found->ino) {
		/* prefer the lowest inode number as clone source */
		found->ino = ino;
		found->offset = offset;
	} else if (found->ino == ino) {
		/*
		 * same extent found more then once in the same file.
		 */
		if (found->offset > offset + bctx->extent_len)
			found->offset = offset;
	}

	return 0;
}
1134
/*
 * Find a clone source for the extent at (ino, data_offset).
 * path must point to the extent item when called.  On success *found is
 * set to the chosen clone_root; returns -ENOENT when no usable clone
 * source exists, or another negative errno.
 */
static int find_extent_clone(struct send_ctx *sctx,
			     struct btrfs_path *path,
			     u64 ino, u64 data_offset,
			     u64 ino_size,
			     struct clone_root **found)
{
	int ret;
	int extent_type;
	u64 logical;
	u64 num_bytes;
	u64 extent_item_pos;
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *eb = path->nodes[0];
	struct backref_ctx backref_ctx;	/* on-stack, filled below */
	struct clone_root *cur_clone_root;
	struct btrfs_key found_key;
	struct btrfs_path *tmp_path;
	u32 i;

	tmp_path = alloc_path_for_send();
	if (!tmp_path)
		return -ENOMEM;

	if (data_offset >= ino_size) {
		/*
		 * There may be extents that lie behind the file's size.
		 * I at least had this in combination with snapshotting while
		 * writing large files.
		 */
		ret = 0;
		goto out;
	}

	fi = btrfs_item_ptr(eb, path->slots[0],
			struct btrfs_file_extent_item);
	extent_type = btrfs_file_extent_type(eb, fi);
	if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
		/* inline extents cannot be cloned */
		ret = -ENOENT;
		goto out;
	}

	num_bytes = btrfs_file_extent_num_bytes(eb, fi);
	logical = btrfs_file_extent_disk_bytenr(eb, fi);
	if (logical == 0) {
		/* hole/preallocated — nothing to clone */
		ret = -ENOENT;
		goto out;
	}
	logical += btrfs_file_extent_offset(eb, fi);

	/* map the logical address back to the extent item in the tree */
	ret = extent_from_logical(sctx->send_root->fs_info,
			logical, tmp_path, &found_key);
	btrfs_release_path(tmp_path);

	if (ret < 0)
		goto out;
	if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		/* a data extent must not resolve to a tree block */
		ret = -EIO;
		goto out;
	}

	/*
	 * Setup the clone roots.
	 */
	for (i = 0; i < sctx->clone_roots_cnt; i++) {
		cur_clone_root = sctx->clone_roots + i;
		cur_clone_root->ino = (u64)-1;
		cur_clone_root->offset = 0;
		cur_clone_root->found_refs = 0;
	}

	backref_ctx.sctx = sctx;
	backref_ctx.found = 0;
	backref_ctx.cur_objectid = ino;
	backref_ctx.cur_offset = data_offset;
	backref_ctx.found_in_send_root = 0;
	backref_ctx.extent_len = num_bytes;

	/*
	 * The last extent of a file may be too large due to page alignment.
	 * We need to adjust extent_len in this case so that the checks in
	 * __iterate_backrefs work.
	 */
	if (data_offset + num_bytes >= ino_size)
		backref_ctx.extent_len = ino_size - data_offset;

	/*
	 * Now collect all backrefs.
	 */
	extent_item_pos = logical - found_key.objectid;
	ret = iterate_extent_inodes(sctx->send_root->fs_info,
					found_key.objectid, extent_item_pos, 1,
					__iterate_backrefs, &backref_ctx);
	if (ret < 0)
		goto out;

	if (!backref_ctx.found_in_send_root) {
		/* found a bug in backref code? */
		ret = -EIO;
		printk(KERN_ERR "btrfs: ERROR did not find backref in "
				"send_root. inode=%llu, offset=%llu, "
				"logical=%llu\n",
				ino, data_offset, logical);
		goto out;
	}

verbose_printk(KERN_DEBUG "btrfs: find_extent_clone: data_offset=%llu, "
		"ino=%llu, "
		"num_bytes=%llu, logical=%llu\n",
		data_offset, ino, num_bytes, logical);

	if (!backref_ctx.found)
		verbose_printk("btrfs: no clones found\n");

	/*
	 * Pick the clone source.
	 * NOTE(review): the `break` below exits on the FIRST root that has
	 * refs, so the "prefer send_root" else-branch can never run — the
	 * preference looks unintended/dead; confirm before relying on it.
	 */
	cur_clone_root = NULL;
	for (i = 0; i < sctx->clone_roots_cnt; i++) {
		if (sctx->clone_roots[i].found_refs) {
			if (!cur_clone_root)
				cur_clone_root = sctx->clone_roots + i;
			else if (sctx->clone_roots[i].root == sctx->send_root)
				/* prefer clones from send_root over others */
				cur_clone_root = sctx->clone_roots + i;
			break;
		}

	}

	if (cur_clone_root) {
		*found = cur_clone_root;
		ret = 0;
	} else {
		ret = -ENOENT;
	}

out:
	btrfs_free_path(tmp_path);
	return ret;
}
1275
/*
 * Read a symlink target into `dest`.  Symlink targets are stored as a
 * single uncompressed inline extent at file offset 0; the BUG_ONs assert
 * exactly that on-disk layout.  Returns 0 or a negative errno.
 */
static int read_symlink(struct send_ctx *sctx,
			struct btrfs_root *root,
			u64 ino,
			struct fs_path *dest)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_file_extent_item *ei;
	u8 type;
	u8 compression;
	unsigned long off;
	int len;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	/* a symlink always has its extent data item */
	BUG_ON(ret);

	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
			struct btrfs_file_extent_item);
	type = btrfs_file_extent_type(path->nodes[0], ei);
	compression = btrfs_file_extent_compression(path->nodes[0], ei);
	BUG_ON(type != BTRFS_FILE_EXTENT_INLINE);
	BUG_ON(compression);

	off = btrfs_file_extent_inline_start(ei);
	len = btrfs_file_extent_inline_len(path->nodes[0], ei);

	ret = fs_path_add_from_extent_buffer(dest, path->nodes[0], off, len);
	if (ret < 0)
		goto out;

out:
	btrfs_free_path(path);
	return ret;
}
1320
1321/*
1322 * Helper function to generate a file name that is unique in the root of
1323 * send_root and parent_root. This is used to generate names for orphan inodes.
1324 */
1325static int gen_unique_name(struct send_ctx *sctx,
1326 u64 ino, u64 gen,
1327 struct fs_path *dest)
1328{
1329 int ret = 0;
1330 struct btrfs_path *path;
1331 struct btrfs_dir_item *di;
1332 char tmp[64];
1333 int len;
1334 u64 idx = 0;
1335
1336 path = alloc_path_for_send();
1337 if (!path)
1338 return -ENOMEM;
1339
1340 while (1) {
1341 len = snprintf(tmp, sizeof(tmp) - 1, "o%llu-%llu-%llu",
1342 ino, gen, idx);
1343 if (len >= sizeof(tmp)) {
1344 /* should really not happen */
1345 ret = -EOVERFLOW;
1346 goto out;
1347 }
1348
1349 di = btrfs_lookup_dir_item(NULL, sctx->send_root,
1350 path, BTRFS_FIRST_FREE_OBJECTID,
1351 tmp, strlen(tmp), 0);
1352 btrfs_release_path(path);
1353 if (IS_ERR(di)) {
1354 ret = PTR_ERR(di);
1355 goto out;
1356 }
1357 if (di) {
1358 /* not unique, try again */
1359 idx++;
1360 continue;
1361 }
1362
1363 if (!sctx->parent_root) {
1364 /* unique */
1365 ret = 0;
1366 break;
1367 }
1368
1369 di = btrfs_lookup_dir_item(NULL, sctx->parent_root,
1370 path, BTRFS_FIRST_FREE_OBJECTID,
1371 tmp, strlen(tmp), 0);
1372 btrfs_release_path(path);
1373 if (IS_ERR(di)) {
1374 ret = PTR_ERR(di);
1375 goto out;
1376 }
1377 if (di) {
1378 /* not unique, try again */
1379 idx++;
1380 continue;
1381 }
1382 /* unique */
1383 break;
1384 }
1385
1386 ret = fs_path_add(dest, tmp, strlen(tmp));
1387
1388out:
1389 btrfs_free_path(path);
1390 return ret;
1391}
1392
/*
 * State of an inode at the current point of the send stream
 * (sctx->send_progress), derived from its presence in send_root and/or
 * parent_root and from whether it was already processed.
 */
enum inode_state {
	inode_state_no_change,		/* same ino/gen in both roots */
	inode_state_will_create,	/* new in send_root, not yet processed */
	inode_state_did_create,		/* new in send_root, already processed */
	inode_state_will_delete,	/* gone from send_root, not yet processed */
	inode_state_did_delete,		/* gone from send_root, already processed */
};
1400
/*
 * Determine the state of inode 'ino' with generation 'gen' relative to the
 * current send progress, by looking it up in both send_root and parent_root.
 *
 * Returns one of enum inode_state, or a negative errno. -ENOENT means the
 * given ino/gen pair exists in neither root.
 */
static int get_cur_inode_state(struct send_ctx *sctx, u64 ino, u64 gen)
{
	int ret;
	int left_ret;	/* lookup result in send_root (the "new" side) */
	int right_ret;	/* lookup result in parent_root (the "old" side) */
	u64 left_gen;
	u64 right_gen;

	ret = get_inode_info(sctx->send_root, ino, NULL, &left_gen, NULL, NULL,
			NULL, NULL);
	if (ret < 0 && ret != -ENOENT)
		goto out;
	left_ret = ret;

	if (!sctx->parent_root) {
		/* full send: there is no parent side */
		right_ret = -ENOENT;
	} else {
		ret = get_inode_info(sctx->parent_root, ino, NULL, &right_gen,
				NULL, NULL, NULL, NULL);
		if (ret < 0 && ret != -ENOENT)
			goto out;
		right_ret = ret;
	}

	/*
	 * left_gen/right_gen are only valid when the corresponding *_ret is
	 * zero; every branch below checks the ret before reading the gen.
	 */
	if (!left_ret && !right_ret) {
		/* inode number exists in both roots */
		if (left_gen == gen && right_gen == gen)
			ret = inode_state_no_change;
		else if (left_gen == gen) {
			/* same ino but different gen on the parent side */
			if (ino < sctx->send_progress)
				ret = inode_state_did_create;
			else
				ret = inode_state_will_create;
		} else if (right_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_delete;
			else
				ret = inode_state_will_delete;
		} else {
			ret = -ENOENT;
		}
	} else if (!left_ret) {
		/* only in send_root -> it is a new inode */
		if (left_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_create;
			else
				ret = inode_state_will_create;
		} else {
			ret = -ENOENT;
		}
	} else if (!right_ret) {
		/* only in parent_root -> it gets deleted */
		if (right_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_delete;
			else
				ret = inode_state_will_delete;
		} else {
			ret = -ENOENT;
		}
	} else {
		ret = -ENOENT;
	}

out:
	return ret;
}
1466
1467static int is_inode_existent(struct send_ctx *sctx, u64 ino, u64 gen)
1468{
1469 int ret;
1470
1471 ret = get_cur_inode_state(sctx, ino, gen);
1472 if (ret < 0)
1473 goto out;
1474
1475 if (ret == inode_state_no_change ||
1476 ret == inode_state_did_create ||
1477 ret == inode_state_will_delete)
1478 ret = 1;
1479 else
1480 ret = 0;
1481
1482out:
1483 return ret;
1484}
1485
1486/*
1487 * Helper function to lookup a dir item in a dir.
1488 */
1489static int lookup_dir_item_inode(struct btrfs_root *root,
1490 u64 dir, const char *name, int name_len,
1491 u64 *found_inode,
1492 u8 *found_type)
1493{
1494 int ret = 0;
1495 struct btrfs_dir_item *di;
1496 struct btrfs_key key;
1497 struct btrfs_path *path;
1498
1499 path = alloc_path_for_send();
1500 if (!path)
1501 return -ENOMEM;
1502
1503 di = btrfs_lookup_dir_item(NULL, root, path,
1504 dir, name, name_len, 0);
1505 if (!di) {
1506 ret = -ENOENT;
1507 goto out;
1508 }
1509 if (IS_ERR(di)) {
1510 ret = PTR_ERR(di);
1511 goto out;
1512 }
1513 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
1514 *found_inode = key.objectid;
1515 *found_type = btrfs_dir_type(path->nodes[0], di);
1516
1517out:
1518 btrfs_free_path(path);
1519 return ret;
1520}
1521
/*
 * Retrieve the first (lowest key offset) INODE_REF of 'ino' in 'root'.
 * The parent directory's inode number is returned in *dir, its generation
 * in *dir_gen, and the ref's name is appended to 'name'.
 * Returns 0 on success, -ENOENT if the inode has no refs, < 0 on error.
 */
static int get_first_ref(struct send_ctx *sctx,
			 struct btrfs_root *root, u64 ino,
			 u64 *dir, u64 *dir_gen, struct fs_path *name)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	struct btrfs_inode_ref *iref;
	int len;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = 0;

	ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
	if (ret < 0)
		goto out;
	if (!ret)
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				path->slots[0]);
	/*
	 * found_key is only initialized when ret == 0; the short-circuit of
	 * the || below keeps it from being read uninitialized otherwise.
	 */
	if (ret || found_key.objectid != key.objectid ||
	    found_key.type != key.type) {
		ret = -ENOENT;
		goto out;
	}

	iref = btrfs_item_ptr(path->nodes[0], path->slots[0],
			struct btrfs_inode_ref);
	len = btrfs_inode_ref_name_len(path->nodes[0], iref);
	ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
			(unsigned long)(iref + 1), len);
	if (ret < 0)
		goto out;
	btrfs_release_path(path);

	/* for INODE_REF items, the key offset is the parent directory's ino */
	ret = get_inode_info(root, found_key.offset, NULL, dir_gen, NULL, NULL,
			NULL, NULL);
	if (ret < 0)
		goto out;

	*dir = found_key.offset;

out:
	btrfs_free_path(path);
	return ret;
}
1573
1574static int is_first_ref(struct send_ctx *sctx,
1575 struct btrfs_root *root,
1576 u64 ino, u64 dir,
1577 const char *name, int name_len)
1578{
1579 int ret;
1580 struct fs_path *tmp_name;
1581 u64 tmp_dir;
1582 u64 tmp_dir_gen;
1583
1584 tmp_name = fs_path_alloc(sctx);
1585 if (!tmp_name)
1586 return -ENOMEM;
1587
1588 ret = get_first_ref(sctx, root, ino, &tmp_dir, &tmp_dir_gen, tmp_name);
1589 if (ret < 0)
1590 goto out;
1591
1592 if (name_len != fs_path_len(tmp_name)) {
1593 ret = 0;
1594 goto out;
1595 }
1596
1597 ret = memcmp(tmp_name->start, name, name_len);
1598 if (ret)
1599 ret = 0;
1600 else
1601 ret = 1;
1602
1603out:
1604 fs_path_free(sctx, tmp_name);
1605 return ret;
1606}
1607
/*
 * Check if creating the ref (dir, name) would overwrite a ref of an inode
 * in parent_root that was not processed yet. If so, return 1 and report
 * the inode that would get overwritten in *who_ino/*who_gen so the caller
 * can orphanize it first. Returns 0 when nothing is overwritten, < 0 on
 * error.
 */
static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,
			      const char *name, int name_len,
			      u64 *who_ino, u64 *who_gen)
{
	int ret = 0;
	u64 other_inode = 0;
	u8 other_type = 0;

	if (!sctx->parent_root)
		goto out;

	ret = is_inode_existent(sctx, dir, dir_gen);
	if (ret <= 0)
		goto out;

	ret = lookup_dir_item_inode(sctx->parent_root, dir, name, name_len,
			&other_inode, &other_type);
	if (ret < 0 && ret != -ENOENT)
		goto out;
	if (ret) {
		/* no dir item with that name in the parent -> no conflict */
		ret = 0;
		goto out;
	}

	/*
	 * Inodes <= send_progress were already processed; only a still
	 * unprocessed inode needs to be orphanized before we take its name.
	 */
	if (other_inode > sctx->send_progress) {
		ret = get_inode_info(sctx->parent_root, other_inode, NULL,
				who_gen, NULL, NULL, NULL, NULL);
		if (ret < 0)
			goto out;

		ret = 1;
		*who_ino = other_inode;
	} else {
		ret = 0;
	}

out:
	return ret;
}
1647
/*
 * Check if the ref (dir, name) of inode ino/ino_gen, as it existed in
 * parent_root, was already overwritten in the send stream: i.e. the same
 * (dir, name) now points to a different, already processed inode in
 * send_root. Returns 1 if so, 0 if not, < 0 on error.
 */
static int did_overwrite_ref(struct send_ctx *sctx,
			    u64 dir, u64 dir_gen,
			    u64 ino, u64 ino_gen,
			    const char *name, int name_len)
{
	int ret = 0;
	u64 gen;
	u64 ow_inode;
	u8 other_type;

	if (!sctx->parent_root)
		goto out;

	ret = is_inode_existent(sctx, dir, dir_gen);
	if (ret <= 0)
		goto out;

	/* check if the ref was overwritten by another ref */
	ret = lookup_dir_item_inode(sctx->send_root, dir, name, name_len,
			&ow_inode, &other_type);
	if (ret < 0 && ret != -ENOENT)
		goto out;
	if (ret) {
		/* was never and will never be overwritten */
		ret = 0;
		goto out;
	}

	ret = get_inode_info(sctx->send_root, ow_inode, NULL, &gen, NULL, NULL,
			NULL, NULL);
	if (ret < 0)
		goto out;

	if (ow_inode == ino && gen == ino_gen) {
		/* the name still points at the very same inode */
		ret = 0;
		goto out;
	}

	/* we know that it is or will be overwritten. check this now */
	if (ow_inode < sctx->send_progress)
		ret = 1;
	else
		ret = 0;

out:
	return ret;
}
1695
1696static int did_overwrite_first_ref(struct send_ctx *sctx, u64 ino, u64 gen)
1697{
1698 int ret = 0;
1699 struct fs_path *name = NULL;
1700 u64 dir;
1701 u64 dir_gen;
1702
1703 if (!sctx->parent_root)
1704 goto out;
1705
1706 name = fs_path_alloc(sctx);
1707 if (!name)
1708 return -ENOMEM;
1709
1710 ret = get_first_ref(sctx, sctx->parent_root, ino, &dir, &dir_gen, name);
1711 if (ret < 0)
1712 goto out;
1713
1714 ret = did_overwrite_ref(sctx, dir, dir_gen, ino, gen,
1715 name->start, fs_path_len(name));
1716 if (ret < 0)
1717 goto out;
1718
1719out:
1720 fs_path_free(sctx, name);
1721 return ret;
1722}
1723
1724static int name_cache_insert(struct send_ctx *sctx,
1725 struct name_cache_entry *nce)
1726{
1727 int ret = 0;
1728 struct name_cache_entry **ncea;
1729
1730 ncea = radix_tree_lookup(&sctx->name_cache, nce->ino);
1731 if (ncea) {
1732 if (!ncea[0])
1733 ncea[0] = nce;
1734 else if (!ncea[1])
1735 ncea[1] = nce;
1736 else
1737 BUG();
1738 } else {
1739 ncea = kmalloc(sizeof(void *) * 2, GFP_NOFS);
1740 if (!ncea)
1741 return -ENOMEM;
1742
1743 ncea[0] = nce;
1744 ncea[1] = NULL;
1745 ret = radix_tree_insert(&sctx->name_cache, nce->ino, ncea);
1746 if (ret < 0)
1747 return ret;
1748 }
1749 list_add_tail(&nce->list, &sctx->name_cache_list);
1750 sctx->name_cache_size++;
1751
1752 return ret;
1753}
1754
1755static void name_cache_delete(struct send_ctx *sctx,
1756 struct name_cache_entry *nce)
1757{
1758 struct name_cache_entry **ncea;
1759
1760 ncea = radix_tree_lookup(&sctx->name_cache, nce->ino);
1761 BUG_ON(!ncea);
1762
1763 if (ncea[0] == nce)
1764 ncea[0] = NULL;
1765 else if (ncea[1] == nce)
1766 ncea[1] = NULL;
1767 else
1768 BUG();
1769
1770 if (!ncea[0] && !ncea[1]) {
1771 radix_tree_delete(&sctx->name_cache, nce->ino);
1772 kfree(ncea);
1773 }
1774
1775 list_del(&nce->list);
1776
1777 sctx->name_cache_size--;
1778}
1779
1780static struct name_cache_entry *name_cache_search(struct send_ctx *sctx,
1781 u64 ino, u64 gen)
1782{
1783 struct name_cache_entry **ncea;
1784
1785 ncea = radix_tree_lookup(&sctx->name_cache, ino);
1786 if (!ncea)
1787 return NULL;
1788
1789 if (ncea[0] && ncea[0]->gen == gen)
1790 return ncea[0];
1791 else if (ncea[1] && ncea[1]->gen == gen)
1792 return ncea[1];
1793 return NULL;
1794}
1795
1796static void name_cache_used(struct send_ctx *sctx, struct name_cache_entry *nce)
1797{
1798 list_del(&nce->list);
1799 list_add_tail(&nce->list, &sctx->name_cache_list);
1800}
1801
1802static void name_cache_clean_unused(struct send_ctx *sctx)
1803{
1804 struct name_cache_entry *nce;
1805
1806 if (sctx->name_cache_size < SEND_CTX_NAME_CACHE_CLEAN_SIZE)
1807 return;
1808
1809 while (sctx->name_cache_size > SEND_CTX_MAX_NAME_CACHE_SIZE) {
1810 nce = list_entry(sctx->name_cache_list.next,
1811 struct name_cache_entry, list);
1812 name_cache_delete(sctx, nce);
1813 kfree(nce);
1814 }
1815}
1816
1817static void name_cache_free(struct send_ctx *sctx)
1818{
1819 struct name_cache_entry *nce;
1820 struct name_cache_entry *tmp;
1821
1822 list_for_each_entry_safe(nce, tmp, &sctx->name_cache_list, list) {
1823 name_cache_delete(sctx, nce);
1824 }
1825}
1826
/*
 * Determine the current name and parent of ino/gen as it would look on the
 * receiving side at sctx->send_progress. Results are cached in
 * sctx->name_cache.
 *
 * Returns 0 if the real first ref name was used, 1 if an orphan name was
 * generated (inode does not exist yet/anymore, or its first ref was
 * overwritten), < 0 on error. The name is appended to 'dest' and the
 * parent is returned in *parent_ino/*parent_gen.
 */
static int __get_cur_name_and_parent(struct send_ctx *sctx,
				     u64 ino, u64 gen,
				     u64 *parent_ino,
				     u64 *parent_gen,
				     struct fs_path *dest)
{
	int ret;
	int nce_ret;
	struct btrfs_path *path = NULL;
	struct name_cache_entry *nce = NULL;

	/*
	 * First check the cache. A cached result that was computed before the
	 * inode got processed may be stale (need_later_update), drop it then.
	 */
	nce = name_cache_search(sctx, ino, gen);
	if (nce) {
		if (ino < sctx->send_progress && nce->need_later_update) {
			name_cache_delete(sctx, nce);
			kfree(nce);
			nce = NULL;
		} else {
			name_cache_used(sctx, nce);
			*parent_ino = nce->parent_ino;
			*parent_gen = nce->parent_gen;
			ret = fs_path_add(dest, nce->name, nce->name_len);
			if (ret < 0)
				goto out;
			/* nce->ret preserves the original 0/1 result */
			ret = nce->ret;
			goto out;
		}
	}

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	/*
	 * An inode that does not exist at this point of the stream gets an
	 * orphan name.
	 */
	ret = is_inode_existent(sctx, ino, gen);
	if (ret < 0)
		goto out;

	if (!ret) {
		ret = gen_unique_name(sctx, ino, gen, dest);
		if (ret < 0)
			goto out;
		ret = 1;
		goto out_cache;
	}

	/*
	 * Already processed inodes have their current name in send_root;
	 * unprocessed ones still carry the old name from parent_root.
	 */
	if (ino < sctx->send_progress)
		ret = get_first_ref(sctx, sctx->send_root, ino,
				parent_ino, parent_gen, dest);
	else
		ret = get_first_ref(sctx, sctx->parent_root, ino,
				parent_ino, parent_gen, dest);
	if (ret < 0)
		goto out;

	/*
	 * If the ref was already overwritten in the stream, the inode is
	 * addressed by its orphan name instead.
	 */
	ret = did_overwrite_ref(sctx, *parent_ino, *parent_gen, ino, gen,
			dest->start, dest->end - dest->start);
	if (ret < 0)
		goto out;
	if (ret) {
		fs_path_reset(dest);
		ret = gen_unique_name(sctx, ino, gen, dest);
		if (ret < 0)
			goto out;
		ret = 1;
	}

out_cache:
	/* store the result (including the 0/1 ret) in the name cache */
	nce = kmalloc(sizeof(*nce) + fs_path_len(dest) + 1, GFP_NOFS);
	if (!nce) {
		ret = -ENOMEM;
		goto out;
	}

	nce->ino = ino;
	nce->gen = gen;
	nce->parent_ino = *parent_ino;
	nce->parent_gen = *parent_gen;
	nce->name_len = fs_path_len(dest);
	nce->ret = ret;
	strcpy(nce->name, dest->start);
	/*
	 * NOTE(review): use_list is zeroed here but list_add_tail in
	 * name_cache_insert operates on nce->list — confirm use_list is the
	 * intended field.
	 */
	memset(&nce->use_list, 0, sizeof(nce->use_list));

	if (ino < sctx->send_progress)
		nce->need_later_update = 0;
	else
		nce->need_later_update = 1;

	nce_ret = name_cache_insert(sctx, nce);
	if (nce_ret < 0)
		ret = nce_ret;
	name_cache_clean_unused(sctx);

out:
	btrfs_free_path(path);
	return ret;
}
1923
/*
 * Magic happens here. This function returns the first ref to an inode as it
 * would look like while receiving the stream at this point in time.
 * We walk the path up to the root. For every inode in between, we check if it
 * was already processed/sent. If yes, we continue with the parent as found
 * in send_root. If not, we continue with the parent as found in parent_root.
 * If we encounter an inode that was deleted at this point in time, we use the
 * inodes "orphan" name instead of the real name and stop. Same with new inodes
 * that were not created yet and overwritten inodes/refs.
 *
 * When do we have orphan inodes:
 * 1. When an inode is freshly created and thus no valid refs are available yet
 * 2. When a directory lost all its refs (deleted) but still has dir items
 *    inside which were not processed yet (pending for move/delete). If anyone
 *    tried to get the path to the dir items, it would get a path inside that
 *    orphan directory.
 * 3. When an inode is moved around or gets new links, it may overwrite the ref
 *    of an unprocessed inode. If in that case the first ref would be
 *    overwritten, the overwritten inode gets "orphanized". Later when we
 *    process this overwritten inode, it is restored at a new place by moving
 *    the orphan inode.
 *
 * sctx->send_progress tells this function at which point in time receiving
 * would be.
 */
static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen,
			struct fs_path *dest)
{
	int ret = 0;
	struct fs_path *name = NULL;
	u64 parent_inode = 0;
	u64 parent_gen = 0;
	int stop = 0;

	name = fs_path_alloc(sctx);
	if (!name) {
		ret = -ENOMEM;
		goto out;
	}

	/* build the path right-to-left while walking up to the subvol root */
	dest->reversed = 1;
	fs_path_reset(dest);

	while (!stop && ino != BTRFS_FIRST_FREE_OBJECTID) {
		fs_path_reset(name);

		/* ret == 1 means an orphan name was used -> stop walking up */
		ret = __get_cur_name_and_parent(sctx, ino, gen,
				&parent_inode, &parent_gen, name);
		if (ret < 0)
			goto out;
		if (ret)
			stop = 1;

		ret = fs_path_add_path(dest, name);
		if (ret < 0)
			goto out;

		ino = parent_inode;
		gen = parent_gen;
	}

out:
	fs_path_free(sctx, name);
	if (!ret)
		fs_path_unreverse(dest);
	return ret;
}
1991
/*
 * Called for regular files when sending extents data. Opens a struct file
 * to read from the file.
 */
static int open_cur_inode_file(struct send_ctx *sctx)
{
	int ret = 0;
	struct btrfs_key key;
	struct path path;
	struct inode *inode;
	struct dentry *dentry;
	struct file *filp;
	int new = 0;

	/* already open from a previous call for the same inode */
	if (sctx->cur_inode_filp)
		goto out;

	key.objectid = sctx->cur_ino;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	inode = btrfs_iget(sctx->send_root->fs_info->sb, &key, sctx->send_root,
			&new);
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		goto out;
	}

	/* d_obtain_alias consumes the inode reference, even on error */
	dentry = d_obtain_alias(inode);
	inode = NULL;
	if (IS_ERR(dentry)) {
		ret = PTR_ERR(dentry);
		goto out;
	}

	path.mnt = sctx->mnt;
	path.dentry = dentry;
	filp = dentry_open(&path, O_RDONLY | O_LARGEFILE, current_cred());
	dput(dentry);
	dentry = NULL;
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		goto out;
	}
	sctx->cur_inode_filp = filp;

out:
	/*
	 * no xxxput required here as every vfs op
	 * does it by itself on failure
	 */
	return ret;
}
2045
2046/*
2047 * Closes the struct file that was created in open_cur_inode_file
2048 */
2049static int close_cur_inode_file(struct send_ctx *sctx)
2050{
2051 int ret = 0;
2052
2053 if (!sctx->cur_inode_filp)
2054 goto out;
2055
2056 ret = filp_close(sctx->cur_inode_filp, NULL);
2057 sctx->cur_inode_filp = NULL;
2058
2059out:
2060 return ret;
2061}
2062
/*
 * Sends a BTRFS_SEND_C_SUBVOL command/item to userspace
 */
static int send_subvol_begin(struct send_ctx *sctx)
{
	int ret;
	struct btrfs_root *send_root = sctx->send_root;
	struct btrfs_root *parent_root = sctx->parent_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_root_ref *ref;
	struct extent_buffer *leaf;
	char *name = NULL;
	int namelen;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	name = kmalloc(BTRFS_PATH_NAME_MAX, GFP_NOFS);
	if (!name) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	/* find the subvolume's own name via its ROOT_BACKREF in tree_root */
	key.objectid = send_root->objectid;
	key.type = BTRFS_ROOT_BACKREF_KEY;
	key.offset = 0;

	ret = btrfs_search_slot_for_read(send_root->fs_info->tree_root,
			&key, path, 1, 0);
	if (ret < 0)
		goto out;
	if (ret) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.type != BTRFS_ROOT_BACKREF_KEY ||
	    key.objectid != send_root->objectid) {
		ret = -ENOENT;
		goto out;
	}
	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
	namelen = btrfs_root_ref_name_len(leaf, ref);
	read_extent_buffer(leaf, name, (unsigned long)(ref + 1), namelen);
	btrfs_release_path(path);

	/* NOTE(review): ret is 0 here, this check is dead code (harmless) */
	if (ret < 0)
		goto out;

	/* incremental sends start a snapshot, full sends a subvol */
	if (parent_root) {
		ret = begin_cmd(sctx, BTRFS_SEND_C_SNAPSHOT);
		if (ret < 0)
			goto out;
	} else {
		ret = begin_cmd(sctx, BTRFS_SEND_C_SUBVOL);
		if (ret < 0)
			goto out;
	}

	TLV_PUT_STRING(sctx, BTRFS_SEND_A_PATH, name, namelen);
	TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
			sctx->send_root->root_item.uuid);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_CTRANSID,
			sctx->send_root->root_item.ctransid);
	if (parent_root) {
		TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
				sctx->parent_root->root_item.uuid);
		TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
				sctx->parent_root->root_item.ctransid);
	}

	ret = send_cmd(sctx);

/* the TLV_PUT* macros jump here on failure */
tlv_put_failure:
out:
	btrfs_free_path(path);
	kfree(name);
	return ret;
}
2146
/*
 * Send a TRUNCATE command for ino/gen with the given target size.
 * The TLV_PUT* macros jump to tlv_put_failure on error.
 */
static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size)
{
	int ret = 0;
	struct fs_path *p;

verbose_printk("btrfs: send_truncate %llu size=%llu\n", ino, size);

	p = fs_path_alloc(sctx);
	if (!p)
		return -ENOMEM;

	ret = begin_cmd(sctx, BTRFS_SEND_C_TRUNCATE);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, ino, gen, p);
	if (ret < 0)
		goto out;
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, size);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(sctx, p);
	return ret;
}
2175
/*
 * Send a CHMOD command for ino/gen. Only the permission bits (07777) of
 * 'mode' are transmitted. The TLV_PUT* macros jump to tlv_put_failure on
 * error.
 */
static int send_chmod(struct send_ctx *sctx, u64 ino, u64 gen, u64 mode)
{
	int ret = 0;
	struct fs_path *p;

verbose_printk("btrfs: send_chmod %llu mode=%llu\n", ino, mode);

	p = fs_path_alloc(sctx);
	if (!p)
		return -ENOMEM;

	ret = begin_cmd(sctx, BTRFS_SEND_C_CHMOD);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, ino, gen, p);
	if (ret < 0)
		goto out;
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode & 07777);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(sctx, p);
	return ret;
}
2204
/*
 * Send a CHOWN command for ino/gen with the given uid/gid.
 * The TLV_PUT* macros jump to tlv_put_failure on error.
 */
static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 uid, u64 gid)
{
	int ret = 0;
	struct fs_path *p;

verbose_printk("btrfs: send_chown %llu uid=%llu, gid=%llu\n", ino, uid, gid);

	p = fs_path_alloc(sctx);
	if (!p)
		return -ENOMEM;

	ret = begin_cmd(sctx, BTRFS_SEND_C_CHOWN);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, ino, gen, p);
	if (ret < 0)
		goto out;
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_UID, uid);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_GID, gid);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(sctx, p);
	return ret;
}
2234
2235static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen)
2236{
2237 int ret = 0;
2238 struct fs_path *p = NULL;
2239 struct btrfs_inode_item *ii;
2240 struct btrfs_path *path = NULL;
2241 struct extent_buffer *eb;
2242 struct btrfs_key key;
2243 int slot;
2244
2245verbose_printk("btrfs: send_utimes %llu\n", ino);
2246
2247 p = fs_path_alloc(sctx);
2248 if (!p)
2249 return -ENOMEM;
2250
2251 path = alloc_path_for_send();
2252 if (!path) {
2253 ret = -ENOMEM;
2254 goto out;
2255 }
2256
2257 key.objectid = ino;
2258 key.type = BTRFS_INODE_ITEM_KEY;
2259 key.offset = 0;
2260 ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
2261 if (ret < 0)
2262 goto out;
2263
2264 eb = path->nodes[0];
2265 slot = path->slots[0];
2266 ii = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
2267
2268 ret = begin_cmd(sctx, BTRFS_SEND_C_UTIMES);
2269 if (ret < 0)
2270 goto out;
2271
2272 ret = get_cur_path(sctx, ino, gen, p);
2273 if (ret < 0)
2274 goto out;
2275 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2276 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb,
2277 btrfs_inode_atime(ii));
2278 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb,
2279 btrfs_inode_mtime(ii));
2280 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb,
2281 btrfs_inode_ctime(ii));
2282 /* TODO otime? */
2283
2284 ret = send_cmd(sctx);
2285
2286tlv_put_failure:
2287out:
2288 fs_path_free(sctx, p);
2289 btrfs_free_path(path);
2290 return ret;
2291}
2292
/*
 * Sends a BTRFS_SEND_C_MKXXX or SYMLINK command to user space. We don't have
 * a valid path yet because we did not process the refs yet. So, the inode
 * is created as orphan.
 */
static int send_create_inode(struct send_ctx *sctx, u64 ino)
{
	int ret = 0;
	struct fs_path *p;
	int cmd;
	u64 gen;
	u64 mode;
	u64 rdev;

verbose_printk("btrfs: send_create_inode %llu\n", ino);

	p = fs_path_alloc(sctx);
	if (!p)
		return -ENOMEM;

	ret = get_inode_info(sctx->send_root, ino, NULL, &gen, &mode, NULL,
			NULL, &rdev);
	if (ret < 0)
		goto out;

	/* pick the creation command matching the inode's file type */
	if (S_ISREG(mode))
		cmd = BTRFS_SEND_C_MKFILE;
	else if (S_ISDIR(mode))
		cmd = BTRFS_SEND_C_MKDIR;
	else if (S_ISLNK(mode))
		cmd = BTRFS_SEND_C_SYMLINK;
	else if (S_ISCHR(mode) || S_ISBLK(mode))
		cmd = BTRFS_SEND_C_MKNOD;
	else if (S_ISFIFO(mode))
		cmd = BTRFS_SEND_C_MKFIFO;
	else if (S_ISSOCK(mode))
		cmd = BTRFS_SEND_C_MKSOCK;
	else {
		printk(KERN_WARNING "btrfs: unexpected inode type %o",
				(int)(mode & S_IFMT));
		ret = -ENOTSUPP;
		goto out;
	}

	ret = begin_cmd(sctx, cmd);
	if (ret < 0)
		goto out;

	/* the inode is always created under its orphan name first */
	ret = gen_unique_name(sctx, ino, gen, p);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_INO, ino);

	if (S_ISLNK(mode)) {
		/* symlinks additionally carry their target path */
		fs_path_reset(p);
		ret = read_symlink(sctx, sctx->send_root, ino, p);
		if (ret < 0)
			goto out;
		TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, p);
	} else if (S_ISCHR(mode) || S_ISBLK(mode) ||
		   S_ISFIFO(mode) || S_ISSOCK(mode)) {
		/* device/fifo/socket nodes carry their rdev */
		TLV_PUT_U64(sctx, BTRFS_SEND_A_RDEV, rdev);
	}

	ret = send_cmd(sctx);
	if (ret < 0)
		goto out;


tlv_put_failure:
out:
	fs_path_free(sctx, p);
	return ret;
}
2369
/*
 * We need some special handling for inodes that get processed before the parent
 * directory got created. See process_recorded_refs for details.
 * This function does the check if we already created the dir out of order.
 */
static int did_create_dir(struct send_ctx *sctx, u64 dir)
{
	int ret = 0;
	struct btrfs_path *path = NULL;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_key di_key;
	struct extent_buffer *eb;
	struct btrfs_dir_item *di;
	int slot;

	path = alloc_path_for_send();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	/* iterate all DIR_INDEX items of 'dir' in send_root */
	key.objectid = dir;
	key.type = BTRFS_DIR_INDEX_KEY;
	key.offset = 0;
	while (1) {
		ret = btrfs_search_slot_for_read(sctx->send_root, &key, path,
				1, 0);
		if (ret < 0)
			goto out;
		if (!ret) {
			eb = path->nodes[0];
			slot = path->slots[0];
			btrfs_item_key_to_cpu(eb, &found_key, slot);
		}
		/*
		 * found_key/eb/slot are only valid when ret == 0; the ||
		 * short-circuits, so they are never read otherwise.
		 */
		if (ret || found_key.objectid != key.objectid ||
		    found_key.type != key.type) {
			ret = 0;
			goto out;
		}

		di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
		btrfs_dir_item_key_to_cpu(eb, di, &di_key);

		/*
		 * A dir item pointing to an already processed inode means the
		 * dir must have been created out of order already.
		 */
		if (di_key.objectid < sctx->send_progress) {
			ret = 1;
			goto out;
		}

		key.offset = found_key.offset + 1;
		btrfs_release_path(path);
	}

out:
	btrfs_free_path(path);
	return ret;
}
2427
2428/*
2429 * Only creates the inode if it is:
2430 * 1. Not a directory
2431 * 2. Or a directory which was not created already due to out of order
2432 * directories. See did_create_dir and process_recorded_refs for details.
2433 */
2434static int send_create_inode_if_needed(struct send_ctx *sctx)
2435{
2436 int ret;
2437
2438 if (S_ISDIR(sctx->cur_inode_mode)) {
2439 ret = did_create_dir(sctx, sctx->cur_ino);
2440 if (ret < 0)
2441 goto out;
2442 if (ret) {
2443 ret = 0;
2444 goto out;
2445 }
2446 }
2447
2448 ret = send_create_inode(sctx, sctx->cur_ino);
2449 if (ret < 0)
2450 goto out;
2451
2452out:
2453 return ret;
2454}
2455
/* A single recorded inode ref; see record_ref for how the fields are set. */
struct recorded_ref {
	struct list_head list;		/* entry in new_refs/deleted_refs */
	char *dir_path;			/* directory part of full_path */
	char *name;			/* last path component (not 0-terminated) */
	struct fs_path *full_path;	/* owned; freed in __free_recorded_refs */
	u64 dir;			/* parent directory inode number */
	u64 dir_gen;			/* parent directory generation */
	int dir_path_len;
	int name_len;
};
2466
2467/*
2468 * We need to process new refs before deleted refs, but compare_tree gives us
2469 * everything mixed. So we first record all refs and later process them.
2470 * This function is a helper to record one ref.
2471 */
2472static int record_ref(struct list_head *head, u64 dir,
2473 u64 dir_gen, struct fs_path *path)
2474{
2475 struct recorded_ref *ref;
2476 char *tmp;
2477
2478 ref = kmalloc(sizeof(*ref), GFP_NOFS);
2479 if (!ref)
2480 return -ENOMEM;
2481
2482 ref->dir = dir;
2483 ref->dir_gen = dir_gen;
2484 ref->full_path = path;
2485
2486 tmp = strrchr(ref->full_path->start, '/');
2487 if (!tmp) {
2488 ref->name_len = ref->full_path->end - ref->full_path->start;
2489 ref->name = ref->full_path->start;
2490 ref->dir_path_len = 0;
2491 ref->dir_path = ref->full_path->start;
2492 } else {
2493 tmp++;
2494 ref->name_len = ref->full_path->end - tmp;
2495 ref->name = tmp;
2496 ref->dir_path = ref->full_path->start;
2497 ref->dir_path_len = ref->full_path->end -
2498 ref->full_path->start - 1 - ref->name_len;
2499 }
2500
2501 list_add_tail(&ref->list, head);
2502 return 0;
2503}
2504
2505static void __free_recorded_refs(struct send_ctx *sctx, struct list_head *head)
2506{
2507 struct recorded_ref *cur;
2508 struct recorded_ref *tmp;
2509
2510 list_for_each_entry_safe(cur, tmp, head, list) {
2511 fs_path_free(sctx, cur->full_path);
2512 kfree(cur);
2513 }
2514 INIT_LIST_HEAD(head);
2515}
2516
/* Free both ref lists (new and deleted) of the currently processed inode. */
static void free_recorded_refs(struct send_ctx *sctx)
{
	__free_recorded_refs(sctx, &sctx->new_refs);
	__free_recorded_refs(sctx, &sctx->deleted_refs);
}
2522
2523/*
2524 * Renames/moves a file/dir to it's orphan name. Used when the first
2525 * ref of an unprocessed inode gets overwritten and for all non empty
2526 * directories.
2527 */
2528static int orphanize_inode(struct send_ctx *sctx, u64 ino, u64 gen,
2529 struct fs_path *path)
2530{
2531 int ret;
2532 struct fs_path *orphan;
2533
2534 orphan = fs_path_alloc(sctx);
2535 if (!orphan)
2536 return -ENOMEM;
2537
2538 ret = gen_unique_name(sctx, ino, gen, orphan);
2539 if (ret < 0)
2540 goto out;
2541
2542 ret = send_rename(sctx, path, orphan);
2543
2544out:
2545 fs_path_free(sctx, orphan);
2546 return ret;
2547}
2548
2549/*
2550 * Returns 1 if a directory can be removed at this point in time.
2551 * We check this by iterating all dir items and checking if the inode behind
2552 * the dir item was already processed.
2553 */
static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 send_progress)
{
	int ret = 0;
	struct btrfs_root *root = sctx->parent_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_key loc;
	struct btrfs_dir_item *di;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	/* Walk all DIR_INDEX items of the directory in index order. */
	key.objectid = dir;
	key.type = BTRFS_DIR_INDEX_KEY;
	key.offset = 0;

	while (1) {
		ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
		if (ret < 0)
			goto out;
		if (!ret) {
			btrfs_item_key_to_cpu(path->nodes[0], &found_key,
					path->slots[0]);
		}
		/* Stop when past the last dir index item of this dir. */
		if (ret || found_key.objectid != key.objectid ||
		    found_key.type != key.type) {
			break;
		}

		di = btrfs_item_ptr(path->nodes[0], path->slots[0],
				struct btrfs_dir_item);
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &loc);

		/*
		 * An entry pointing to a not-yet-processed inode blocks
		 * the rmdir; it is re-checked when that inode is done.
		 */
		if (loc.objectid > send_progress) {
			ret = 0;
			goto out;
		}

		btrfs_release_path(path);
		key.offset = found_key.offset + 1;
	}

	/* Every entry refers to an already processed inode: rmdir is ok. */
	ret = 1;

out:
	btrfs_free_path(path);
	return ret;
}
2604
Alexander Block31db9f72012-07-25 23:19:24 +02002605/*
2606 * This does all the move/link/unlink/rmdir magic.
2607 */
static int process_recorded_refs(struct send_ctx *sctx)
{
	int ret = 0;
	struct recorded_ref *cur;
	struct recorded_ref *cur2;
	struct ulist *check_dirs = NULL;
	struct ulist_iterator uit;
	struct ulist_node *un;
	struct fs_path *valid_path = NULL;
	u64 ow_inode = 0;
	u64 ow_gen;
	int did_overwrite = 0;
	int is_orphan = 0;

verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino);

	valid_path = fs_path_alloc(sctx);
	if (!valid_path) {
		ret = -ENOMEM;
		goto out;
	}

	/* Collects all parent dirs touched here; inspected at the end. */
	check_dirs = ulist_alloc(GFP_NOFS);
	if (!check_dirs) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * First, check if the first ref of the current inode was overwritten
	 * before. If yes, we know that the current inode was already orphanized
	 * and thus use the orphan name. If not, we can use get_cur_path to
	 * get the path of the first ref as it would like while receiving at
	 * this point in time.
	 * New inodes are always orphan at the beginning, so force to use the
	 * orphan name in this case.
	 * The first ref is stored in valid_path and will be updated if it
	 * gets moved around.
	 */
	if (!sctx->cur_inode_new) {
		ret = did_overwrite_first_ref(sctx, sctx->cur_ino,
				sctx->cur_inode_gen);
		if (ret < 0)
			goto out;
		if (ret)
			did_overwrite = 1;
	}
	if (sctx->cur_inode_new || did_overwrite) {
		ret = gen_unique_name(sctx, sctx->cur_ino,
				sctx->cur_inode_gen, valid_path);
		if (ret < 0)
			goto out;
		is_orphan = 1;
	} else {
		ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen,
				valid_path);
		if (ret < 0)
			goto out;
	}

	list_for_each_entry(cur, &sctx->new_refs, list) {
		/*
		 * We may have refs where the parent directory does not exist
		 * yet. This happens if the parent directory's inum is higher
		 * than the current inum. To handle this case, we create the
		 * parent directory out of order. But we need to check if this
		 * did already happen before due to other refs in the same dir.
		 */
		ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
		if (ret < 0)
			goto out;
		if (ret == inode_state_will_create) {
			ret = 0;
			/*
			 * First check if any of the current inodes refs did
			 * already create the dir.
			 */
			list_for_each_entry(cur2, &sctx->new_refs, list) {
				if (cur == cur2)
					break;
				if (cur2->dir == cur->dir) {
					ret = 1;
					break;
				}
			}

			/*
			 * If that did not happen, check if a previous inode
			 * did already create the dir.
			 */
			if (!ret)
				ret = did_create_dir(sctx, cur->dir);
			if (ret < 0)
				goto out;
			if (!ret) {
				ret = send_create_inode(sctx, cur->dir);
				if (ret < 0)
					goto out;
			}
		}

		/*
		 * Check if this new ref would overwrite the first ref of
		 * another unprocessed inode. If yes, orphanize the
		 * overwritten inode. If we find an overwritten ref that is
		 * not the first ref, simply unlink it.
		 */
		ret = will_overwrite_ref(sctx, cur->dir, cur->dir_gen,
				cur->name, cur->name_len,
				&ow_inode, &ow_gen);
		if (ret < 0)
			goto out;
		if (ret) {
			ret = is_first_ref(sctx, sctx->parent_root,
					ow_inode, cur->dir, cur->name,
					cur->name_len);
			if (ret < 0)
				goto out;
			if (ret) {
				ret = orphanize_inode(sctx, ow_inode, ow_gen,
						cur->full_path);
				if (ret < 0)
					goto out;
			} else {
				ret = send_unlink(sctx, cur->full_path);
				if (ret < 0)
					goto out;
			}
		}

		/*
		 * link/move the ref to the new place. If we have an orphan
		 * inode, move it and update valid_path. If not, link or move
		 * it depending on the inode mode.
		 */
		if (is_orphan) {
			ret = send_rename(sctx, valid_path, cur->full_path);
			if (ret < 0)
				goto out;
			is_orphan = 0;
			ret = fs_path_copy(valid_path, cur->full_path);
			if (ret < 0)
				goto out;
		} else {
			if (S_ISDIR(sctx->cur_inode_mode)) {
				/*
				 * Dirs can't be linked, so move it. For moved
				 * dirs, we always have one new and one deleted
				 * ref. The deleted ref is ignored later.
				 */
				ret = send_rename(sctx, valid_path,
						cur->full_path);
				if (ret < 0)
					goto out;
				ret = fs_path_copy(valid_path, cur->full_path);
				if (ret < 0)
					goto out;
			} else {
				ret = send_link(sctx, cur->full_path,
						valid_path);
				if (ret < 0)
					goto out;
			}
		}
		ret = ulist_add(check_dirs, cur->dir, cur->dir_gen,
				GFP_NOFS);
		if (ret < 0)
			goto out;
	}

	if (S_ISDIR(sctx->cur_inode_mode) && sctx->cur_inode_deleted) {
		/*
		 * Check if we can already rmdir the directory. If not,
		 * orphanize it. For every dir item inside that gets deleted
		 * later, we do this check again and rmdir it then if possible.
		 * See the use of check_dirs for more details.
		 */
		ret = can_rmdir(sctx, sctx->cur_ino, sctx->cur_ino);
		if (ret < 0)
			goto out;
		if (ret) {
			ret = send_rmdir(sctx, valid_path);
			if (ret < 0)
				goto out;
		} else if (!is_orphan) {
			ret = orphanize_inode(sctx, sctx->cur_ino,
					sctx->cur_inode_gen, valid_path);
			if (ret < 0)
				goto out;
			is_orphan = 1;
		}

		list_for_each_entry(cur, &sctx->deleted_refs, list) {
			ret = ulist_add(check_dirs, cur->dir, cur->dir_gen,
					GFP_NOFS);
			if (ret < 0)
				goto out;
		}
	} else if (!S_ISDIR(sctx->cur_inode_mode)) {
		/*
		 * We have a non dir inode. Go through all deleted refs and
		 * unlink them if they were not already overwritten by other
		 * inodes.
		 */
		list_for_each_entry(cur, &sctx->deleted_refs, list) {
			ret = did_overwrite_ref(sctx, cur->dir, cur->dir_gen,
					sctx->cur_ino, sctx->cur_inode_gen,
					cur->name, cur->name_len);
			if (ret < 0)
				goto out;
			if (!ret) {
				ret = send_unlink(sctx, cur->full_path);
				if (ret < 0)
					goto out;
			}
			ret = ulist_add(check_dirs, cur->dir, cur->dir_gen,
					GFP_NOFS);
			if (ret < 0)
				goto out;
		}

		/*
		 * If the inode is still orphan, unlink the orphan. This may
		 * happen when a previous inode did overwrite the first ref
		 * of this inode and no new refs were added for the current
		 * inode.
		 */
		if (is_orphan) {
			ret = send_unlink(sctx, valid_path);
			if (ret < 0)
				goto out;
		}
	}

	/*
	 * We did collect all parent dirs where cur_inode was once located. We
	 * now go through all these dirs and check if they are pending for
	 * deletion and if it's finally possible to perform the rmdir now.
	 * We also update the inode stats of the parent dirs here.
	 */
	ULIST_ITER_INIT(&uit);
	while ((un = ulist_next(check_dirs, &uit))) {
		if (un->val > sctx->cur_ino)
			continue;

		ret = get_cur_inode_state(sctx, un->val, un->aux);
		if (ret < 0)
			goto out;

		if (ret == inode_state_did_create ||
		    ret == inode_state_no_change) {
			/* TODO delayed utimes */
			ret = send_utimes(sctx, un->val, un->aux);
			if (ret < 0)
				goto out;
		} else if (ret == inode_state_did_delete) {
			ret = can_rmdir(sctx, un->val, sctx->cur_ino);
			if (ret < 0)
				goto out;
			if (ret) {
				ret = get_cur_path(sctx, un->val, un->aux,
						valid_path);
				if (ret < 0)
					goto out;
				ret = send_rmdir(sctx, valid_path);
				if (ret < 0)
					goto out;
			}
		}
	}

	/*
	 * Current inode is now at its new position, so we must increase
	 * send_progress
	 */
	sctx->send_progress = sctx->cur_ino + 1;

	ret = 0;

out:
	free_recorded_refs(sctx);
	ulist_free(check_dirs);
	fs_path_free(sctx, valid_path);
	return ret;
}
2893
2894static int __record_new_ref(int num, u64 dir, int index,
2895 struct fs_path *name,
2896 void *ctx)
2897{
2898 int ret = 0;
2899 struct send_ctx *sctx = ctx;
2900 struct fs_path *p;
2901 u64 gen;
2902
2903 p = fs_path_alloc(sctx);
2904 if (!p)
2905 return -ENOMEM;
2906
2907 ret = get_inode_info(sctx->send_root, dir, NULL, &gen, NULL, NULL,
Alexander Block85a7b332012-07-26 23:39:10 +02002908 NULL, NULL);
Alexander Block31db9f72012-07-25 23:19:24 +02002909 if (ret < 0)
2910 goto out;
2911
Alexander Block31db9f72012-07-25 23:19:24 +02002912 ret = get_cur_path(sctx, dir, gen, p);
2913 if (ret < 0)
2914 goto out;
2915 ret = fs_path_add_path(p, name);
2916 if (ret < 0)
2917 goto out;
2918
2919 ret = record_ref(&sctx->new_refs, dir, gen, p);
2920
2921out:
2922 if (ret)
2923 fs_path_free(sctx, p);
2924 return ret;
2925}
2926
2927static int __record_deleted_ref(int num, u64 dir, int index,
2928 struct fs_path *name,
2929 void *ctx)
2930{
2931 int ret = 0;
2932 struct send_ctx *sctx = ctx;
2933 struct fs_path *p;
2934 u64 gen;
2935
2936 p = fs_path_alloc(sctx);
2937 if (!p)
2938 return -ENOMEM;
2939
2940 ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL, NULL,
Alexander Block85a7b332012-07-26 23:39:10 +02002941 NULL, NULL);
Alexander Block31db9f72012-07-25 23:19:24 +02002942 if (ret < 0)
2943 goto out;
2944
2945 ret = get_cur_path(sctx, dir, gen, p);
2946 if (ret < 0)
2947 goto out;
2948 ret = fs_path_add_path(p, name);
2949 if (ret < 0)
2950 goto out;
2951
2952 ret = record_ref(&sctx->deleted_refs, dir, gen, p);
2953
2954out:
2955 if (ret)
2956 fs_path_free(sctx, p);
2957 return ret;
2958}
2959
2960static int record_new_ref(struct send_ctx *sctx)
2961{
2962 int ret;
2963
2964 ret = iterate_inode_ref(sctx, sctx->send_root, sctx->left_path,
2965 sctx->cmp_key, 0, __record_new_ref, sctx);
2966 if (ret < 0)
2967 goto out;
2968 ret = 0;
2969
2970out:
2971 return ret;
2972}
2973
2974static int record_deleted_ref(struct send_ctx *sctx)
2975{
2976 int ret;
2977
2978 ret = iterate_inode_ref(sctx, sctx->parent_root, sctx->right_path,
2979 sctx->cmp_key, 0, __record_deleted_ref, sctx);
2980 if (ret < 0)
2981 goto out;
2982 ret = 0;
2983
2984out:
2985 return ret;
2986}
2987
/*
 * Context for __find_iref: the (dir, name) pair to search for and,
 * after iteration, the index of the matching ref or -1 if none matched.
 */
struct find_ref_ctx {
	u64 dir;
	struct fs_path *name;
	int found_idx;
};
2993
2994static int __find_iref(int num, u64 dir, int index,
2995 struct fs_path *name,
2996 void *ctx_)
2997{
2998 struct find_ref_ctx *ctx = ctx_;
2999
3000 if (dir == ctx->dir && fs_path_len(name) == fs_path_len(ctx->name) &&
3001 strncmp(name->start, ctx->name->start, fs_path_len(name)) == 0) {
3002 ctx->found_idx = num;
3003 return 1;
3004 }
3005 return 0;
3006}
3007
3008static int find_iref(struct send_ctx *sctx,
3009 struct btrfs_root *root,
3010 struct btrfs_path *path,
3011 struct btrfs_key *key,
3012 u64 dir, struct fs_path *name)
3013{
3014 int ret;
3015 struct find_ref_ctx ctx;
3016
3017 ctx.dir = dir;
3018 ctx.name = name;
3019 ctx.found_idx = -1;
3020
3021 ret = iterate_inode_ref(sctx, root, path, key, 0, __find_iref, &ctx);
3022 if (ret < 0)
3023 return ret;
3024
3025 if (ctx.found_idx == -1)
3026 return -ENOENT;
3027
3028 return ctx.found_idx;
3029}
3030
3031static int __record_changed_new_ref(int num, u64 dir, int index,
3032 struct fs_path *name,
3033 void *ctx)
3034{
3035 int ret;
3036 struct send_ctx *sctx = ctx;
3037
3038 ret = find_iref(sctx, sctx->parent_root, sctx->right_path,
3039 sctx->cmp_key, dir, name);
3040 if (ret == -ENOENT)
3041 ret = __record_new_ref(num, dir, index, name, sctx);
3042 else if (ret > 0)
3043 ret = 0;
3044
3045 return ret;
3046}
3047
3048static int __record_changed_deleted_ref(int num, u64 dir, int index,
3049 struct fs_path *name,
3050 void *ctx)
3051{
3052 int ret;
3053 struct send_ctx *sctx = ctx;
3054
3055 ret = find_iref(sctx, sctx->send_root, sctx->left_path, sctx->cmp_key,
3056 dir, name);
3057 if (ret == -ENOENT)
3058 ret = __record_deleted_ref(num, dir, index, name, sctx);
3059 else if (ret > 0)
3060 ret = 0;
3061
3062 return ret;
3063}
3064
3065static int record_changed_ref(struct send_ctx *sctx)
3066{
3067 int ret = 0;
3068
3069 ret = iterate_inode_ref(sctx, sctx->send_root, sctx->left_path,
3070 sctx->cmp_key, 0, __record_changed_new_ref, sctx);
3071 if (ret < 0)
3072 goto out;
3073 ret = iterate_inode_ref(sctx, sctx->parent_root, sctx->right_path,
3074 sctx->cmp_key, 0, __record_changed_deleted_ref, sctx);
3075 if (ret < 0)
3076 goto out;
3077 ret = 0;
3078
3079out:
3080 return ret;
3081}
3082
3083/*
3084 * Record and process all refs at once. Needed when an inode changes the
3085 * generation number, which means that it was deleted and recreated.
3086 */
3087static int process_all_refs(struct send_ctx *sctx,
3088 enum btrfs_compare_tree_result cmd)
3089{
3090 int ret;
3091 struct btrfs_root *root;
3092 struct btrfs_path *path;
3093 struct btrfs_key key;
3094 struct btrfs_key found_key;
3095 struct extent_buffer *eb;
3096 int slot;
3097 iterate_inode_ref_t cb;
3098
3099 path = alloc_path_for_send();
3100 if (!path)
3101 return -ENOMEM;
3102
3103 if (cmd == BTRFS_COMPARE_TREE_NEW) {
3104 root = sctx->send_root;
3105 cb = __record_new_ref;
3106 } else if (cmd == BTRFS_COMPARE_TREE_DELETED) {
3107 root = sctx->parent_root;
3108 cb = __record_deleted_ref;
3109 } else {
3110 BUG();
3111 }
3112
3113 key.objectid = sctx->cmp_key->objectid;
3114 key.type = BTRFS_INODE_REF_KEY;
3115 key.offset = 0;
3116 while (1) {
3117 ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
3118 if (ret < 0) {
3119 btrfs_release_path(path);
3120 goto out;
3121 }
3122 if (ret) {
3123 btrfs_release_path(path);
3124 break;
3125 }
3126
3127 eb = path->nodes[0];
3128 slot = path->slots[0];
3129 btrfs_item_key_to_cpu(eb, &found_key, slot);
3130
3131 if (found_key.objectid != key.objectid ||
3132 found_key.type != key.type) {
3133 btrfs_release_path(path);
3134 break;
3135 }
3136
3137 ret = iterate_inode_ref(sctx, sctx->parent_root, path,
3138 &found_key, 0, cb, sctx);
3139 btrfs_release_path(path);
3140 if (ret < 0)
3141 goto out;
3142
3143 key.offset = found_key.offset + 1;
3144 }
3145
3146 ret = process_recorded_refs(sctx);
3147
3148out:
3149 btrfs_free_path(path);
3150 return ret;
3151}
3152
/*
 * Emit a SET_XATTR command for @path to the send stream. The TLV_PUT
 * macros jump to tlv_put_failure on buffer errors.
 */
static int send_set_xattr(struct send_ctx *sctx,
			  struct fs_path *path,
			  const char *name, int name_len,
			  const char *data, int data_len)
{
	int ret = 0;

	ret = begin_cmd(sctx, BTRFS_SEND_C_SET_XATTR);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
	TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
	TLV_PUT(sctx, BTRFS_SEND_A_XATTR_DATA, data, data_len);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}
3174
/*
 * Emit a REMOVE_XATTR command for @path to the send stream. The TLV_PUT
 * macros jump to tlv_put_failure on buffer errors.
 */
static int send_remove_xattr(struct send_ctx *sctx,
			     struct fs_path *path,
			     const char *name, int name_len)
{
	int ret = 0;

	ret = begin_cmd(sctx, BTRFS_SEND_C_REMOVE_XATTR);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
	TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}
3194
/*
 * iterate_dir_item callback: send a SET_XATTR command for one xattr of
 * the current inode.
 */
static int __process_new_xattr(int num, struct btrfs_key *di_key,
			       const char *name, int name_len,
			       const char *data, int data_len,
			       u8 type, void *ctx)
{
	int ret;
	struct send_ctx *sctx = ctx;
	struct fs_path *p;
	posix_acl_xattr_header dummy_acl;

	p = fs_path_alloc(sctx);
	if (!p)
		return -ENOMEM;

	/*
	 * This hack is needed because empty acl's are stored as zero byte
	 * data in xattrs. Problem with that is, that receiving these zero byte
	 * acl's will fail later. To fix this, we send a dummy acl list that
	 * only contains the version number and no entries.
	 */
	if (!strncmp(name, XATTR_NAME_POSIX_ACL_ACCESS, name_len) ||
	    !strncmp(name, XATTR_NAME_POSIX_ACL_DEFAULT, name_len)) {
		if (data_len == 0) {
			/* Retarget data at the stack dummy for this send. */
			dummy_acl.a_version =
					cpu_to_le32(POSIX_ACL_XATTR_VERSION);
			data = (char *)&dummy_acl;
			data_len = sizeof(dummy_acl);
		}
	}

	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
	if (ret < 0)
		goto out;

	ret = send_set_xattr(sctx, p, name, name_len, data, data_len);

out:
	fs_path_free(sctx, p);
	return ret;
}
3235
3236static int __process_deleted_xattr(int num, struct btrfs_key *di_key,
3237 const char *name, int name_len,
3238 const char *data, int data_len,
3239 u8 type, void *ctx)
3240{
3241 int ret;
3242 struct send_ctx *sctx = ctx;
3243 struct fs_path *p;
3244
3245 p = fs_path_alloc(sctx);
3246 if (!p)
3247 return -ENOMEM;
3248
3249 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
3250 if (ret < 0)
3251 goto out;
3252
3253 ret = send_remove_xattr(sctx, p, name, name_len);
3254
3255out:
3256 fs_path_free(sctx, p);
3257 return ret;
3258}
3259
3260static int process_new_xattr(struct send_ctx *sctx)
3261{
3262 int ret = 0;
3263
3264 ret = iterate_dir_item(sctx, sctx->send_root, sctx->left_path,
3265 sctx->cmp_key, __process_new_xattr, sctx);
3266
3267 return ret;
3268}
3269
3270static int process_deleted_xattr(struct send_ctx *sctx)
3271{
3272 int ret;
3273
3274 ret = iterate_dir_item(sctx, sctx->parent_root, sctx->right_path,
3275 sctx->cmp_key, __process_deleted_xattr, sctx);
3276
3277 return ret;
3278}
3279
/*
 * Context for __find_xattr: the xattr name to search for and, on a
 * match, the item index plus an allocated copy of the value (the caller
 * owns found_data and must kfree it).
 */
struct find_xattr_ctx {
	const char *name;
	int name_len;
	int found_idx;
	char *found_data;
	int found_data_len;
};
3287
3288static int __find_xattr(int num, struct btrfs_key *di_key,
3289 const char *name, int name_len,
3290 const char *data, int data_len,
3291 u8 type, void *vctx)
3292{
3293 struct find_xattr_ctx *ctx = vctx;
3294
3295 if (name_len == ctx->name_len &&
3296 strncmp(name, ctx->name, name_len) == 0) {
3297 ctx->found_idx = num;
3298 ctx->found_data_len = data_len;
3299 ctx->found_data = kmalloc(data_len, GFP_NOFS);
3300 if (!ctx->found_data)
3301 return -ENOMEM;
3302 memcpy(ctx->found_data, data, data_len);
3303 return 1;
3304 }
3305 return 0;
3306}
3307
3308static int find_xattr(struct send_ctx *sctx,
3309 struct btrfs_root *root,
3310 struct btrfs_path *path,
3311 struct btrfs_key *key,
3312 const char *name, int name_len,
3313 char **data, int *data_len)
3314{
3315 int ret;
3316 struct find_xattr_ctx ctx;
3317
3318 ctx.name = name;
3319 ctx.name_len = name_len;
3320 ctx.found_idx = -1;
3321 ctx.found_data = NULL;
3322 ctx.found_data_len = 0;
3323
3324 ret = iterate_dir_item(sctx, root, path, key, __find_xattr, &ctx);
3325 if (ret < 0)
3326 return ret;
3327
3328 if (ctx.found_idx == -1)
3329 return -ENOENT;
3330 if (data) {
3331 *data = ctx.found_data;
3332 *data_len = ctx.found_data_len;
3333 } else {
3334 kfree(ctx.found_data);
3335 }
3336 return ctx.found_idx;
3337}
3338
3339
3340static int __process_changed_new_xattr(int num, struct btrfs_key *di_key,
3341 const char *name, int name_len,
3342 const char *data, int data_len,
3343 u8 type, void *ctx)
3344{
3345 int ret;
3346 struct send_ctx *sctx = ctx;
3347 char *found_data = NULL;
3348 int found_data_len = 0;
3349 struct fs_path *p = NULL;
3350
3351 ret = find_xattr(sctx, sctx->parent_root, sctx->right_path,
3352 sctx->cmp_key, name, name_len, &found_data,
3353 &found_data_len);
3354 if (ret == -ENOENT) {
3355 ret = __process_new_xattr(num, di_key, name, name_len, data,
3356 data_len, type, ctx);
3357 } else if (ret >= 0) {
3358 if (data_len != found_data_len ||
3359 memcmp(data, found_data, data_len)) {
3360 ret = __process_new_xattr(num, di_key, name, name_len,
3361 data, data_len, type, ctx);
3362 } else {
3363 ret = 0;
3364 }
3365 }
3366
3367 kfree(found_data);
3368 fs_path_free(sctx, p);
3369 return ret;
3370}
3371
3372static int __process_changed_deleted_xattr(int num, struct btrfs_key *di_key,
3373 const char *name, int name_len,
3374 const char *data, int data_len,
3375 u8 type, void *ctx)
3376{
3377 int ret;
3378 struct send_ctx *sctx = ctx;
3379
3380 ret = find_xattr(sctx, sctx->send_root, sctx->left_path, sctx->cmp_key,
3381 name, name_len, NULL, NULL);
3382 if (ret == -ENOENT)
3383 ret = __process_deleted_xattr(num, di_key, name, name_len, data,
3384 data_len, type, ctx);
3385 else if (ret >= 0)
3386 ret = 0;
3387
3388 return ret;
3389}
3390
3391static int process_changed_xattr(struct send_ctx *sctx)
3392{
3393 int ret = 0;
3394
3395 ret = iterate_dir_item(sctx, sctx->send_root, sctx->left_path,
3396 sctx->cmp_key, __process_changed_new_xattr, sctx);
3397 if (ret < 0)
3398 goto out;
3399 ret = iterate_dir_item(sctx, sctx->parent_root, sctx->right_path,
3400 sctx->cmp_key, __process_changed_deleted_xattr, sctx);
3401
3402out:
3403 return ret;
3404}
3405
/*
 * Send all xattrs of the current inode by walking its XATTR_ITEM keys
 * in the send root.
 */
static int process_all_new_xattrs(struct send_ctx *sctx)
{
	int ret;
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	int slot;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	root = sctx->send_root;

	key.objectid = sctx->cmp_key->objectid;
	key.type = BTRFS_XATTR_ITEM_KEY;
	key.offset = 0;
	while (1) {
		ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
		if (ret < 0)
			goto out;
		if (ret) {
			/* No more items in the tree at all. */
			ret = 0;
			goto out;
		}

		eb = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(eb, &found_key, slot);

		if (found_key.objectid != key.objectid ||
		    found_key.type != key.type) {
			/* Walked past the last xattr item of this inode. */
			ret = 0;
			goto out;
		}

		ret = iterate_dir_item(sctx, root, path, &found_key,
				__process_new_xattr, sctx);
		if (ret < 0)
			goto out;

		btrfs_release_path(path);
		key.offset = found_key.offset + 1;
	}

out:
	btrfs_free_path(path);
	return ret;
}
3457
3458/*
3459 * Read some bytes from the current inode/file and send a write command to
3460 * user space.
3461 */
3462static int send_write(struct send_ctx *sctx, u64 offset, u32 len)
3463{
3464 int ret = 0;
3465 struct fs_path *p;
3466 loff_t pos = offset;
Chris Masonb24baf62012-07-25 19:21:10 -04003467 int readed = 0;
Alexander Block31db9f72012-07-25 23:19:24 +02003468 mm_segment_t old_fs;
3469
3470 p = fs_path_alloc(sctx);
3471 if (!p)
3472 return -ENOMEM;
3473
3474 /*
3475 * vfs normally only accepts user space buffers for security reasons.
3476 * we only read from the file and also only provide the read_buf buffer
3477 * to vfs. As this buffer does not come from a user space call, it's
3478 * ok to temporary allow kernel space buffers.
3479 */
3480 old_fs = get_fs();
3481 set_fs(KERNEL_DS);
3482
3483verbose_printk("btrfs: send_write offset=%llu, len=%d\n", offset, len);
3484
3485 ret = open_cur_inode_file(sctx);
3486 if (ret < 0)
3487 goto out;
3488
3489 ret = vfs_read(sctx->cur_inode_filp, sctx->read_buf, len, &pos);
3490 if (ret < 0)
3491 goto out;
3492 readed = ret;
3493 if (!readed)
3494 goto out;
3495
3496 ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
3497 if (ret < 0)
3498 goto out;
3499
3500 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
3501 if (ret < 0)
3502 goto out;
3503
3504 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
3505 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
3506 TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, readed);
3507
3508 ret = send_cmd(sctx);
3509
3510tlv_put_failure:
3511out:
3512 fs_path_free(sctx, p);
3513 set_fs(old_fs);
3514 if (ret < 0)
3515 return ret;
3516 return readed;
3517}
3518
3519/*
3520 * Send a clone command to user space.
3521 */
static int send_clone(struct send_ctx *sctx,
		      u64 offset, u32 len,
		      struct clone_root *clone_root)
{
	int ret = 0;
	struct btrfs_root *clone_root2 = clone_root->root;
	struct fs_path *p;
	u64 gen;

verbose_printk("btrfs: send_clone offset=%llu, len=%d, clone_root=%llu, "
		"clone_inode=%llu, clone_offset=%llu\n", offset, len,
		clone_root->root->objectid, clone_root->ino,
		clone_root->offset);

	p = fs_path_alloc(sctx);
	if (!p)
		return -ENOMEM;

	ret = begin_cmd(sctx, BTRFS_SEND_C_CLONE);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
	if (ret < 0)
		goto out;

	TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_LEN, len);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);

	/*
	 * For a clone source in the send root itself, use get_cur_path so
	 * the path reflects what the receiver has at this point; other
	 * roots are resolved via their plain inode path.
	 */
	if (clone_root2 == sctx->send_root) {
		ret = get_inode_info(sctx->send_root, clone_root->ino, NULL,
				&gen, NULL, NULL, NULL, NULL);
		if (ret < 0)
			goto out;
		ret = get_cur_path(sctx, clone_root->ino, gen, p);
	} else {
		ret = get_inode_path(sctx, clone_root2, clone_root->ino, p);
	}
	if (ret < 0)
		goto out;

	TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
			clone_root2->root_item.uuid);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
			clone_root2->root_item.ctransid);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_CLONE_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_OFFSET,
			clone_root->offset);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(sctx, p);
	return ret;
}
3579
/*
 * Emit the data of one file extent item, either as write commands
 * chunked to BTRFS_SEND_READ_SIZE or as a single clone command when a
 * clone source was found.
 */
static int send_write_or_clone(struct send_ctx *sctx,
			       struct btrfs_path *path,
			       struct btrfs_key *key,
			       struct clone_root *clone_root)
{
	int ret = 0;
	struct btrfs_file_extent_item *ei;
	u64 offset = key->offset;
	u64 pos = 0;
	u64 len;
	u32 l;
	u8 type;

	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
			struct btrfs_file_extent_item);
	type = btrfs_file_extent_type(path->nodes[0], ei);
	if (type == BTRFS_FILE_EXTENT_INLINE)
		len = btrfs_file_extent_inline_len(path->nodes[0], ei);
	else
		len = btrfs_file_extent_num_bytes(path->nodes[0], ei);

	/* Never send data beyond the current inode size. */
	if (offset + len > sctx->cur_inode_size)
		len = sctx->cur_inode_size - offset;
	if (len == 0) {
		ret = 0;
		goto out;
	}

	if (!clone_root) {
		/* Chunked writes; send_write returns bytes actually sent. */
		while (pos < len) {
			l = len - pos;
			if (l > BTRFS_SEND_READ_SIZE)
				l = BTRFS_SEND_READ_SIZE;
			ret = send_write(sctx, pos + offset, l);
			if (ret < 0)
				goto out;
			if (!ret)
				break;
			pos += ret;
		}
		ret = 0;
	} else {
		ret = send_clone(sctx, offset, len, clone_root);
	}

out:
	return ret;
}
3628
/*
 * Returns 1 if the file extent at @ekey in the send root covers exactly
 * the same on-disk data as the overlapping extents in the parent root
 * (nothing needs to be sent for this range), 0 if it changed, and a
 * negative errno on error.
 */
static int is_extent_unchanged(struct send_ctx *sctx,
			       struct btrfs_path *left_path,
			       struct btrfs_key *ekey)
{
	int ret = 0;
	struct btrfs_key key;
	struct btrfs_path *path = NULL;
	struct extent_buffer *eb;
	int slot;
	struct btrfs_key found_key;
	struct btrfs_file_extent_item *ei;
	u64 left_disknr;
	u64 right_disknr;
	u64 left_offset;
	u64 right_offset;
	u64 left_offset_fixed;
	u64 left_len;
	u64 right_len;
	u8 left_type;
	u8 right_type;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	eb = left_path->nodes[0];
	slot = left_path->slots[0];

	ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
	left_type = btrfs_file_extent_type(eb, ei);
	left_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
	left_len = btrfs_file_extent_num_bytes(eb, ei);
	left_offset = btrfs_file_extent_offset(eb, ei);

	/* Only regular extents are compared; anything else is "changed". */
	if (left_type != BTRFS_FILE_EXTENT_REG) {
		ret = 0;
		goto out;
	}

	/*
	 * Following comments will refer to these graphics. L is the left
	 * extents which we are checking at the moment. 1-8 are the right
	 * extents that we iterate.
	 *
	 *       |-----L-----|
	 * |-1-|-2a-|-3-|-4-|-5-|-6-|
	 *
	 *       |-----L-----|
	 * |--1--|-2b-|...(same as above)
	 *
	 * Alternative situation. Happens on files where extents got split.
	 *       |-----L-----|
	 * |-----------7-----------|-6-|
	 *
	 * Alternative situation. Happens on files which got larger.
	 *       |-----L-----|
	 * |-8-|
	 * Nothing follows after 8.
	 */

	key.objectid = ekey->objectid;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = ekey->offset;
	ret = btrfs_search_slot_for_read(sctx->parent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret) {
		ret = 0;
		goto out;
	}

	/*
	 * Handle special case where the right side has no extents at all.
	 */
	eb = path->nodes[0];
	slot = path->slots[0];
	btrfs_item_key_to_cpu(eb, &found_key, slot);
	if (found_key.objectid != key.objectid ||
	    found_key.type != key.type) {
		ret = 0;
		goto out;
	}

	/*
	 * We're now on 2a, 2b or 7.
	 */
	key = found_key;
	while (key.offset < ekey->offset + left_len) {
		ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		right_type = btrfs_file_extent_type(eb, ei);
		right_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
		right_len = btrfs_file_extent_num_bytes(eb, ei);
		right_offset = btrfs_file_extent_offset(eb, ei);

		if (right_type != BTRFS_FILE_EXTENT_REG) {
			ret = 0;
			goto out;
		}

		/*
		 * Are we at extent 8? If yes, we know the extent is changed.
		 * This may only happen on the first iteration.
		 */
		if (found_key.offset + right_len < ekey->offset) {
			ret = 0;
			goto out;
		}

		left_offset_fixed = left_offset;
		if (key.offset < ekey->offset) {
			/* Fix the right offset for 2a and 7. */
			right_offset += ekey->offset - key.offset;
		} else {
			/* Fix the left offset for all behind 2a and 2b */
			left_offset_fixed += key.offset - ekey->offset;
		}

		/*
		 * Check if we have the same extent.
		 */
		if (left_disknr + left_offset_fixed !=
		    right_disknr + right_offset) {
			ret = 0;
			goto out;
		}

		/*
		 * Go to the next extent.
		 */
		ret = btrfs_next_item(sctx->parent_root, path);
		if (ret < 0)
			goto out;
		if (!ret) {
			eb = path->nodes[0];
			slot = path->slots[0];
			btrfs_item_key_to_cpu(eb, &found_key, slot);
		}
		if (ret || found_key.objectid != key.objectid ||
		    found_key.type != key.type) {
			key.offset += right_len;
			break;
		} else {
			if (found_key.offset != key.offset + right_len) {
				/* Should really not happen */
				ret = -EIO;
				goto out;
			}
		}
		key = found_key;
	}

	/*
	 * We're now behind the left extent (treat as unchanged) or at the end
	 * of the right side (treat as changed).
	 */
	if (key.offset >= ekey->offset + left_len)
		ret = 1;
	else
		ret = 0;


out:
	btrfs_free_path(path);
	return ret;
}
3794
3795static int process_extent(struct send_ctx *sctx,
3796 struct btrfs_path *path,
3797 struct btrfs_key *key)
3798{
3799 int ret = 0;
3800 struct clone_root *found_clone = NULL;
3801
3802 if (S_ISLNK(sctx->cur_inode_mode))
3803 return 0;
3804
3805 if (sctx->parent_root && !sctx->cur_inode_new) {
3806 ret = is_extent_unchanged(sctx, path, key);
3807 if (ret < 0)
3808 goto out;
3809 if (ret) {
3810 ret = 0;
3811 goto out;
3812 }
3813 }
3814
3815 ret = find_extent_clone(sctx, path, key->objectid, key->offset,
3816 sctx->cur_inode_size, &found_clone);
3817 if (ret != -ENOENT && ret < 0)
3818 goto out;
3819
3820 ret = send_write_or_clone(sctx, path, key, found_clone);
3821
3822out:
3823 return ret;
3824}
3825
3826static int process_all_extents(struct send_ctx *sctx)
3827{
3828 int ret;
3829 struct btrfs_root *root;
3830 struct btrfs_path *path;
3831 struct btrfs_key key;
3832 struct btrfs_key found_key;
3833 struct extent_buffer *eb;
3834 int slot;
3835
3836 root = sctx->send_root;
3837 path = alloc_path_for_send();
3838 if (!path)
3839 return -ENOMEM;
3840
3841 key.objectid = sctx->cmp_key->objectid;
3842 key.type = BTRFS_EXTENT_DATA_KEY;
3843 key.offset = 0;
3844 while (1) {
3845 ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
3846 if (ret < 0)
3847 goto out;
3848 if (ret) {
3849 ret = 0;
3850 goto out;
3851 }
3852
3853 eb = path->nodes[0];
3854 slot = path->slots[0];
3855 btrfs_item_key_to_cpu(eb, &found_key, slot);
3856
3857 if (found_key.objectid != key.objectid ||
3858 found_key.type != key.type) {
3859 ret = 0;
3860 goto out;
3861 }
3862
3863 ret = process_extent(sctx, path, &found_key);
3864 if (ret < 0)
3865 goto out;
3866
3867 btrfs_release_path(path);
3868 key.offset = found_key.offset + 1;
3869 }
3870
3871out:
3872 btrfs_free_path(path);
3873 return ret;
3874}
3875
3876static int process_recorded_refs_if_needed(struct send_ctx *sctx, int at_end)
3877{
3878 int ret = 0;
3879
3880 if (sctx->cur_ino == 0)
3881 goto out;
3882 if (!at_end && sctx->cur_ino == sctx->cmp_key->objectid &&
3883 sctx->cmp_key->type <= BTRFS_INODE_REF_KEY)
3884 goto out;
3885 if (list_empty(&sctx->new_refs) && list_empty(&sctx->deleted_refs))
3886 goto out;
3887
3888 ret = process_recorded_refs(sctx);
3889
3890out:
3891 return ret;
3892}
3893
/*
 * Finalize the inode that was being processed: flush recorded refs and,
 * once we are past all of the inode's items (or at_end is set), emit the
 * trailing truncate/chown/chmod/utimes commands. The order matters:
 * utimes must come last because every earlier command touches the inode
 * and would clobber its times on the receiving side.
 */
static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
{
	int ret = 0;
	u64 left_mode;
	u64 left_uid;
	u64 left_gid;
	u64 right_mode;
	u64 right_uid;
	u64 right_gid;
	int need_chmod = 0;
	int need_chown = 0;

	ret = process_recorded_refs_if_needed(sctx, at_end);
	if (ret < 0)
		goto out;

	/* Nothing started yet, or the inode is gone — nothing to finish. */
	if (sctx->cur_ino == 0 || sctx->cur_inode_deleted)
		goto out;
	/* Still inside this inode's items; finish later. */
	if (!at_end && sctx->cmp_key->objectid == sctx->cur_ino)
		goto out;

	/* Mode/uid/gid of the inode in the tree being sent. */
	ret = get_inode_info(sctx->send_root, sctx->cur_ino, NULL, NULL,
			&left_mode, &left_uid, &left_gid, NULL);
	if (ret < 0)
		goto out;

	/* Symlinks never get chmod/chown commands here. */
	if (!S_ISLNK(sctx->cur_inode_mode)) {
		if (!sctx->parent_root || sctx->cur_inode_new) {
			/* New inode: always set mode and ownership. */
			need_chmod = 1;
			need_chown = 1;
		} else {
			/* Compare against the parent snapshot's values. */
			ret = get_inode_info(sctx->parent_root, sctx->cur_ino,
					NULL, NULL, &right_mode, &right_uid,
					&right_gid, NULL);
			if (ret < 0)
				goto out;

			if (left_uid != right_uid || left_gid != right_gid)
				need_chown = 1;
			if (left_mode != right_mode)
				need_chmod = 1;
		}
	}

	if (S_ISREG(sctx->cur_inode_mode)) {
		/* Bring the file to its final size. */
		ret = send_truncate(sctx, sctx->cur_ino, sctx->cur_inode_gen,
				sctx->cur_inode_size);
		if (ret < 0)
			goto out;
	}

	if (need_chown) {
		ret = send_chown(sctx, sctx->cur_ino, sctx->cur_inode_gen,
				left_uid, left_gid);
		if (ret < 0)
			goto out;
	}
	if (need_chmod) {
		ret = send_chmod(sctx, sctx->cur_ino, sctx->cur_inode_gen,
				left_mode);
		if (ret < 0)
			goto out;
	}

	/*
	 * Need to send that every time, no matter if it actually changed
	 * between the two trees as we have done changes to the inode before.
	 */
	ret = send_utimes(sctx, sctx->cur_ino, sctx->cur_inode_gen);
	if (ret < 0)
		goto out;

out:
	return ret;
}
3969
/*
 * Called when the tree compare reports an inode item. Sets up the
 * per-inode state in sctx (ino, generation, size, mode, new/deleted
 * flags) and, for new inodes, emits the create command. A changed
 * inode whose generation differs between the trees was deleted and
 * recreated; it is handled as a delete of the old inode followed by a
 * full (re)creation of the new one.
 */
static int changed_inode(struct send_ctx *sctx,
			 enum btrfs_compare_tree_result result)
{
	int ret = 0;
	struct btrfs_key *key = sctx->cmp_key;
	struct btrfs_inode_item *left_ii = NULL;
	struct btrfs_inode_item *right_ii = NULL;
	u64 left_gen = 0;
	u64 right_gen = 0;

	/* The previous inode's data file (if any) is no longer needed. */
	ret = close_cur_inode_file(sctx);
	if (ret < 0)
		goto out;

	sctx->cur_ino = key->objectid;
	sctx->cur_inode_new_gen = 0;
	/* Everything below cur_ino is now fully processed. */
	sctx->send_progress = sctx->cur_ino;

	if (result == BTRFS_COMPARE_TREE_NEW ||
	    result == BTRFS_COMPARE_TREE_CHANGED) {
		/* The item exists in the send (left) tree. */
		left_ii = btrfs_item_ptr(sctx->left_path->nodes[0],
				sctx->left_path->slots[0],
				struct btrfs_inode_item);
		left_gen = btrfs_inode_generation(sctx->left_path->nodes[0],
				left_ii);
	} else {
		/* DELETED: only the parent (right) tree has the item. */
		right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
				sctx->right_path->slots[0],
				struct btrfs_inode_item);
		right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
				right_ii);
	}
	if (result == BTRFS_COMPARE_TREE_CHANGED) {
		right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
				sctx->right_path->slots[0],
				struct btrfs_inode_item);

		right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
				right_ii);
		/*
		 * Different generations mean the inode number was reused:
		 * the old inode was deleted and a new one created.
		 */
		if (left_gen != right_gen)
			sctx->cur_inode_new_gen = 1;
	}

	if (result == BTRFS_COMPARE_TREE_NEW) {
		sctx->cur_inode_gen = left_gen;
		sctx->cur_inode_new = 1;
		sctx->cur_inode_deleted = 0;
		sctx->cur_inode_size = btrfs_inode_size(
				sctx->left_path->nodes[0], left_ii);
		sctx->cur_inode_mode = btrfs_inode_mode(
				sctx->left_path->nodes[0], left_ii);
		/* The subvolume root inode always exists on the receiver. */
		if (sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
			ret = send_create_inode_if_needed(sctx);
	} else if (result == BTRFS_COMPARE_TREE_DELETED) {
		/* Record deletion; refs processing emits the unlinks/rmdirs. */
		sctx->cur_inode_gen = right_gen;
		sctx->cur_inode_new = 0;
		sctx->cur_inode_deleted = 1;
		sctx->cur_inode_size = btrfs_inode_size(
				sctx->right_path->nodes[0], right_ii);
		sctx->cur_inode_mode = btrfs_inode_mode(
				sctx->right_path->nodes[0], right_ii);
	} else if (result == BTRFS_COMPARE_TREE_CHANGED) {
		if (sctx->cur_inode_new_gen) {
			/*
			 * Inode number reuse: first process the deletion of
			 * the old inode ...
			 */
			sctx->cur_inode_gen = right_gen;
			sctx->cur_inode_new = 0;
			sctx->cur_inode_deleted = 1;
			sctx->cur_inode_size = btrfs_inode_size(
					sctx->right_path->nodes[0], right_ii);
			sctx->cur_inode_mode = btrfs_inode_mode(
					sctx->right_path->nodes[0], right_ii);
			ret = process_all_refs(sctx,
					BTRFS_COMPARE_TREE_DELETED);
			if (ret < 0)
				goto out;

			/*
			 * ... then create the new inode and send all of its
			 * refs, extents and xattrs right away, since the
			 * compare result gives us no further item-by-item
			 * diff for a recreated inode.
			 */
			sctx->cur_inode_gen = left_gen;
			sctx->cur_inode_new = 1;
			sctx->cur_inode_deleted = 0;
			sctx->cur_inode_size = btrfs_inode_size(
					sctx->left_path->nodes[0], left_ii);
			sctx->cur_inode_mode = btrfs_inode_mode(
					sctx->left_path->nodes[0], left_ii);
			ret = send_create_inode_if_needed(sctx);
			if (ret < 0)
				goto out;

			ret = process_all_refs(sctx, BTRFS_COMPARE_TREE_NEW);
			if (ret < 0)
				goto out;
			ret = process_all_extents(sctx);
			if (ret < 0)
				goto out;
			ret = process_all_new_xattrs(sctx);
			if (ret < 0)
				goto out;
		} else {
			/* Same inode, just changed metadata/content. */
			sctx->cur_inode_gen = left_gen;
			sctx->cur_inode_new = 0;
			sctx->cur_inode_new_gen = 0;
			sctx->cur_inode_deleted = 0;
			sctx->cur_inode_size = btrfs_inode_size(
					sctx->left_path->nodes[0], left_ii);
			sctx->cur_inode_mode = btrfs_inode_mode(
					sctx->left_path->nodes[0], left_ii);
		}
	}

out:
	return ret;
}
4080
4081static int changed_ref(struct send_ctx *sctx,
4082 enum btrfs_compare_tree_result result)
4083{
4084 int ret = 0;
4085
4086 BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);
4087
4088 if (!sctx->cur_inode_new_gen &&
4089 sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) {
4090 if (result == BTRFS_COMPARE_TREE_NEW)
4091 ret = record_new_ref(sctx);
4092 else if (result == BTRFS_COMPARE_TREE_DELETED)
4093 ret = record_deleted_ref(sctx);
4094 else if (result == BTRFS_COMPARE_TREE_CHANGED)
4095 ret = record_changed_ref(sctx);
4096 }
4097
4098 return ret;
4099}
4100
4101static int changed_xattr(struct send_ctx *sctx,
4102 enum btrfs_compare_tree_result result)
4103{
4104 int ret = 0;
4105
4106 BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);
4107
4108 if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
4109 if (result == BTRFS_COMPARE_TREE_NEW)
4110 ret = process_new_xattr(sctx);
4111 else if (result == BTRFS_COMPARE_TREE_DELETED)
4112 ret = process_deleted_xattr(sctx);
4113 else if (result == BTRFS_COMPARE_TREE_CHANGED)
4114 ret = process_changed_xattr(sctx);
4115 }
4116
4117 return ret;
4118}
4119
4120static int changed_extent(struct send_ctx *sctx,
4121 enum btrfs_compare_tree_result result)
4122{
4123 int ret = 0;
4124
4125 BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);
4126
4127 if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
4128 if (result != BTRFS_COMPARE_TREE_DELETED)
4129 ret = process_extent(sctx, sctx->left_path,
4130 sctx->cmp_key);
4131 }
4132
4133 return ret;
4134}
4135
4136
4137static int changed_cb(struct btrfs_root *left_root,
4138 struct btrfs_root *right_root,
4139 struct btrfs_path *left_path,
4140 struct btrfs_path *right_path,
4141 struct btrfs_key *key,
4142 enum btrfs_compare_tree_result result,
4143 void *ctx)
4144{
4145 int ret = 0;
4146 struct send_ctx *sctx = ctx;
4147
4148 sctx->left_path = left_path;
4149 sctx->right_path = right_path;
4150 sctx->cmp_key = key;
4151
4152 ret = finish_inode_if_needed(sctx, 0);
4153 if (ret < 0)
4154 goto out;
4155
4156 if (key->type == BTRFS_INODE_ITEM_KEY)
4157 ret = changed_inode(sctx, result);
4158 else if (key->type == BTRFS_INODE_REF_KEY)
4159 ret = changed_ref(sctx, result);
4160 else if (key->type == BTRFS_XATTR_ITEM_KEY)
4161 ret = changed_xattr(sctx, result);
4162 else if (key->type == BTRFS_EXTENT_DATA_KEY)
4163 ret = changed_extent(sctx, result);
4164
4165out:
4166 return ret;
4167}
4168
/*
 * Non-incremental send: walk every item of the send root and feed it to
 * changed_cb as BTRFS_COMPARE_TREE_NEW. The walk happens inside a joined
 * transaction so the commit root cannot change underneath us; whenever a
 * commit wants to happen we drop out and rejoin. The root's ctransid is
 * checked after every (re)join to detect modification of the snapshot
 * while the send is running.
 */
static int full_send_tree(struct send_ctx *sctx)
{
	int ret;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_root *send_root = sctx->send_root;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	struct extent_buffer *eb;
	int slot;
	u64 start_ctransid;
	u64 ctransid;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	/* Remember the root's change-transid to detect later modification. */
	spin_lock(&send_root->root_times_lock);
	start_ctransid = btrfs_root_ctransid(&send_root->root_item);
	spin_unlock(&send_root->root_times_lock);

	/* Start at the very first possible inode item. */
	key.objectid = BTRFS_FIRST_FREE_OBJECTID;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

join_trans:
	/*
	 * We need to make sure the transaction does not get committed
	 * while we do anything on commit roots. Join a transaction to prevent
	 * this.
	 */
	trans = btrfs_join_transaction(send_root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out;
	}

	/*
	 * Make sure the tree has not changed
	 */
	spin_lock(&send_root->root_times_lock);
	ctransid = btrfs_root_ctransid(&send_root->root_item);
	spin_unlock(&send_root->root_times_lock);

	if (ctransid != start_ctransid) {
		WARN(1, KERN_WARNING "btrfs: the root that you're trying to "
				     "send was modified in between. This is "
				     "probably a bug.\n");
		ret = -EIO;
		goto out;
	}

	/* Continue (or start) the walk at the last remembered key. */
	ret = btrfs_search_slot_for_read(send_root, &key, path, 1, 0);
	if (ret < 0)
		goto out;
	if (ret)
		goto out_finish;

	while (1) {
		/*
		 * When someone want to commit while we iterate, end the
		 * joined transaction and rejoin.
		 */
		if (btrfs_should_end_transaction(trans, send_root)) {
			ret = btrfs_end_transaction(trans, send_root);
			trans = NULL;
			if (ret < 0)
				goto out;
			btrfs_release_path(path);
			goto join_trans;
		}

		eb = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(eb, &found_key, slot);

		/* Every item counts as "new" in a full send. */
		ret = changed_cb(send_root, NULL, path, NULL,
				&found_key, BTRFS_COMPARE_TREE_NEW, sctx);
		if (ret < 0)
			goto out;

		/* Remember where to resume after a transaction rejoin. */
		key.objectid = found_key.objectid;
		key.type = found_key.type;
		key.offset = found_key.offset + 1;

		ret = btrfs_next_item(send_root, path);
		if (ret < 0)
			goto out;
		if (ret) {
			ret = 0;
			break;
		}
	}

out_finish:
	/* Emit the trailing commands for the last inode. */
	ret = finish_inode_if_needed(sctx, 1);

out:
	btrfs_free_path(path);
	if (trans) {
		/* Preserve an earlier error over the end_transaction result. */
		if (!ret)
			ret = btrfs_end_transaction(trans, send_root);
		else
			btrfs_end_transaction(trans, send_root);
	}
	return ret;
}
4277
4278static int send_subvol(struct send_ctx *sctx)
4279{
4280 int ret;
4281
4282 ret = send_header(sctx);
4283 if (ret < 0)
4284 goto out;
4285
4286 ret = send_subvol_begin(sctx);
4287 if (ret < 0)
4288 goto out;
4289
4290 if (sctx->parent_root) {
4291 ret = btrfs_compare_trees(sctx->send_root, sctx->parent_root,
4292 changed_cb, sctx);
4293 if (ret < 0)
4294 goto out;
4295 ret = finish_inode_if_needed(sctx, 1);
4296 if (ret < 0)
4297 goto out;
4298 } else {
4299 ret = full_send_tree(sctx);
4300 if (ret < 0)
4301 goto out;
4302 }
4303
4304out:
4305 if (!ret)
4306 ret = close_cur_inode_file(sctx);
4307 else
4308 close_cur_inode_file(sctx);
4309
4310 free_recorded_refs(sctx);
4311 return ret;
4312}
4313
4314long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
4315{
4316 int ret = 0;
4317 struct btrfs_root *send_root;
4318 struct btrfs_root *clone_root;
4319 struct btrfs_fs_info *fs_info;
4320 struct btrfs_ioctl_send_args *arg = NULL;
4321 struct btrfs_key key;
4322 struct file *filp = NULL;
4323 struct send_ctx *sctx = NULL;
4324 u32 i;
4325 u64 *clone_sources_tmp = NULL;
4326
4327 if (!capable(CAP_SYS_ADMIN))
4328 return -EPERM;
4329
4330 send_root = BTRFS_I(fdentry(mnt_file)->d_inode)->root;
4331 fs_info = send_root->fs_info;
4332
4333 arg = memdup_user(arg_, sizeof(*arg));
4334 if (IS_ERR(arg)) {
4335 ret = PTR_ERR(arg);
4336 arg = NULL;
4337 goto out;
4338 }
4339
4340 if (!access_ok(VERIFY_READ, arg->clone_sources,
4341 sizeof(*arg->clone_sources *
4342 arg->clone_sources_count))) {
4343 ret = -EFAULT;
4344 goto out;
4345 }
4346
4347 sctx = kzalloc(sizeof(struct send_ctx), GFP_NOFS);
4348 if (!sctx) {
4349 ret = -ENOMEM;
4350 goto out;
4351 }
4352
4353 INIT_LIST_HEAD(&sctx->new_refs);
4354 INIT_LIST_HEAD(&sctx->deleted_refs);
4355 INIT_RADIX_TREE(&sctx->name_cache, GFP_NOFS);
4356 INIT_LIST_HEAD(&sctx->name_cache_list);
4357
4358 sctx->send_filp = fget(arg->send_fd);
4359 if (IS_ERR(sctx->send_filp)) {
4360 ret = PTR_ERR(sctx->send_filp);
4361 goto out;
4362 }
4363
4364 sctx->mnt = mnt_file->f_path.mnt;
4365
4366 sctx->send_root = send_root;
4367 sctx->clone_roots_cnt = arg->clone_sources_count;
4368
4369 sctx->send_max_size = BTRFS_SEND_BUF_SIZE;
4370 sctx->send_buf = vmalloc(sctx->send_max_size);
4371 if (!sctx->send_buf) {
4372 ret = -ENOMEM;
4373 goto out;
4374 }
4375
4376 sctx->read_buf = vmalloc(BTRFS_SEND_READ_SIZE);
4377 if (!sctx->read_buf) {
4378 ret = -ENOMEM;
4379 goto out;
4380 }
4381
4382 sctx->clone_roots = vzalloc(sizeof(struct clone_root) *
4383 (arg->clone_sources_count + 1));
4384 if (!sctx->clone_roots) {
4385 ret = -ENOMEM;
4386 goto out;
4387 }
4388
4389 if (arg->clone_sources_count) {
4390 clone_sources_tmp = vmalloc(arg->clone_sources_count *
4391 sizeof(*arg->clone_sources));
4392 if (!clone_sources_tmp) {
4393 ret = -ENOMEM;
4394 goto out;
4395 }
4396
4397 ret = copy_from_user(clone_sources_tmp, arg->clone_sources,
4398 arg->clone_sources_count *
4399 sizeof(*arg->clone_sources));
4400 if (ret) {
4401 ret = -EFAULT;
4402 goto out;
4403 }
4404
4405 for (i = 0; i < arg->clone_sources_count; i++) {
4406 key.objectid = clone_sources_tmp[i];
4407 key.type = BTRFS_ROOT_ITEM_KEY;
4408 key.offset = (u64)-1;
4409 clone_root = btrfs_read_fs_root_no_name(fs_info, &key);
4410 if (!clone_root) {
4411 ret = -EINVAL;
4412 goto out;
4413 }
4414 if (IS_ERR(clone_root)) {
4415 ret = PTR_ERR(clone_root);
4416 goto out;
4417 }
4418 sctx->clone_roots[i].root = clone_root;
4419 }
4420 vfree(clone_sources_tmp);
4421 clone_sources_tmp = NULL;
4422 }
4423
4424 if (arg->parent_root) {
4425 key.objectid = arg->parent_root;
4426 key.type = BTRFS_ROOT_ITEM_KEY;
4427 key.offset = (u64)-1;
4428 sctx->parent_root = btrfs_read_fs_root_no_name(fs_info, &key);
4429 if (!sctx->parent_root) {
4430 ret = -EINVAL;
4431 goto out;
4432 }
4433 }
4434
4435 /*
4436 * Clones from send_root are allowed, but only if the clone source
4437 * is behind the current send position. This is checked while searching
4438 * for possible clone sources.
4439 */
4440 sctx->clone_roots[sctx->clone_roots_cnt++].root = sctx->send_root;
4441
4442 /* We do a bsearch later */
4443 sort(sctx->clone_roots, sctx->clone_roots_cnt,
4444 sizeof(*sctx->clone_roots), __clone_root_cmp_sort,
4445 NULL);
4446
4447 ret = send_subvol(sctx);
4448 if (ret < 0)
4449 goto out;
4450
4451 ret = begin_cmd(sctx, BTRFS_SEND_C_END);
4452 if (ret < 0)
4453 goto out;
4454 ret = send_cmd(sctx);
4455 if (ret < 0)
4456 goto out;
4457
4458out:
4459 if (filp)
4460 fput(filp);
4461 kfree(arg);
4462 vfree(clone_sources_tmp);
4463
4464 if (sctx) {
4465 if (sctx->send_filp)
4466 fput(sctx->send_filp);
4467
4468 vfree(sctx->clone_roots);
4469 vfree(sctx->send_buf);
4470 vfree(sctx->read_buf);
4471
4472 name_cache_free(sctx);
4473
4474 kfree(sctx);
4475 }
4476
4477 return ret;
4478}