| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1 | /* | 
|  | 2 | * Copyright (C) 2012 Alexander Block.  All rights reserved. | 
|  | 3 | * | 
|  | 4 | * This program is free software; you can redistribute it and/or | 
|  | 5 | * modify it under the terms of the GNU General Public | 
|  | 6 | * License v2 as published by the Free Software Foundation. | 
|  | 7 | * | 
|  | 8 | * This program is distributed in the hope that it will be useful, | 
|  | 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 
|  | 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU | 
|  | 11 | * General Public License for more details. | 
|  | 12 | * | 
|  | 13 | * You should have received a copy of the GNU General Public | 
|  | 14 | * License along with this program; if not, write to the | 
|  | 15 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | 
|  | 16 | * Boston, MA 02111-1307, USA. | 
|  | 17 | */ | 
|  | 18 |  | 
|  | 19 | #include <linux/bsearch.h> | 
|  | 20 | #include <linux/fs.h> | 
|  | 21 | #include <linux/file.h> | 
|  | 22 | #include <linux/sort.h> | 
|  | 23 | #include <linux/mount.h> | 
|  | 24 | #include <linux/xattr.h> | 
|  | 25 | #include <linux/posix_acl_xattr.h> | 
|  | 26 | #include <linux/radix-tree.h> | 
|  | 27 | #include <linux/crc32c.h> | 
| Stephen Rothwell | a1857eb | 2012-07-27 10:11:13 +1000 | [diff] [blame] | 28 | #include <linux/vmalloc.h> | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 29 |  | 
|  | 30 | #include "send.h" | 
|  | 31 | #include "backref.h" | 
|  | 32 | #include "locking.h" | 
|  | 33 | #include "disk-io.h" | 
|  | 34 | #include "btrfs_inode.h" | 
|  | 35 | #include "transaction.h" | 
|  | 36 |  | 
|  | 37 | static int g_verbose = 0; | 
|  | 38 |  | 
|  | 39 | #define verbose_printk(...) do { if (g_verbose) printk(__VA_ARGS__); } while (0) | 
|  | 40 |  | 
|  | 41 | /* | 
|  | 42 | * An fs_path is a helper to dynamically build path names of unknown size. | 
|  | 43 | * It reallocates the internal buffer on demand. | 
|  | 44 | * It allows fast appending of path elements on the right side (normal path) | 
|  | 45 | * and fast prepending on the left side (reversed path). A reversed path can | 
|  | 46 | * also be unreversed if needed. | 
|  | 47 | */ | 
|  | 48 | struct fs_path { | 
|  | 49 | union { | 
|  | 50 | struct { | 
|  | 51 | char *start; | 
|  | 52 | char *end; | 
|  | 53 | char *prepared; | 
|  | 54 |  | 
|  | 55 | char *buf; | 
|  | 56 | int buf_len; | 
|  | 57 | unsigned int reversed:1; | 
|  | 58 | unsigned int virtual_mem:1; | 
|  | 59 | char inline_buf[]; | 
|  | 60 | }; | 
|  | 61 | char pad[PAGE_SIZE]; | 
|  | 62 | }; | 
|  | 63 | }; | 
|  | 64 | #define FS_PATH_INLINE_SIZE \ | 
|  | 65 | (sizeof(struct fs_path) - offsetof(struct fs_path, inline_buf)) | 
|  | 66 |  | 
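#if 0
/*
 * Illustrative sketch only (not compiled; the __example name is
 * hypothetical): how the fs_path helpers defined below are typically
 * combined. A reversed fs_path grows to the left, which matches the
 * leaf-to-root order in which path components are usually discovered.
 * Because of the union with pad[PAGE_SIZE], every fs_path occupies exactly
 * one page and inline_buf covers whatever room the other members leave.
 */
static int __example_build_path(struct send_ctx *sctx)
{
	struct fs_path *p;
	int ret;

	p = fs_path_alloc_reversed(sctx);
	if (!p)
		return -ENOMEM;
	ret = fs_path_add(p, "c", 1);		/* p->start is "c" */
	if (!ret)
		ret = fs_path_add(p, "b", 1);	/* p->start is "b/c" */
	if (!ret)
		ret = fs_path_add(p, "a", 1);	/* p->start is "a/b/c" */
	if (!ret)
		fs_path_unreverse(p);		/* path now starts at p->buf */
	fs_path_free(sctx, p);
	return ret;
}
#endif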
|  | 67 |  | 
|  | 68 | /* reused for each extent */ | 
|  | 69 | struct clone_root { | 
|  | 70 | struct btrfs_root *root; | 
|  | 71 | u64 ino; | 
|  | 72 | u64 offset; | 
|  | 73 |  | 
|  | 74 | u64 found_refs; | 
|  | 75 | }; | 
|  | 76 |  | 
|  | 77 | #define SEND_CTX_MAX_NAME_CACHE_SIZE 128 | 
|  | 78 | #define SEND_CTX_NAME_CACHE_CLEAN_SIZE (SEND_CTX_MAX_NAME_CACHE_SIZE * 2) | 
|  | 79 |  | 
|  | 80 | struct send_ctx { | 
|  | 81 | struct file *send_filp; | 
|  | 82 | loff_t send_off; | 
|  | 83 | char *send_buf; | 
|  | 84 | u32 send_size; | 
|  | 85 | u32 send_max_size; | 
|  | 86 | u64 total_send_size; | 
|  | 87 | u64 cmd_send_size[BTRFS_SEND_C_MAX + 1]; | 
|  | 88 |  | 
|  | 89 | struct vfsmount *mnt; | 
|  | 90 |  | 
|  | 91 | struct btrfs_root *send_root; | 
|  | 92 | struct btrfs_root *parent_root; | 
|  | 93 | struct clone_root *clone_roots; | 
|  | 94 | int clone_roots_cnt; | 
|  | 95 |  | 
|  | 96 | /* current state of the compare_tree call */ | 
|  | 97 | struct btrfs_path *left_path; | 
|  | 98 | struct btrfs_path *right_path; | 
|  | 99 | struct btrfs_key *cmp_key; | 
|  | 100 |  | 
|  | 101 | /* | 
|  | 102 | * Info about the currently processed inode. In case of deleted inodes, | 
|  | 103 | * these are the values from the deleted inode. | 
|  | 104 | */ | 
|  | 105 | u64 cur_ino; | 
|  | 106 | u64 cur_inode_gen; | 
|  | 107 | int cur_inode_new; | 
|  | 108 | int cur_inode_new_gen; | 
|  | 109 | int cur_inode_deleted; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 110 | u64 cur_inode_size; | 
|  | 111 | u64 cur_inode_mode; | 
|  | 112 |  | 
|  | 113 | u64 send_progress; | 
|  | 114 |  | 
|  | 115 | struct list_head new_refs; | 
|  | 116 | struct list_head deleted_refs; | 
|  | 117 |  | 
|  | 118 | struct radix_tree_root name_cache; | 
|  | 119 | struct list_head name_cache_list; | 
|  | 120 | int name_cache_size; | 
|  | 121 |  | 
|  | 122 | struct file *cur_inode_filp; | 
|  | 123 | char *read_buf; | 
|  | 124 | }; | 
|  | 125 |  | 
|  | 126 | struct name_cache_entry { | 
|  | 127 | struct list_head list; | 
| Alexander Block | 7e0926f | 2012-07-28 14:20:58 +0200 | [diff] [blame] | 128 | /* | 
|  | 129 | * The radix tree has only 32bit keys but we need to handle 64bit inums. | 
|  | 130 | * We use the lower 32bit of the 64bit inum to store it in the tree. If | 
|  | 131 | * more than one inum falls into the same entry, we use radix_list to | 
|  | 132 | * store the additional entries. radix_list is also used to store | 
|  | 133 | * entries that have the same inum but different | 
|  | 134 | * generations. | 
|  | 135 | */ | 
|  | 136 | struct list_head radix_list; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 137 | u64 ino; | 
|  | 138 | u64 gen; | 
|  | 139 | u64 parent_ino; | 
|  | 140 | u64 parent_gen; | 
|  | 141 | int ret; | 
|  | 142 | int need_later_update; | 
|  | 143 | int name_len; | 
|  | 144 | char name[]; | 
|  | 145 | }; | 
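#if 0
/*
 * Illustrative sketch only (not compiled; __example_* is a hypothetical
 * name): a lookup under the collision scheme described above. The radix
 * tree slot for the lower 32 bits of the inum holds a list head, and the
 * list is walked to find the entry with the exact inum and generation. The
 * real cache helpers are implemented further down in this file.
 */
static struct name_cache_entry *__example_name_cache_lookup(
		struct send_ctx *sctx, u64 ino, u64 gen)
{
	struct list_head *nce_head;
	struct name_cache_entry *cur;

	nce_head = radix_tree_lookup(&sctx->name_cache, (unsigned long)ino);
	if (!nce_head)
		return NULL;

	list_for_each_entry(cur, nce_head, radix_list) {
		if (cur->ino == ino && cur->gen == gen)
			return cur;
	}
	return NULL;
}
#endif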
|  | 146 |  | 
|  | 147 | static void fs_path_reset(struct fs_path *p) | 
|  | 148 | { | 
|  | 149 | if (p->reversed) { | 
|  | 150 | p->start = p->buf + p->buf_len - 1; | 
|  | 151 | p->end = p->start; | 
|  | 152 | *p->start = 0; | 
|  | 153 | } else { | 
|  | 154 | p->start = p->buf; | 
|  | 155 | p->end = p->start; | 
|  | 156 | *p->start = 0; | 
|  | 157 | } | 
|  | 158 | } | 
|  | 159 |  | 
|  | 160 | static struct fs_path *fs_path_alloc(struct send_ctx *sctx) | 
|  | 161 | { | 
|  | 162 | struct fs_path *p; | 
|  | 163 |  | 
|  | 164 | p = kmalloc(sizeof(*p), GFP_NOFS); | 
|  | 165 | if (!p) | 
|  | 166 | return NULL; | 
|  | 167 | p->reversed = 0; | 
|  | 168 | p->virtual_mem = 0; | 
|  | 169 | p->buf = p->inline_buf; | 
|  | 170 | p->buf_len = FS_PATH_INLINE_SIZE; | 
|  | 171 | fs_path_reset(p); | 
|  | 172 | return p; | 
|  | 173 | } | 
|  | 174 |  | 
|  | 175 | static struct fs_path *fs_path_alloc_reversed(struct send_ctx *sctx) | 
|  | 176 | { | 
|  | 177 | struct fs_path *p; | 
|  | 178 |  | 
|  | 179 | p = fs_path_alloc(sctx); | 
|  | 180 | if (!p) | 
|  | 181 | return NULL; | 
|  | 182 | p->reversed = 1; | 
|  | 183 | fs_path_reset(p); | 
|  | 184 | return p; | 
|  | 185 | } | 
|  | 186 |  | 
|  | 187 | static void fs_path_free(struct send_ctx *sctx, struct fs_path *p) | 
|  | 188 | { | 
|  | 189 | if (!p) | 
|  | 190 | return; | 
|  | 191 | if (p->buf != p->inline_buf) { | 
|  | 192 | if (p->virtual_mem) | 
|  | 193 | vfree(p->buf); | 
|  | 194 | else | 
|  | 195 | kfree(p->buf); | 
|  | 196 | } | 
|  | 197 | kfree(p); | 
|  | 198 | } | 
|  | 199 |  | 
|  | 200 | static int fs_path_len(struct fs_path *p) | 
|  | 201 | { | 
|  | 202 | return p->end - p->start; | 
|  | 203 | } | 
|  | 204 |  | 
|  | 205 | static int fs_path_ensure_buf(struct fs_path *p, int len) | 
|  | 206 | { | 
|  | 207 | char *tmp_buf; | 
|  | 208 | int path_len; | 
|  | 209 | int old_buf_len; | 
|  | 210 |  | 
|  | 211 | len++; | 
|  | 212 |  | 
|  | 213 | if (p->buf_len >= len) | 
|  | 214 | return 0; | 
|  | 215 |  | 
|  | 216 | path_len = p->end - p->start; | 
|  | 217 | old_buf_len = p->buf_len; | 
|  | 218 | len = PAGE_ALIGN(len); | 
|  | 219 |  | 
|  | 220 | if (p->buf == p->inline_buf) { | 
|  | 221 | tmp_buf = kmalloc(len, GFP_NOFS); | 
|  | 222 | if (!tmp_buf) { | 
|  | 223 | tmp_buf = vmalloc(len); | 
|  | 224 | if (!tmp_buf) | 
|  | 225 | return -ENOMEM; | 
|  | 226 | p->virtual_mem = 1; | 
|  | 227 | } | 
|  | 228 | memcpy(tmp_buf, p->buf, p->buf_len); | 
|  | 229 | p->buf = tmp_buf; | 
|  | 230 | p->buf_len = len; | 
|  | 231 | } else { | 
|  | 232 | if (p->virtual_mem) { | 
|  | 233 | tmp_buf = vmalloc(len); | 
|  | 234 | if (!tmp_buf) | 
|  | 235 | return -ENOMEM; | 
|  | 236 | memcpy(tmp_buf, p->buf, p->buf_len); | 
|  | 237 | vfree(p->buf); | 
|  | 238 | } else { | 
|  | 239 | tmp_buf = krealloc(p->buf, len, GFP_NOFS); | 
|  | 240 | if (!tmp_buf) { | 
|  | 241 | tmp_buf = vmalloc(len); | 
|  | 242 | if (!tmp_buf) | 
|  | 243 | return -ENOMEM; | 
|  | 244 | memcpy(tmp_buf, p->buf, p->buf_len); | 
|  | 245 | kfree(p->buf); | 
|  | 246 | p->virtual_mem = 1; | 
|  | 247 | } | 
|  | 248 | } | 
|  | 249 | p->buf = tmp_buf; | 
|  | 250 | p->buf_len = len; | 
|  | 251 | } | 
|  | 252 | if (p->reversed) { | 
|  | 253 | tmp_buf = p->buf + old_buf_len - path_len - 1; | 
|  | 254 | p->end = p->buf + p->buf_len - 1; | 
|  | 255 | p->start = p->end - path_len; | 
|  | 256 | memmove(p->start, tmp_buf, path_len + 1); | 
|  | 257 | } else { | 
|  | 258 | p->start = p->buf; | 
|  | 259 | p->end = p->start + path_len; | 
|  | 260 | } | 
|  | 261 | return 0; | 
|  | 262 | } | 
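/*
 * Worked example for the reversed case handled above (numbers illustrative):
 * with buf_len == 4096 and the path "a/b/c" stored right-aligned, end sits
 * at buf + 4095 and start at buf + 4090. After growing to buf_len == 8192,
 * the six bytes (five path characters plus the terminating zero) are moved
 * so that end is buf + 8191 and start is buf + 8186 again, i.e. the path
 * keeps hugging the right edge of the possibly relocated buffer.
 */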
|  | 263 |  | 
|  | 264 | static int fs_path_prepare_for_add(struct fs_path *p, int name_len) | 
|  | 265 | { | 
|  | 266 | int ret; | 
|  | 267 | int new_len; | 
|  | 268 |  | 
|  | 269 | new_len = p->end - p->start + name_len; | 
|  | 270 | if (p->start != p->end) | 
|  | 271 | new_len++; | 
|  | 272 | ret = fs_path_ensure_buf(p, new_len); | 
|  | 273 | if (ret < 0) | 
|  | 274 | goto out; | 
|  | 275 |  | 
|  | 276 | if (p->reversed) { | 
|  | 277 | if (p->start != p->end) | 
|  | 278 | *--p->start = '/'; | 
|  | 279 | p->start -= name_len; | 
|  | 280 | p->prepared = p->start; | 
|  | 281 | } else { | 
|  | 282 | if (p->start != p->end) | 
|  | 283 | *p->end++ = '/'; | 
|  | 284 | p->prepared = p->end; | 
|  | 285 | p->end += name_len; | 
|  | 286 | *p->end = 0; | 
|  | 287 | } | 
|  | 288 |  | 
|  | 289 | out: | 
|  | 290 | return ret; | 
|  | 291 | } | 
|  | 292 |  | 
|  | 293 | static int fs_path_add(struct fs_path *p, const char *name, int name_len) | 
|  | 294 | { | 
|  | 295 | int ret; | 
|  | 296 |  | 
|  | 297 | ret = fs_path_prepare_for_add(p, name_len); | 
|  | 298 | if (ret < 0) | 
|  | 299 | goto out; | 
|  | 300 | memcpy(p->prepared, name, name_len); | 
|  | 301 | p->prepared = NULL; | 
|  | 302 |  | 
|  | 303 | out: | 
|  | 304 | return ret; | 
|  | 305 | } | 
|  | 306 |  | 
|  | 307 | static int fs_path_add_path(struct fs_path *p, struct fs_path *p2) | 
|  | 308 | { | 
|  | 309 | int ret; | 
|  | 310 |  | 
|  | 311 | ret = fs_path_prepare_for_add(p, p2->end - p2->start); | 
|  | 312 | if (ret < 0) | 
|  | 313 | goto out; | 
|  | 314 | memcpy(p->prepared, p2->start, p2->end - p2->start); | 
|  | 315 | p->prepared = NULL; | 
|  | 316 |  | 
|  | 317 | out: | 
|  | 318 | return ret; | 
|  | 319 | } | 
|  | 320 |  | 
|  | 321 | static int fs_path_add_from_extent_buffer(struct fs_path *p, | 
|  | 322 | struct extent_buffer *eb, | 
|  | 323 | unsigned long off, int len) | 
|  | 324 | { | 
|  | 325 | int ret; | 
|  | 326 |  | 
|  | 327 | ret = fs_path_prepare_for_add(p, len); | 
|  | 328 | if (ret < 0) | 
|  | 329 | goto out; | 
|  | 330 |  | 
|  | 331 | read_extent_buffer(eb, p->prepared, off, len); | 
|  | 332 | p->prepared = NULL; | 
|  | 333 |  | 
|  | 334 | out: | 
|  | 335 | return ret; | 
|  | 336 | } | 
|  | 337 |  | 
| Alexander Block | 9ea3ef5 | 2012-07-28 11:08:09 +0200 | [diff] [blame] | 338 | #if 0 | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 339 | static void fs_path_remove(struct fs_path *p) | 
|  | 340 | { | 
|  | 341 | BUG_ON(p->reversed); | 
|  | 342 | while (p->start != p->end && *p->end != '/') | 
|  | 343 | p->end--; | 
|  | 344 | *p->end = 0; | 
|  | 345 | } | 
| Alexander Block | 9ea3ef5 | 2012-07-28 11:08:09 +0200 | [diff] [blame] | 346 | #endif | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 347 |  | 
|  | 348 | static int fs_path_copy(struct fs_path *p, struct fs_path *from) | 
|  | 349 | { | 
|  | 350 | int ret; | 
|  | 351 |  | 
|  | 352 | p->reversed = from->reversed; | 
|  | 353 | fs_path_reset(p); | 
|  | 354 |  | 
|  | 355 | ret = fs_path_add_path(p, from); | 
|  | 356 |  | 
|  | 357 | return ret; | 
|  | 358 | } | 
|  | 359 |  | 
|  | 360 |  | 
|  | 361 | static void fs_path_unreverse(struct fs_path *p) | 
|  | 362 | { | 
|  | 363 | char *tmp; | 
|  | 364 | int len; | 
|  | 365 |  | 
|  | 366 | if (!p->reversed) | 
|  | 367 | return; | 
|  | 368 |  | 
|  | 369 | tmp = p->start; | 
|  | 370 | len = p->end - p->start; | 
|  | 371 | p->start = p->buf; | 
|  | 372 | p->end = p->start + len; | 
|  | 373 | memmove(p->start, tmp, len + 1); | 
|  | 374 | p->reversed = 0; | 
|  | 375 | } | 
|  | 376 |  | 
|  | 377 | static struct btrfs_path *alloc_path_for_send(void) | 
|  | 378 | { | 
|  | 379 | struct btrfs_path *path; | 
|  | 380 |  | 
|  | 381 | path = btrfs_alloc_path(); | 
|  | 382 | if (!path) | 
|  | 383 | return NULL; | 
|  | 384 | path->search_commit_root = 1; | 
|  | 385 | path->skip_locking = 1; | 
|  | 386 | return path; | 
|  | 387 | } | 
|  | 388 |  | 
| Anand Jain | 1bcea35 | 2012-09-14 00:04:21 -0600 | [diff] [blame] | 389 | int write_buf(struct file *filp, const void *buf, u32 len, loff_t *off) | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 390 | { | 
|  | 391 | int ret; | 
|  | 392 | mm_segment_t old_fs; | 
|  | 393 | u32 pos = 0; | 
|  | 394 |  | 
|  | 395 | old_fs = get_fs(); | 
|  | 396 | set_fs(KERNEL_DS); | 
|  | 397 |  | 
|  | 398 | while (pos < len) { | 
| Anand Jain | 1bcea35 | 2012-09-14 00:04:21 -0600 | [diff] [blame] | 399 | ret = vfs_write(filp, (char *)buf + pos, len - pos, off); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 400 | /* TODO handle that correctly */ | 
|  | 401 | /*if (ret == -ERESTARTSYS) { | 
|  | 402 | continue; | 
|  | 403 | }*/ | 
|  | 404 | if (ret < 0) | 
|  | 405 | goto out; | 
|  | 406 | if (ret == 0) { | 
|  | 407 | ret = -EIO; | 
|  | 408 | goto out; | 
|  | 409 | } | 
|  | 410 | pos += ret; | 
|  | 411 | } | 
|  | 412 |  | 
|  | 413 | ret = 0; | 
|  | 414 |  | 
|  | 415 | out: | 
|  | 416 | set_fs(old_fs); | 
|  | 417 | return ret; | 
|  | 418 | } | 
|  | 419 |  | 
|  | 420 | static int tlv_put(struct send_ctx *sctx, u16 attr, const void *data, int len) | 
|  | 421 | { | 
|  | 422 | struct btrfs_tlv_header *hdr; | 
|  | 423 | int total_len = sizeof(*hdr) + len; | 
|  | 424 | int left = sctx->send_max_size - sctx->send_size; | 
|  | 425 |  | 
|  | 426 | if (unlikely(left < total_len)) | 
|  | 427 | return -EOVERFLOW; | 
|  | 428 |  | 
|  | 429 | hdr = (struct btrfs_tlv_header *) (sctx->send_buf + sctx->send_size); | 
|  | 430 | hdr->tlv_type = cpu_to_le16(attr); | 
|  | 431 | hdr->tlv_len = cpu_to_le16(len); | 
|  | 432 | memcpy(hdr + 1, data, len); | 
|  | 433 | sctx->send_size += total_len; | 
|  | 434 |  | 
|  | 435 | return 0; | 
|  | 436 | } | 
|  | 437 |  | 
|  | 438 | #if 0 | 
|  | 439 | static int tlv_put_u8(struct send_ctx *sctx, u16 attr, u8 value) | 
|  | 440 | { | 
|  | 441 | return tlv_put(sctx, attr, &value, sizeof(value)); | 
|  | 442 | } | 
|  | 443 |  | 
|  | 444 | static int tlv_put_u16(struct send_ctx *sctx, u16 attr, u16 value) | 
|  | 445 | { | 
|  | 446 | __le16 tmp = cpu_to_le16(value); | 
|  | 447 | return tlv_put(sctx, attr, &tmp, sizeof(tmp)); | 
|  | 448 | } | 
|  | 449 |  | 
|  | 450 | static int tlv_put_u32(struct send_ctx *sctx, u16 attr, u32 value) | 
|  | 451 | { | 
|  | 452 | __le32 tmp = cpu_to_le32(value); | 
|  | 453 | return tlv_put(sctx, attr, &tmp, sizeof(tmp)); | 
|  | 454 | } | 
|  | 455 | #endif | 
|  | 456 |  | 
|  | 457 | static int tlv_put_u64(struct send_ctx *sctx, u16 attr, u64 value) | 
|  | 458 | { | 
|  | 459 | __le64 tmp = cpu_to_le64(value); | 
|  | 460 | return tlv_put(sctx, attr, &tmp, sizeof(tmp)); | 
|  | 461 | } | 
|  | 462 |  | 
|  | 463 | static int tlv_put_string(struct send_ctx *sctx, u16 attr, | 
|  | 464 | const char *str, int len) | 
|  | 465 | { | 
|  | 466 | if (len == -1) | 
|  | 467 | len = strlen(str); | 
|  | 468 | return tlv_put(sctx, attr, str, len); | 
|  | 469 | } | 
|  | 470 |  | 
|  | 471 | static int tlv_put_uuid(struct send_ctx *sctx, u16 attr, | 
|  | 472 | const u8 *uuid) | 
|  | 473 | { | 
|  | 474 | return tlv_put(sctx, attr, uuid, BTRFS_UUID_SIZE); | 
|  | 475 | } | 
|  | 476 |  | 
|  | 477 | #if 0 | 
|  | 478 | static int tlv_put_timespec(struct send_ctx *sctx, u16 attr, | 
|  | 479 | struct timespec *ts) | 
|  | 480 | { | 
|  | 481 | struct btrfs_timespec bts; | 
|  | 482 | bts.sec = cpu_to_le64(ts->tv_sec); | 
|  | 483 | bts.nsec = cpu_to_le32(ts->tv_nsec); | 
|  | 484 | return tlv_put(sctx, attr, &bts, sizeof(bts)); | 
|  | 485 | } | 
|  | 486 | #endif | 
|  | 487 |  | 
|  | 488 | static int tlv_put_btrfs_timespec(struct send_ctx *sctx, u16 attr, | 
|  | 489 | struct extent_buffer *eb, | 
|  | 490 | struct btrfs_timespec *ts) | 
|  | 491 | { | 
|  | 492 | struct btrfs_timespec bts; | 
|  | 493 | read_extent_buffer(eb, &bts, (unsigned long)ts, sizeof(bts)); | 
|  | 494 | return tlv_put(sctx, attr, &bts, sizeof(bts)); | 
|  | 495 | } | 
|  | 496 |  | 
|  | 497 |  | 
|  | 498 | #define TLV_PUT(sctx, attrtype, attrlen, data) \ | 
|  | 499 | do { \ | 
|  | 500 | ret = tlv_put(sctx, attrtype, attrlen, data); \ | 
|  | 501 | if (ret < 0) \ | 
|  | 502 | goto tlv_put_failure; \ | 
|  | 503 | } while (0) | 
|  | 504 |  | 
|  | 505 | #define TLV_PUT_INT(sctx, attrtype, bits, value) \ | 
|  | 506 | do { \ | 
|  | 507 | ret = tlv_put_u##bits(sctx, attrtype, value); \ | 
|  | 508 | if (ret < 0) \ | 
|  | 509 | goto tlv_put_failure; \ | 
|  | 510 | } while (0) | 
|  | 511 |  | 
|  | 512 | #define TLV_PUT_U8(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 8, data) | 
|  | 513 | #define TLV_PUT_U16(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 16, data) | 
|  | 514 | #define TLV_PUT_U32(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 32, data) | 
|  | 515 | #define TLV_PUT_U64(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 64, data) | 
|  | 516 | #define TLV_PUT_STRING(sctx, attrtype, str, len) \ | 
|  | 517 | do { \ | 
|  | 518 | ret = tlv_put_string(sctx, attrtype, str, len); \ | 
|  | 519 | if (ret < 0) \ | 
|  | 520 | goto tlv_put_failure; \ | 
|  | 521 | } while (0) | 
|  | 522 | #define TLV_PUT_PATH(sctx, attrtype, p) \ | 
|  | 523 | do { \ | 
|  | 524 | ret = tlv_put_string(sctx, attrtype, p->start, \ | 
|  | 525 | p->end - p->start); \ | 
|  | 526 | if (ret < 0) \ | 
|  | 527 | goto tlv_put_failure; \ | 
|  | 528 | } while (0) | 
|  | 529 | #define TLV_PUT_UUID(sctx, attrtype, uuid) \ | 
|  | 530 | do { \ | 
|  | 531 | ret = tlv_put_uuid(sctx, attrtype, uuid); \ | 
|  | 532 | if (ret < 0) \ | 
|  | 533 | goto tlv_put_failure; \ | 
|  | 534 | } while (0) | 
|  | 535 | #define TLV_PUT_TIMESPEC(sctx, attrtype, ts) \ | 
|  | 536 | do { \ | 
|  | 537 | ret = tlv_put_timespec(sctx, attrtype, ts); \ | 
|  | 538 | if (ret < 0) \ | 
|  | 539 | goto tlv_put_failure; \ | 
|  | 540 | } while (0) | 
|  | 541 | #define TLV_PUT_BTRFS_TIMESPEC(sctx, attrtype, eb, ts) \ | 
|  | 542 | do { \ | 
|  | 543 | ret = tlv_put_btrfs_timespec(sctx, attrtype, eb, ts); \ | 
|  | 544 | if (ret < 0) \ | 
|  | 545 | goto tlv_put_failure; \ | 
|  | 546 | } while (0) | 
|  | 547 |  | 
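/*
 * Informal sketch of the stream layout that the helpers above and below
 * produce (field names follow the struct definitions in send.h):
 *
 *	stream:  one btrfs_stream_header { magic, version }, written by
 *	         send_header(), followed by a sequence of commands.
 *	command: btrfs_cmd_header { len of the payload, cmd, crc32c over the
 *	         whole command with the crc field zeroed }, filled in by
 *	         begin_cmd()/send_cmd(), followed by the payload.
 *	payload: a series of TLVs, each a btrfs_tlv_header { tlv_type,
 *	         tlv_len } followed by tlv_len bytes of data, see tlv_put().
 */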
|  | 548 | static int send_header(struct send_ctx *sctx) | 
|  | 549 | { | 
|  | 550 | struct btrfs_stream_header hdr; | 
|  | 551 |  | 
|  | 552 | strcpy(hdr.magic, BTRFS_SEND_STREAM_MAGIC); | 
|  | 553 | hdr.version = cpu_to_le32(BTRFS_SEND_STREAM_VERSION); | 
|  | 554 |  | 
| Anand Jain | 1bcea35 | 2012-09-14 00:04:21 -0600 | [diff] [blame] | 555 | return write_buf(sctx->send_filp, &hdr, sizeof(hdr), | 
|  | 556 | &sctx->send_off); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 557 | } | 
|  | 558 |  | 
|  | 559 | /* | 
|  | 560 | * For each command/item we want to send to userspace, we call this function. | 
|  | 561 | */ | 
|  | 562 | static int begin_cmd(struct send_ctx *sctx, int cmd) | 
|  | 563 | { | 
|  | 564 | struct btrfs_cmd_header *hdr; | 
|  | 565 |  | 
|  | 566 | if (!sctx->send_buf) { | 
|  | 567 | WARN_ON(1); | 
|  | 568 | return -EINVAL; | 
|  | 569 | } | 
|  | 570 |  | 
|  | 571 | BUG_ON(sctx->send_size); | 
|  | 572 |  | 
|  | 573 | sctx->send_size += sizeof(*hdr); | 
|  | 574 | hdr = (struct btrfs_cmd_header *)sctx->send_buf; | 
|  | 575 | hdr->cmd = cpu_to_le16(cmd); | 
|  | 576 |  | 
|  | 577 | return 0; | 
|  | 578 | } | 
|  | 579 |  | 
|  | 580 | static int send_cmd(struct send_ctx *sctx) | 
|  | 581 | { | 
|  | 582 | int ret; | 
|  | 583 | struct btrfs_cmd_header *hdr; | 
|  | 584 | u32 crc; | 
|  | 585 |  | 
|  | 586 | hdr = (struct btrfs_cmd_header *)sctx->send_buf; | 
|  | 587 | hdr->len = cpu_to_le32(sctx->send_size - sizeof(*hdr)); | 
|  | 588 | hdr->crc = 0; | 
|  | 589 |  | 
|  | 590 | crc = crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size); | 
|  | 591 | hdr->crc = cpu_to_le32(crc); | 
|  | 592 |  | 
| Anand Jain | 1bcea35 | 2012-09-14 00:04:21 -0600 | [diff] [blame] | 593 | ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size, | 
|  | 594 | &sctx->send_off); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 595 |  | 
|  | 596 | sctx->total_send_size += sctx->send_size; | 
|  | 597 | sctx->cmd_send_size[le16_to_cpu(hdr->cmd)] += sctx->send_size; | 
|  | 598 | sctx->send_size = 0; | 
|  | 599 |  | 
|  | 600 | return ret; | 
|  | 601 | } | 
|  | 602 |  | 
|  | 603 | /* | 
|  | 604 | * Sends a move instruction to user space | 
|  | 605 | */ | 
|  | 606 | static int send_rename(struct send_ctx *sctx, | 
|  | 607 | struct fs_path *from, struct fs_path *to) | 
|  | 608 | { | 
|  | 609 | int ret; | 
|  | 610 |  | 
|  | 611 | verbose_printk("btrfs: send_rename %s -> %s\n", from->start, to->start); | 
|  | 612 |  | 
|  | 613 | ret = begin_cmd(sctx, BTRFS_SEND_C_RENAME); | 
|  | 614 | if (ret < 0) | 
|  | 615 | goto out; | 
|  | 616 |  | 
|  | 617 | TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, from); | 
|  | 618 | TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_TO, to); | 
|  | 619 |  | 
|  | 620 | ret = send_cmd(sctx); | 
|  | 621 |  | 
|  | 622 | tlv_put_failure: | 
|  | 623 | out: | 
|  | 624 | return ret; | 
|  | 625 | } | 
|  | 626 |  | 
|  | 627 | /* | 
|  | 628 | * Sends a link instruction to user space | 
|  | 629 | */ | 
|  | 630 | static int send_link(struct send_ctx *sctx, | 
|  | 631 | struct fs_path *path, struct fs_path *lnk) | 
|  | 632 | { | 
|  | 633 | int ret; | 
|  | 634 |  | 
|  | 635 | verbose_printk("btrfs: send_link %s -> %s\n", path->start, lnk->start); | 
|  | 636 |  | 
|  | 637 | ret = begin_cmd(sctx, BTRFS_SEND_C_LINK); | 
|  | 638 | if (ret < 0) | 
|  | 639 | goto out; | 
|  | 640 |  | 
|  | 641 | TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path); | 
|  | 642 | TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, lnk); | 
|  | 643 |  | 
|  | 644 | ret = send_cmd(sctx); | 
|  | 645 |  | 
|  | 646 | tlv_put_failure: | 
|  | 647 | out: | 
|  | 648 | return ret; | 
|  | 649 | } | 
|  | 650 |  | 
|  | 651 | /* | 
|  | 652 | * Sends an unlink instruction to user space | 
|  | 653 | */ | 
|  | 654 | static int send_unlink(struct send_ctx *sctx, struct fs_path *path) | 
|  | 655 | { | 
|  | 656 | int ret; | 
|  | 657 |  | 
|  | 658 | verbose_printk("btrfs: send_unlink %s\n", path->start); | 
|  | 659 |  | 
|  | 660 | ret = begin_cmd(sctx, BTRFS_SEND_C_UNLINK); | 
|  | 661 | if (ret < 0) | 
|  | 662 | goto out; | 
|  | 663 |  | 
|  | 664 | TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path); | 
|  | 665 |  | 
|  | 666 | ret = send_cmd(sctx); | 
|  | 667 |  | 
|  | 668 | tlv_put_failure: | 
|  | 669 | out: | 
|  | 670 | return ret; | 
|  | 671 | } | 
|  | 672 |  | 
|  | 673 | /* | 
|  | 674 | * Sends a rmdir instruction to user space | 
|  | 675 | */ | 
|  | 676 | static int send_rmdir(struct send_ctx *sctx, struct fs_path *path) | 
|  | 677 | { | 
|  | 678 | int ret; | 
|  | 679 |  | 
|  | 680 | verbose_printk("btrfs: send_rmdir %s\n", path->start); | 
|  | 681 |  | 
|  | 682 | ret = begin_cmd(sctx, BTRFS_SEND_C_RMDIR); | 
|  | 683 | if (ret < 0) | 
|  | 684 | goto out; | 
|  | 685 |  | 
|  | 686 | TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path); | 
|  | 687 |  | 
|  | 688 | ret = send_cmd(sctx); | 
|  | 689 |  | 
|  | 690 | tlv_put_failure: | 
|  | 691 | out: | 
|  | 692 | return ret; | 
|  | 693 | } | 
|  | 694 |  | 
|  | 695 | /* | 
|  | 696 | * Helper function to retrieve some fields from an inode item. | 
|  | 697 | */ | 
|  | 698 | static int get_inode_info(struct btrfs_root *root, | 
|  | 699 | u64 ino, u64 *size, u64 *gen, | 
| Alexander Block | 85a7b33 | 2012-07-26 23:39:10 +0200 | [diff] [blame] | 700 | u64 *mode, u64 *uid, u64 *gid, | 
|  | 701 | u64 *rdev) | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 702 | { | 
|  | 703 | int ret; | 
|  | 704 | struct btrfs_inode_item *ii; | 
|  | 705 | struct btrfs_key key; | 
|  | 706 | struct btrfs_path *path; | 
|  | 707 |  | 
|  | 708 | path = alloc_path_for_send(); | 
|  | 709 | if (!path) | 
|  | 710 | return -ENOMEM; | 
|  | 711 |  | 
|  | 712 | key.objectid = ino; | 
|  | 713 | key.type = BTRFS_INODE_ITEM_KEY; | 
|  | 714 | key.offset = 0; | 
|  | 715 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | 
|  | 716 | if (ret < 0) | 
|  | 717 | goto out; | 
|  | 718 | if (ret) { | 
|  | 719 | ret = -ENOENT; | 
|  | 720 | goto out; | 
|  | 721 | } | 
|  | 722 |  | 
|  | 723 | ii = btrfs_item_ptr(path->nodes[0], path->slots[0], | 
|  | 724 | struct btrfs_inode_item); | 
|  | 725 | if (size) | 
|  | 726 | *size = btrfs_inode_size(path->nodes[0], ii); | 
|  | 727 | if (gen) | 
|  | 728 | *gen = btrfs_inode_generation(path->nodes[0], ii); | 
|  | 729 | if (mode) | 
|  | 730 | *mode = btrfs_inode_mode(path->nodes[0], ii); | 
|  | 731 | if (uid) | 
|  | 732 | *uid = btrfs_inode_uid(path->nodes[0], ii); | 
|  | 733 | if (gid) | 
|  | 734 | *gid = btrfs_inode_gid(path->nodes[0], ii); | 
| Alexander Block | 85a7b33 | 2012-07-26 23:39:10 +0200 | [diff] [blame] | 735 | if (rdev) | 
|  | 736 | *rdev = btrfs_inode_rdev(path->nodes[0], ii); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 737 |  | 
|  | 738 | out: | 
|  | 739 | btrfs_free_path(path); | 
|  | 740 | return ret; | 
|  | 741 | } | 
|  | 742 |  | 
|  | 743 | typedef int (*iterate_inode_ref_t)(int num, u64 dir, int index, | 
|  | 744 | struct fs_path *p, | 
|  | 745 | void *ctx); | 
|  | 746 |  | 
|  | 747 | /* | 
|  | 748 | * Helper function to iterate the entries in ONE btrfs_inode_ref. | 
|  | 749 | * The iterate callback may return a non-zero value to stop iteration. This can | 
|  | 750 | * be a negative value for error codes or 1 to simply stop it. | 
|  | 751 | * | 
|  | 752 | * path must point to the INODE_REF when called. | 
|  | 753 | */ | 
|  | 754 | static int iterate_inode_ref(struct send_ctx *sctx, | 
|  | 755 | struct btrfs_root *root, struct btrfs_path *path, | 
|  | 756 | struct btrfs_key *found_key, int resolve, | 
|  | 757 | iterate_inode_ref_t iterate, void *ctx) | 
|  | 758 | { | 
|  | 759 | struct extent_buffer *eb; | 
|  | 760 | struct btrfs_item *item; | 
|  | 761 | struct btrfs_inode_ref *iref; | 
|  | 762 | struct btrfs_path *tmp_path; | 
|  | 763 | struct fs_path *p; | 
|  | 764 | u32 cur; | 
|  | 765 | u32 len; | 
|  | 766 | u32 total; | 
|  | 767 | int slot; | 
|  | 768 | u32 name_len; | 
|  | 769 | char *start; | 
|  | 770 | int ret = 0; | 
|  | 771 | int num; | 
|  | 772 | int index; | 
|  | 773 |  | 
|  | 774 | p = fs_path_alloc_reversed(sctx); | 
|  | 775 | if (!p) | 
|  | 776 | return -ENOMEM; | 
|  | 777 |  | 
|  | 778 | tmp_path = alloc_path_for_send(); | 
|  | 779 | if (!tmp_path) { | 
|  | 780 | fs_path_free(sctx, p); | 
|  | 781 | return -ENOMEM; | 
|  | 782 | } | 
|  | 783 |  | 
|  | 784 | eb = path->nodes[0]; | 
|  | 785 | slot = path->slots[0]; | 
|  | 786 | item = btrfs_item_nr(eb, slot); | 
|  | 787 | iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref); | 
|  | 788 | cur = 0; | 
|  | 789 | len = 0; | 
|  | 790 | total = btrfs_item_size(eb, item); | 
|  | 791 |  | 
|  | 792 | num = 0; | 
|  | 793 | while (cur < total) { | 
|  | 794 | fs_path_reset(p); | 
|  | 795 |  | 
|  | 796 | name_len = btrfs_inode_ref_name_len(eb, iref); | 
|  | 797 | index = btrfs_inode_ref_index(eb, iref); | 
|  | 798 | if (resolve) { | 
|  | 799 | start = btrfs_iref_to_path(root, tmp_path, iref, eb, | 
|  | 800 | found_key->offset, p->buf, | 
|  | 801 | p->buf_len); | 
|  | 802 | if (IS_ERR(start)) { | 
|  | 803 | ret = PTR_ERR(start); | 
|  | 804 | goto out; | 
|  | 805 | } | 
|  | 806 | if (start < p->buf) { | 
|  | 807 | /* overflow, try again with a larger buffer */ | 
|  | 808 | ret = fs_path_ensure_buf(p, | 
|  | 809 | p->buf_len + p->buf - start); | 
|  | 810 | if (ret < 0) | 
|  | 811 | goto out; | 
|  | 812 | start = btrfs_iref_to_path(root, tmp_path, iref, | 
|  | 813 | eb, found_key->offset, p->buf, | 
|  | 814 | p->buf_len); | 
|  | 815 | if (IS_ERR(start)) { | 
|  | 816 | ret = PTR_ERR(start); | 
|  | 817 | goto out; | 
|  | 818 | } | 
|  | 819 | BUG_ON(start < p->buf); | 
|  | 820 | } | 
|  | 821 | p->start = start; | 
|  | 822 | } else { | 
|  | 823 | ret = fs_path_add_from_extent_buffer(p, eb, | 
|  | 824 | (unsigned long)(iref + 1), name_len); | 
|  | 825 | if (ret < 0) | 
|  | 826 | goto out; | 
|  | 827 | } | 
|  | 828 |  | 
|  | 829 |  | 
|  | 830 | len = sizeof(*iref) + name_len; | 
|  | 831 | iref = (struct btrfs_inode_ref *)((char *)iref + len); | 
|  | 832 | cur += len; | 
|  | 833 |  | 
|  | 834 | ret = iterate(num, found_key->offset, index, p, ctx); | 
|  | 835 | if (ret) | 
|  | 836 | goto out; | 
|  | 837 |  | 
|  | 838 | num++; | 
|  | 839 | } | 
|  | 840 |  | 
|  | 841 | out: | 
|  | 842 | btrfs_free_path(tmp_path); | 
|  | 843 | fs_path_free(sctx, p); | 
|  | 844 | return ret; | 
|  | 845 | } | 
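/*
 * Informal sketch of the item walked by iterate_inode_ref() above. An
 * INODE_REF item is keyed (inode, BTRFS_INODE_REF_KEY, parent dir) and
 * packs all links the inode has inside that parent directory:
 *
 *	[ { index, name_len, name[] } { index, name_len, name[] } ... ]
 *
 * which is why the callback can fire more than once per item.
 */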
|  | 846 |  | 
|  | 847 | typedef int (*iterate_dir_item_t)(int num, struct btrfs_key *di_key, | 
|  | 848 | const char *name, int name_len, | 
|  | 849 | const char *data, int data_len, | 
|  | 850 | u8 type, void *ctx); | 
|  | 851 |  | 
|  | 852 | /* | 
|  | 853 | * Helper function to iterate the entries in ONE btrfs_dir_item. | 
|  | 854 | * The iterate callback may return a non-zero value to stop iteration. This can | 
|  | 855 | * be a negative value for error codes or 1 to simply stop it. | 
|  | 856 | * | 
|  | 857 | * path must point to the dir item when called. | 
|  | 858 | */ | 
|  | 859 | static int iterate_dir_item(struct send_ctx *sctx, | 
|  | 860 | struct btrfs_root *root, struct btrfs_path *path, | 
|  | 861 | struct btrfs_key *found_key, | 
|  | 862 | iterate_dir_item_t iterate, void *ctx) | 
|  | 863 | { | 
|  | 864 | int ret = 0; | 
|  | 865 | struct extent_buffer *eb; | 
|  | 866 | struct btrfs_item *item; | 
|  | 867 | struct btrfs_dir_item *di; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 868 | struct btrfs_key di_key; | 
|  | 869 | char *buf = NULL; | 
|  | 870 | char *buf2 = NULL; | 
|  | 871 | int buf_len; | 
|  | 872 | int buf_virtual = 0; | 
|  | 873 | u32 name_len; | 
|  | 874 | u32 data_len; | 
|  | 875 | u32 cur; | 
|  | 876 | u32 len; | 
|  | 877 | u32 total; | 
|  | 878 | int slot; | 
|  | 879 | int num; | 
|  | 880 | u8 type; | 
|  | 881 |  | 
|  | 882 | buf_len = PAGE_SIZE; | 
|  | 883 | buf = kmalloc(buf_len, GFP_NOFS); | 
|  | 884 | if (!buf) { | 
|  | 885 | ret = -ENOMEM; | 
|  | 886 | goto out; | 
|  | 887 | } | 
|  | 888 |  | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 889 | eb = path->nodes[0]; | 
|  | 890 | slot = path->slots[0]; | 
|  | 891 | item = btrfs_item_nr(eb, slot); | 
|  | 892 | di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item); | 
|  | 893 | cur = 0; | 
|  | 894 | len = 0; | 
|  | 895 | total = btrfs_item_size(eb, item); | 
|  | 896 |  | 
|  | 897 | num = 0; | 
|  | 898 | while (cur < total) { | 
|  | 899 | name_len = btrfs_dir_name_len(eb, di); | 
|  | 900 | data_len = btrfs_dir_data_len(eb, di); | 
|  | 901 | type = btrfs_dir_type(eb, di); | 
|  | 902 | btrfs_dir_item_key_to_cpu(eb, di, &di_key); | 
|  | 903 |  | 
|  | 904 | if (name_len + data_len > buf_len) { | 
|  | 905 | buf_len = PAGE_ALIGN(name_len + data_len); | 
|  | 906 | if (buf_virtual) { | 
|  | 907 | buf2 = vmalloc(buf_len); | 
|  | 908 | if (!buf2) { | 
|  | 909 | ret = -ENOMEM; | 
|  | 910 | goto out; | 
|  | 911 | } | 
|  | 912 | vfree(buf); | 
|  | 913 | } else { | 
|  | 914 | buf2 = krealloc(buf, buf_len, GFP_NOFS); | 
|  | 915 | if (!buf2) { | 
|  | 916 | buf2 = vmalloc(buf_len); | 
|  | 917 | if (!buf2) { | 
|  | 918 | ret = -ENOMEM; | 
|  | 919 | goto out; | 
|  | 920 | } | 
|  | 921 | kfree(buf); | 
|  | 922 | buf_virtual = 1; | 
|  | 923 | } | 
|  | 924 | } | 
|  | 925 |  | 
|  | 926 | buf = buf2; | 
|  | 927 | buf2 = NULL; | 
|  | 928 | } | 
|  | 929 |  | 
|  | 930 | read_extent_buffer(eb, buf, (unsigned long)(di + 1), | 
|  | 931 | name_len + data_len); | 
|  | 932 |  | 
|  | 933 | len = sizeof(*di) + name_len + data_len; | 
|  | 934 | di = (struct btrfs_dir_item *)((char *)di + len); | 
|  | 935 | cur += len; | 
|  | 936 |  | 
|  | 937 | ret = iterate(num, &di_key, buf, name_len, buf + name_len, | 
|  | 938 | data_len, type, ctx); | 
|  | 939 | if (ret < 0) | 
|  | 940 | goto out; | 
|  | 941 | if (ret) { | 
|  | 942 | ret = 0; | 
|  | 943 | goto out; | 
|  | 944 | } | 
|  | 945 |  | 
|  | 946 | num++; | 
|  | 947 | } | 
|  | 948 |  | 
|  | 949 | out: | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 950 | if (buf_virtual) | 
|  | 951 | vfree(buf); | 
|  | 952 | else | 
|  | 953 | kfree(buf); | 
|  | 954 | return ret; | 
|  | 955 | } | 
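/*
 * Informal sketch of the item walked by iterate_dir_item() above. Each
 * entry consists of
 *
 *	{ location key, transid, data_len, name_len, type } name[] data[]
 *
 * and several entries may share one item (xattr items in particular carry
 * data), hence the same "cur < total" loop shape as in iterate_inode_ref().
 */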
|  | 956 |  | 
|  | 957 | static int __copy_first_ref(int num, u64 dir, int index, | 
|  | 958 | struct fs_path *p, void *ctx) | 
|  | 959 | { | 
|  | 960 | int ret; | 
|  | 961 | struct fs_path *pt = ctx; | 
|  | 962 |  | 
|  | 963 | ret = fs_path_copy(pt, p); | 
|  | 964 | if (ret < 0) | 
|  | 965 | return ret; | 
|  | 966 |  | 
|  | 967 | /* we want the first only */ | 
|  | 968 | return 1; | 
|  | 969 | } | 
|  | 970 |  | 
|  | 971 | /* | 
|  | 972 | * Retrieve the first path of an inode. If an inode has more than one | 
|  | 973 | * ref/hardlink, this is ignored. | 
|  | 974 | */ | 
|  | 975 | static int get_inode_path(struct send_ctx *sctx, struct btrfs_root *root, | 
|  | 976 | u64 ino, struct fs_path *path) | 
|  | 977 | { | 
|  | 978 | int ret; | 
|  | 979 | struct btrfs_key key, found_key; | 
|  | 980 | struct btrfs_path *p; | 
|  | 981 |  | 
|  | 982 | p = alloc_path_for_send(); | 
|  | 983 | if (!p) | 
|  | 984 | return -ENOMEM; | 
|  | 985 |  | 
|  | 986 | fs_path_reset(path); | 
|  | 987 |  | 
|  | 988 | key.objectid = ino; | 
|  | 989 | key.type = BTRFS_INODE_REF_KEY; | 
|  | 990 | key.offset = 0; | 
|  | 991 |  | 
|  | 992 | ret = btrfs_search_slot_for_read(root, &key, p, 1, 0); | 
|  | 993 | if (ret < 0) | 
|  | 994 | goto out; | 
|  | 995 | if (ret) { | 
|  | 996 | ret = 1; | 
|  | 997 | goto out; | 
|  | 998 | } | 
|  | 999 | btrfs_item_key_to_cpu(p->nodes[0], &found_key, p->slots[0]); | 
|  | 1000 | if (found_key.objectid != ino || | 
|  | 1001 | found_key.type != BTRFS_INODE_REF_KEY) { | 
|  | 1002 | ret = -ENOENT; | 
|  | 1003 | goto out; | 
|  | 1004 | } | 
|  | 1005 |  | 
|  | 1006 | ret = iterate_inode_ref(sctx, root, p, &found_key, 1, | 
|  | 1007 | __copy_first_ref, path); | 
|  | 1008 | if (ret < 0) | 
|  | 1009 | goto out; | 
|  | 1010 | ret = 0; | 
|  | 1011 |  | 
|  | 1012 | out: | 
|  | 1013 | btrfs_free_path(p); | 
|  | 1014 | return ret; | 
|  | 1015 | } | 
|  | 1016 |  | 
|  | 1017 | struct backref_ctx { | 
|  | 1018 | struct send_ctx *sctx; | 
|  | 1019 |  | 
|  | 1020 | /* number of total found references */ | 
|  | 1021 | u64 found; | 
|  | 1022 |  | 
|  | 1023 | /* | 
|  | 1024 | * Used for clones found in send_root. Clones found behind cur_objectid | 
|  | 1025 | * and cur_offset are not considered allowed clones. | 
|  | 1026 | */ | 
|  | 1027 | u64 cur_objectid; | 
|  | 1028 | u64 cur_offset; | 
|  | 1029 |  | 
|  | 1030 | /* may be truncated in case it's the last extent in a file */ | 
|  | 1031 | u64 extent_len; | 
|  | 1032 |  | 
|  | 1033 | /* Just to check for bugs in backref resolving */ | 
| Alexander Block | ee849c0 | 2012-07-28 12:42:05 +0200 | [diff] [blame] | 1034 | int found_itself; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1035 | }; | 
|  | 1036 |  | 
|  | 1037 | static int __clone_root_cmp_bsearch(const void *key, const void *elt) | 
|  | 1038 | { | 
| Jan Schmidt | 995e01b | 2012-08-13 02:52:38 -0600 | [diff] [blame] | 1039 | u64 root = (u64)(uintptr_t)key; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1040 | struct clone_root *cr = (struct clone_root *)elt; | 
|  | 1041 |  | 
|  | 1042 | if (root < cr->root->objectid) | 
|  | 1043 | return -1; | 
|  | 1044 | if (root > cr->root->objectid) | 
|  | 1045 | return 1; | 
|  | 1046 | return 0; | 
|  | 1047 | } | 
|  | 1048 |  | 
|  | 1049 | static int __clone_root_cmp_sort(const void *e1, const void *e2) | 
|  | 1050 | { | 
|  | 1051 | struct clone_root *cr1 = (struct clone_root *)e1; | 
|  | 1052 | struct clone_root *cr2 = (struct clone_root *)e2; | 
|  | 1053 |  | 
|  | 1054 | if (cr1->root->objectid < cr2->root->objectid) | 
|  | 1055 | return -1; | 
|  | 1056 | if (cr1->root->objectid > cr2->root->objectid) | 
|  | 1057 | return 1; | 
|  | 1058 | return 0; | 
|  | 1059 | } | 
|  | 1060 |  | 
|  | 1061 | /* | 
|  | 1062 | * Called for every backref that is found for the current extent. | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 1063 | * Results are collected in sctx->clone_roots->ino/offset/found_refs | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1064 | */ | 
|  | 1065 | static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_) | 
|  | 1066 | { | 
|  | 1067 | struct backref_ctx *bctx = ctx_; | 
|  | 1068 | struct clone_root *found; | 
|  | 1069 | int ret; | 
|  | 1070 | u64 i_size; | 
|  | 1071 |  | 
|  | 1072 | /* First check if the root is in the list of accepted clone sources */ | 
| Jan Schmidt | 995e01b | 2012-08-13 02:52:38 -0600 | [diff] [blame] | 1073 | found = bsearch((void *)(uintptr_t)root, bctx->sctx->clone_roots, | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1074 | bctx->sctx->clone_roots_cnt, | 
|  | 1075 | sizeof(struct clone_root), | 
|  | 1076 | __clone_root_cmp_bsearch); | 
|  | 1077 | if (!found) | 
|  | 1078 | return 0; | 
|  | 1079 |  | 
|  | 1080 | if (found->root == bctx->sctx->send_root && | 
|  | 1081 | ino == bctx->cur_objectid && | 
|  | 1082 | offset == bctx->cur_offset) { | 
| Alexander Block | ee849c0 | 2012-07-28 12:42:05 +0200 | [diff] [blame] | 1083 | bctx->found_itself = 1; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1084 | } | 
|  | 1085 |  | 
|  | 1086 | /* | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 1087 | * There are inodes that have extents that lie behind their i_size. Don't | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1088 | * accept clones from these extents. | 
|  | 1089 | */ | 
| Alexander Block | 85a7b33 | 2012-07-26 23:39:10 +0200 | [diff] [blame] | 1090 | ret = get_inode_info(found->root, ino, &i_size, NULL, NULL, NULL, NULL, | 
|  | 1091 | NULL); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1092 | if (ret < 0) | 
|  | 1093 | return ret; | 
|  | 1094 |  | 
|  | 1095 | if (offset + bctx->extent_len > i_size) | 
|  | 1096 | return 0; | 
|  | 1097 |  | 
|  | 1098 | /* | 
|  | 1099 | * Make sure we don't consider clones from send_root that are | 
|  | 1100 | * behind the current inode/offset. | 
|  | 1101 | */ | 
|  | 1102 | if (found->root == bctx->sctx->send_root) { | 
|  | 1103 | /* | 
|  | 1104 | * TODO for the moment we don't accept clones from the inode | 
|  | 1105 | * that is currently being sent. We may change this when | 
|  | 1106 | * BTRFS_IOC_CLONE_RANGE supports cloning from and to the same | 
|  | 1107 | * file. | 
|  | 1108 | */ | 
|  | 1109 | if (ino >= bctx->cur_objectid) | 
|  | 1110 | return 0; | 
| Alexander Block | e938c8a | 2012-07-28 16:33:49 +0200 | [diff] [blame] | 1111 | #if 0 | 
|  | 1112 | if (ino > bctx->cur_objectid) | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1113 | return 0; | 
| Alexander Block | e938c8a | 2012-07-28 16:33:49 +0200 | [diff] [blame] | 1114 | if (offset + bctx->extent_len > bctx->cur_offset) | 
|  | 1115 | return 0; | 
|  | 1116 | #endif | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1117 | } | 
|  | 1118 |  | 
|  | 1119 | bctx->found++; | 
|  | 1120 | found->found_refs++; | 
|  | 1121 | if (ino < found->ino) { | 
|  | 1122 | found->ino = ino; | 
|  | 1123 | found->offset = offset; | 
|  | 1124 | } else if (found->ino == ino) { | 
|  | 1125 | /* | 
|  | 1126 | * Same extent found more than once in the same file. | 
|  | 1127 | */ | 
|  | 1128 | if (found->offset > offset + bctx->extent_len) | 
|  | 1129 | found->offset = offset; | 
|  | 1130 | } | 
|  | 1131 |  | 
|  | 1132 | return 0; | 
|  | 1133 | } | 
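/*
 * Example of the bookkeeping above (numbers illustrative): if the extent
 * currently processed at (ino 259, offset 64K) in the send root is also
 * referenced by ino 258 at offsets 0 and 128K in an accepted clone root,
 * that clone root ends up with found_refs == 2 and ino/offset describing
 * the lowest usable reference, which find_extent_clone() below then picks
 * the clone source from.
 */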
|  | 1134 |  | 
|  | 1135 | /* | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 1136 | * Given an inode, offset and extent item, it finds a good clone for a clone | 
|  | 1137 | * instruction. Returns -ENOENT when none could be found. The function makes | 
|  | 1138 | * sure that the returned clone is usable at the point where sending is at the | 
|  | 1139 | * moment. This means that no clones are accepted which lie behind the current | 
|  | 1140 | * inode+offset. | 
|  | 1141 | * | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1142 | * path must point to the extent item when called. | 
|  | 1143 | */ | 
|  | 1144 | static int find_extent_clone(struct send_ctx *sctx, | 
|  | 1145 | struct btrfs_path *path, | 
|  | 1146 | u64 ino, u64 data_offset, | 
|  | 1147 | u64 ino_size, | 
|  | 1148 | struct clone_root **found) | 
|  | 1149 | { | 
|  | 1150 | int ret; | 
|  | 1151 | int extent_type; | 
|  | 1152 | u64 logical; | 
| Chris Mason | 74dd17f | 2012-08-07 16:25:13 -0400 | [diff] [blame] | 1153 | u64 disk_byte; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1154 | u64 num_bytes; | 
|  | 1155 | u64 extent_item_pos; | 
| Liu Bo | 69917e4 | 2012-09-07 20:01:28 -0600 | [diff] [blame] | 1156 | u64 flags = 0; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1157 | struct btrfs_file_extent_item *fi; | 
|  | 1158 | struct extent_buffer *eb = path->nodes[0]; | 
| Alexander Block | 35075bb | 2012-07-28 12:44:34 +0200 | [diff] [blame] | 1159 | struct backref_ctx *backref_ctx = NULL; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1160 | struct clone_root *cur_clone_root; | 
|  | 1161 | struct btrfs_key found_key; | 
|  | 1162 | struct btrfs_path *tmp_path; | 
| Chris Mason | 74dd17f | 2012-08-07 16:25:13 -0400 | [diff] [blame] | 1163 | int compressed; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1164 | u32 i; | 
|  | 1165 |  | 
|  | 1166 | tmp_path = alloc_path_for_send(); | 
|  | 1167 | if (!tmp_path) | 
|  | 1168 | return -ENOMEM; | 
|  | 1169 |  | 
| Alexander Block | 35075bb | 2012-07-28 12:44:34 +0200 | [diff] [blame] | 1170 | backref_ctx = kmalloc(sizeof(*backref_ctx), GFP_NOFS); | 
|  | 1171 | if (!backref_ctx) { | 
|  | 1172 | ret = -ENOMEM; | 
|  | 1173 | goto out; | 
|  | 1174 | } | 
|  | 1175 |  | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1176 | if (data_offset >= ino_size) { | 
|  | 1177 | /* | 
|  | 1178 | * There may be extents that lie behind the file's size. | 
|  | 1179 | * I at least had this in combination with snapshotting while | 
|  | 1180 | * writing large files. | 
|  | 1181 | */ | 
|  | 1182 | ret = 0; | 
|  | 1183 | goto out; | 
|  | 1184 | } | 
|  | 1185 |  | 
|  | 1186 | fi = btrfs_item_ptr(eb, path->slots[0], | 
|  | 1187 | struct btrfs_file_extent_item); | 
|  | 1188 | extent_type = btrfs_file_extent_type(eb, fi); | 
|  | 1189 | if (extent_type == BTRFS_FILE_EXTENT_INLINE) { | 
|  | 1190 | ret = -ENOENT; | 
|  | 1191 | goto out; | 
|  | 1192 | } | 
| Chris Mason | 74dd17f | 2012-08-07 16:25:13 -0400 | [diff] [blame] | 1193 | compressed = btrfs_file_extent_compression(eb, fi); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1194 |  | 
|  | 1195 | num_bytes = btrfs_file_extent_num_bytes(eb, fi); | 
| Chris Mason | 74dd17f | 2012-08-07 16:25:13 -0400 | [diff] [blame] | 1196 | disk_byte = btrfs_file_extent_disk_bytenr(eb, fi); | 
|  | 1197 | if (disk_byte == 0) { | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1198 | ret = -ENOENT; | 
|  | 1199 | goto out; | 
|  | 1200 | } | 
| Chris Mason | 74dd17f | 2012-08-07 16:25:13 -0400 | [diff] [blame] | 1201 | logical = disk_byte + btrfs_file_extent_offset(eb, fi); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1202 |  | 
| Liu Bo | 69917e4 | 2012-09-07 20:01:28 -0600 | [diff] [blame] | 1203 | ret = extent_from_logical(sctx->send_root->fs_info, disk_byte, tmp_path, | 
|  | 1204 | &found_key, &flags); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1205 | btrfs_release_path(tmp_path); | 
|  | 1206 |  | 
|  | 1207 | if (ret < 0) | 
|  | 1208 | goto out; | 
| Liu Bo | 69917e4 | 2012-09-07 20:01:28 -0600 | [diff] [blame] | 1209 | if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1210 | ret = -EIO; | 
|  | 1211 | goto out; | 
|  | 1212 | } | 
|  | 1213 |  | 
|  | 1214 | /* | 
|  | 1215 | * Setup the clone roots. | 
|  | 1216 | */ | 
|  | 1217 | for (i = 0; i < sctx->clone_roots_cnt; i++) { | 
|  | 1218 | cur_clone_root = sctx->clone_roots + i; | 
|  | 1219 | cur_clone_root->ino = (u64)-1; | 
|  | 1220 | cur_clone_root->offset = 0; | 
|  | 1221 | cur_clone_root->found_refs = 0; | 
|  | 1222 | } | 
|  | 1223 |  | 
| Alexander Block | 35075bb | 2012-07-28 12:44:34 +0200 | [diff] [blame] | 1224 | backref_ctx->sctx = sctx; | 
|  | 1225 | backref_ctx->found = 0; | 
|  | 1226 | backref_ctx->cur_objectid = ino; | 
|  | 1227 | backref_ctx->cur_offset = data_offset; | 
|  | 1228 | backref_ctx->found_itself = 0; | 
|  | 1229 | backref_ctx->extent_len = num_bytes; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1230 |  | 
|  | 1231 | /* | 
|  | 1232 | * The last extent of a file may be too large due to page alignment. | 
|  | 1233 | * We need to adjust extent_len in this case so that the checks in | 
|  | 1234 | * __iterate_backrefs work. | 
|  | 1235 | */ | 
|  | 1236 | if (data_offset + num_bytes >= ino_size) | 
| Alexander Block | 35075bb | 2012-07-28 12:44:34 +0200 | [diff] [blame] | 1237 | backref_ctx->extent_len = ino_size - data_offset; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1238 |  | 
|  | 1239 | /* | 
|  | 1240 | * Now collect all backrefs. | 
|  | 1241 | */ | 
| Chris Mason | 74dd17f | 2012-08-07 16:25:13 -0400 | [diff] [blame] | 1242 | if (compressed == BTRFS_COMPRESS_NONE) | 
|  | 1243 | extent_item_pos = logical - found_key.objectid; | 
|  | 1244 | else | 
|  | 1245 | extent_item_pos = 0; | 
|  | 1246 |  | 
|  | 1248 | ret = iterate_extent_inodes(sctx->send_root->fs_info, | 
|  | 1249 | found_key.objectid, extent_item_pos, 1, | 
| Alexander Block | 35075bb | 2012-07-28 12:44:34 +0200 | [diff] [blame] | 1250 | __iterate_backrefs, backref_ctx); | 
| Chris Mason | 74dd17f | 2012-08-07 16:25:13 -0400 | [diff] [blame] | 1251 |  | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1252 | if (ret < 0) | 
|  | 1253 | goto out; | 
|  | 1254 |  | 
| Alexander Block | 35075bb | 2012-07-28 12:44:34 +0200 | [diff] [blame] | 1255 | if (!backref_ctx->found_itself) { | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1256 | /* found a bug in backref code? */ | 
|  | 1257 | ret = -EIO; | 
|  | 1258 | printk(KERN_ERR "btrfs: ERROR did not find backref in " | 
|  | 1259 | "send_root. inode=%llu, offset=%llu, " | 
| Chris Mason | 74dd17f | 2012-08-07 16:25:13 -0400 | [diff] [blame] | 1260 | "disk_byte=%llu found extent=%llu\n", | 
|  | 1261 | ino, data_offset, disk_byte, found_key.objectid); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1262 | goto out; | 
|  | 1263 | } | 
|  | 1264 |  | 
|  | 1265 | verbose_printk(KERN_DEBUG "btrfs: find_extent_clone: data_offset=%llu, " | 
|  | 1266 | "ino=%llu, " | 
|  | 1267 | "num_bytes=%llu, logical=%llu\n", | 
|  | 1268 | data_offset, ino, num_bytes, logical); | 
|  | 1269 |  | 
| Alexander Block | 35075bb | 2012-07-28 12:44:34 +0200 | [diff] [blame] | 1270 | if (!backref_ctx->found) | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1271 | verbose_printk("btrfs:    no clones found\n"); | 
|  | 1272 |  | 
|  | 1273 | cur_clone_root = NULL; | 
|  | 1274 | for (i = 0; i < sctx->clone_roots_cnt; i++) { | 
|  | 1275 | if (sctx->clone_roots[i].found_refs) { | 
|  | 1276 | if (!cur_clone_root) | 
|  | 1277 | cur_clone_root = sctx->clone_roots + i; | 
|  | 1278 | else if (sctx->clone_roots[i].root == sctx->send_root) | 
|  | 1279 | /* prefer clones from send_root over others */ | 
|  | 1280 | cur_clone_root = sctx->clone_roots + i; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1281 | } | 
|  | 1282 |  | 
|  | 1283 | } | 
|  | 1284 |  | 
|  | 1285 | if (cur_clone_root) { | 
|  | 1286 | *found = cur_clone_root; | 
|  | 1287 | ret = 0; | 
|  | 1288 | } else { | 
|  | 1289 | ret = -ENOENT; | 
|  | 1290 | } | 
|  | 1291 |  | 
|  | 1292 | out: | 
|  | 1293 | btrfs_free_path(tmp_path); | 
| Alexander Block | 35075bb | 2012-07-28 12:44:34 +0200 | [diff] [blame] | 1294 | kfree(backref_ctx); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1295 | return ret; | 
|  | 1296 | } | 
|  | 1297 |  | 
|  | 1298 | static int read_symlink(struct send_ctx *sctx, | 
|  | 1299 | struct btrfs_root *root, | 
|  | 1300 | u64 ino, | 
|  | 1301 | struct fs_path *dest) | 
|  | 1302 | { | 
|  | 1303 | int ret; | 
|  | 1304 | struct btrfs_path *path; | 
|  | 1305 | struct btrfs_key key; | 
|  | 1306 | struct btrfs_file_extent_item *ei; | 
|  | 1307 | u8 type; | 
|  | 1308 | u8 compression; | 
|  | 1309 | unsigned long off; | 
|  | 1310 | int len; | 
|  | 1311 |  | 
|  | 1312 | path = alloc_path_for_send(); | 
|  | 1313 | if (!path) | 
|  | 1314 | return -ENOMEM; | 
|  | 1315 |  | 
|  | 1316 | key.objectid = ino; | 
|  | 1317 | key.type = BTRFS_EXTENT_DATA_KEY; | 
|  | 1318 | key.offset = 0; | 
|  | 1319 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | 
|  | 1320 | if (ret < 0) | 
|  | 1321 | goto out; | 
|  | 1322 | BUG_ON(ret); | 
|  | 1323 |  | 
|  | 1324 | ei = btrfs_item_ptr(path->nodes[0], path->slots[0], | 
|  | 1325 | struct btrfs_file_extent_item); | 
|  | 1326 | type = btrfs_file_extent_type(path->nodes[0], ei); | 
|  | 1327 | compression = btrfs_file_extent_compression(path->nodes[0], ei); | 
|  | 1328 | BUG_ON(type != BTRFS_FILE_EXTENT_INLINE); | 
|  | 1329 | BUG_ON(compression); | 
|  | 1330 |  | 
|  | 1331 | off = btrfs_file_extent_inline_start(ei); | 
|  | 1332 | len = btrfs_file_extent_inline_len(path->nodes[0], ei); | 
|  | 1333 |  | 
|  | 1334 | ret = fs_path_add_from_extent_buffer(dest, path->nodes[0], off, len); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1335 |  | 
|  | 1336 | out: | 
|  | 1337 | btrfs_free_path(path); | 
|  | 1338 | return ret; | 
|  | 1339 | } | 
|  | 1340 |  | 
|  | 1341 | /* | 
|  | 1342 | * Helper function to generate a file name that is unique in the root of | 
|  | 1343 | * send_root and parent_root. This is used to generate names for orphan inodes. | 
|  | 1344 | */ | 
|  | 1345 | static int gen_unique_name(struct send_ctx *sctx, | 
|  | 1346 | u64 ino, u64 gen, | 
|  | 1347 | struct fs_path *dest) | 
|  | 1348 | { | 
|  | 1349 | int ret = 0; | 
|  | 1350 | struct btrfs_path *path; | 
|  | 1351 | struct btrfs_dir_item *di; | 
|  | 1352 | char tmp[64]; | 
|  | 1353 | int len; | 
|  | 1354 | u64 idx = 0; | 
|  | 1355 |  | 
|  | 1356 | path = alloc_path_for_send(); | 
|  | 1357 | if (!path) | 
|  | 1358 | return -ENOMEM; | 
|  | 1359 |  | 
|  | 1360 | while (1) { | 
|  | 1361 | len = snprintf(tmp, sizeof(tmp) - 1, "o%llu-%llu-%llu", | 
|  | 1362 | ino, gen, idx); | 
|  | 1363 | if (len >= sizeof(tmp)) { | 
|  | 1364 | /* should really not happen */ | 
|  | 1365 | ret = -EOVERFLOW; | 
|  | 1366 | goto out; | 
|  | 1367 | } | 
|  | 1368 |  | 
|  | 1369 | di = btrfs_lookup_dir_item(NULL, sctx->send_root, | 
|  | 1370 | path, BTRFS_FIRST_FREE_OBJECTID, | 
|  | 1371 | tmp, strlen(tmp), 0); | 
|  | 1372 | btrfs_release_path(path); | 
|  | 1373 | if (IS_ERR(di)) { | 
|  | 1374 | ret = PTR_ERR(di); | 
|  | 1375 | goto out; | 
|  | 1376 | } | 
|  | 1377 | if (di) { | 
|  | 1378 | /* not unique, try again */ | 
|  | 1379 | idx++; | 
|  | 1380 | continue; | 
|  | 1381 | } | 
|  | 1382 |  | 
|  | 1383 | if (!sctx->parent_root) { | 
|  | 1384 | /* unique */ | 
|  | 1385 | ret = 0; | 
|  | 1386 | break; | 
|  | 1387 | } | 
|  | 1388 |  | 
|  | 1389 | di = btrfs_lookup_dir_item(NULL, sctx->parent_root, | 
|  | 1390 | path, BTRFS_FIRST_FREE_OBJECTID, | 
|  | 1391 | tmp, strlen(tmp), 0); | 
|  | 1392 | btrfs_release_path(path); | 
|  | 1393 | if (IS_ERR(di)) { | 
|  | 1394 | ret = PTR_ERR(di); | 
|  | 1395 | goto out; | 
|  | 1396 | } | 
|  | 1397 | if (di) { | 
|  | 1398 | /* not unique, try again */ | 
|  | 1399 | idx++; | 
|  | 1400 | continue; | 
|  | 1401 | } | 
|  | 1402 | /* unique */ | 
|  | 1403 | break; | 
|  | 1404 | } | 
|  | 1405 |  | 
|  | 1406 | ret = fs_path_add(dest, tmp, strlen(tmp)); | 
|  | 1407 |  | 
|  | 1408 | out: | 
|  | 1409 | btrfs_free_path(path); | 
|  | 1410 | return ret; | 
|  | 1411 | } | 
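/*
 * Example of the names generated above: for ino 257, gen 5 the first
 * candidate is "o257-5-0"; if a dir entry with that name already exists in
 * the top level dir of either the send or the parent root, "o257-5-1",
 * "o257-5-2", ... are tried until a free name is found.
 */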
|  | 1412 |  | 
|  | 1413 | enum inode_state { | 
|  | 1414 | inode_state_no_change, | 
|  | 1415 | inode_state_will_create, | 
|  | 1416 | inode_state_did_create, | 
|  | 1417 | inode_state_will_delete, | 
|  | 1418 | inode_state_did_delete, | 
|  | 1419 | }; | 
|  | 1420 |  | 
|  | 1421 | static int get_cur_inode_state(struct send_ctx *sctx, u64 ino, u64 gen) | 
|  | 1422 | { | 
|  | 1423 | int ret; | 
|  | 1424 | int left_ret; | 
|  | 1425 | int right_ret; | 
|  | 1426 | u64 left_gen; | 
|  | 1427 | u64 right_gen; | 
|  | 1428 |  | 
|  | 1429 | ret = get_inode_info(sctx->send_root, ino, NULL, &left_gen, NULL, NULL, | 
| Alexander Block | 85a7b33 | 2012-07-26 23:39:10 +0200 | [diff] [blame] | 1430 | NULL, NULL); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1431 | if (ret < 0 && ret != -ENOENT) | 
|  | 1432 | goto out; | 
|  | 1433 | left_ret = ret; | 
|  | 1434 |  | 
|  | 1435 | if (!sctx->parent_root) { | 
|  | 1436 | right_ret = -ENOENT; | 
|  | 1437 | } else { | 
|  | 1438 | ret = get_inode_info(sctx->parent_root, ino, NULL, &right_gen, | 
| Alexander Block | 85a7b33 | 2012-07-26 23:39:10 +0200 | [diff] [blame] | 1439 | NULL, NULL, NULL, NULL); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1440 | if (ret < 0 && ret != -ENOENT) | 
|  | 1441 | goto out; | 
|  | 1442 | right_ret = ret; | 
|  | 1443 | } | 
|  | 1444 |  | 
|  | 1445 | if (!left_ret && !right_ret) { | 
| Alexander Block | e938c8a | 2012-07-28 16:33:49 +0200 | [diff] [blame] | 1446 | if (left_gen == gen && right_gen == gen) { | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1447 | ret = inode_state_no_change; | 
| Alexander Block | e938c8a | 2012-07-28 16:33:49 +0200 | [diff] [blame] | 1448 | } else if (left_gen == gen) { | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1449 | if (ino < sctx->send_progress) | 
|  | 1450 | ret = inode_state_did_create; | 
|  | 1451 | else | 
|  | 1452 | ret = inode_state_will_create; | 
|  | 1453 | } else if (right_gen == gen) { | 
|  | 1454 | if (ino < sctx->send_progress) | 
|  | 1455 | ret = inode_state_did_delete; | 
|  | 1456 | else | 
|  | 1457 | ret = inode_state_will_delete; | 
|  | 1458 | } else  { | 
|  | 1459 | ret = -ENOENT; | 
|  | 1460 | } | 
|  | 1461 | } else if (!left_ret) { | 
|  | 1462 | if (left_gen == gen) { | 
|  | 1463 | if (ino < sctx->send_progress) | 
|  | 1464 | ret = inode_state_did_create; | 
|  | 1465 | else | 
|  | 1466 | ret = inode_state_will_create; | 
|  | 1467 | } else { | 
|  | 1468 | ret = -ENOENT; | 
|  | 1469 | } | 
|  | 1470 | } else if (!right_ret) { | 
|  | 1471 | if (right_gen == gen) { | 
|  | 1472 | if (ino < sctx->send_progress) | 
|  | 1473 | ret = inode_state_did_delete; | 
|  | 1474 | else | 
|  | 1475 | ret = inode_state_will_delete; | 
|  | 1476 | } else { | 
|  | 1477 | ret = -ENOENT; | 
|  | 1478 | } | 
|  | 1479 | } else { | 
|  | 1480 | ret = -ENOENT; | 
|  | 1481 | } | 
|  | 1482 |  | 
|  | 1483 | out: | 
|  | 1484 | return ret; | 
|  | 1485 | } | 
|  | 1486 |  | 
|  | 1487 | static int is_inode_existent(struct send_ctx *sctx, u64 ino, u64 gen) | 
|  | 1488 | { | 
|  | 1489 | int ret; | 
|  | 1490 |  | 
|  | 1491 | ret = get_cur_inode_state(sctx, ino, gen); | 
|  | 1492 | if (ret < 0) | 
|  | 1493 | goto out; | 
|  | 1494 |  | 
|  | 1495 | if (ret == inode_state_no_change || | 
|  | 1496 | ret == inode_state_did_create || | 
|  | 1497 | ret == inode_state_will_delete) | 
|  | 1498 | ret = 1; | 
|  | 1499 | else | 
|  | 1500 | ret = 0; | 
|  | 1501 |  | 
|  | 1502 | out: | 
|  | 1503 | return ret; | 
|  | 1504 | } | 
|  | 1505 |  | 
|  | 1506 | /* | 
|  | 1507 | * Helper function to lookup a dir item in a dir. | 
|  | 1508 | */ | 
|  | 1509 | static int lookup_dir_item_inode(struct btrfs_root *root, | 
|  | 1510 | u64 dir, const char *name, int name_len, | 
|  | 1511 | u64 *found_inode, | 
|  | 1512 | u8 *found_type) | 
|  | 1513 | { | 
|  | 1514 | int ret = 0; | 
|  | 1515 | struct btrfs_dir_item *di; | 
|  | 1516 | struct btrfs_key key; | 
|  | 1517 | struct btrfs_path *path; | 
|  | 1518 |  | 
|  | 1519 | path = alloc_path_for_send(); | 
|  | 1520 | if (!path) | 
|  | 1521 | return -ENOMEM; | 
|  | 1522 |  | 
|  | 1523 | di = btrfs_lookup_dir_item(NULL, root, path, | 
|  | 1524 | dir, name, name_len, 0); | 
|  | 1525 | if (!di) { | 
|  | 1526 | ret = -ENOENT; | 
|  | 1527 | goto out; | 
|  | 1528 | } | 
|  | 1529 | if (IS_ERR(di)) { | 
|  | 1530 | ret = PTR_ERR(di); | 
|  | 1531 | goto out; | 
|  | 1532 | } | 
|  | 1533 | btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key); | 
|  | 1534 | *found_inode = key.objectid; | 
|  | 1535 | *found_type = btrfs_dir_type(path->nodes[0], di); | 
|  | 1536 |  | 
|  | 1537 | out: | 
|  | 1538 | btrfs_free_path(path); | 
|  | 1539 | return ret; | 
|  | 1540 | } | 
|  | 1541 |  | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 1542 | /* | 
|  | 1543 | * Looks up the first btrfs_inode_ref of a given ino. It returns the parent dir, | 
|  | 1544 | * generation of the parent dir and the name of the dir entry. | 
|  | 1545 | */ | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1546 | static int get_first_ref(struct send_ctx *sctx, | 
|  | 1547 | struct btrfs_root *root, u64 ino, | 
|  | 1548 | u64 *dir, u64 *dir_gen, struct fs_path *name) | 
|  | 1549 | { | 
|  | 1550 | int ret; | 
|  | 1551 | struct btrfs_key key; | 
|  | 1552 | struct btrfs_key found_key; | 
|  | 1553 | struct btrfs_path *path; | 
|  | 1554 | struct btrfs_inode_ref *iref; | 
|  | 1555 | int len; | 
|  | 1556 |  | 
|  | 1557 | path = alloc_path_for_send(); | 
|  | 1558 | if (!path) | 
|  | 1559 | return -ENOMEM; | 
|  | 1560 |  | 
|  | 1561 | key.objectid = ino; | 
|  | 1562 | key.type = BTRFS_INODE_REF_KEY; | 
|  | 1563 | key.offset = 0; | 
|  | 1564 |  | 
|  | 1565 | ret = btrfs_search_slot_for_read(root, &key, path, 1, 0); | 
|  | 1566 | if (ret < 0) | 
|  | 1567 | goto out; | 
|  | 1568 | if (!ret) | 
|  | 1569 | btrfs_item_key_to_cpu(path->nodes[0], &found_key, | 
|  | 1570 | path->slots[0]); | 
|  | 1571 | if (ret || found_key.objectid != key.objectid || | 
|  | 1572 | found_key.type != key.type) { | 
|  | 1573 | ret = -ENOENT; | 
|  | 1574 | goto out; | 
|  | 1575 | } | 
|  | 1576 |  | 
|  | 1577 | iref = btrfs_item_ptr(path->nodes[0], path->slots[0], | 
|  | 1578 | struct btrfs_inode_ref); | 
|  | 1579 | len = btrfs_inode_ref_name_len(path->nodes[0], iref); | 
|  | 1580 | ret = fs_path_add_from_extent_buffer(name, path->nodes[0], | 
|  | 1581 | (unsigned long)(iref + 1), len); | 
|  | 1582 | if (ret < 0) | 
|  | 1583 | goto out; | 
|  | 1584 | btrfs_release_path(path); | 
|  | 1585 |  | 
|  | 1586 | ret = get_inode_info(root, found_key.offset, NULL, dir_gen, NULL, NULL, | 
| Alexander Block | 85a7b33 | 2012-07-26 23:39:10 +0200 | [diff] [blame] | 1587 | NULL, NULL); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1588 | if (ret < 0) | 
|  | 1589 | goto out; | 
|  | 1590 |  | 
|  | 1591 | *dir = found_key.offset; | 
|  | 1592 |  | 
|  | 1593 | out: | 
|  | 1594 | btrfs_free_path(path); | 
|  | 1595 | return ret; | 
|  | 1596 | } | 
|  | 1597 |  | 
|  | 1598 | static int is_first_ref(struct send_ctx *sctx, | 
|  | 1599 | struct btrfs_root *root, | 
|  | 1600 | u64 ino, u64 dir, | 
|  | 1601 | const char *name, int name_len) | 
|  | 1602 | { | 
|  | 1603 | int ret; | 
|  | 1604 | struct fs_path *tmp_name; | 
|  | 1605 | u64 tmp_dir; | 
|  | 1606 | u64 tmp_dir_gen; | 
|  | 1607 |  | 
|  | 1608 | tmp_name = fs_path_alloc(sctx); | 
|  | 1609 | if (!tmp_name) | 
|  | 1610 | return -ENOMEM; | 
|  | 1611 |  | 
|  | 1612 | ret = get_first_ref(sctx, root, ino, &tmp_dir, &tmp_dir_gen, tmp_name); | 
|  | 1613 | if (ret < 0) | 
|  | 1614 | goto out; | 
|  | 1615 |  | 
| Alexander Block | b9291af | 2012-07-28 11:07:18 +0200 | [diff] [blame] | 1616 | if (dir != tmp_dir || name_len != fs_path_len(tmp_name)) { | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1617 | ret = 0; | 
|  | 1618 | goto out; | 
|  | 1619 | } | 
|  | 1620 |  | 
| Alexander Block | e938c8a | 2012-07-28 16:33:49 +0200 | [diff] [blame] | 1621 | ret = !memcmp(tmp_name->start, name, name_len); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1622 |  | 
|  | 1623 | out: | 
|  | 1624 | fs_path_free(sctx, tmp_name); | 
|  | 1625 | return ret; | 
|  | 1626 | } | 
|  | 1627 |  | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 1628 | /* | 
|  | 1629 | * Used by process_recorded_refs to determine if a new ref would overwrite an | 
|  | 1630 | * already existing ref. In case it detects an overwrite, it returns the | 
|  | 1631 | * inode/gen in who_ino/who_gen. | 
|  | 1632 | * When an overwrite is detected, process_recorded_refs does proper orphanizing | 
|  | 1633 | * to make sure later references to the overwritten inode are possible. | 
|  | 1634 | * Orphanizing is however only required for the first ref of an inode. | 
|  | 1635 | * process_recorded_refs does an additional is_first_ref check to see if | 
|  | 1636 | * orphanizing is really required. | 
|  | 1637 | */ | 
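|  |  | /* | 
|  |  | * Example (hypothetical inode numbers): while processing inode 500, which got | 
|  |  | * the name "dir/a" in the send snapshot, the parent snapshot may still have | 
|  |  | * "dir/a" pointing to inode 700. Since 700 is above send_progress and thus not | 
|  |  | * processed yet, this function reports an overwrite with who_ino=700 and the | 
|  |  | * generation of inode 700 in who_gen. | 
|  |  | */ | 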
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1638 | static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen, | 
|  | 1639 | const char *name, int name_len, | 
|  | 1640 | u64 *who_ino, u64 *who_gen) | 
|  | 1641 | { | 
|  | 1642 | int ret = 0; | 
|  | 1643 | u64 other_inode = 0; | 
|  | 1644 | u8 other_type = 0; | 
|  | 1645 |  | 
|  | 1646 | if (!sctx->parent_root) | 
|  | 1647 | goto out; | 
|  | 1648 |  | 
|  | 1649 | ret = is_inode_existent(sctx, dir, dir_gen); | 
|  | 1650 | if (ret <= 0) | 
|  | 1651 | goto out; | 
|  | 1652 |  | 
|  | 1653 | ret = lookup_dir_item_inode(sctx->parent_root, dir, name, name_len, | 
|  | 1654 | &other_inode, &other_type); | 
|  | 1655 | if (ret < 0 && ret != -ENOENT) | 
|  | 1656 | goto out; | 
|  | 1657 | if (ret) { | 
|  | 1658 | ret = 0; | 
|  | 1659 | goto out; | 
|  | 1660 | } | 
|  | 1661 |  | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 1662 | /* | 
|  | 1663 | * Check if the overwritten ref was already processed. If yes, the ref | 
|  | 1664 | * was already unlinked/moved, so we can safely assume that we will not | 
|  | 1665 | * overwrite anything at this point in time. | 
|  | 1666 | */ | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1667 | if (other_inode > sctx->send_progress) { | 
|  | 1668 | ret = get_inode_info(sctx->parent_root, other_inode, NULL, | 
| Alexander Block | 85a7b33 | 2012-07-26 23:39:10 +0200 | [diff] [blame] | 1669 | who_gen, NULL, NULL, NULL, NULL); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1670 | if (ret < 0) | 
|  | 1671 | goto out; | 
|  | 1672 |  | 
|  | 1673 | ret = 1; | 
|  | 1674 | *who_ino = other_inode; | 
|  | 1675 | } else { | 
|  | 1676 | ret = 0; | 
|  | 1677 | } | 
|  | 1678 |  | 
|  | 1679 | out: | 
|  | 1680 | return ret; | 
|  | 1681 | } | 
|  | 1682 |  | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 1683 | /* | 
|  | 1684 | * Checks if the ref was overwritten by an already processed inode. This is | 
|  | 1685 | * used by __get_cur_name_and_parent to find out if the ref was orphanized and | 
|  | 1686 | * thus the orphan name needs to be used. | 
|  | 1687 | * process_recorded_refs also uses it to avoid unlinking refs that were | 
|  | 1688 | * overwritten. | 
|  | 1689 | */ | 
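|  |  | /* | 
|  |  | * Example (hypothetical inode numbers): with send_progress at 500, suppose the | 
|  |  | * parent snapshot had "dir/a" pointing to inode 700, but in the send snapshot | 
|  |  | * "dir/a" now belongs to inode 260. Because 260 is below send_progress, that | 
|  |  | * ref of inode 700 was already overwritten on the receiving side and 1 is | 
|  |  | * returned. | 
|  |  | */ | 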
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1690 | static int did_overwrite_ref(struct send_ctx *sctx, | 
|  | 1691 | u64 dir, u64 dir_gen, | 
|  | 1692 | u64 ino, u64 ino_gen, | 
|  | 1693 | const char *name, int name_len) | 
|  | 1694 | { | 
|  | 1695 | int ret = 0; | 
|  | 1696 | u64 gen; | 
|  | 1697 | u64 ow_inode; | 
|  | 1698 | u8 other_type; | 
|  | 1699 |  | 
|  | 1700 | if (!sctx->parent_root) | 
|  | 1701 | goto out; | 
|  | 1702 |  | 
|  | 1703 | ret = is_inode_existent(sctx, dir, dir_gen); | 
|  | 1704 | if (ret <= 0) | 
|  | 1705 | goto out; | 
|  | 1706 |  | 
|  | 1707 | /* check if the ref was overwritten by another ref */ | 
|  | 1708 | ret = lookup_dir_item_inode(sctx->send_root, dir, name, name_len, | 
|  | 1709 | &ow_inode, &other_type); | 
|  | 1710 | if (ret < 0 && ret != -ENOENT) | 
|  | 1711 | goto out; | 
|  | 1712 | if (ret) { | 
|  | 1713 | /* was never and will never be overwritten */ | 
|  | 1714 | ret = 0; | 
|  | 1715 | goto out; | 
|  | 1716 | } | 
|  | 1717 |  | 
|  | 1718 | ret = get_inode_info(sctx->send_root, ow_inode, NULL, &gen, NULL, NULL, | 
| Alexander Block | 85a7b33 | 2012-07-26 23:39:10 +0200 | [diff] [blame] | 1719 | NULL, NULL); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1720 | if (ret < 0) | 
|  | 1721 | goto out; | 
|  | 1722 |  | 
|  | 1723 | if (ow_inode == ino && gen == ino_gen) { | 
|  | 1724 | ret = 0; | 
|  | 1725 | goto out; | 
|  | 1726 | } | 
|  | 1727 |  | 
|  | 1728 | /* we know that it is or will be overwritten. check this now */ | 
|  | 1729 | if (ow_inode < sctx->send_progress) | 
|  | 1730 | ret = 1; | 
|  | 1731 | else | 
|  | 1732 | ret = 0; | 
|  | 1733 |  | 
|  | 1734 | out: | 
|  | 1735 | return ret; | 
|  | 1736 | } | 
|  | 1737 |  | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 1738 | /* | 
|  | 1739 | * Same as did_overwrite_ref, but also checks if it is the first ref of an inode | 
|  | 1740 | * that got overwritten. This is used by process_recorded_refs to determine | 
|  | 1741 | * if it has to use the path as returned by get_cur_path or the orphan name. | 
|  | 1742 | */ | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1743 | static int did_overwrite_first_ref(struct send_ctx *sctx, u64 ino, u64 gen) | 
|  | 1744 | { | 
|  | 1745 | int ret = 0; | 
|  | 1746 | struct fs_path *name = NULL; | 
|  | 1747 | u64 dir; | 
|  | 1748 | u64 dir_gen; | 
|  | 1749 |  | 
|  | 1750 | if (!sctx->parent_root) | 
|  | 1751 | goto out; | 
|  | 1752 |  | 
|  | 1753 | name = fs_path_alloc(sctx); | 
|  | 1754 | if (!name) | 
|  | 1755 | return -ENOMEM; | 
|  | 1756 |  | 
|  | 1757 | ret = get_first_ref(sctx, sctx->parent_root, ino, &dir, &dir_gen, name); | 
|  | 1758 | if (ret < 0) | 
|  | 1759 | goto out; | 
|  | 1760 |  | 
|  | 1761 | ret = did_overwrite_ref(sctx, dir, dir_gen, ino, gen, | 
|  | 1762 | name->start, fs_path_len(name)); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1763 |  | 
|  | 1764 | out: | 
|  | 1765 | fs_path_free(sctx, name); | 
|  | 1766 | return ret; | 
|  | 1767 | } | 
|  | 1768 |  | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 1769 | /* | 
|  | 1770 | * Insert a name cache entry. On 32bit kernels the radix tree index is 32bit, | 
|  | 1771 | * so we need to do some special handling in case we have clashes. This function | 
|  | 1772 | * takes care of this with the help of name_cache_entry::radix_list. | 
| Alexander Block | 5dc67d0 | 2012-08-01 12:07:43 +0200 | [diff] [blame] | 1773 | * In case of error, nce is kfreed. | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 1774 | */ | 
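|  |  | /* | 
|  |  | * Cache layout (descriptive note): sctx->name_cache is a radix tree that maps | 
|  |  | * (unsigned long)ino to a list_head. All entries whose inode numbers collide | 
|  |  | * on that truncated key are chained via name_cache_entry::radix_list, and | 
|  |  | * name_cache_search() disambiguates them by comparing ino and gen. In | 
|  |  | * addition, every entry sits on sctx->name_cache_list in LRU order, which is | 
|  |  | * what name_cache_clean_unused() trims. | 
|  |  | */ | 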
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1775 | static int name_cache_insert(struct send_ctx *sctx, | 
|  | 1776 | struct name_cache_entry *nce) | 
|  | 1777 | { | 
|  | 1778 | int ret = 0; | 
| Alexander Block | 7e0926f | 2012-07-28 14:20:58 +0200 | [diff] [blame] | 1779 | struct list_head *nce_head; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1780 |  | 
| Alexander Block | 7e0926f | 2012-07-28 14:20:58 +0200 | [diff] [blame] | 1781 | nce_head = radix_tree_lookup(&sctx->name_cache, | 
|  | 1782 | (unsigned long)nce->ino); | 
|  | 1783 | if (!nce_head) { | 
|  | 1784 | nce_head = kmalloc(sizeof(*nce_head), GFP_NOFS); | 
|  | 1785 | if (!nce_head) | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1786 | return -ENOMEM; | 
| Alexander Block | 7e0926f | 2012-07-28 14:20:58 +0200 | [diff] [blame] | 1787 | INIT_LIST_HEAD(nce_head); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1788 |  | 
| Alexander Block | 7e0926f | 2012-07-28 14:20:58 +0200 | [diff] [blame] | 1789 | ret = radix_tree_insert(&sctx->name_cache, nce->ino, nce_head); | 
| Alexander Block | 5dc67d0 | 2012-08-01 12:07:43 +0200 | [diff] [blame] | 1790 | if (ret < 0) { | 
|  | 1791 | kfree(nce_head); | 
|  | 1792 | kfree(nce); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1793 | return ret; | 
| Alexander Block | 5dc67d0 | 2012-08-01 12:07:43 +0200 | [diff] [blame] | 1794 | } | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1795 | } | 
| Alexander Block | 7e0926f | 2012-07-28 14:20:58 +0200 | [diff] [blame] | 1796 | list_add_tail(&nce->radix_list, nce_head); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1797 | list_add_tail(&nce->list, &sctx->name_cache_list); | 
|  | 1798 | sctx->name_cache_size++; | 
|  | 1799 |  | 
|  | 1800 | return ret; | 
|  | 1801 | } | 
|  | 1802 |  | 
|  | 1803 | static void name_cache_delete(struct send_ctx *sctx, | 
|  | 1804 | struct name_cache_entry *nce) | 
|  | 1805 | { | 
| Alexander Block | 7e0926f | 2012-07-28 14:20:58 +0200 | [diff] [blame] | 1806 | struct list_head *nce_head; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1807 |  | 
| Alexander Block | 7e0926f | 2012-07-28 14:20:58 +0200 | [diff] [blame] | 1808 | nce_head = radix_tree_lookup(&sctx->name_cache, | 
|  | 1809 | (unsigned long)nce->ino); | 
|  | 1810 | BUG_ON(!nce_head); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1811 |  | 
| Alexander Block | 7e0926f | 2012-07-28 14:20:58 +0200 | [diff] [blame] | 1812 | list_del(&nce->radix_list); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1813 | list_del(&nce->list); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1814 | sctx->name_cache_size--; | 
| Alexander Block | 7e0926f | 2012-07-28 14:20:58 +0200 | [diff] [blame] | 1815 |  | 
|  | 1816 | if (list_empty(nce_head)) { | 
|  | 1817 | radix_tree_delete(&sctx->name_cache, (unsigned long)nce->ino); | 
|  | 1818 | kfree(nce_head); | 
|  | 1819 | } | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1820 | } | 
|  | 1821 |  | 
|  | 1822 | static struct name_cache_entry *name_cache_search(struct send_ctx *sctx, | 
|  | 1823 | u64 ino, u64 gen) | 
|  | 1824 | { | 
| Alexander Block | 7e0926f | 2012-07-28 14:20:58 +0200 | [diff] [blame] | 1825 | struct list_head *nce_head; | 
|  | 1826 | struct name_cache_entry *cur; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1827 |  | 
| Alexander Block | 7e0926f | 2012-07-28 14:20:58 +0200 | [diff] [blame] | 1828 | nce_head = radix_tree_lookup(&sctx->name_cache, (unsigned long)ino); | 
|  | 1829 | if (!nce_head) | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1830 | return NULL; | 
|  | 1831 |  | 
| Alexander Block | 7e0926f | 2012-07-28 14:20:58 +0200 | [diff] [blame] | 1832 | list_for_each_entry(cur, nce_head, radix_list) { | 
|  | 1833 | if (cur->ino == ino && cur->gen == gen) | 
|  | 1834 | return cur; | 
|  | 1835 | } | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1836 | return NULL; | 
|  | 1837 | } | 
|  | 1838 |  | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 1839 | /* | 
|  | 1840 | * Removes the entry from the list and adds it back to the end. This marks the | 
|  | 1841 | * entry as recently used so that name_cache_clean_unused does not remove it. | 
|  | 1842 | */ | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1843 | static void name_cache_used(struct send_ctx *sctx, struct name_cache_entry *nce) | 
|  | 1844 | { | 
|  | 1845 | list_del(&nce->list); | 
|  | 1846 | list_add_tail(&nce->list, &sctx->name_cache_list); | 
|  | 1847 | } | 
|  | 1848 |  | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 1849 | /* | 
|  | 1850 | * Remove some entries from the beginning of name_cache_list. | 
|  | 1851 | */ | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1852 | static void name_cache_clean_unused(struct send_ctx *sctx) | 
|  | 1853 | { | 
|  | 1854 | struct name_cache_entry *nce; | 
|  | 1855 |  | 
|  | 1856 | if (sctx->name_cache_size < SEND_CTX_NAME_CACHE_CLEAN_SIZE) | 
|  | 1857 | return; | 
|  | 1858 |  | 
|  | 1859 | while (sctx->name_cache_size > SEND_CTX_MAX_NAME_CACHE_SIZE) { | 
|  | 1860 | nce = list_entry(sctx->name_cache_list.next, | 
|  | 1861 | struct name_cache_entry, list); | 
|  | 1862 | name_cache_delete(sctx, nce); | 
|  | 1863 | kfree(nce); | 
|  | 1864 | } | 
|  | 1865 | } | 
|  | 1866 |  | 
|  | 1867 | static void name_cache_free(struct send_ctx *sctx) | 
|  | 1868 | { | 
|  | 1869 | struct name_cache_entry *nce; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1870 |  | 
| Alexander Block | e938c8a | 2012-07-28 16:33:49 +0200 | [diff] [blame] | 1871 | while (!list_empty(&sctx->name_cache_list)) { | 
|  | 1872 | nce = list_entry(sctx->name_cache_list.next, | 
|  | 1873 | struct name_cache_entry, list); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1874 | name_cache_delete(sctx, nce); | 
| Alexander Block | 17589bd | 2012-07-28 14:13:35 +0200 | [diff] [blame] | 1875 | kfree(nce); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1876 | } | 
|  | 1877 | } | 
|  | 1878 |  | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 1879 | /* | 
|  | 1880 | * Used by get_cur_path for each ref up to the root. | 
|  | 1881 | * Returns 0 if it succeeded. | 
|  | 1882 | * Returns 1 if the inode does not exist or got overwritten. In that case, the | 
|  | 1883 | * name is an orphan name. This instructs get_cur_path to stop iterating. If 1 | 
|  | 1884 | * is returned, parent_ino/parent_gen are not guaranteed to be valid. | 
|  | 1885 | * Returns <0 in case of error. | 
|  | 1886 | */ | 
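|  |  | /* | 
|  |  | * Note: get_cur_path() calls this once per path component, so the result is | 
|  |  | * cached below (at out_cache) even when 1 is returned; a later lookup of the | 
|  |  | * same ino/gen can then skip the get_first_ref()/did_overwrite_ref() work. | 
|  |  | */ | 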
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1887 | static int __get_cur_name_and_parent(struct send_ctx *sctx, | 
|  | 1888 | u64 ino, u64 gen, | 
|  | 1889 | u64 *parent_ino, | 
|  | 1890 | u64 *parent_gen, | 
|  | 1891 | struct fs_path *dest) | 
|  | 1892 | { | 
|  | 1893 | int ret; | 
|  | 1894 | int nce_ret; | 
|  | 1895 | struct btrfs_path *path = NULL; | 
|  | 1896 | struct name_cache_entry *nce = NULL; | 
|  | 1897 |  | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 1898 | /* | 
|  | 1899 | * First check if we already did a call to this function with the same | 
|  | 1900 | * ino/gen. If yes, check if the cache entry is still up-to-date. If it is, | 
|  | 1901 | * return the cached result. | 
|  | 1902 | */ | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1903 | nce = name_cache_search(sctx, ino, gen); | 
|  | 1904 | if (nce) { | 
|  | 1905 | if (ino < sctx->send_progress && nce->need_later_update) { | 
|  | 1906 | name_cache_delete(sctx, nce); | 
|  | 1907 | kfree(nce); | 
|  | 1908 | nce = NULL; | 
|  | 1909 | } else { | 
|  | 1910 | name_cache_used(sctx, nce); | 
|  | 1911 | *parent_ino = nce->parent_ino; | 
|  | 1912 | *parent_gen = nce->parent_gen; | 
|  | 1913 | ret = fs_path_add(dest, nce->name, nce->name_len); | 
|  | 1914 | if (ret < 0) | 
|  | 1915 | goto out; | 
|  | 1916 | ret = nce->ret; | 
|  | 1917 | goto out; | 
|  | 1918 | } | 
|  | 1919 | } | 
|  | 1920 |  | 
|  | 1921 | path = alloc_path_for_send(); | 
|  | 1922 | if (!path) | 
|  | 1923 | return -ENOMEM; | 
|  | 1924 |  | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 1925 | /* | 
|  | 1926 | * If the inode does not exist yet, add the orphan name and return 1. | 
|  | 1927 | * This should only happen for the parent dir that we determine in | 
|  | 1928 | * __record_new_ref. | 
|  | 1929 | */ | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1930 | ret = is_inode_existent(sctx, ino, gen); | 
|  | 1931 | if (ret < 0) | 
|  | 1932 | goto out; | 
|  | 1933 |  | 
|  | 1934 | if (!ret) { | 
|  | 1935 | ret = gen_unique_name(sctx, ino, gen, dest); | 
|  | 1936 | if (ret < 0) | 
|  | 1937 | goto out; | 
|  | 1938 | ret = 1; | 
|  | 1939 | goto out_cache; | 
|  | 1940 | } | 
|  | 1941 |  | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 1942 | /* | 
|  | 1943 | * Depending on whether the inode was already processed or not, use | 
|  | 1944 | * send_root or parent_root for ref lookup. | 
|  | 1945 | */ | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1946 | if (ino < sctx->send_progress) | 
|  | 1947 | ret = get_first_ref(sctx, sctx->send_root, ino, | 
|  | 1948 | parent_ino, parent_gen, dest); | 
|  | 1949 | else | 
|  | 1950 | ret = get_first_ref(sctx, sctx->parent_root, ino, | 
|  | 1951 | parent_ino, parent_gen, dest); | 
|  | 1952 | if (ret < 0) | 
|  | 1953 | goto out; | 
|  | 1954 |  | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 1955 | /* | 
|  | 1956 | * Check if the ref was overwritten by an inode's ref that was processed | 
|  | 1957 | * earlier. If yes, treat as orphan and return 1. | 
|  | 1958 | */ | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1959 | ret = did_overwrite_ref(sctx, *parent_ino, *parent_gen, ino, gen, | 
|  | 1960 | dest->start, dest->end - dest->start); | 
|  | 1961 | if (ret < 0) | 
|  | 1962 | goto out; | 
|  | 1963 | if (ret) { | 
|  | 1964 | fs_path_reset(dest); | 
|  | 1965 | ret = gen_unique_name(sctx, ino, gen, dest); | 
|  | 1966 | if (ret < 0) | 
|  | 1967 | goto out; | 
|  | 1968 | ret = 1; | 
|  | 1969 | } | 
|  | 1970 |  | 
|  | 1971 | out_cache: | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 1972 | /* | 
|  | 1973 | * Store the result of the lookup in the name cache. | 
|  | 1974 | */ | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1975 | nce = kmalloc(sizeof(*nce) + fs_path_len(dest) + 1, GFP_NOFS); | 
|  | 1976 | if (!nce) { | 
|  | 1977 | ret = -ENOMEM; | 
|  | 1978 | goto out; | 
|  | 1979 | } | 
|  | 1980 |  | 
|  | 1981 | nce->ino = ino; | 
|  | 1982 | nce->gen = gen; | 
|  | 1983 | nce->parent_ino = *parent_ino; | 
|  | 1984 | nce->parent_gen = *parent_gen; | 
|  | 1985 | nce->name_len = fs_path_len(dest); | 
|  | 1986 | nce->ret = ret; | 
|  | 1987 | strcpy(nce->name, dest->start); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1988 |  | 
|  | 1989 | if (ino < sctx->send_progress) | 
|  | 1990 | nce->need_later_update = 0; | 
|  | 1991 | else | 
|  | 1992 | nce->need_later_update = 1; | 
|  | 1993 |  | 
|  | 1994 | nce_ret = name_cache_insert(sctx, nce); | 
|  | 1995 | if (nce_ret < 0) | 
|  | 1996 | ret = nce_ret; | 
|  | 1997 | name_cache_clean_unused(sctx); | 
|  | 1998 |  | 
|  | 1999 | out: | 
|  | 2000 | btrfs_free_path(path); | 
|  | 2001 | return ret; | 
|  | 2002 | } | 
|  | 2003 |  | 
|  | 2004 | /* | 
|  | 2005 | * Magic happens here. This function returns the first ref to an inode as it | 
|  | 2006 | * would look like while receiving the stream at this point in time. | 
|  | 2007 | * We walk the path up to the root. For every inode in between, we check if it | 
|  | 2008 | * was already processed/sent. If yes, we continue with the parent as found | 
|  | 2009 | * in send_root. If not, we continue with the parent as found in parent_root. | 
|  | 2010 | * If we encounter an inode that was deleted at this point in time, we use the | 
|  | 2011 | * inodes "orphan" name instead of the real name and stop. Same with new inodes | 
|  | 2012 | * that were not created yet and overwritten inodes/refs. | 
|  | 2013 | * | 
|  | 2014 | * When do we have orphan inodes: | 
|  | 2015 | * 1. When an inode is freshly created and thus no valid refs are available yet | 
|  | 2016 | * 2. When a directory lost all its refs (deleted) but still has dir items | 
|  | 2017 | *    inside which were not processed yet (pending for move/delete). If anyone | 
|  | 2018 | *    tried to get the path to the dir items, it would get a path inside that | 
|  | 2019 | *    orphan directory. | 
|  | 2020 | * 3. When an inode is moved around or gets new links, it may overwrite the ref | 
|  | 2021 | *    of an unprocessed inode. If in that case the first ref would be | 
|  | 2022 | *    overwritten, the overwritten inode gets "orphanized". Later when we | 
|  | 2023 | *    process this overwritten inode, it is restored at a new place by moving | 
|  | 2024 | *    the orphan inode. | 
|  | 2025 | * | 
|  | 2026 | * sctx->send_progress tells this function at which point in time receiving | 
|  | 2027 | * would be. | 
|  | 2028 | */ | 
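|  |  | /* | 
|  |  | * Worked example (hypothetical layout): to resolve inode 500 living at "a/b/c", | 
|  |  | * the loop below asks __get_cur_name_and_parent() for "c" and its parent "b", | 
|  |  | * then for "b" and its parent "a", then for "a" whose parent is | 
|  |  | * BTRFS_FIRST_FREE_OBJECTID, which ends the walk. Components are discovered | 
|  |  | * child-first, so they are collected in a reversed fs_path and unreversed once | 
|  |  | * at the end. | 
|  |  | */ | 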
|  | 2029 | static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen, | 
|  | 2030 | struct fs_path *dest) | 
|  | 2031 | { | 
|  | 2032 | int ret = 0; | 
|  | 2033 | struct fs_path *name = NULL; | 
|  | 2034 | u64 parent_inode = 0; | 
|  | 2035 | u64 parent_gen = 0; | 
|  | 2036 | int stop = 0; | 
|  | 2037 |  | 
|  | 2038 | name = fs_path_alloc(sctx); | 
|  | 2039 | if (!name) { | 
|  | 2040 | ret = -ENOMEM; | 
|  | 2041 | goto out; | 
|  | 2042 | } | 
|  | 2043 |  | 
|  | 2044 | dest->reversed = 1; | 
|  | 2045 | fs_path_reset(dest); | 
|  | 2046 |  | 
|  | 2047 | while (!stop && ino != BTRFS_FIRST_FREE_OBJECTID) { | 
|  | 2048 | fs_path_reset(name); | 
|  | 2049 |  | 
|  | 2050 | ret = __get_cur_name_and_parent(sctx, ino, gen, | 
|  | 2051 | &parent_inode, &parent_gen, name); | 
|  | 2052 | if (ret < 0) | 
|  | 2053 | goto out; | 
|  | 2054 | if (ret) | 
|  | 2055 | stop = 1; | 
|  | 2056 |  | 
|  | 2057 | ret = fs_path_add_path(dest, name); | 
|  | 2058 | if (ret < 0) | 
|  | 2059 | goto out; | 
|  | 2060 |  | 
|  | 2061 | ino = parent_inode; | 
|  | 2062 | gen = parent_gen; | 
|  | 2063 | } | 
|  | 2064 |  | 
|  | 2065 | out: | 
|  | 2066 | fs_path_free(sctx, name); | 
|  | 2067 | if (!ret) | 
|  | 2068 | fs_path_unreverse(dest); | 
|  | 2069 | return ret; | 
|  | 2070 | } | 
|  | 2071 |  | 
|  | 2072 | /* | 
|  | 2073 | * Called for regular files when sending extents data. Opens a struct file | 
|  | 2074 | * to read from the file. | 
|  | 2075 | */ | 
|  | 2076 | static int open_cur_inode_file(struct send_ctx *sctx) | 
|  | 2077 | { | 
|  | 2078 | int ret = 0; | 
|  | 2079 | struct btrfs_key key; | 
| Linus Torvalds | e2aed8d | 2012-07-26 14:48:55 -0700 | [diff] [blame] | 2080 | struct path path; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2081 | struct inode *inode; | 
|  | 2082 | struct dentry *dentry; | 
|  | 2083 | struct file *filp; | 
|  | 2084 | int new = 0; | 
|  | 2085 |  | 
|  | 2086 | if (sctx->cur_inode_filp) | 
|  | 2087 | goto out; | 
|  | 2088 |  | 
|  | 2089 | key.objectid = sctx->cur_ino; | 
|  | 2090 | key.type = BTRFS_INODE_ITEM_KEY; | 
|  | 2091 | key.offset = 0; | 
|  | 2092 |  | 
|  | 2093 | inode = btrfs_iget(sctx->send_root->fs_info->sb, &key, sctx->send_root, | 
|  | 2094 | &new); | 
|  | 2095 | if (IS_ERR(inode)) { | 
|  | 2096 | ret = PTR_ERR(inode); | 
|  | 2097 | goto out; | 
|  | 2098 | } | 
|  | 2099 |  | 
|  | 2100 | dentry = d_obtain_alias(inode); | 
|  | 2101 | inode = NULL; | 
|  | 2102 | if (IS_ERR(dentry)) { | 
|  | 2103 | ret = PTR_ERR(dentry); | 
|  | 2104 | goto out; | 
|  | 2105 | } | 
|  | 2106 |  | 
| Linus Torvalds | e2aed8d | 2012-07-26 14:48:55 -0700 | [diff] [blame] | 2107 | path.mnt = sctx->mnt; | 
|  | 2108 | path.dentry = dentry; | 
|  | 2109 | filp = dentry_open(&path, O_RDONLY | O_LARGEFILE, current_cred()); | 
|  | 2110 | dput(dentry); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2111 | dentry = NULL; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2112 | if (IS_ERR(filp)) { | 
|  | 2113 | ret = PTR_ERR(filp); | 
|  | 2114 | goto out; | 
|  | 2115 | } | 
|  | 2116 | sctx->cur_inode_filp = filp; | 
|  | 2117 |  | 
|  | 2118 | out: | 
|  | 2119 | /* | 
|  | 2120 | * no extra put (dput/fput) is required here, as every vfs op | 
|  | 2121 | * above cleans up after itself on failure | 
|  | 2122 | */ | 
|  | 2123 | return ret; | 
|  | 2124 | } | 
|  | 2125 |  | 
|  | 2126 | /* | 
|  | 2127 | * Closes the struct file that was created in open_cur_inode_file | 
|  | 2128 | */ | 
|  | 2129 | static int close_cur_inode_file(struct send_ctx *sctx) | 
|  | 2130 | { | 
|  | 2131 | int ret = 0; | 
|  | 2132 |  | 
|  | 2133 | if (!sctx->cur_inode_filp) | 
|  | 2134 | goto out; | 
|  | 2135 |  | 
|  | 2136 | ret = filp_close(sctx->cur_inode_filp, NULL); | 
|  | 2137 | sctx->cur_inode_filp = NULL; | 
|  | 2138 |  | 
|  | 2139 | out: | 
|  | 2140 | return ret; | 
|  | 2141 | } | 
|  | 2142 |  | 
|  | 2143 | /* | 
|  | 2144 | * Sends a BTRFS_SEND_C_SUBVOL command/item to userspace | 
|  | 2145 | */ | 
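|  |  | /* | 
|  |  | * Descriptive note: an incremental send emits BTRFS_SEND_C_SNAPSHOT together | 
|  |  | * with the subvolume path, UUID and ctransid of the send root plus the | 
|  |  | * CLONE_UUID/CLONE_CTRANSID of the parent root; a full send emits | 
|  |  | * BTRFS_SEND_C_SUBVOL without the clone attributes. | 
|  |  | */ | 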
|  | 2146 | static int send_subvol_begin(struct send_ctx *sctx) | 
|  | 2147 | { | 
|  | 2148 | int ret; | 
|  | 2149 | struct btrfs_root *send_root = sctx->send_root; | 
|  | 2150 | struct btrfs_root *parent_root = sctx->parent_root; | 
|  | 2151 | struct btrfs_path *path; | 
|  | 2152 | struct btrfs_key key; | 
|  | 2153 | struct btrfs_root_ref *ref; | 
|  | 2154 | struct extent_buffer *leaf; | 
|  | 2155 | char *name = NULL; | 
|  | 2156 | int namelen; | 
|  | 2157 |  | 
|  | 2158 | path = alloc_path_for_send(); | 
|  | 2159 | if (!path) | 
|  | 2160 | return -ENOMEM; | 
|  | 2161 |  | 
|  | 2162 | name = kmalloc(BTRFS_PATH_NAME_MAX, GFP_NOFS); | 
|  | 2163 | if (!name) { | 
|  | 2164 | btrfs_free_path(path); | 
|  | 2165 | return -ENOMEM; | 
|  | 2166 | } | 
|  | 2167 |  | 
|  | 2168 | key.objectid = send_root->objectid; | 
|  | 2169 | key.type = BTRFS_ROOT_BACKREF_KEY; | 
|  | 2170 | key.offset = 0; | 
|  | 2171 |  | 
|  | 2172 | ret = btrfs_search_slot_for_read(send_root->fs_info->tree_root, | 
|  | 2173 | &key, path, 1, 0); | 
|  | 2174 | if (ret < 0) | 
|  | 2175 | goto out; | 
|  | 2176 | if (ret) { | 
|  | 2177 | ret = -ENOENT; | 
|  | 2178 | goto out; | 
|  | 2179 | } | 
|  | 2180 |  | 
|  | 2181 | leaf = path->nodes[0]; | 
|  | 2182 | btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); | 
|  | 2183 | if (key.type != BTRFS_ROOT_BACKREF_KEY || | 
|  | 2184 | key.objectid != send_root->objectid) { | 
|  | 2185 | ret = -ENOENT; | 
|  | 2186 | goto out; | 
|  | 2187 | } | 
|  | 2188 | ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); | 
|  | 2189 | namelen = btrfs_root_ref_name_len(leaf, ref); | 
|  | 2190 | read_extent_buffer(leaf, name, (unsigned long)(ref + 1), namelen); | 
|  | 2191 | btrfs_release_path(path); | 
|  | 2192 |  | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2193 | if (parent_root) { | 
|  | 2194 | ret = begin_cmd(sctx, BTRFS_SEND_C_SNAPSHOT); | 
|  | 2195 | if (ret < 0) | 
|  | 2196 | goto out; | 
|  | 2197 | } else { | 
|  | 2198 | ret = begin_cmd(sctx, BTRFS_SEND_C_SUBVOL); | 
|  | 2199 | if (ret < 0) | 
|  | 2200 | goto out; | 
|  | 2201 | } | 
|  | 2202 |  | 
|  | 2203 | TLV_PUT_STRING(sctx, BTRFS_SEND_A_PATH, name, namelen); | 
|  | 2204 | TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID, | 
|  | 2205 | sctx->send_root->root_item.uuid); | 
|  | 2206 | TLV_PUT_U64(sctx, BTRFS_SEND_A_CTRANSID, | 
|  | 2207 | sctx->send_root->root_item.ctransid); | 
|  | 2208 | if (parent_root) { | 
|  | 2209 | TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID, | 
|  | 2210 | sctx->parent_root->root_item.uuid); | 
|  | 2211 | TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID, | 
|  | 2212 | sctx->parent_root->root_item.ctransid); | 
|  | 2213 | } | 
|  | 2214 |  | 
|  | 2215 | ret = send_cmd(sctx); | 
|  | 2216 |  | 
|  | 2217 | tlv_put_failure: | 
|  | 2218 | out: | 
|  | 2219 | btrfs_free_path(path); | 
|  | 2220 | kfree(name); | 
|  | 2221 | return ret; | 
|  | 2222 | } | 
|  | 2223 |  | 
|  | 2224 | static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size) | 
|  | 2225 | { | 
|  | 2226 | int ret = 0; | 
|  | 2227 | struct fs_path *p; | 
|  | 2228 |  | 
|  | 2229 | verbose_printk("btrfs: send_truncate %llu size=%llu\n", ino, size); | 
|  | 2230 |  | 
|  | 2231 | p = fs_path_alloc(sctx); | 
|  | 2232 | if (!p) | 
|  | 2233 | return -ENOMEM; | 
|  | 2234 |  | 
|  | 2235 | ret = begin_cmd(sctx, BTRFS_SEND_C_TRUNCATE); | 
|  | 2236 | if (ret < 0) | 
|  | 2237 | goto out; | 
|  | 2238 |  | 
|  | 2239 | ret = get_cur_path(sctx, ino, gen, p); | 
|  | 2240 | if (ret < 0) | 
|  | 2241 | goto out; | 
|  | 2242 | TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); | 
|  | 2243 | TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, size); | 
|  | 2244 |  | 
|  | 2245 | ret = send_cmd(sctx); | 
|  | 2246 |  | 
|  | 2247 | tlv_put_failure: | 
|  | 2248 | out: | 
|  | 2249 | fs_path_free(sctx, p); | 
|  | 2250 | return ret; | 
|  | 2251 | } | 
|  | 2252 |  | 
|  | 2253 | static int send_chmod(struct send_ctx *sctx, u64 ino, u64 gen, u64 mode) | 
|  | 2254 | { | 
|  | 2255 | int ret = 0; | 
|  | 2256 | struct fs_path *p; | 
|  | 2257 |  | 
|  | 2258 | verbose_printk("btrfs: send_chmod %llu mode=%llu\n", ino, mode); | 
|  | 2259 |  | 
|  | 2260 | p = fs_path_alloc(sctx); | 
|  | 2261 | if (!p) | 
|  | 2262 | return -ENOMEM; | 
|  | 2263 |  | 
|  | 2264 | ret = begin_cmd(sctx, BTRFS_SEND_C_CHMOD); | 
|  | 2265 | if (ret < 0) | 
|  | 2266 | goto out; | 
|  | 2267 |  | 
|  | 2268 | ret = get_cur_path(sctx, ino, gen, p); | 
|  | 2269 | if (ret < 0) | 
|  | 2270 | goto out; | 
|  | 2271 | TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); | 
|  | 2272 | TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode & 07777); | 
|  | 2273 |  | 
|  | 2274 | ret = send_cmd(sctx); | 
|  | 2275 |  | 
|  | 2276 | tlv_put_failure: | 
|  | 2277 | out: | 
|  | 2278 | fs_path_free(sctx, p); | 
|  | 2279 | return ret; | 
|  | 2280 | } | 
|  | 2281 |  | 
|  | 2282 | static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 uid, u64 gid) | 
|  | 2283 | { | 
|  | 2284 | int ret = 0; | 
|  | 2285 | struct fs_path *p; | 
|  | 2286 |  | 
|  | 2287 | verbose_printk("btrfs: send_chown %llu uid=%llu, gid=%llu\n", ino, uid, gid); | 
|  | 2288 |  | 
|  | 2289 | p = fs_path_alloc(sctx); | 
|  | 2290 | if (!p) | 
|  | 2291 | return -ENOMEM; | 
|  | 2292 |  | 
|  | 2293 | ret = begin_cmd(sctx, BTRFS_SEND_C_CHOWN); | 
|  | 2294 | if (ret < 0) | 
|  | 2295 | goto out; | 
|  | 2296 |  | 
|  | 2297 | ret = get_cur_path(sctx, ino, gen, p); | 
|  | 2298 | if (ret < 0) | 
|  | 2299 | goto out; | 
|  | 2300 | TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); | 
|  | 2301 | TLV_PUT_U64(sctx, BTRFS_SEND_A_UID, uid); | 
|  | 2302 | TLV_PUT_U64(sctx, BTRFS_SEND_A_GID, gid); | 
|  | 2303 |  | 
|  | 2304 | ret = send_cmd(sctx); | 
|  | 2305 |  | 
|  | 2306 | tlv_put_failure: | 
|  | 2307 | out: | 
|  | 2308 | fs_path_free(sctx, p); | 
|  | 2309 | return ret; | 
|  | 2310 | } | 
|  | 2311 |  | 
|  | 2312 | static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen) | 
|  | 2313 | { | 
|  | 2314 | int ret = 0; | 
|  | 2315 | struct fs_path *p = NULL; | 
|  | 2316 | struct btrfs_inode_item *ii; | 
|  | 2317 | struct btrfs_path *path = NULL; | 
|  | 2318 | struct extent_buffer *eb; | 
|  | 2319 | struct btrfs_key key; | 
|  | 2320 | int slot; | 
|  | 2321 |  | 
|  | 2322 | verbose_printk("btrfs: send_utimes %llu\n", ino); | 
|  | 2323 |  | 
|  | 2324 | p = fs_path_alloc(sctx); | 
|  | 2325 | if (!p) | 
|  | 2326 | return -ENOMEM; | 
|  | 2327 |  | 
|  | 2328 | path = alloc_path_for_send(); | 
|  | 2329 | if (!path) { | 
|  | 2330 | ret = -ENOMEM; | 
|  | 2331 | goto out; | 
|  | 2332 | } | 
|  | 2333 |  | 
|  | 2334 | key.objectid = ino; | 
|  | 2335 | key.type = BTRFS_INODE_ITEM_KEY; | 
|  | 2336 | key.offset = 0; | 
|  | 2337 | ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0); | 
|  | 2338 | if (ret < 0) | 
|  | 2339 | goto out; | 
|  | 2340 |  | 
|  | 2341 | eb = path->nodes[0]; | 
|  | 2342 | slot = path->slots[0]; | 
|  | 2343 | ii = btrfs_item_ptr(eb, slot, struct btrfs_inode_item); | 
|  | 2344 |  | 
|  | 2345 | ret = begin_cmd(sctx, BTRFS_SEND_C_UTIMES); | 
|  | 2346 | if (ret < 0) | 
|  | 2347 | goto out; | 
|  | 2348 |  | 
|  | 2349 | ret = get_cur_path(sctx, ino, gen, p); | 
|  | 2350 | if (ret < 0) | 
|  | 2351 | goto out; | 
|  | 2352 | TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); | 
|  | 2353 | TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb, | 
|  | 2354 | btrfs_inode_atime(ii)); | 
|  | 2355 | TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb, | 
|  | 2356 | btrfs_inode_mtime(ii)); | 
|  | 2357 | TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb, | 
|  | 2358 | btrfs_inode_ctime(ii)); | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 2359 | /* TODO Add otime support when the otime patches get into upstream */ | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2360 |  | 
|  | 2361 | ret = send_cmd(sctx); | 
|  | 2362 |  | 
|  | 2363 | tlv_put_failure: | 
|  | 2364 | out: | 
|  | 2365 | fs_path_free(sctx, p); | 
|  | 2366 | btrfs_free_path(path); | 
|  | 2367 | return ret; | 
|  | 2368 | } | 
|  | 2369 |  | 
|  | 2370 | /* | 
|  | 2371 | * Sends a BTRFS_SEND_C_MKXXX or BTRFS_SEND_C_SYMLINK command to user space. We | 
|  | 2372 | * don't have a valid path yet because we did not process the refs yet. So the | 
|  | 2373 | * inode is created as an orphan. | 
|  | 2374 | */ | 
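|  |  | /* | 
|  |  | * Descriptive note: the inode type selects the command (S_ISREG -> MKFILE, | 
|  |  | * S_ISDIR -> MKDIR, S_ISLNK -> SYMLINK with a PATH_LINK attribute holding the | 
|  |  | * link target, S_ISCHR/S_ISBLK -> MKNOD, S_ISFIFO -> MKFIFO, S_ISSOCK -> | 
|  |  | * MKSOCK, the latter four with an RDEV attribute). The path sent is always the | 
|  |  | * orphan name from gen_unique_name(). | 
|  |  | */ | 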
| Alexander Block | 1f4692d | 2012-07-28 10:42:24 +0200 | [diff] [blame] | 2375 | static int send_create_inode(struct send_ctx *sctx, u64 ino) | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2376 | { | 
|  | 2377 | int ret = 0; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2378 | struct fs_path *p; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2379 | int cmd; | 
| Alexander Block | 1f4692d | 2012-07-28 10:42:24 +0200 | [diff] [blame] | 2380 | u64 gen; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2381 | u64 mode; | 
| Alexander Block | 1f4692d | 2012-07-28 10:42:24 +0200 | [diff] [blame] | 2382 | u64 rdev; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2383 |  | 
| Alexander Block | 1f4692d | 2012-07-28 10:42:24 +0200 | [diff] [blame] | 2384 | verbose_printk("btrfs: send_create_inode %llu\n", ino); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2385 |  | 
|  | 2386 | p = fs_path_alloc(sctx); | 
|  | 2387 | if (!p) | 
|  | 2388 | return -ENOMEM; | 
|  | 2389 |  | 
| Alexander Block | 1f4692d | 2012-07-28 10:42:24 +0200 | [diff] [blame] | 2390 | ret = get_inode_info(sctx->send_root, ino, NULL, &gen, &mode, NULL, | 
|  | 2391 | NULL, &rdev); | 
|  | 2392 | if (ret < 0) | 
|  | 2393 | goto out; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2394 |  | 
| Alexander Block | e938c8a | 2012-07-28 16:33:49 +0200 | [diff] [blame] | 2395 | if (S_ISREG(mode)) { | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2396 | cmd = BTRFS_SEND_C_MKFILE; | 
| Alexander Block | e938c8a | 2012-07-28 16:33:49 +0200 | [diff] [blame] | 2397 | } else if (S_ISDIR(mode)) { | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2398 | cmd = BTRFS_SEND_C_MKDIR; | 
| Alexander Block | e938c8a | 2012-07-28 16:33:49 +0200 | [diff] [blame] | 2399 | } else if (S_ISLNK(mode)) { | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2400 | cmd = BTRFS_SEND_C_SYMLINK; | 
| Alexander Block | e938c8a | 2012-07-28 16:33:49 +0200 | [diff] [blame] | 2401 | } else if (S_ISCHR(mode) || S_ISBLK(mode)) { | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2402 | cmd = BTRFS_SEND_C_MKNOD; | 
| Alexander Block | e938c8a | 2012-07-28 16:33:49 +0200 | [diff] [blame] | 2403 | } else if (S_ISFIFO(mode)) { | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2404 | cmd = BTRFS_SEND_C_MKFIFO; | 
| Alexander Block | e938c8a | 2012-07-28 16:33:49 +0200 | [diff] [blame] | 2405 | } else if (S_ISSOCK(mode)) { | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2406 | cmd = BTRFS_SEND_C_MKSOCK; | 
| Alexander Block | e938c8a | 2012-07-28 16:33:49 +0200 | [diff] [blame] | 2407 | } else { | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2408 | printk(KERN_WARNING "btrfs: unexpected inode type %o", | 
|  | 2409 | (int)(mode & S_IFMT)); | 
|  | 2410 | ret = -ENOTSUPP; | 
|  | 2411 | goto out; | 
|  | 2412 | } | 
|  | 2413 |  | 
|  | 2414 | ret = begin_cmd(sctx, cmd); | 
|  | 2415 | if (ret < 0) | 
|  | 2416 | goto out; | 
|  | 2417 |  | 
| Alexander Block | 1f4692d | 2012-07-28 10:42:24 +0200 | [diff] [blame] | 2418 | ret = gen_unique_name(sctx, ino, gen, p); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2419 | if (ret < 0) | 
|  | 2420 | goto out; | 
|  | 2421 |  | 
|  | 2422 | TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); | 
| Alexander Block | 1f4692d | 2012-07-28 10:42:24 +0200 | [diff] [blame] | 2423 | TLV_PUT_U64(sctx, BTRFS_SEND_A_INO, ino); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2424 |  | 
|  | 2425 | if (S_ISLNK(mode)) { | 
|  | 2426 | fs_path_reset(p); | 
| Alexander Block | 1f4692d | 2012-07-28 10:42:24 +0200 | [diff] [blame] | 2427 | ret = read_symlink(sctx, sctx->send_root, ino, p); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2428 | if (ret < 0) | 
|  | 2429 | goto out; | 
|  | 2430 | TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, p); | 
|  | 2431 | } else if (S_ISCHR(mode) || S_ISBLK(mode) || | 
|  | 2432 | S_ISFIFO(mode) || S_ISSOCK(mode)) { | 
| Alexander Block | 1f4692d | 2012-07-28 10:42:24 +0200 | [diff] [blame] | 2433 | TLV_PUT_U64(sctx, BTRFS_SEND_A_RDEV, rdev); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2434 | } | 
|  | 2435 |  | 
|  | 2436 | ret = send_cmd(sctx); | 
|  | 2437 | if (ret < 0) | 
|  | 2438 | goto out; | 
|  | 2439 |  | 
|  | 2440 |  | 
|  | 2441 | tlv_put_failure: | 
|  | 2442 | out: | 
|  | 2443 | fs_path_free(sctx, p); | 
|  | 2444 | return ret; | 
|  | 2445 | } | 
|  | 2446 |  | 
| Alexander Block | 1f4692d | 2012-07-28 10:42:24 +0200 | [diff] [blame] | 2447 | /* | 
|  | 2448 | * We need some special handling for inodes that get processed before their | 
|  | 2449 | * parent directory got created. See process_recorded_refs for details. | 
|  | 2450 | * This function checks if we already created the dir out of order. | 
|  | 2451 | */ | 
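|  |  | /* | 
|  |  | * Example (hypothetical inode numbers): a new file with inode 260 may live in | 
|  |  | * a new directory with inode 261. The file is processed first, so | 
|  |  | * process_recorded_refs creates the directory ahead of time. When inode 261 is | 
|  |  | * processed later, finding a dir item that points to an inode below | 
|  |  | * send_progress tells us that the mkdir already happened. | 
|  |  | */ | 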
|  | 2452 | static int did_create_dir(struct send_ctx *sctx, u64 dir) | 
|  | 2453 | { | 
|  | 2454 | int ret = 0; | 
|  | 2455 | struct btrfs_path *path = NULL; | 
|  | 2456 | struct btrfs_key key; | 
|  | 2457 | struct btrfs_key found_key; | 
|  | 2458 | struct btrfs_key di_key; | 
|  | 2459 | struct extent_buffer *eb; | 
|  | 2460 | struct btrfs_dir_item *di; | 
|  | 2461 | int slot; | 
|  | 2462 |  | 
|  | 2463 | path = alloc_path_for_send(); | 
|  | 2464 | if (!path) { | 
|  | 2465 | ret = -ENOMEM; | 
|  | 2466 | goto out; | 
|  | 2467 | } | 
|  | 2468 |  | 
|  | 2469 | key.objectid = dir; | 
|  | 2470 | key.type = BTRFS_DIR_INDEX_KEY; | 
|  | 2471 | key.offset = 0; | 
|  | 2472 | while (1) { | 
|  | 2473 | ret = btrfs_search_slot_for_read(sctx->send_root, &key, path, | 
|  | 2474 | 1, 0); | 
|  | 2475 | if (ret < 0) | 
|  | 2476 | goto out; | 
|  | 2477 | if (!ret) { | 
|  | 2478 | eb = path->nodes[0]; | 
|  | 2479 | slot = path->slots[0]; | 
|  | 2480 | btrfs_item_key_to_cpu(eb, &found_key, slot); | 
|  | 2481 | } | 
|  | 2482 | if (ret || found_key.objectid != key.objectid || | 
|  | 2483 | found_key.type != key.type) { | 
|  | 2484 | ret = 0; | 
|  | 2485 | goto out; | 
|  | 2486 | } | 
|  | 2487 |  | 
|  | 2488 | di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item); | 
|  | 2489 | btrfs_dir_item_key_to_cpu(eb, di, &di_key); | 
|  | 2490 |  | 
|  | 2491 | if (di_key.objectid < sctx->send_progress) { | 
|  | 2492 | ret = 1; | 
|  | 2493 | goto out; | 
|  | 2494 | } | 
|  | 2495 |  | 
|  | 2496 | key.offset = found_key.offset + 1; | 
|  | 2497 | btrfs_release_path(path); | 
|  | 2498 | } | 
|  | 2499 |  | 
|  | 2500 | out: | 
|  | 2501 | btrfs_free_path(path); | 
|  | 2502 | return ret; | 
|  | 2503 | } | 
|  | 2504 |  | 
|  | 2505 | /* | 
|  | 2506 | * Only creates the inode if it is: | 
|  | 2507 | * 1. Not a directory | 
|  | 2508 | * 2. A directory which was not created already due to out-of-order | 
|  | 2509 | *    directory processing. See did_create_dir and process_recorded_refs for details. | 
|  | 2510 | */ | 
|  | 2511 | static int send_create_inode_if_needed(struct send_ctx *sctx) | 
|  | 2512 | { | 
|  | 2513 | int ret; | 
|  | 2514 |  | 
|  | 2515 | if (S_ISDIR(sctx->cur_inode_mode)) { | 
|  | 2516 | ret = did_create_dir(sctx, sctx->cur_ino); | 
|  | 2517 | if (ret < 0) | 
|  | 2518 | goto out; | 
|  | 2519 | if (ret) { | 
|  | 2520 | ret = 0; | 
|  | 2521 | goto out; | 
|  | 2522 | } | 
|  | 2523 | } | 
|  | 2524 |  | 
|  | 2525 | ret = send_create_inode(sctx, sctx->cur_ino); | 
|  | 2526 | if (ret < 0) | 
|  | 2527 | goto out; | 
|  | 2528 |  | 
|  | 2529 | out: | 
|  | 2530 | return ret; | 
|  | 2531 | } | 
|  | 2532 |  | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2533 | struct recorded_ref { | 
|  | 2534 | struct list_head list; | 
|  | 2535 | char *dir_path; | 
|  | 2536 | char *name; | 
|  | 2537 | struct fs_path *full_path; | 
|  | 2538 | u64 dir; | 
|  | 2539 | u64 dir_gen; | 
|  | 2540 | int dir_path_len; | 
|  | 2541 | int name_len; | 
|  | 2542 | }; | 
|  | 2543 |  | 
|  | 2544 | /* | 
|  | 2545 | * We need to process new refs before deleted refs, but compare_tree gives us | 
|  | 2546 | * everything mixed. So we first record all refs and later process them. | 
|  | 2547 | * This function is a helper to record one ref. | 
|  | 2548 | */ | 
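|  |  | /* | 
|  |  | * Example: for a full_path of "a/b/c", record_ref() stores name = "c" | 
|  |  | * (name_len 1) and dir_path = "a/b" (dir_path_len 3); for a top level ref "c" | 
|  |  | * the dir_path is empty and dir_path_len is 0. | 
|  |  | */ | 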
|  | 2549 | static int record_ref(struct list_head *head, u64 dir, | 
|  | 2550 | u64 dir_gen, struct fs_path *path) | 
|  | 2551 | { | 
|  | 2552 | struct recorded_ref *ref; | 
|  | 2553 | char *tmp; | 
|  | 2554 |  | 
|  | 2555 | ref = kmalloc(sizeof(*ref), GFP_NOFS); | 
|  | 2556 | if (!ref) | 
|  | 2557 | return -ENOMEM; | 
|  | 2558 |  | 
|  | 2559 | ref->dir = dir; | 
|  | 2560 | ref->dir_gen = dir_gen; | 
|  | 2561 | ref->full_path = path; | 
|  | 2562 |  | 
|  | 2563 | tmp = strrchr(ref->full_path->start, '/'); | 
|  | 2564 | if (!tmp) { | 
|  | 2565 | ref->name_len = ref->full_path->end - ref->full_path->start; | 
|  | 2566 | ref->name = ref->full_path->start; | 
|  | 2567 | ref->dir_path_len = 0; | 
|  | 2568 | ref->dir_path = ref->full_path->start; | 
|  | 2569 | } else { | 
|  | 2570 | tmp++; | 
|  | 2571 | ref->name_len = ref->full_path->end - tmp; | 
|  | 2572 | ref->name = tmp; | 
|  | 2573 | ref->dir_path = ref->full_path->start; | 
|  | 2574 | ref->dir_path_len = ref->full_path->end - | 
|  | 2575 | ref->full_path->start - 1 - ref->name_len; | 
|  | 2576 | } | 
|  | 2577 |  | 
|  | 2578 | list_add_tail(&ref->list, head); | 
|  | 2579 | return 0; | 
|  | 2580 | } | 
|  | 2581 |  | 
|  | 2582 | static void __free_recorded_refs(struct send_ctx *sctx, struct list_head *head) | 
|  | 2583 | { | 
|  | 2584 | struct recorded_ref *cur; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2585 |  | 
| Alexander Block | e938c8a | 2012-07-28 16:33:49 +0200 | [diff] [blame] | 2586 | while (!list_empty(head)) { | 
|  | 2587 | cur = list_entry(head->next, struct recorded_ref, list); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2588 | fs_path_free(sctx, cur->full_path); | 
| Alexander Block | e938c8a | 2012-07-28 16:33:49 +0200 | [diff] [blame] | 2589 | list_del(&cur->list); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2590 | kfree(cur); | 
|  | 2591 | } | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2592 | } | 
|  | 2593 |  | 
|  | 2594 | static void free_recorded_refs(struct send_ctx *sctx) | 
|  | 2595 | { | 
|  | 2596 | __free_recorded_refs(sctx, &sctx->new_refs); | 
|  | 2597 | __free_recorded_refs(sctx, &sctx->deleted_refs); | 
|  | 2598 | } | 
|  | 2599 |  | 
|  | 2600 | /* | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 2601 | * Renames/moves a file/dir to its orphan name. Used when the first | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2602 | * ref of an unprocessed inode gets overwritten and for all non-empty | 
|  | 2603 | * directories. | 
|  | 2604 | */ | 
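|  |  | /* | 
|  |  | * Note: the target of the rename is the unique name produced by | 
|  |  | * gen_unique_name(), which is guaranteed to clash with nothing in either the | 
|  |  | * send or the parent root, so the victim can safely be parked there until it | 
|  |  | * is processed. | 
|  |  | */ | 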
|  | 2605 | static int orphanize_inode(struct send_ctx *sctx, u64 ino, u64 gen, | 
|  | 2606 | struct fs_path *path) | 
|  | 2607 | { | 
|  | 2608 | int ret; | 
|  | 2609 | struct fs_path *orphan; | 
|  | 2610 |  | 
|  | 2611 | orphan = fs_path_alloc(sctx); | 
|  | 2612 | if (!orphan) | 
|  | 2613 | return -ENOMEM; | 
|  | 2614 |  | 
|  | 2615 | ret = gen_unique_name(sctx, ino, gen, orphan); | 
|  | 2616 | if (ret < 0) | 
|  | 2617 | goto out; | 
|  | 2618 |  | 
|  | 2619 | ret = send_rename(sctx, path, orphan); | 
|  | 2620 |  | 
|  | 2621 | out: | 
|  | 2622 | fs_path_free(sctx, orphan); | 
|  | 2623 | return ret; | 
|  | 2624 | } | 
|  | 2625 |  | 
|  | 2626 | /* | 
|  | 2627 | * Returns 1 if a directory can be removed at this point in time. | 
|  | 2628 | * We check this by iterating all dir items and checking if the inode behind | 
|  | 2629 | * the dir item was already processed. | 
|  | 2630 | */ | 
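|  |  | /* | 
|  |  | * Note: a deleted directory may still contain entries for inodes that were not | 
|  |  | * processed yet; those entries are moved or unlinked only when their inodes | 
|  |  | * get processed, so the rmdir has to wait. Once every remaining dir item | 
|  |  | * points to an inode at or below send_progress, 1 is returned. | 
|  |  | */ | 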
|  | 2631 | static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 send_progress) | 
|  | 2632 | { | 
|  | 2633 | int ret = 0; | 
|  | 2634 | struct btrfs_root *root = sctx->parent_root; | 
|  | 2635 | struct btrfs_path *path; | 
|  | 2636 | struct btrfs_key key; | 
|  | 2637 | struct btrfs_key found_key; | 
|  | 2638 | struct btrfs_key loc; | 
|  | 2639 | struct btrfs_dir_item *di; | 
|  | 2640 |  | 
| Alexander Block | 6d85ed0 | 2012-08-01 14:48:59 +0200 | [diff] [blame] | 2641 | /* | 
|  | 2642 | * Don't try to rmdir the top/root subvolume dir. | 
|  | 2643 | */ | 
|  | 2644 | if (dir == BTRFS_FIRST_FREE_OBJECTID) | 
|  | 2645 | return 0; | 
|  | 2646 |  | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2647 | path = alloc_path_for_send(); | 
|  | 2648 | if (!path) | 
|  | 2649 | return -ENOMEM; | 
|  | 2650 |  | 
|  | 2651 | key.objectid = dir; | 
|  | 2652 | key.type = BTRFS_DIR_INDEX_KEY; | 
|  | 2653 | key.offset = 0; | 
|  | 2654 |  | 
|  | 2655 | while (1) { | 
|  | 2656 | ret = btrfs_search_slot_for_read(root, &key, path, 1, 0); | 
|  | 2657 | if (ret < 0) | 
|  | 2658 | goto out; | 
|  | 2659 | if (!ret) { | 
|  | 2660 | btrfs_item_key_to_cpu(path->nodes[0], &found_key, | 
|  | 2661 | path->slots[0]); | 
|  | 2662 | } | 
|  | 2663 | if (ret || found_key.objectid != key.objectid || | 
|  | 2664 | found_key.type != key.type) { | 
|  | 2665 | break; | 
|  | 2666 | } | 
|  | 2667 |  | 
|  | 2668 | di = btrfs_item_ptr(path->nodes[0], path->slots[0], | 
|  | 2669 | struct btrfs_dir_item); | 
|  | 2670 | btrfs_dir_item_key_to_cpu(path->nodes[0], di, &loc); | 
|  | 2671 |  | 
|  | 2672 | if (loc.objectid > send_progress) { | 
|  | 2673 | ret = 0; | 
|  | 2674 | goto out; | 
|  | 2675 | } | 
|  | 2676 |  | 
|  | 2677 | btrfs_release_path(path); | 
|  | 2678 | key.offset = found_key.offset + 1; | 
|  | 2679 | } | 
|  | 2680 |  | 
|  | 2681 | ret = 1; | 
|  | 2682 |  | 
|  | 2683 | out: | 
|  | 2684 | btrfs_free_path(path); | 
|  | 2685 | return ret; | 
|  | 2686 | } | 
|  | 2687 |  | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2688 | /* | 
|  | 2689 | * This does all the move/link/unlink/rmdir magic. | 
|  | 2690 | */ | 
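|  |  | /* | 
|  |  | * Rough order of the steps below: determine the currently valid path of the | 
|  |  | * inode (orphan name for new or overwritten inodes), create missing parent | 
|  |  | * directories out of order, orphanize or unlink inodes whose refs would be | 
|  |  | * overwritten, and then rename/link each recorded ref into place while keeping | 
|  |  | * valid_path up to date. | 
|  |  | */ | 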
|  | 2691 | static int process_recorded_refs(struct send_ctx *sctx) | 
|  | 2692 | { | 
|  | 2693 | int ret = 0; | 
|  | 2694 | struct recorded_ref *cur; | 
| Alexander Block | 1f4692d | 2012-07-28 10:42:24 +0200 | [diff] [blame] | 2695 | struct recorded_ref *cur2; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2696 | struct ulist *check_dirs = NULL; | 
|  | 2697 | struct ulist_iterator uit; | 
|  | 2698 | struct ulist_node *un; | 
|  | 2699 | struct fs_path *valid_path = NULL; | 
| Chris Mason | b24baf6 | 2012-07-25 19:21:10 -0400 | [diff] [blame] | 2700 | u64 ow_inode = 0; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2701 | u64 ow_gen; | 
|  | 2702 | int did_overwrite = 0; | 
|  | 2703 | int is_orphan = 0; | 
|  | 2704 |  | 
|  | 2705 | verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino); | 
|  | 2706 |  | 
| Alexander Block | 6d85ed0 | 2012-08-01 14:48:59 +0200 | [diff] [blame] | 2707 | /* | 
|  | 2708 | * This should never happen as the root dir always has the same ref | 
|  | 2709 | * which is always '..' | 
|  | 2710 | */ | 
|  | 2711 | BUG_ON(sctx->cur_ino <= BTRFS_FIRST_FREE_OBJECTID); | 
|  | 2712 |  | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2713 | valid_path = fs_path_alloc(sctx); | 
|  | 2714 | if (!valid_path) { | 
|  | 2715 | ret = -ENOMEM; | 
|  | 2716 | goto out; | 
|  | 2717 | } | 
|  | 2718 |  | 
|  | 2719 | check_dirs = ulist_alloc(GFP_NOFS); | 
|  | 2720 | if (!check_dirs) { | 
|  | 2721 | ret = -ENOMEM; | 
|  | 2722 | goto out; | 
|  | 2723 | } | 
|  | 2724 |  | 
|  | 2725 | /* | 
|  | 2726 | * First, check if the first ref of the current inode was overwritten | 
|  | 2727 | * before. If yes, we know that the current inode was already orphanized | 
|  | 2728 | * and thus use the orphan name. If not, we can use get_cur_path to | 
|  | 2729 | * get the path of the first ref as it would look like while receiving at | 
|  | 2730 | * this point in time. | 
|  | 2731 | * New inodes are always orphans at the beginning, so force the use of the | 
|  | 2732 | * orphan name in this case. | 
|  | 2733 | * The first ref is stored in valid_path and will be updated if it | 
|  | 2734 | * gets moved around. | 
|  | 2735 | */ | 
|  | 2736 | if (!sctx->cur_inode_new) { | 
|  | 2737 | ret = did_overwrite_first_ref(sctx, sctx->cur_ino, | 
|  | 2738 | sctx->cur_inode_gen); | 
|  | 2739 | if (ret < 0) | 
|  | 2740 | goto out; | 
|  | 2741 | if (ret) | 
|  | 2742 | did_overwrite = 1; | 
|  | 2743 | } | 
|  | 2744 | if (sctx->cur_inode_new || did_overwrite) { | 
|  | 2745 | ret = gen_unique_name(sctx, sctx->cur_ino, | 
|  | 2746 | sctx->cur_inode_gen, valid_path); | 
|  | 2747 | if (ret < 0) | 
|  | 2748 | goto out; | 
|  | 2749 | is_orphan = 1; | 
|  | 2750 | } else { | 
|  | 2751 | ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, | 
|  | 2752 | valid_path); | 
|  | 2753 | if (ret < 0) | 
|  | 2754 | goto out; | 
|  | 2755 | } | 
|  | 2756 |  | 
|  | 2757 | list_for_each_entry(cur, &sctx->new_refs, list) { | 
|  | 2758 | /* | 
| Alexander Block | 1f4692d | 2012-07-28 10:42:24 +0200 | [diff] [blame] | 2759 | * We may have refs where the parent directory does not exist | 
|  | 2760 | * yet. This happens if the parent directory's inum is higher | 
|  | 2761 | * than the current inum. To handle this case, we create the | 
|  | 2762 | * parent directory out of order. But we need to check if this | 
|  | 2763 | * already happened before due to other refs in the same dir. | 
|  | 2764 | */ | 
|  | 2765 | ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen); | 
|  | 2766 | if (ret < 0) | 
|  | 2767 | goto out; | 
|  | 2768 | if (ret == inode_state_will_create) { | 
|  | 2769 | ret = 0; | 
|  | 2770 | /* | 
|  | 2771 | * First check if any of the current inode's refs already | 
|  | 2772 | * created the dir. | 
|  | 2773 | */ | 
|  | 2774 | list_for_each_entry(cur2, &sctx->new_refs, list) { | 
|  | 2775 | if (cur == cur2) | 
|  | 2776 | break; | 
|  | 2777 | if (cur2->dir == cur->dir) { | 
|  | 2778 | ret = 1; | 
|  | 2779 | break; | 
|  | 2780 | } | 
|  | 2781 | } | 
|  | 2782 |  | 
|  | 2783 | /* | 
|  | 2784 | * If that did not happen, check if a previous inode | 
|  | 2785 | * already created the dir. | 
|  | 2786 | */ | 
|  | 2787 | if (!ret) | 
|  | 2788 | ret = did_create_dir(sctx, cur->dir); | 
|  | 2789 | if (ret < 0) | 
|  | 2790 | goto out; | 
|  | 2791 | if (!ret) { | 
|  | 2792 | ret = send_create_inode(sctx, cur->dir); | 
|  | 2793 | if (ret < 0) | 
|  | 2794 | goto out; | 
|  | 2795 | } | 
|  | 2796 | } | 
|  | 2797 |  | 
|  | 2798 | /* | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2799 | * Check if this new ref would overwrite the first ref of | 
|  | 2800 | * another unprocessed inode. If yes, orphanize the | 
|  | 2801 | * overwritten inode. If we find an overwritten ref that is | 
|  | 2802 | * not the first ref, simply unlink it. | 
|  | 2803 | */ | 
|  | 2804 | ret = will_overwrite_ref(sctx, cur->dir, cur->dir_gen, | 
|  | 2805 | cur->name, cur->name_len, | 
|  | 2806 | &ow_inode, &ow_gen); | 
|  | 2807 | if (ret < 0) | 
|  | 2808 | goto out; | 
|  | 2809 | if (ret) { | 
|  | 2810 | ret = is_first_ref(sctx, sctx->parent_root, | 
|  | 2811 | ow_inode, cur->dir, cur->name, | 
|  | 2812 | cur->name_len); | 
|  | 2813 | if (ret < 0) | 
|  | 2814 | goto out; | 
|  | 2815 | if (ret) { | 
|  | 2816 | ret = orphanize_inode(sctx, ow_inode, ow_gen, | 
|  | 2817 | cur->full_path); | 
|  | 2818 | if (ret < 0) | 
|  | 2819 | goto out; | 
|  | 2820 | } else { | 
|  | 2821 | ret = send_unlink(sctx, cur->full_path); | 
|  | 2822 | if (ret < 0) | 
|  | 2823 | goto out; | 
|  | 2824 | } | 
|  | 2825 | } | 
|  | 2826 |  | 
|  | 2827 | /* | 
|  | 2828 | * Link/move the ref to the new place. If we have an orphan | 
|  | 2829 | * inode, move it and update valid_path. If not, link or move | 
|  | 2830 | * it depending on the inode mode. | 
|  | 2831 | */ | 
| Alexander Block | 1f4692d | 2012-07-28 10:42:24 +0200 | [diff] [blame] | 2832 | if (is_orphan) { | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2833 | ret = send_rename(sctx, valid_path, cur->full_path); | 
|  | 2834 | if (ret < 0) | 
|  | 2835 | goto out; | 
|  | 2836 | is_orphan = 0; | 
|  | 2837 | ret = fs_path_copy(valid_path, cur->full_path); | 
|  | 2838 | if (ret < 0) | 
|  | 2839 | goto out; | 
|  | 2840 | } else { | 
|  | 2841 | if (S_ISDIR(sctx->cur_inode_mode)) { | 
|  | 2842 | /* | 
|  | 2843 | * Dirs can't be linked, so move them. For moved | 
|  | 2844 | * dirs, we always have one new and one deleted | 
|  | 2845 | * ref. The deleted ref is ignored later. | 
|  | 2846 | */ | 
|  | 2847 | ret = send_rename(sctx, valid_path, | 
|  | 2848 | cur->full_path); | 
|  | 2849 | if (ret < 0) | 
|  | 2850 | goto out; | 
|  | 2851 | ret = fs_path_copy(valid_path, cur->full_path); | 
|  | 2852 | if (ret < 0) | 
|  | 2853 | goto out; | 
|  | 2854 | } else { | 
|  | 2855 | ret = send_link(sctx, cur->full_path, | 
|  | 2856 | valid_path); | 
|  | 2857 | if (ret < 0) | 
|  | 2858 | goto out; | 
|  | 2859 | } | 
|  | 2860 | } | 
|  | 2861 | ret = ulist_add(check_dirs, cur->dir, cur->dir_gen, | 
|  | 2862 | GFP_NOFS); | 
|  | 2863 | if (ret < 0) | 
|  | 2864 | goto out; | 
|  | 2865 | } | 
|  | 2866 |  | 
|  | 2867 | if (S_ISDIR(sctx->cur_inode_mode) && sctx->cur_inode_deleted) { | 
|  | 2868 | /* | 
|  | 2869 | * Check if we can already rmdir the directory. If not, | 
|  | 2870 | * orphanize it. For every dir item inside that gets deleted | 
|  | 2871 | * later, we do this check again and rmdir it then if possible. | 
|  | 2872 | * See the use of check_dirs for more details. | 
|  | 2873 | */ | 
|  | 2874 | ret = can_rmdir(sctx, sctx->cur_ino, sctx->cur_ino); | 
|  | 2875 | if (ret < 0) | 
|  | 2876 | goto out; | 
|  | 2877 | if (ret) { | 
|  | 2878 | ret = send_rmdir(sctx, valid_path); | 
|  | 2879 | if (ret < 0) | 
|  | 2880 | goto out; | 
|  | 2881 | } else if (!is_orphan) { | 
|  | 2882 | ret = orphanize_inode(sctx, sctx->cur_ino, | 
|  | 2883 | sctx->cur_inode_gen, valid_path); | 
|  | 2884 | if (ret < 0) | 
|  | 2885 | goto out; | 
|  | 2886 | is_orphan = 1; | 
|  | 2887 | } | 
|  | 2888 |  | 
|  | 2889 | list_for_each_entry(cur, &sctx->deleted_refs, list) { | 
|  | 2890 | ret = ulist_add(check_dirs, cur->dir, cur->dir_gen, | 
|  | 2891 | GFP_NOFS); | 
|  | 2892 | if (ret < 0) | 
|  | 2893 | goto out; | 
|  | 2894 | } | 
| Alexander Block | ccf1626 | 2012-07-28 11:46:29 +0200 | [diff] [blame] | 2895 | } else if (S_ISDIR(sctx->cur_inode_mode) && | 
|  | 2896 | !list_empty(&sctx->deleted_refs)) { | 
|  | 2897 | /* | 
|  | 2898 | * We have a moved dir. Add the old parent to check_dirs. | 
|  | 2899 | */ | 
|  | 2900 | cur = list_entry(sctx->deleted_refs.next, struct recorded_ref, | 
|  | 2901 | list); | 
|  | 2902 | ret = ulist_add(check_dirs, cur->dir, cur->dir_gen, | 
|  | 2903 | GFP_NOFS); | 
|  | 2904 | if (ret < 0) | 
|  | 2905 | goto out; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2906 | } else if (!S_ISDIR(sctx->cur_inode_mode)) { | 
|  | 2907 | /* | 
|  | 2908 | * We have a non-dir inode. Go through all deleted refs and | 
|  | 2909 | * unlink them if they were not already overwritten by other | 
|  | 2910 | * inodes. | 
|  | 2911 | */ | 
|  | 2912 | list_for_each_entry(cur, &sctx->deleted_refs, list) { | 
|  | 2913 | ret = did_overwrite_ref(sctx, cur->dir, cur->dir_gen, | 
|  | 2914 | sctx->cur_ino, sctx->cur_inode_gen, | 
|  | 2915 | cur->name, cur->name_len); | 
|  | 2916 | if (ret < 0) | 
|  | 2917 | goto out; | 
|  | 2918 | if (!ret) { | 
| Alexander Block | 1f4692d | 2012-07-28 10:42:24 +0200 | [diff] [blame] | 2919 | ret = send_unlink(sctx, cur->full_path); | 
|  | 2920 | if (ret < 0) | 
|  | 2921 | goto out; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2922 | } | 
|  | 2923 | ret = ulist_add(check_dirs, cur->dir, cur->dir_gen, | 
|  | 2924 | GFP_NOFS); | 
|  | 2925 | if (ret < 0) | 
|  | 2926 | goto out; | 
|  | 2927 | } | 
|  | 2928 |  | 
|  | 2929 | /* | 
|  | 2930 | * If the inode is still orphan, unlink the orphan. This may | 
|  | 2931 | * happen when a previous inode overwrote the first ref | 
|  | 2932 | * of this inode and no new refs were added for the current | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 2933 | * inode. Unlinking does not mean that the inode is deleted in | 
|  | 2934 | * all cases. There may still be links to this inode in other | 
|  | 2935 | * places. | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2936 | */ | 
| Alexander Block | 1f4692d | 2012-07-28 10:42:24 +0200 | [diff] [blame] | 2937 | if (is_orphan) { | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2938 | ret = send_unlink(sctx, valid_path); | 
|  | 2939 | if (ret < 0) | 
|  | 2940 | goto out; | 
|  | 2941 | } | 
|  | 2942 | } | 
|  | 2943 |  | 
|  | 2944 | /* | 
|  | 2945 | * We have collected all parent dirs where cur_inode was once located. We | 
|  | 2946 | * now go through all these dirs and check if they are pending for | 
|  | 2947 | * deletion and if it's finally possible to perform the rmdir now. | 
|  | 2948 | * We also update the inode stats of the parent dirs here. | 
|  | 2949 | */ | 
|  | 2950 | ULIST_ITER_INIT(&uit); | 
|  | 2951 | while ((un = ulist_next(check_dirs, &uit))) { | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 2952 | /* | 
|  | 2953 | * In case we had refs into dirs that were not processed yet, | 
|  | 2954 | * we don't need to do the utime and rmdir logic for these dirs. | 
|  | 2955 | * The dir will be processed later. | 
|  | 2956 | */ | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2957 | if (un->val > sctx->cur_ino) | 
|  | 2958 | continue; | 
|  | 2959 |  | 
|  | 2960 | ret = get_cur_inode_state(sctx, un->val, un->aux); | 
|  | 2961 | if (ret < 0) | 
|  | 2962 | goto out; | 
|  | 2963 |  | 
|  | 2964 | if (ret == inode_state_did_create || | 
|  | 2965 | ret == inode_state_no_change) { | 
|  | 2966 | /* TODO delayed utimes */ | 
|  | 2967 | ret = send_utimes(sctx, un->val, un->aux); | 
|  | 2968 | if (ret < 0) | 
|  | 2969 | goto out; | 
|  | 2970 | } else if (ret == inode_state_did_delete) { | 
|  | 2971 | ret = can_rmdir(sctx, un->val, sctx->cur_ino); | 
|  | 2972 | if (ret < 0) | 
|  | 2973 | goto out; | 
|  | 2974 | if (ret) { | 
|  | 2975 | ret = get_cur_path(sctx, un->val, un->aux, | 
|  | 2976 | valid_path); | 
|  | 2977 | if (ret < 0) | 
|  | 2978 | goto out; | 
|  | 2979 | ret = send_rmdir(sctx, valid_path); | 
|  | 2980 | if (ret < 0) | 
|  | 2981 | goto out; | 
|  | 2982 | } | 
|  | 2983 | } | 
|  | 2984 | } | 
|  | 2985 |  | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2986 | ret = 0; | 
|  | 2987 |  | 
|  | 2988 | out: | 
|  | 2989 | free_recorded_refs(sctx); | 
|  | 2990 | ulist_free(check_dirs); | 
|  | 2991 | fs_path_free(sctx, valid_path); | 
|  | 2992 | return ret; | 
|  | 2993 | } | 
|  | 2994 |  | 
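|  |  | /* | 
|  |  | * Callback for iterate_inode_ref. For each ref found in the send root, | 
|  |  | * build the full destination path (current path of the parent dir plus | 
|  |  | * the ref name) and record it in sctx->new_refs for later processing | 
|  |  | * in process_recorded_refs. | 
|  |  | */ | 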
|  | 2995 | static int __record_new_ref(int num, u64 dir, int index, | 
|  | 2996 | struct fs_path *name, | 
|  | 2997 | void *ctx) | 
|  | 2998 | { | 
|  | 2999 | int ret = 0; | 
|  | 3000 | struct send_ctx *sctx = ctx; | 
|  | 3001 | struct fs_path *p; | 
|  | 3002 | u64 gen; | 
|  | 3003 |  | 
|  | 3004 | p = fs_path_alloc(sctx); | 
|  | 3005 | if (!p) | 
|  | 3006 | return -ENOMEM; | 
|  | 3007 |  | 
|  | 3008 | ret = get_inode_info(sctx->send_root, dir, NULL, &gen, NULL, NULL, | 
| Alexander Block | 85a7b33 | 2012-07-26 23:39:10 +0200 | [diff] [blame] | 3009 | NULL, NULL); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3010 | if (ret < 0) | 
|  | 3011 | goto out; | 
|  | 3012 |  | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3013 | ret = get_cur_path(sctx, dir, gen, p); | 
|  | 3014 | if (ret < 0) | 
|  | 3015 | goto out; | 
|  | 3016 | ret = fs_path_add_path(p, name); | 
|  | 3017 | if (ret < 0) | 
|  | 3018 | goto out; | 
|  | 3019 |  | 
|  | 3020 | ret = record_ref(&sctx->new_refs, dir, gen, p); | 
|  | 3021 |  | 
|  | 3022 | out: | 
|  | 3023 | if (ret) | 
|  | 3024 | fs_path_free(sctx, p); | 
|  | 3025 | return ret; | 
|  | 3026 | } | 
|  | 3027 |  | 
|  | 3028 | static int __record_deleted_ref(int num, u64 dir, int index, | 
|  | 3029 | struct fs_path *name, | 
|  | 3030 | void *ctx) | 
|  | 3031 | { | 
|  | 3032 | int ret = 0; | 
|  | 3033 | struct send_ctx *sctx = ctx; | 
|  | 3034 | struct fs_path *p; | 
|  | 3035 | u64 gen; | 
|  | 3036 |  | 
|  | 3037 | p = fs_path_alloc(sctx); | 
|  | 3038 | if (!p) | 
|  | 3039 | return -ENOMEM; | 
|  | 3040 |  | 
|  | 3041 | ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL, NULL, | 
| Alexander Block | 85a7b33 | 2012-07-26 23:39:10 +0200 | [diff] [blame] | 3042 | NULL, NULL); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3043 | if (ret < 0) | 
|  | 3044 | goto out; | 
|  | 3045 |  | 
|  | 3046 | ret = get_cur_path(sctx, dir, gen, p); | 
|  | 3047 | if (ret < 0) | 
|  | 3048 | goto out; | 
|  | 3049 | ret = fs_path_add_path(p, name); | 
|  | 3050 | if (ret < 0) | 
|  | 3051 | goto out; | 
|  | 3052 |  | 
|  | 3053 | ret = record_ref(&sctx->deleted_refs, dir, gen, p); | 
|  | 3054 |  | 
|  | 3055 | out: | 
|  | 3056 | if (ret) | 
|  | 3057 | fs_path_free(sctx, p); | 
|  | 3058 | return ret; | 
|  | 3059 | } | 
|  | 3060 |  | 
|  | 3061 | static int record_new_ref(struct send_ctx *sctx) | 
|  | 3062 | { | 
|  | 3063 | int ret; | 
|  | 3064 |  | 
|  | 3065 | ret = iterate_inode_ref(sctx, sctx->send_root, sctx->left_path, | 
|  | 3066 | sctx->cmp_key, 0, __record_new_ref, sctx); | 
|  | 3067 | if (ret < 0) | 
|  | 3068 | goto out; | 
|  | 3069 | ret = 0; | 
|  | 3070 |  | 
|  | 3071 | out: | 
|  | 3072 | return ret; | 
|  | 3073 | } | 
|  | 3074 |  | 
|  | 3075 | static int record_deleted_ref(struct send_ctx *sctx) | 
|  | 3076 | { | 
|  | 3077 | int ret; | 
|  | 3078 |  | 
|  | 3079 | ret = iterate_inode_ref(sctx, sctx->parent_root, sctx->right_path, | 
|  | 3080 | sctx->cmp_key, 0, __record_deleted_ref, sctx); | 
|  | 3081 | if (ret < 0) | 
|  | 3082 | goto out; | 
|  | 3083 | ret = 0; | 
|  | 3084 |  | 
|  | 3085 | out: | 
|  | 3086 | return ret; | 
|  | 3087 | } | 
|  | 3088 |  | 
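|  |  | /* | 
|  |  | * Helpers to check if a given (dir, name) pair is present in an inode | 
|  |  | * ref item. find_iref returns the index of the matching ref, or -ENOENT | 
|  |  | * if the ref does not exist in the given tree. | 
|  |  | */ | 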
|  | 3089 | struct find_ref_ctx { | 
|  | 3090 | u64 dir; | 
|  | 3091 | struct fs_path *name; | 
|  | 3092 | int found_idx; | 
|  | 3093 | }; | 
|  | 3094 |  | 
|  | 3095 | static int __find_iref(int num, u64 dir, int index, | 
|  | 3096 | struct fs_path *name, | 
|  | 3097 | void *ctx_) | 
|  | 3098 | { | 
|  | 3099 | struct find_ref_ctx *ctx = ctx_; | 
|  | 3100 |  | 
|  | 3101 | if (dir == ctx->dir && fs_path_len(name) == fs_path_len(ctx->name) && | 
|  | 3102 | strncmp(name->start, ctx->name->start, fs_path_len(name)) == 0) { | 
|  | 3103 | ctx->found_idx = num; | 
|  | 3104 | return 1; | 
|  | 3105 | } | 
|  | 3106 | return 0; | 
|  | 3107 | } | 
|  | 3108 |  | 
|  | 3109 | static int find_iref(struct send_ctx *sctx, | 
|  | 3110 | struct btrfs_root *root, | 
|  | 3111 | struct btrfs_path *path, | 
|  | 3112 | struct btrfs_key *key, | 
|  | 3113 | u64 dir, struct fs_path *name) | 
|  | 3114 | { | 
|  | 3115 | int ret; | 
|  | 3116 | struct find_ref_ctx ctx; | 
|  | 3117 |  | 
|  | 3118 | ctx.dir = dir; | 
|  | 3119 | ctx.name = name; | 
|  | 3120 | ctx.found_idx = -1; | 
|  | 3121 |  | 
|  | 3122 | ret = iterate_inode_ref(sctx, root, path, key, 0, __find_iref, &ctx); | 
|  | 3123 | if (ret < 0) | 
|  | 3124 | return ret; | 
|  | 3125 |  | 
|  | 3126 | if (ctx.found_idx == -1) | 
|  | 3127 | return -ENOENT; | 
|  | 3128 |  | 
|  | 3129 | return ctx.found_idx; | 
|  | 3130 | } | 
|  | 3131 |  | 
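|  |  | /* | 
|  |  | * For inodes reported as changed, a ref is only recorded as new if it | 
|  |  | * is not found in the parent root. Refs that exist in both trees need | 
|  |  | * no action. | 
|  |  | */ | 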
|  | 3132 | static int __record_changed_new_ref(int num, u64 dir, int index, | 
|  | 3133 | struct fs_path *name, | 
|  | 3134 | void *ctx) | 
|  | 3135 | { | 
|  | 3136 | int ret; | 
|  | 3137 | struct send_ctx *sctx = ctx; | 
|  | 3138 |  | 
|  | 3139 | ret = find_iref(sctx, sctx->parent_root, sctx->right_path, | 
|  | 3140 | sctx->cmp_key, dir, name); | 
|  | 3141 | if (ret == -ENOENT) | 
|  | 3142 | ret = __record_new_ref(num, dir, index, name, sctx); | 
|  | 3143 | else if (ret > 0) | 
|  | 3144 | ret = 0; | 
|  | 3145 |  | 
|  | 3146 | return ret; | 
|  | 3147 | } | 
|  | 3148 |  | 
|  | 3149 | static int __record_changed_deleted_ref(int num, u64 dir, int index, | 
|  | 3150 | struct fs_path *name, | 
|  | 3151 | void *ctx) | 
|  | 3152 | { | 
|  | 3153 | int ret; | 
|  | 3154 | struct send_ctx *sctx = ctx; | 
|  | 3155 |  | 
|  | 3156 | ret = find_iref(sctx, sctx->send_root, sctx->left_path, sctx->cmp_key, | 
|  | 3157 | dir, name); | 
|  | 3158 | if (ret == -ENOENT) | 
|  | 3159 | ret = __record_deleted_ref(num, dir, index, name, sctx); | 
|  | 3160 | else if (ret > 0) | 
|  | 3161 | ret = 0; | 
|  | 3162 |  | 
|  | 3163 | return ret; | 
|  | 3164 | } | 
|  | 3165 |  | 
|  | 3166 | static int record_changed_ref(struct send_ctx *sctx) | 
|  | 3167 | { | 
|  | 3168 | int ret = 0; | 
|  | 3169 |  | 
|  | 3170 | ret = iterate_inode_ref(sctx, sctx->send_root, sctx->left_path, | 
|  | 3171 | sctx->cmp_key, 0, __record_changed_new_ref, sctx); | 
|  | 3172 | if (ret < 0) | 
|  | 3173 | goto out; | 
|  | 3174 | ret = iterate_inode_ref(sctx, sctx->parent_root, sctx->right_path, | 
|  | 3175 | sctx->cmp_key, 0, __record_changed_deleted_ref, sctx); | 
|  | 3176 | if (ret < 0) | 
|  | 3177 | goto out; | 
|  | 3178 | ret = 0; | 
|  | 3179 |  | 
|  | 3180 | out: | 
|  | 3181 | return ret; | 
|  | 3182 | } | 
|  | 3183 |  | 
|  | 3184 | /* | 
|  | 3185 | * Record and process all refs at once. Needed when an inode changes its | 
|  | 3186 | * generation number, which means that it was deleted and recreated. | 
|  | 3187 | */ | 
|  | 3188 | static int process_all_refs(struct send_ctx *sctx, | 
|  | 3189 | enum btrfs_compare_tree_result cmd) | 
|  | 3190 | { | 
|  | 3191 | int ret; | 
|  | 3192 | struct btrfs_root *root; | 
|  | 3193 | struct btrfs_path *path; | 
|  | 3194 | struct btrfs_key key; | 
|  | 3195 | struct btrfs_key found_key; | 
|  | 3196 | struct extent_buffer *eb; | 
|  | 3197 | int slot; | 
|  | 3198 | iterate_inode_ref_t cb; | 
|  | 3199 |  | 
|  | 3200 | path = alloc_path_for_send(); | 
|  | 3201 | if (!path) | 
|  | 3202 | return -ENOMEM; | 
|  | 3203 |  | 
|  | 3204 | if (cmd == BTRFS_COMPARE_TREE_NEW) { | 
|  | 3205 | root = sctx->send_root; | 
|  | 3206 | cb = __record_new_ref; | 
|  | 3207 | } else if (cmd == BTRFS_COMPARE_TREE_DELETED) { | 
|  | 3208 | root = sctx->parent_root; | 
|  | 3209 | cb = __record_deleted_ref; | 
|  | 3210 | } else { | 
|  | 3211 | BUG(); | 
|  | 3212 | } | 
|  | 3213 |  | 
|  | 3214 | key.objectid = sctx->cmp_key->objectid; | 
|  | 3215 | key.type = BTRFS_INODE_REF_KEY; | 
|  | 3216 | key.offset = 0; | 
|  | 3217 | while (1) { | 
|  | 3218 | ret = btrfs_search_slot_for_read(root, &key, path, 1, 0); | 
| Alexander Block | e938c8a | 2012-07-28 16:33:49 +0200 | [diff] [blame] | 3219 | if (ret < 0) | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3220 | goto out; | 
| Alexander Block | e938c8a | 2012-07-28 16:33:49 +0200 | [diff] [blame] | 3221 | if (ret) | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3222 | break; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3223 |  | 
|  | 3224 | eb = path->nodes[0]; | 
|  | 3225 | slot = path->slots[0]; | 
|  | 3226 | btrfs_item_key_to_cpu(eb, &found_key, slot); | 
|  | 3227 |  | 
|  | 3228 | if (found_key.objectid != key.objectid || | 
| Alexander Block | e938c8a | 2012-07-28 16:33:49 +0200 | [diff] [blame] | 3229 | found_key.type != key.type) | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3230 | break; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3231 |  | 
| Alexander Block | 2f28f47 | 2012-08-01 14:42:14 +0200 | [diff] [blame] | 3232 | ret = iterate_inode_ref(sctx, root, path, &found_key, 0, cb, | 
|  | 3233 | sctx); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3234 | btrfs_release_path(path); | 
|  | 3235 | if (ret < 0) | 
|  | 3236 | goto out; | 
|  | 3237 |  | 
|  | 3238 | key.offset = found_key.offset + 1; | 
|  | 3239 | } | 
| Alexander Block | e938c8a | 2012-07-28 16:33:49 +0200 | [diff] [blame] | 3240 | btrfs_release_path(path); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3241 |  | 
|  | 3242 | ret = process_recorded_refs(sctx); | 
|  | 3243 |  | 
|  | 3244 | out: | 
|  | 3245 | btrfs_free_path(path); | 
|  | 3246 | return ret; | 
|  | 3247 | } | 
|  | 3248 |  | 
|  | 3249 | static int send_set_xattr(struct send_ctx *sctx, | 
|  | 3250 | struct fs_path *path, | 
|  | 3251 | const char *name, int name_len, | 
|  | 3252 | const char *data, int data_len) | 
|  | 3253 | { | 
|  | 3254 | int ret = 0; | 
|  | 3255 |  | 
|  | 3256 | ret = begin_cmd(sctx, BTRFS_SEND_C_SET_XATTR); | 
|  | 3257 | if (ret < 0) | 
|  | 3258 | goto out; | 
|  | 3259 |  | 
|  | 3260 | TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path); | 
|  | 3261 | TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len); | 
|  | 3262 | TLV_PUT(sctx, BTRFS_SEND_A_XATTR_DATA, data, data_len); | 
|  | 3263 |  | 
|  | 3264 | ret = send_cmd(sctx); | 
|  | 3265 |  | 
|  | 3266 | tlv_put_failure: | 
|  | 3267 | out: | 
|  | 3268 | return ret; | 
|  | 3269 | } | 
|  | 3270 |  | 
|  | 3271 | static int send_remove_xattr(struct send_ctx *sctx, | 
|  | 3272 | struct fs_path *path, | 
|  | 3273 | const char *name, int name_len) | 
|  | 3274 | { | 
|  | 3275 | int ret = 0; | 
|  | 3276 |  | 
|  | 3277 | ret = begin_cmd(sctx, BTRFS_SEND_C_REMOVE_XATTR); | 
|  | 3278 | if (ret < 0) | 
|  | 3279 | goto out; | 
|  | 3280 |  | 
|  | 3281 | TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path); | 
|  | 3282 | TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len); | 
|  | 3283 |  | 
|  | 3284 | ret = send_cmd(sctx); | 
|  | 3285 |  | 
|  | 3286 | tlv_put_failure: | 
|  | 3287 | out: | 
|  | 3288 | return ret; | 
|  | 3289 | } | 
|  | 3290 |  | 
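|  |  | /* | 
|  |  | * Callback for iterate_dir_item. Sends a SET_XATTR command for the | 
|  |  | * xattr found on the current inode. | 
|  |  | */ | 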
|  | 3291 | static int __process_new_xattr(int num, struct btrfs_key *di_key, | 
|  | 3292 | const char *name, int name_len, | 
|  | 3293 | const char *data, int data_len, | 
|  | 3294 | u8 type, void *ctx) | 
|  | 3295 | { | 
|  | 3296 | int ret; | 
|  | 3297 | struct send_ctx *sctx = ctx; | 
|  | 3298 | struct fs_path *p; | 
|  | 3299 | posix_acl_xattr_header dummy_acl; | 
|  | 3300 |  | 
|  | 3301 | p = fs_path_alloc(sctx); | 
|  | 3302 | if (!p) | 
|  | 3303 | return -ENOMEM; | 
|  | 3304 |  | 
|  | 3305 | /* | 
|  | 3306 | * This hack is needed because empty ACLs are stored as zero byte | 
|  | 3307 | * data in xattrs. The problem is that receiving these zero byte | 
|  | 3308 | * ACLs will fail later. To fix this, we send a dummy ACL list that | 
|  | 3309 | * only contains the version number and no entries. | 
|  | 3310 | */ | 
|  | 3311 | if (!strncmp(name, XATTR_NAME_POSIX_ACL_ACCESS, name_len) || | 
|  | 3312 | !strncmp(name, XATTR_NAME_POSIX_ACL_DEFAULT, name_len)) { | 
|  | 3313 | if (data_len == 0) { | 
|  | 3314 | dummy_acl.a_version = | 
|  | 3315 | cpu_to_le32(POSIX_ACL_XATTR_VERSION); | 
|  | 3316 | data = (char *)&dummy_acl; | 
|  | 3317 | data_len = sizeof(dummy_acl); | 
|  | 3318 | } | 
|  | 3319 | } | 
|  | 3320 |  | 
|  | 3321 | ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); | 
|  | 3322 | if (ret < 0) | 
|  | 3323 | goto out; | 
|  | 3324 |  | 
|  | 3325 | ret = send_set_xattr(sctx, p, name, name_len, data, data_len); | 
|  | 3326 |  | 
|  | 3327 | out: | 
|  | 3328 | fs_path_free(sctx, p); | 
|  | 3329 | return ret; | 
|  | 3330 | } | 
|  | 3331 |  | 
|  | 3332 | static int __process_deleted_xattr(int num, struct btrfs_key *di_key, | 
|  | 3333 | const char *name, int name_len, | 
|  | 3334 | const char *data, int data_len, | 
|  | 3335 | u8 type, void *ctx) | 
|  | 3336 | { | 
|  | 3337 | int ret; | 
|  | 3338 | struct send_ctx *sctx = ctx; | 
|  | 3339 | struct fs_path *p; | 
|  | 3340 |  | 
|  | 3341 | p = fs_path_alloc(sctx); | 
|  | 3342 | if (!p) | 
|  | 3343 | return -ENOMEM; | 
|  | 3344 |  | 
|  | 3345 | ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); | 
|  | 3346 | if (ret < 0) | 
|  | 3347 | goto out; | 
|  | 3348 |  | 
|  | 3349 | ret = send_remove_xattr(sctx, p, name, name_len); | 
|  | 3350 |  | 
|  | 3351 | out: | 
|  | 3352 | fs_path_free(sctx, p); | 
|  | 3353 | return ret; | 
|  | 3354 | } | 
|  | 3355 |  | 
|  | 3356 | static int process_new_xattr(struct send_ctx *sctx) | 
|  | 3357 | { | 
|  | 3358 | int ret = 0; | 
|  | 3359 |  | 
|  | 3360 | ret = iterate_dir_item(sctx, sctx->send_root, sctx->left_path, | 
|  | 3361 | sctx->cmp_key, __process_new_xattr, sctx); | 
|  | 3362 |  | 
|  | 3363 | return ret; | 
|  | 3364 | } | 
|  | 3365 |  | 
|  | 3366 | static int process_deleted_xattr(struct send_ctx *sctx) | 
|  | 3367 | { | 
|  | 3368 | int ret; | 
|  | 3369 |  | 
|  | 3370 | ret = iterate_dir_item(sctx, sctx->parent_root, sctx->right_path, | 
|  | 3371 | sctx->cmp_key, __process_deleted_xattr, sctx); | 
|  | 3372 |  | 
|  | 3373 | return ret; | 
|  | 3374 | } | 
|  | 3375 |  | 
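|  |  | /* | 
|  |  | * Helpers to look up an xattr by name in an xattr item. find_xattr | 
|  |  | * returns the index of the match or -ENOENT. If data is non-NULL, a | 
|  |  | * copy of the xattr data is returned in *data and must be freed by the | 
|  |  | * caller. | 
|  |  | */ | 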
|  | 3376 | struct find_xattr_ctx { | 
|  | 3377 | const char *name; | 
|  | 3378 | int name_len; | 
|  | 3379 | int found_idx; | 
|  | 3380 | char *found_data; | 
|  | 3381 | int found_data_len; | 
|  | 3382 | }; | 
|  | 3383 |  | 
|  | 3384 | static int __find_xattr(int num, struct btrfs_key *di_key, | 
|  | 3385 | const char *name, int name_len, | 
|  | 3386 | const char *data, int data_len, | 
|  | 3387 | u8 type, void *vctx) | 
|  | 3388 | { | 
|  | 3389 | struct find_xattr_ctx *ctx = vctx; | 
|  | 3390 |  | 
|  | 3391 | if (name_len == ctx->name_len && | 
|  | 3392 | strncmp(name, ctx->name, name_len) == 0) { | 
|  | 3393 | ctx->found_idx = num; | 
|  | 3394 | ctx->found_data_len = data_len; | 
|  | 3395 | ctx->found_data = kmalloc(data_len, GFP_NOFS); | 
|  | 3396 | if (!ctx->found_data) | 
|  | 3397 | return -ENOMEM; | 
|  | 3398 | memcpy(ctx->found_data, data, data_len); | 
|  | 3399 | return 1; | 
|  | 3400 | } | 
|  | 3401 | return 0; | 
|  | 3402 | } | 
|  | 3403 |  | 
|  | 3404 | static int find_xattr(struct send_ctx *sctx, | 
|  | 3405 | struct btrfs_root *root, | 
|  | 3406 | struct btrfs_path *path, | 
|  | 3407 | struct btrfs_key *key, | 
|  | 3408 | const char *name, int name_len, | 
|  | 3409 | char **data, int *data_len) | 
|  | 3410 | { | 
|  | 3411 | int ret; | 
|  | 3412 | struct find_xattr_ctx ctx; | 
|  | 3413 |  | 
|  | 3414 | ctx.name = name; | 
|  | 3415 | ctx.name_len = name_len; | 
|  | 3416 | ctx.found_idx = -1; | 
|  | 3417 | ctx.found_data = NULL; | 
|  | 3418 | ctx.found_data_len = 0; | 
|  | 3419 |  | 
|  | 3420 | ret = iterate_dir_item(sctx, root, path, key, __find_xattr, &ctx); | 
|  | 3421 | if (ret < 0) | 
|  | 3422 | return ret; | 
|  | 3423 |  | 
|  | 3424 | if (ctx.found_idx == -1) | 
|  | 3425 | return -ENOENT; | 
|  | 3426 | if (data) { | 
|  | 3427 | *data = ctx.found_data; | 
|  | 3428 | *data_len = ctx.found_data_len; | 
|  | 3429 | } else { | 
|  | 3430 | kfree(ctx.found_data); | 
|  | 3431 | } | 
|  | 3432 | return ctx.found_idx; | 
|  | 3433 | } | 
|  | 3434 |  | 
|  | 3435 |  | 
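|  |  | /* | 
|  |  | * For changed inodes, only send a SET_XATTR if the xattr is new or its | 
|  |  | * data differs from the version found in the parent root. | 
|  |  | */ | 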
|  | 3436 | static int __process_changed_new_xattr(int num, struct btrfs_key *di_key, | 
|  | 3437 | const char *name, int name_len, | 
|  | 3438 | const char *data, int data_len, | 
|  | 3439 | u8 type, void *ctx) | 
|  | 3440 | { | 
|  | 3441 | int ret; | 
|  | 3442 | struct send_ctx *sctx = ctx; | 
|  | 3443 | char *found_data = NULL; | 
|  | 3444 | int found_data_len  = 0; | 
|  | 3445 | struct fs_path *p = NULL; | 
|  | 3446 |  | 
|  | 3447 | ret = find_xattr(sctx, sctx->parent_root, sctx->right_path, | 
|  | 3448 | sctx->cmp_key, name, name_len, &found_data, | 
|  | 3449 | &found_data_len); | 
|  | 3450 | if (ret == -ENOENT) { | 
|  | 3451 | ret = __process_new_xattr(num, di_key, name, name_len, data, | 
|  | 3452 | data_len, type, ctx); | 
|  | 3453 | } else if (ret >= 0) { | 
|  | 3454 | if (data_len != found_data_len || | 
|  | 3455 | memcmp(data, found_data, data_len)) { | 
|  | 3456 | ret = __process_new_xattr(num, di_key, name, name_len, | 
|  | 3457 | data, data_len, type, ctx); | 
|  | 3458 | } else { | 
|  | 3459 | ret = 0; | 
|  | 3460 | } | 
|  | 3461 | } | 
|  | 3462 |  | 
|  | 3463 | kfree(found_data); | 
|  | 3464 | fs_path_free(sctx, p); | 
|  | 3465 | return ret; | 
|  | 3466 | } | 
|  | 3467 |  | 
|  | 3468 | static int __process_changed_deleted_xattr(int num, struct btrfs_key *di_key, | 
|  | 3469 | const char *name, int name_len, | 
|  | 3470 | const char *data, int data_len, | 
|  | 3471 | u8 type, void *ctx) | 
|  | 3472 | { | 
|  | 3473 | int ret; | 
|  | 3474 | struct send_ctx *sctx = ctx; | 
|  | 3475 |  | 
|  | 3476 | ret = find_xattr(sctx, sctx->send_root, sctx->left_path, sctx->cmp_key, | 
|  | 3477 | name, name_len, NULL, NULL); | 
|  | 3478 | if (ret == -ENOENT) | 
|  | 3479 | ret = __process_deleted_xattr(num, di_key, name, name_len, data, | 
|  | 3480 | data_len, type, ctx); | 
|  | 3481 | else if (ret >= 0) | 
|  | 3482 | ret = 0; | 
|  | 3483 |  | 
|  | 3484 | return ret; | 
|  | 3485 | } | 
|  | 3486 |  | 
|  | 3487 | static int process_changed_xattr(struct send_ctx *sctx) | 
|  | 3488 | { | 
|  | 3489 | int ret = 0; | 
|  | 3490 |  | 
|  | 3491 | ret = iterate_dir_item(sctx, sctx->send_root, sctx->left_path, | 
|  | 3492 | sctx->cmp_key, __process_changed_new_xattr, sctx); | 
|  | 3493 | if (ret < 0) | 
|  | 3494 | goto out; | 
|  | 3495 | ret = iterate_dir_item(sctx, sctx->parent_root, sctx->right_path, | 
|  | 3496 | sctx->cmp_key, __process_changed_deleted_xattr, sctx); | 
|  | 3497 |  | 
|  | 3498 | out: | 
|  | 3499 | return ret; | 
|  | 3500 | } | 
|  | 3501 |  | 
|  | 3502 | static int process_all_new_xattrs(struct send_ctx *sctx) | 
|  | 3503 | { | 
|  | 3504 | int ret; | 
|  | 3505 | struct btrfs_root *root; | 
|  | 3506 | struct btrfs_path *path; | 
|  | 3507 | struct btrfs_key key; | 
|  | 3508 | struct btrfs_key found_key; | 
|  | 3509 | struct extent_buffer *eb; | 
|  | 3510 | int slot; | 
|  | 3511 |  | 
|  | 3512 | path = alloc_path_for_send(); | 
|  | 3513 | if (!path) | 
|  | 3514 | return -ENOMEM; | 
|  | 3515 |  | 
|  | 3516 | root = sctx->send_root; | 
|  | 3517 |  | 
|  | 3518 | key.objectid = sctx->cmp_key->objectid; | 
|  | 3519 | key.type = BTRFS_XATTR_ITEM_KEY; | 
|  | 3520 | key.offset = 0; | 
|  | 3521 | while (1) { | 
|  | 3522 | ret = btrfs_search_slot_for_read(root, &key, path, 1, 0); | 
|  | 3523 | if (ret < 0) | 
|  | 3524 | goto out; | 
|  | 3525 | if (ret) { | 
|  | 3526 | ret = 0; | 
|  | 3527 | goto out; | 
|  | 3528 | } | 
|  | 3529 |  | 
|  | 3530 | eb = path->nodes[0]; | 
|  | 3531 | slot = path->slots[0]; | 
|  | 3532 | btrfs_item_key_to_cpu(eb, &found_key, slot); | 
|  | 3533 |  | 
|  | 3534 | if (found_key.objectid != key.objectid || | 
|  | 3535 | found_key.type != key.type) { | 
|  | 3536 | ret = 0; | 
|  | 3537 | goto out; | 
|  | 3538 | } | 
|  | 3539 |  | 
|  | 3540 | ret = iterate_dir_item(sctx, root, path, &found_key, | 
|  | 3541 | __process_new_xattr, sctx); | 
|  | 3542 | if (ret < 0) | 
|  | 3543 | goto out; | 
|  | 3544 |  | 
|  | 3545 | btrfs_release_path(path); | 
|  | 3546 | key.offset = found_key.offset + 1; | 
|  | 3547 | } | 
|  | 3548 |  | 
|  | 3549 | out: | 
|  | 3550 | btrfs_free_path(path); | 
|  | 3551 | return ret; | 
|  | 3552 | } | 
|  | 3553 |  | 
|  | 3554 | /* | 
|  | 3555 | * Read some bytes from the current inode/file and send a write command to | 
|  | 3556 | * user space. | 
|  | 3557 | */ | 
|  | 3558 | static int send_write(struct send_ctx *sctx, u64 offset, u32 len) | 
|  | 3559 | { | 
|  | 3560 | int ret = 0; | 
|  | 3561 | struct fs_path *p; | 
|  | 3562 | loff_t pos = offset; | 
| Alexander Block | e938c8a | 2012-07-28 16:33:49 +0200 | [diff] [blame] | 3563 | int num_read = 0; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3564 | mm_segment_t old_fs; | 
|  | 3565 |  | 
|  | 3566 | p = fs_path_alloc(sctx); | 
|  | 3567 | if (!p) | 
|  | 3568 | return -ENOMEM; | 
|  | 3569 |  | 
|  | 3570 | /* | 
|  | 3571 | * The vfs normally only accepts user space buffers for security | 
|  | 3572 | * reasons. We only read from the file and only provide the read_buf | 
|  | 3573 | * buffer to the vfs. As this buffer does not come from a user space | 
|  | 3574 | * call, it's ok to temporarily allow kernel space buffers. | 
|  | 3575 | */ | 
|  | 3576 | old_fs = get_fs(); | 
|  | 3577 | set_fs(KERNEL_DS); | 
|  | 3578 |  | 
|  | 3579 | verbose_printk("btrfs: send_write offset=%llu, len=%d\n", offset, len); | 
|  | 3580 |  | 
|  | 3581 | ret = open_cur_inode_file(sctx); | 
|  | 3582 | if (ret < 0) | 
|  | 3583 | goto out; | 
|  | 3584 |  | 
|  | 3585 | ret = vfs_read(sctx->cur_inode_filp, sctx->read_buf, len, &pos); | 
|  | 3586 | if (ret < 0) | 
|  | 3587 | goto out; | 
| Alexander Block | e938c8a | 2012-07-28 16:33:49 +0200 | [diff] [blame] | 3588 | num_read = ret; | 
|  | 3589 | if (!num_read) | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3590 | goto out; | 
|  | 3591 |  | 
|  | 3592 | ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE); | 
|  | 3593 | if (ret < 0) | 
|  | 3594 | goto out; | 
|  | 3595 |  | 
|  | 3596 | ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); | 
|  | 3597 | if (ret < 0) | 
|  | 3598 | goto out; | 
|  | 3599 |  | 
|  | 3600 | TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); | 
|  | 3601 | TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset); | 
| Alexander Block | e938c8a | 2012-07-28 16:33:49 +0200 | [diff] [blame] | 3602 | TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, num_read); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3603 |  | 
|  | 3604 | ret = send_cmd(sctx); | 
|  | 3605 |  | 
|  | 3606 | tlv_put_failure: | 
|  | 3607 | out: | 
|  | 3608 | fs_path_free(sctx, p); | 
|  | 3609 | set_fs(old_fs); | 
|  | 3610 | if (ret < 0) | 
|  | 3611 | return ret; | 
| Alexander Block | e938c8a | 2012-07-28 16:33:49 +0200 | [diff] [blame] | 3612 | return num_read; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3613 | } | 
|  | 3614 |  | 
|  | 3615 | /* | 
|  | 3616 | * Send a clone command to user space. | 
|  | 3617 | */ | 
|  | 3618 | static int send_clone(struct send_ctx *sctx, | 
|  | 3619 | u64 offset, u32 len, | 
|  | 3620 | struct clone_root *clone_root) | 
|  | 3621 | { | 
|  | 3622 | int ret = 0; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3623 | struct fs_path *p; | 
|  | 3624 | u64 gen; | 
|  | 3625 |  | 
|  | 3626 | verbose_printk("btrfs: send_clone offset=%llu, len=%d, clone_root=%llu, " | 
|  | 3627 | "clone_inode=%llu, clone_offset=%llu\n", offset, len, | 
|  | 3628 | clone_root->root->objectid, clone_root->ino, | 
|  | 3629 | clone_root->offset); | 
|  | 3630 |  | 
|  | 3631 | p = fs_path_alloc(sctx); | 
|  | 3632 | if (!p) | 
|  | 3633 | return -ENOMEM; | 
|  | 3634 |  | 
|  | 3635 | ret = begin_cmd(sctx, BTRFS_SEND_C_CLONE); | 
|  | 3636 | if (ret < 0) | 
|  | 3637 | goto out; | 
|  | 3638 |  | 
|  | 3639 | ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); | 
|  | 3640 | if (ret < 0) | 
|  | 3641 | goto out; | 
|  | 3642 |  | 
|  | 3643 | TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset); | 
|  | 3644 | TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_LEN, len); | 
|  | 3645 | TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); | 
|  | 3646 |  | 
| Alexander Block | e938c8a | 2012-07-28 16:33:49 +0200 | [diff] [blame] | 3647 | if (clone_root->root == sctx->send_root) { | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3648 | ret = get_inode_info(sctx->send_root, clone_root->ino, NULL, | 
| Alexander Block | 85a7b33 | 2012-07-26 23:39:10 +0200 | [diff] [blame] | 3649 | &gen, NULL, NULL, NULL, NULL); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3650 | if (ret < 0) | 
|  | 3651 | goto out; | 
|  | 3652 | ret = get_cur_path(sctx, clone_root->ino, gen, p); | 
|  | 3653 | } else { | 
| Alexander Block | e938c8a | 2012-07-28 16:33:49 +0200 | [diff] [blame] | 3654 | ret = get_inode_path(sctx, clone_root->root, | 
|  | 3655 | clone_root->ino, p); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3656 | } | 
|  | 3657 | if (ret < 0) | 
|  | 3658 | goto out; | 
|  | 3659 |  | 
|  | 3660 | TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID, | 
| Alexander Block | e938c8a | 2012-07-28 16:33:49 +0200 | [diff] [blame] | 3661 | clone_root->root->root_item.uuid); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3662 | TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID, | 
| Alexander Block | e938c8a | 2012-07-28 16:33:49 +0200 | [diff] [blame] | 3663 | clone_root->root->root_item.ctransid); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3664 | TLV_PUT_PATH(sctx, BTRFS_SEND_A_CLONE_PATH, p); | 
|  | 3665 | TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_OFFSET, | 
|  | 3666 | clone_root->offset); | 
|  | 3667 |  | 
|  | 3668 | ret = send_cmd(sctx); | 
|  | 3669 |  | 
|  | 3670 | tlv_put_failure: | 
|  | 3671 | out: | 
|  | 3672 | fs_path_free(sctx, p); | 
|  | 3673 | return ret; | 
|  | 3674 | } | 
|  | 3675 |  | 
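|  |  | /* | 
|  |  | * Send the data of one file extent item, either as a series of write | 
|  |  | * commands of at most BTRFS_SEND_READ_SIZE bytes each or, if a clone | 
|  |  | * source was found, as a single clone command. | 
|  |  | */ | 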
|  | 3676 | static int send_write_or_clone(struct send_ctx *sctx, | 
|  | 3677 | struct btrfs_path *path, | 
|  | 3678 | struct btrfs_key *key, | 
|  | 3679 | struct clone_root *clone_root) | 
|  | 3680 | { | 
|  | 3681 | int ret = 0; | 
|  | 3682 | struct btrfs_file_extent_item *ei; | 
|  | 3683 | u64 offset = key->offset; | 
|  | 3684 | u64 pos = 0; | 
|  | 3685 | u64 len; | 
|  | 3686 | u32 l; | 
|  | 3687 | u8 type; | 
|  | 3688 |  | 
|  | 3689 | ei = btrfs_item_ptr(path->nodes[0], path->slots[0], | 
|  | 3690 | struct btrfs_file_extent_item); | 
|  | 3691 | type = btrfs_file_extent_type(path->nodes[0], ei); | 
| Chris Mason | 74dd17f | 2012-08-07 16:25:13 -0400 | [diff] [blame] | 3692 | if (type == BTRFS_FILE_EXTENT_INLINE) { | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3693 | len = btrfs_file_extent_inline_len(path->nodes[0], ei); | 
| Chris Mason | 74dd17f | 2012-08-07 16:25:13 -0400 | [diff] [blame] | 3694 | /* | 
|  | 3695 | * It is possible the inline item won't cover the whole page, | 
|  | 3696 | * but there may be items after this page. Make | 
|  | 3697 | * sure to send the whole thing. | 
|  | 3698 | */ | 
|  | 3699 | len = PAGE_CACHE_ALIGN(len); | 
|  | 3700 | } else { | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3701 | len = btrfs_file_extent_num_bytes(path->nodes[0], ei); | 
| Chris Mason | 74dd17f | 2012-08-07 16:25:13 -0400 | [diff] [blame] | 3702 | } | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3703 |  | 
|  | 3704 | if (offset + len > sctx->cur_inode_size) | 
|  | 3705 | len = sctx->cur_inode_size - offset; | 
|  | 3706 | if (len == 0) { | 
|  | 3707 | ret = 0; | 
|  | 3708 | goto out; | 
|  | 3709 | } | 
|  | 3710 |  | 
|  | 3711 | if (!clone_root) { | 
|  | 3712 | while (pos < len) { | 
|  | 3713 | l = len - pos; | 
|  | 3714 | if (l > BTRFS_SEND_READ_SIZE) | 
|  | 3715 | l = BTRFS_SEND_READ_SIZE; | 
|  | 3716 | ret = send_write(sctx, pos + offset, l); | 
|  | 3717 | if (ret < 0) | 
|  | 3718 | goto out; | 
|  | 3719 | if (!ret) | 
|  | 3720 | break; | 
|  | 3721 | pos += ret; | 
|  | 3722 | } | 
|  | 3723 | ret = 0; | 
|  | 3724 | } else { | 
|  | 3725 | ret = send_clone(sctx, offset, len, clone_root); | 
|  | 3726 | } | 
|  | 3727 |  | 
|  | 3728 | out: | 
|  | 3729 | return ret; | 
|  | 3730 | } | 
|  | 3731 |  | 
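|  |  | /* | 
|  |  | * Check if the extent described by ekey in the send root is fully | 
|  |  | * covered by extents in the parent root that point to the same data | 
|  |  | * (same disk bytenr, offset and generation). Returns 1 if the extent | 
|  |  | * is unchanged and does not need to be sent, 0 if it changed, and a | 
|  |  | * negative errno on error. | 
|  |  | */ | 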
|  | 3732 | static int is_extent_unchanged(struct send_ctx *sctx, | 
|  | 3733 | struct btrfs_path *left_path, | 
|  | 3734 | struct btrfs_key *ekey) | 
|  | 3735 | { | 
|  | 3736 | int ret = 0; | 
|  | 3737 | struct btrfs_key key; | 
|  | 3738 | struct btrfs_path *path = NULL; | 
|  | 3739 | struct extent_buffer *eb; | 
|  | 3740 | int slot; | 
|  | 3741 | struct btrfs_key found_key; | 
|  | 3742 | struct btrfs_file_extent_item *ei; | 
|  | 3743 | u64 left_disknr; | 
|  | 3744 | u64 right_disknr; | 
|  | 3745 | u64 left_offset; | 
|  | 3746 | u64 right_offset; | 
|  | 3747 | u64 left_offset_fixed; | 
|  | 3748 | u64 left_len; | 
|  | 3749 | u64 right_len; | 
| Chris Mason | 74dd17f | 2012-08-07 16:25:13 -0400 | [diff] [blame] | 3750 | u64 left_gen; | 
|  | 3751 | u64 right_gen; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3752 | u8 left_type; | 
|  | 3753 | u8 right_type; | 
|  | 3754 |  | 
|  | 3755 | path = alloc_path_for_send(); | 
|  | 3756 | if (!path) | 
|  | 3757 | return -ENOMEM; | 
|  | 3758 |  | 
|  | 3759 | eb = left_path->nodes[0]; | 
|  | 3760 | slot = left_path->slots[0]; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3761 | ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item); | 
|  | 3762 | left_type = btrfs_file_extent_type(eb, ei); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3763 |  | 
|  | 3764 | if (left_type != BTRFS_FILE_EXTENT_REG) { | 
|  | 3765 | ret = 0; | 
|  | 3766 | goto out; | 
|  | 3767 | } | 
| Chris Mason | 74dd17f | 2012-08-07 16:25:13 -0400 | [diff] [blame] | 3768 | left_disknr = btrfs_file_extent_disk_bytenr(eb, ei); | 
|  | 3769 | left_len = btrfs_file_extent_num_bytes(eb, ei); | 
|  | 3770 | left_offset = btrfs_file_extent_offset(eb, ei); | 
|  | 3771 | left_gen = btrfs_file_extent_generation(eb, ei); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3772 |  | 
|  | 3773 | /* | 
|  | 3774 | * The following comments will refer to these graphics. L is the left | 
|  | 3775 | * extent which we are checking at the moment. 1-8 are the right | 
|  | 3776 | * extents that we iterate over. | 
|  | 3777 | * | 
|  | 3778 | *       |-----L-----| | 
|  | 3779 | * |-1-|-2a-|-3-|-4-|-5-|-6-| | 
|  | 3780 | * | 
|  | 3781 | *       |-----L-----| | 
|  | 3782 | * |--1--|-2b-|...(same as above) | 
|  | 3783 | * | 
|  | 3784 | * Alternative situation. Happens on files where extents got split. | 
|  | 3785 | *       |-----L-----| | 
|  | 3786 | * |-----------7-----------|-6-| | 
|  | 3787 | * | 
|  | 3788 | * Alternative situation. Happens on files which got larger. | 
|  | 3789 | *       |-----L-----| | 
|  | 3790 | * |-8-| | 
|  | 3791 | * Nothing follows after 8. | 
|  | 3792 | */ | 
|  | 3793 |  | 
|  | 3794 | key.objectid = ekey->objectid; | 
|  | 3795 | key.type = BTRFS_EXTENT_DATA_KEY; | 
|  | 3796 | key.offset = ekey->offset; | 
|  | 3797 | ret = btrfs_search_slot_for_read(sctx->parent_root, &key, path, 0, 0); | 
|  | 3798 | if (ret < 0) | 
|  | 3799 | goto out; | 
|  | 3800 | if (ret) { | 
|  | 3801 | ret = 0; | 
|  | 3802 | goto out; | 
|  | 3803 | } | 
|  | 3804 |  | 
|  | 3805 | /* | 
|  | 3806 | * Handle special case where the right side has no extents at all. | 
|  | 3807 | */ | 
|  | 3808 | eb = path->nodes[0]; | 
|  | 3809 | slot = path->slots[0]; | 
|  | 3810 | btrfs_item_key_to_cpu(eb, &found_key, slot); | 
|  | 3811 | if (found_key.objectid != key.objectid || | 
|  | 3812 | found_key.type != key.type) { | 
|  | 3813 | ret = 0; | 
|  | 3814 | goto out; | 
|  | 3815 | } | 
|  | 3816 |  | 
|  | 3817 | /* | 
|  | 3818 | * We're now on 2a, 2b or 7. | 
|  | 3819 | */ | 
|  | 3820 | key = found_key; | 
|  | 3821 | while (key.offset < ekey->offset + left_len) { | 
|  | 3822 | ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item); | 
|  | 3823 | right_type = btrfs_file_extent_type(eb, ei); | 
|  | 3824 | right_disknr = btrfs_file_extent_disk_bytenr(eb, ei); | 
|  | 3825 | right_len = btrfs_file_extent_num_bytes(eb, ei); | 
|  | 3826 | right_offset = btrfs_file_extent_offset(eb, ei); | 
| Chris Mason | 74dd17f | 2012-08-07 16:25:13 -0400 | [diff] [blame] | 3827 | right_gen = btrfs_file_extent_generation(eb, ei); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3828 |  | 
|  | 3829 | if (right_type != BTRFS_FILE_EXTENT_REG) { | 
|  | 3830 | ret = 0; | 
|  | 3831 | goto out; | 
|  | 3832 | } | 
|  | 3833 |  | 
|  | 3834 | /* | 
|  | 3835 | * Are we at extent 8? If yes, we know the extent is changed. | 
|  | 3836 | * This may only happen on the first iteration. | 
|  | 3837 | */ | 
| Alexander Block | d8347fa | 2012-08-01 12:49:15 +0200 | [diff] [blame] | 3838 | if (found_key.offset + right_len <= ekey->offset) { | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3839 | ret = 0; | 
|  | 3840 | goto out; | 
|  | 3841 | } | 
|  | 3842 |  | 
|  | 3843 | left_offset_fixed = left_offset; | 
|  | 3844 | if (key.offset < ekey->offset) { | 
|  | 3845 | /* Fix the right offset for 2a and 7. */ | 
|  | 3846 | right_offset += ekey->offset - key.offset; | 
|  | 3847 | } else { | 
|  | 3848 | /* Fix the left offset for everything behind 2a and 2b. */ | 
|  | 3849 | left_offset_fixed += key.offset - ekey->offset; | 
|  | 3850 | } | 
|  | 3851 |  | 
|  | 3852 | /* | 
|  | 3853 | * Check if we have the same extent. | 
|  | 3854 | */ | 
| Alexander Block | 3954096 | 2012-08-01 12:46:05 +0200 | [diff] [blame] | 3855 | if (left_disknr != right_disknr || | 
| Chris Mason | 74dd17f | 2012-08-07 16:25:13 -0400 | [diff] [blame] | 3856 | left_offset_fixed != right_offset || | 
|  | 3857 | left_gen != right_gen) { | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3858 | ret = 0; | 
|  | 3859 | goto out; | 
|  | 3860 | } | 
|  | 3861 |  | 
|  | 3862 | /* | 
|  | 3863 | * Go to the next extent. | 
|  | 3864 | */ | 
|  | 3865 | ret = btrfs_next_item(sctx->parent_root, path); | 
|  | 3866 | if (ret < 0) | 
|  | 3867 | goto out; | 
|  | 3868 | if (!ret) { | 
|  | 3869 | eb = path->nodes[0]; | 
|  | 3870 | slot = path->slots[0]; | 
|  | 3871 | btrfs_item_key_to_cpu(eb, &found_key, slot); | 
|  | 3872 | } | 
|  | 3873 | if (ret || found_key.objectid != key.objectid || | 
|  | 3874 | found_key.type != key.type) { | 
|  | 3875 | key.offset += right_len; | 
|  | 3876 | break; | 
|  | 3877 | } else { | 
|  | 3878 | if (found_key.offset != key.offset + right_len) { | 
|  | 3879 | /* Should really not happen */ | 
|  | 3880 | ret = -EIO; | 
|  | 3881 | goto out; | 
|  | 3882 | } | 
|  | 3883 | } | 
|  | 3884 | key = found_key; | 
|  | 3885 | } | 
|  | 3886 |  | 
|  | 3887 | /* | 
|  | 3888 | * We're now behind the left extent (treat as unchanged) or at the end | 
|  | 3889 | * of the right side (treat as changed). | 
|  | 3890 | */ | 
|  | 3891 | if (key.offset >= ekey->offset + left_len) | 
|  | 3892 | ret = 1; | 
|  | 3893 | else | 
|  | 3894 | ret = 0; | 
|  | 3895 |  | 
|  | 3896 |  | 
|  | 3897 | out: | 
|  | 3898 | btrfs_free_path(path); | 
|  | 3899 | return ret; | 
|  | 3900 | } | 
|  | 3901 |  | 
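|  |  | /* | 
|  |  | * Process a single file extent item: symlinks are skipped, extents | 
|  |  | * that are unchanged relative to the parent root are skipped, and | 
|  |  | * everything else is sent as a write or clone, depending on whether a | 
|  |  | * clone source was found. | 
|  |  | */ | 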
|  | 3902 | static int process_extent(struct send_ctx *sctx, | 
|  | 3903 | struct btrfs_path *path, | 
|  | 3904 | struct btrfs_key *key) | 
|  | 3905 | { | 
|  | 3906 | int ret = 0; | 
|  | 3907 | struct clone_root *found_clone = NULL; | 
|  | 3908 |  | 
|  | 3909 | if (S_ISLNK(sctx->cur_inode_mode)) | 
|  | 3910 | return 0; | 
|  | 3911 |  | 
|  | 3912 | if (sctx->parent_root && !sctx->cur_inode_new) { | 
|  | 3913 | ret = is_extent_unchanged(sctx, path, key); | 
|  | 3914 | if (ret < 0) | 
|  | 3915 | goto out; | 
|  | 3916 | if (ret) { | 
|  | 3917 | ret = 0; | 
|  | 3918 | goto out; | 
|  | 3919 | } | 
|  | 3920 | } | 
|  | 3921 |  | 
|  | 3922 | ret = find_extent_clone(sctx, path, key->objectid, key->offset, | 
|  | 3923 | sctx->cur_inode_size, &found_clone); | 
|  | 3924 | if (ret != -ENOENT && ret < 0) | 
|  | 3925 | goto out; | 
|  | 3926 |  | 
|  | 3927 | ret = send_write_or_clone(sctx, path, key, found_clone); | 
|  | 3928 |  | 
|  | 3929 | out: | 
|  | 3930 | return ret; | 
|  | 3931 | } | 
|  | 3932 |  | 
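|  |  | /* | 
|  |  | * Process all extent items of the current inode as if they were new. | 
|  |  | * Used when the inode got a new generation and thus has to be sent | 
|  |  | * from scratch. | 
|  |  | */ | 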
|  | 3933 | static int process_all_extents(struct send_ctx *sctx) | 
|  | 3934 | { | 
|  | 3935 | int ret; | 
|  | 3936 | struct btrfs_root *root; | 
|  | 3937 | struct btrfs_path *path; | 
|  | 3938 | struct btrfs_key key; | 
|  | 3939 | struct btrfs_key found_key; | 
|  | 3940 | struct extent_buffer *eb; | 
|  | 3941 | int slot; | 
|  | 3942 |  | 
|  | 3943 | root = sctx->send_root; | 
|  | 3944 | path = alloc_path_for_send(); | 
|  | 3945 | if (!path) | 
|  | 3946 | return -ENOMEM; | 
|  | 3947 |  | 
|  | 3948 | key.objectid = sctx->cmp_key->objectid; | 
|  | 3949 | key.type = BTRFS_EXTENT_DATA_KEY; | 
|  | 3950 | key.offset = 0; | 
|  | 3951 | while (1) { | 
|  | 3952 | ret = btrfs_search_slot_for_read(root, &key, path, 1, 0); | 
|  | 3953 | if (ret < 0) | 
|  | 3954 | goto out; | 
|  | 3955 | if (ret) { | 
|  | 3956 | ret = 0; | 
|  | 3957 | goto out; | 
|  | 3958 | } | 
|  | 3959 |  | 
|  | 3960 | eb = path->nodes[0]; | 
|  | 3961 | slot = path->slots[0]; | 
|  | 3962 | btrfs_item_key_to_cpu(eb, &found_key, slot); | 
|  | 3963 |  | 
|  | 3964 | if (found_key.objectid != key.objectid || | 
|  | 3965 | found_key.type != key.type) { | 
|  | 3966 | ret = 0; | 
|  | 3967 | goto out; | 
|  | 3968 | } | 
|  | 3969 |  | 
|  | 3970 | ret = process_extent(sctx, path, &found_key); | 
|  | 3971 | if (ret < 0) | 
|  | 3972 | goto out; | 
|  | 3973 |  | 
|  | 3974 | btrfs_release_path(path); | 
|  | 3975 | key.offset = found_key.offset + 1; | 
|  | 3976 | } | 
|  | 3977 |  | 
|  | 3978 | out: | 
|  | 3979 | btrfs_free_path(path); | 
|  | 3980 | return ret; | 
|  | 3981 | } | 
|  | 3982 |  | 
|  | 3983 | static int process_recorded_refs_if_needed(struct send_ctx *sctx, int at_end) | 
|  | 3984 | { | 
|  | 3985 | int ret = 0; | 
|  | 3986 |  | 
|  | 3987 | if (sctx->cur_ino == 0) | 
|  | 3988 | goto out; | 
|  | 3989 | if (!at_end && sctx->cur_ino == sctx->cmp_key->objectid && | 
|  | 3990 | sctx->cmp_key->type <= BTRFS_INODE_REF_KEY) | 
|  | 3991 | goto out; | 
|  | 3992 | if (list_empty(&sctx->new_refs) && list_empty(&sctx->deleted_refs)) | 
|  | 3993 | goto out; | 
|  | 3994 |  | 
|  | 3995 | ret = process_recorded_refs(sctx); | 
| Alexander Block | e479d9b | 2012-07-28 16:09:35 +0200 | [diff] [blame] | 3996 | if (ret < 0) | 
|  | 3997 | goto out; | 
|  | 3998 |  | 
|  | 3999 | /* | 
|  | 4000 | * We have processed the refs and thus need to advance send_progress. | 
|  | 4001 | * Now, calls to get_cur_xxx will take the updated refs of the current | 
|  | 4002 | * inode into account. | 
|  | 4003 | */ | 
|  | 4004 | sctx->send_progress = sctx->cur_ino + 1; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 4005 |  | 
|  | 4006 | out: | 
|  | 4007 | return ret; | 
|  | 4008 | } | 
|  | 4009 |  | 
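|  |  | /* | 
|  |  | * Finish off the current inode once the tree comparison has moved past | 
|  |  | * it. For regular files a truncate is sent, chown/chmod are sent if | 
|  |  | * owner or mode differ from the parent root (or the inode is new), and | 
|  |  | * a utimes command is always sent last. | 
|  |  | */ | 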
|  | 4010 | static int finish_inode_if_needed(struct send_ctx *sctx, int at_end) | 
|  | 4011 | { | 
|  | 4012 | int ret = 0; | 
|  | 4013 | u64 left_mode; | 
|  | 4014 | u64 left_uid; | 
|  | 4015 | u64 left_gid; | 
|  | 4016 | u64 right_mode; | 
|  | 4017 | u64 right_uid; | 
|  | 4018 | u64 right_gid; | 
|  | 4019 | int need_chmod = 0; | 
|  | 4020 | int need_chown = 0; | 
|  | 4021 |  | 
|  | 4022 | ret = process_recorded_refs_if_needed(sctx, at_end); | 
|  | 4023 | if (ret < 0) | 
|  | 4024 | goto out; | 
|  | 4025 |  | 
|  | 4026 | if (sctx->cur_ino == 0 || sctx->cur_inode_deleted) | 
|  | 4027 | goto out; | 
|  | 4028 | if (!at_end && sctx->cmp_key->objectid == sctx->cur_ino) | 
|  | 4029 | goto out; | 
|  | 4030 |  | 
|  | 4031 | ret = get_inode_info(sctx->send_root, sctx->cur_ino, NULL, NULL, | 
| Alexander Block | 85a7b33 | 2012-07-26 23:39:10 +0200 | [diff] [blame] | 4032 | &left_mode, &left_uid, &left_gid, NULL); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 4033 | if (ret < 0) | 
|  | 4034 | goto out; | 
|  | 4035 |  | 
|  | 4036 | if (!S_ISLNK(sctx->cur_inode_mode)) { | 
|  | 4037 | if (!sctx->parent_root || sctx->cur_inode_new) { | 
|  | 4038 | need_chmod = 1; | 
|  | 4039 | need_chown = 1; | 
|  | 4040 | } else { | 
|  | 4041 | ret = get_inode_info(sctx->parent_root, sctx->cur_ino, | 
|  | 4042 | NULL, NULL, &right_mode, &right_uid, | 
| Alexander Block | 85a7b33 | 2012-07-26 23:39:10 +0200 | [diff] [blame] | 4043 | &right_gid, NULL); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 4044 | if (ret < 0) | 
|  | 4045 | goto out; | 
|  | 4046 |  | 
|  | 4047 | if (left_uid != right_uid || left_gid != right_gid) | 
|  | 4048 | need_chown = 1; | 
|  | 4049 | if (left_mode != right_mode) | 
|  | 4050 | need_chmod = 1; | 
|  | 4051 | } | 
|  | 4052 | } | 
|  | 4053 |  | 
|  | 4054 | if (S_ISREG(sctx->cur_inode_mode)) { | 
|  | 4055 | ret = send_truncate(sctx, sctx->cur_ino, sctx->cur_inode_gen, | 
|  | 4056 | sctx->cur_inode_size); | 
|  | 4057 | if (ret < 0) | 
|  | 4058 | goto out; | 
|  | 4059 | } | 
|  | 4060 |  | 
|  | 4061 | if (need_chown) { | 
|  | 4062 | ret = send_chown(sctx, sctx->cur_ino, sctx->cur_inode_gen, | 
|  | 4063 | left_uid, left_gid); | 
|  | 4064 | if (ret < 0) | 
|  | 4065 | goto out; | 
|  | 4066 | } | 
|  | 4067 | if (need_chmod) { | 
|  | 4068 | ret = send_chmod(sctx, sctx->cur_ino, sctx->cur_inode_gen, | 
|  | 4069 | left_mode); | 
|  | 4070 | if (ret < 0) | 
|  | 4071 | goto out; | 
|  | 4072 | } | 
|  | 4073 |  | 
|  | 4074 | /* | 
|  | 4075 | * We need to send the utimes every time, no matter if they actually | 
|  | 4076 | * changed between the two trees, as we have made changes to the inode before. | 
|  | 4077 | */ | 
|  | 4078 | ret = send_utimes(sctx, sctx->cur_ino, sctx->cur_inode_gen); | 
|  | 4079 | if (ret < 0) | 
|  | 4080 | goto out; | 
|  | 4081 |  | 
|  | 4082 | out: | 
|  | 4083 | return ret; | 
|  | 4084 | } | 
|  | 4085 |  | 
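|  |  | /* | 
|  |  | * Called for every inode item reported by the tree comparison. Sets up | 
|  |  | * the per-inode state (cur_ino, generation, size, mode) for the | 
|  |  | * following ref, xattr and extent items. An inode reported as changed | 
|  |  | * but with a different generation was deleted and recreated; it is | 
|  |  | * handled by first processing all old refs as deleted and then | 
|  |  | * processing the inode, its refs, extents and xattrs as new. | 
|  |  | */ | 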
|  | 4086 | static int changed_inode(struct send_ctx *sctx, | 
|  | 4087 | enum btrfs_compare_tree_result result) | 
|  | 4088 | { | 
|  | 4089 | int ret = 0; | 
|  | 4090 | struct btrfs_key *key = sctx->cmp_key; | 
|  | 4091 | struct btrfs_inode_item *left_ii = NULL; | 
|  | 4092 | struct btrfs_inode_item *right_ii = NULL; | 
|  | 4093 | u64 left_gen = 0; | 
|  | 4094 | u64 right_gen = 0; | 
|  | 4095 |  | 
|  | 4096 | ret = close_cur_inode_file(sctx); | 
|  | 4097 | if (ret < 0) | 
|  | 4098 | goto out; | 
|  | 4099 |  | 
|  | 4100 | sctx->cur_ino = key->objectid; | 
|  | 4101 | sctx->cur_inode_new_gen = 0; | 
| Alexander Block | e479d9b | 2012-07-28 16:09:35 +0200 | [diff] [blame] | 4102 |  | 
|  | 4103 | /* | 
|  | 4104 | * Set send_progress to current inode. This will tell all get_cur_xxx | 
|  | 4105 | * functions that the current inode's refs are not updated yet. Later, | 
|  | 4106 | * when process_recorded_refs is finished, it is set to cur_ino + 1. | 
|  | 4107 | */ | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 4108 | sctx->send_progress = sctx->cur_ino; | 
|  | 4109 |  | 
|  | 4110 | if (result == BTRFS_COMPARE_TREE_NEW || | 
|  | 4111 | result == BTRFS_COMPARE_TREE_CHANGED) { | 
|  | 4112 | left_ii = btrfs_item_ptr(sctx->left_path->nodes[0], | 
|  | 4113 | sctx->left_path->slots[0], | 
|  | 4114 | struct btrfs_inode_item); | 
|  | 4115 | left_gen = btrfs_inode_generation(sctx->left_path->nodes[0], | 
|  | 4116 | left_ii); | 
|  | 4117 | } else { | 
|  | 4118 | right_ii = btrfs_item_ptr(sctx->right_path->nodes[0], | 
|  | 4119 | sctx->right_path->slots[0], | 
|  | 4120 | struct btrfs_inode_item); | 
|  | 4121 | right_gen = btrfs_inode_generation(sctx->right_path->nodes[0], | 
|  | 4122 | right_ii); | 
|  | 4123 | } | 
|  | 4124 | if (result == BTRFS_COMPARE_TREE_CHANGED) { | 
|  | 4125 | right_ii = btrfs_item_ptr(sctx->right_path->nodes[0], | 
|  | 4126 | sctx->right_path->slots[0], | 
|  | 4127 | struct btrfs_inode_item); | 
|  | 4128 |  | 
|  | 4129 | right_gen = btrfs_inode_generation(sctx->right_path->nodes[0], | 
|  | 4130 | right_ii); | 
| Alexander Block | 6d85ed0 | 2012-08-01 14:48:59 +0200 | [diff] [blame] | 4131 |  | 
|  | 4132 | /* | 
|  | 4133 | * The cur_ino = root dir case is special here. We can't treat | 
|  | 4134 | * the inode as deleted+reused because it would generate a | 
|  | 4135 | * stream that tries to delete/mkdir the root dir. | 
|  | 4136 | */ | 
|  | 4137 | if (left_gen != right_gen && | 
|  | 4138 | sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 4139 | sctx->cur_inode_new_gen = 1; | 
|  | 4140 | } | 
|  | 4141 |  | 
|  | 4142 | if (result == BTRFS_COMPARE_TREE_NEW) { | 
|  | 4143 | sctx->cur_inode_gen = left_gen; | 
|  | 4144 | sctx->cur_inode_new = 1; | 
|  | 4145 | sctx->cur_inode_deleted = 0; | 
|  | 4146 | sctx->cur_inode_size = btrfs_inode_size( | 
|  | 4147 | sctx->left_path->nodes[0], left_ii); | 
|  | 4148 | sctx->cur_inode_mode = btrfs_inode_mode( | 
|  | 4149 | sctx->left_path->nodes[0], left_ii); | 
|  | 4150 | if (sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) | 
| Alexander Block | 1f4692d | 2012-07-28 10:42:24 +0200 | [diff] [blame] | 4151 | ret = send_create_inode_if_needed(sctx); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 4152 | } else if (result == BTRFS_COMPARE_TREE_DELETED) { | 
|  | 4153 | sctx->cur_inode_gen = right_gen; | 
|  | 4154 | sctx->cur_inode_new = 0; | 
|  | 4155 | sctx->cur_inode_deleted = 1; | 
|  | 4156 | sctx->cur_inode_size = btrfs_inode_size( | 
|  | 4157 | sctx->right_path->nodes[0], right_ii); | 
|  | 4158 | sctx->cur_inode_mode = btrfs_inode_mode( | 
|  | 4159 | sctx->right_path->nodes[0], right_ii); | 
|  | 4160 | } else if (result == BTRFS_COMPARE_TREE_CHANGED) { | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 4161 | /* | 
|  | 4162 | * We need to do some special handling in case the inode was | 
|  | 4163 | * reported as changed with a changed generation number. This | 
|  | 4164 | * means that the original inode was deleted and a new inode | 
|  | 4165 | * reused the same inum. So we have to treat the old inode as | 
|  | 4166 | * deleted and the new one as new. | 
|  | 4167 | */ | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 4168 | if (sctx->cur_inode_new_gen) { | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 4169 | /* | 
|  | 4170 | * First, process the inode as if it was deleted. | 
|  | 4171 | */ | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 4172 | sctx->cur_inode_gen = right_gen; | 
|  | 4173 | sctx->cur_inode_new = 0; | 
|  | 4174 | sctx->cur_inode_deleted = 1; | 
|  | 4175 | sctx->cur_inode_size = btrfs_inode_size( | 
|  | 4176 | sctx->right_path->nodes[0], right_ii); | 
|  | 4177 | sctx->cur_inode_mode = btrfs_inode_mode( | 
|  | 4178 | sctx->right_path->nodes[0], right_ii); | 
|  | 4179 | ret = process_all_refs(sctx, | 
|  | 4180 | BTRFS_COMPARE_TREE_DELETED); | 
|  | 4181 | if (ret < 0) | 
|  | 4182 | goto out; | 
|  | 4183 |  | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 4184 | /* | 
|  | 4185 | * Now process the inode as if it was new. | 
|  | 4186 | */ | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 4187 | sctx->cur_inode_gen = left_gen; | 
|  | 4188 | sctx->cur_inode_new = 1; | 
|  | 4189 | sctx->cur_inode_deleted = 0; | 
|  | 4190 | sctx->cur_inode_size = btrfs_inode_size( | 
|  | 4191 | sctx->left_path->nodes[0], left_ii); | 
|  | 4192 | sctx->cur_inode_mode = btrfs_inode_mode( | 
|  | 4193 | sctx->left_path->nodes[0], left_ii); | 
| Alexander Block | 1f4692d | 2012-07-28 10:42:24 +0200 | [diff] [blame] | 4194 | ret = send_create_inode_if_needed(sctx); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 4195 | if (ret < 0) | 
|  | 4196 | goto out; | 
|  | 4197 |  | 
|  | 4198 | ret = process_all_refs(sctx, BTRFS_COMPARE_TREE_NEW); | 
|  | 4199 | if (ret < 0) | 
|  | 4200 | goto out; | 
| Alexander Block | e479d9b | 2012-07-28 16:09:35 +0200 | [diff] [blame] | 4201 | /* | 
|  | 4202 | * Advance send_progress now as we did not get into | 
|  | 4203 | * process_recorded_refs_if_needed in the new_gen case. | 
|  | 4204 | */ | 
|  | 4205 | sctx->send_progress = sctx->cur_ino + 1; | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 4206 |  | 
|  | 4207 | /* | 
|  | 4208 | * Now process all extents and xattrs of the inode as if | 
|  | 4209 | * they were all new. | 
|  | 4210 | */ | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 4211 | ret = process_all_extents(sctx); | 
|  | 4212 | if (ret < 0) | 
|  | 4213 | goto out; | 
|  | 4214 | ret = process_all_new_xattrs(sctx); | 
|  | 4215 | if (ret < 0) | 
|  | 4216 | goto out; | 
|  | 4217 | } else { | 
|  | 4218 | sctx->cur_inode_gen = left_gen; | 
|  | 4219 | sctx->cur_inode_new = 0; | 
|  | 4220 | sctx->cur_inode_new_gen = 0; | 
|  | 4221 | sctx->cur_inode_deleted = 0; | 
|  | 4222 | sctx->cur_inode_size = btrfs_inode_size( | 
|  | 4223 | sctx->left_path->nodes[0], left_ii); | 
|  | 4224 | sctx->cur_inode_mode = btrfs_inode_mode( | 
|  | 4225 | sctx->left_path->nodes[0], left_ii); | 
|  | 4226 | } | 
|  | 4227 | } | 
|  | 4228 |  | 
|  | 4229 | out: | 
|  | 4230 | return ret; | 
|  | 4231 | } | 
|  | 4232 |  | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 4233 | /* | 
|  | 4234 | * We have to process new refs before deleted refs, but compare_trees gives us | 
|  | 4235 | * the new and deleted refs mixed. To fix this, we record the new/deleted refs | 
|  | 4236 | * first and later process them in process_recorded_refs. | 
|  | 4237 | * For the cur_inode_new_gen case, we skip recording completely because | 
|  | 4238 | * changed_inode already initiated processing of refs. The reason for this is | 
|  | 4239 | * that in this case, compare_trees actually compares the refs of two different | 
|  | 4240 | * inodes. To fix this, process_all_refs is used in changed_inode to handle all | 
|  | 4241 | * refs of the right tree as deleted and all refs of the left tree as new. | 
|  | 4242 | */ | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 4243 | static int changed_ref(struct send_ctx *sctx, | 
|  | 4244 | enum btrfs_compare_tree_result result) | 
|  | 4245 | { | 
|  | 4246 | int ret = 0; | 
|  | 4247 |  | 
|  | 4248 | BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid); | 
|  | 4249 |  | 
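|  |  | /* | 
|  |  | * Refs of the root dir are never recorded; the stream must not try to | 
|  |  | * create, rename or delete the subvolume's top-level directory. | 
|  |  | */ | 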
|  | 4250 | if (!sctx->cur_inode_new_gen && | 
|  | 4251 | sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) { | 
|  | 4252 | if (result == BTRFS_COMPARE_TREE_NEW) | 
|  | 4253 | ret = record_new_ref(sctx); | 
|  | 4254 | else if (result == BTRFS_COMPARE_TREE_DELETED) | 
|  | 4255 | ret = record_deleted_ref(sctx); | 
|  | 4256 | else if (result == BTRFS_COMPARE_TREE_CHANGED) | 
|  | 4257 | ret = record_changed_ref(sctx); | 
|  | 4258 | } | 
|  | 4259 |  | 
|  | 4260 | return ret; | 
|  | 4261 | } | 
|  | 4262 |  | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 4263 | /* | 
|  | 4264 | * Process new/deleted/changed xattrs. We skip processing in the | 
|  | 4265 | * cur_inode_new_gen case because changed_inode already initiated processing | 
|  | 4266 | * of xattrs. The reason is the same as in changed_ref. | 
|  | 4267 | */ | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 4268 | static int changed_xattr(struct send_ctx *sctx, | 
|  | 4269 | enum btrfs_compare_tree_result result) | 
|  | 4270 | { | 
|  | 4271 | int ret = 0; | 
|  | 4272 |  | 
|  | 4273 | BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid); | 
|  | 4274 |  | 
|  | 4275 | if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) { | 
|  | 4276 | if (result == BTRFS_COMPARE_TREE_NEW) | 
|  | 4277 | ret = process_new_xattr(sctx); | 
|  | 4278 | else if (result == BTRFS_COMPARE_TREE_DELETED) | 
|  | 4279 | ret = process_deleted_xattr(sctx); | 
|  | 4280 | else if (result == BTRFS_COMPARE_TREE_CHANGED) | 
|  | 4281 | ret = process_changed_xattr(sctx); | 
|  | 4282 | } | 
|  | 4283 |  | 
|  | 4284 | return ret; | 
|  | 4285 | } | 
|  | 4286 |  | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 4287 | /* | 
|  | 4288 | * Process new/deleted/changed extents. We skip processing in the | 
|  | 4289 | * cur_inode_new_gen case because changed_inode already initiated processing | 
|  | 4290 | * of extents. The reason is the same as in changed_ref. | 
|  | 4291 | */ | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 4292 | static int changed_extent(struct send_ctx *sctx, | 
|  | 4293 | enum btrfs_compare_tree_result result) | 
|  | 4294 | { | 
|  | 4295 | int ret = 0; | 
|  | 4296 |  | 
|  | 4297 | BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid); | 
|  | 4298 |  | 
|  | 4299 | if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) { | 
|  | 4300 | if (result != BTRFS_COMPARE_TREE_DELETED) | 
|  | 4301 | ret = process_extent(sctx, sctx->left_path, | 
|  | 4302 | sctx->cmp_key); | 
|  | 4303 | } | 
|  | 4304 |  | 
|  | 4305 | return ret; | 
|  | 4306 | } | 
|  | 4307 |  | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 4308 | /* | 
|  | 4309 | * Updates compare-related fields in sctx and simply forwards to the actual | 
|  | 4310 | * changed_xxx functions. | 
|  | 4311 | */ | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 4312 | static int changed_cb(struct btrfs_root *left_root, | 
|  | 4313 | struct btrfs_root *right_root, | 
|  | 4314 | struct btrfs_path *left_path, | 
|  | 4315 | struct btrfs_path *right_path, | 
|  | 4316 | struct btrfs_key *key, | 
|  | 4317 | enum btrfs_compare_tree_result result, | 
|  | 4318 | void *ctx) | 
|  | 4319 | { | 
|  | 4320 | int ret = 0; | 
|  | 4321 | struct send_ctx *sctx = ctx; | 
|  | 4322 |  | 
|  | 4323 | sctx->left_path = left_path; | 
|  | 4324 | sctx->right_path = right_path; | 
|  | 4325 | sctx->cmp_key = key; | 
|  | 4326 |  | 
|  | 4327 | ret = finish_inode_if_needed(sctx, 0); | 
|  | 4328 | if (ret < 0) | 
|  | 4329 | goto out; | 
|  | 4330 |  | 
| Alexander Block | 2981e22 | 2012-08-01 14:47:03 +0200 | [diff] [blame] | 4331 | /* Ignore non-FS objects */ | 
|  | 4332 | if (key->objectid == BTRFS_FREE_INO_OBJECTID || | 
|  | 4333 | key->objectid == BTRFS_FREE_SPACE_OBJECTID) | 
|  | 4334 | goto out; | 
|  | 4335 |  | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 4336 | if (key->type == BTRFS_INODE_ITEM_KEY) | 
|  | 4337 | ret = changed_inode(sctx, result); | 
|  | 4338 | else if (key->type == BTRFS_INODE_REF_KEY) | 
|  | 4339 | ret = changed_ref(sctx, result); | 
|  | 4340 | else if (key->type == BTRFS_XATTR_ITEM_KEY) | 
|  | 4341 | ret = changed_xattr(sctx, result); | 
|  | 4342 | else if (key->type == BTRFS_EXTENT_DATA_KEY) | 
|  | 4343 | ret = changed_extent(sctx, result); | 
|  | 4344 |  | 
|  | 4345 | out: | 
|  | 4346 | return ret; | 
|  | 4347 | } | 
|  | 4348 |  | 
|  | 4349 | static int full_send_tree(struct send_ctx *sctx) | 
|  | 4350 | { | 
|  | 4351 | int ret; | 
|  | 4352 | struct btrfs_trans_handle *trans = NULL; | 
|  | 4353 | struct btrfs_root *send_root = sctx->send_root; | 
|  | 4354 | struct btrfs_key key; | 
|  | 4355 | struct btrfs_key found_key; | 
|  | 4356 | struct btrfs_path *path; | 
|  | 4357 | struct extent_buffer *eb; | 
|  | 4358 | int slot; | 
|  | 4359 | u64 start_ctransid; | 
|  | 4360 | u64 ctransid; | 
|  | 4361 |  | 
|  | 4362 | path = alloc_path_for_send(); | 
|  | 4363 | if (!path) | 
|  | 4364 | return -ENOMEM; | 
|  | 4365 |  | 
|  | 4366 | spin_lock(&send_root->root_times_lock); | 
|  | 4367 | start_ctransid = btrfs_root_ctransid(&send_root->root_item); | 
|  | 4368 | spin_unlock(&send_root->root_times_lock); | 
|  | 4369 |  | 
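|  |  | /* Start iterating at the subvolume's root directory inode */ | 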
|  | 4370 | key.objectid = BTRFS_FIRST_FREE_OBJECTID; | 
|  | 4371 | key.type = BTRFS_INODE_ITEM_KEY; | 
|  | 4372 | key.offset = 0; | 
|  | 4373 |  | 
|  | 4374 | join_trans: | 
|  | 4375 | /* | 
|  | 4376 | * We need to make sure the transaction does not get committed | 
|  | 4377 | * while we do anything on commit roots. Join a transaction to prevent | 
|  | 4378 | * this. | 
|  | 4379 | */ | 
|  | 4380 | trans = btrfs_join_transaction(send_root); | 
|  | 4381 | if (IS_ERR(trans)) { | 
|  | 4382 | ret = PTR_ERR(trans); | 
|  | 4383 | trans = NULL; | 
|  | 4384 | goto out; | 
|  | 4385 | } | 
|  | 4386 |  | 
|  | 4387 | /* | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 4388 | * Make sure the tree has not changed after re-joining. We detect this | 
|  | 4389 | * by comparing start_ctransid and ctransid. They should always match. | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 4390 | */ | 
|  | 4391 | spin_lock(&send_root->root_times_lock); | 
|  | 4392 | ctransid = btrfs_root_ctransid(&send_root->root_item); | 
|  | 4393 | spin_unlock(&send_root->root_times_lock); | 
|  | 4394 |  | 
|  | 4395 | if (ctransid != start_ctransid) { | 
|  | 4396 | WARN(1, KERN_WARNING "btrfs: the root that you're trying to " | 
|  | 4397 | "send was modified in between. This is " | 
|  | 4398 | "probably a bug.\n"); | 
|  | 4399 | ret = -EIO; | 
|  | 4400 | goto out; | 
|  | 4401 | } | 
|  | 4402 |  | 
|  | 4403 | ret = btrfs_search_slot_for_read(send_root, &key, path, 1, 0); | 
|  | 4404 | if (ret < 0) | 
|  | 4405 | goto out; | 
|  | 4406 | if (ret) | 
|  | 4407 | goto out_finish; | 
|  | 4408 |  | 
|  | 4409 | while (1) { | 
|  | 4410 | /* | 
|  | 4411 | * When someone wants to commit while we iterate, end the | 
|  | 4412 | * joined transaction and rejoin. | 
|  | 4413 | */ | 
|  | 4414 | if (btrfs_should_end_transaction(trans, send_root)) { | 
|  | 4415 | ret = btrfs_end_transaction(trans, send_root); | 
|  | 4416 | trans = NULL; | 
|  | 4417 | if (ret < 0) | 
|  | 4418 | goto out; | 
|  | 4419 | btrfs_release_path(path); | 
|  | 4420 | goto join_trans; | 
|  | 4421 | } | 
|  | 4422 |  | 
|  | 4423 | eb = path->nodes[0]; | 
|  | 4424 | slot = path->slots[0]; | 
|  | 4425 | btrfs_item_key_to_cpu(eb, &found_key, slot); | 
|  | 4426 |  | 
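|  |  | /* Without a parent root, every item is reported as newly added */ | 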
|  | 4427 | ret = changed_cb(send_root, NULL, path, NULL, | 
|  | 4428 | &found_key, BTRFS_COMPARE_TREE_NEW, sctx); | 
|  | 4429 | if (ret < 0) | 
|  | 4430 | goto out; | 
|  | 4431 |  | 
|  | 4432 | key.objectid = found_key.objectid; | 
|  | 4433 | key.type = found_key.type; | 
|  | 4434 | key.offset = found_key.offset + 1; | 
|  | 4435 |  | 
|  | 4436 | ret = btrfs_next_item(send_root, path); | 
|  | 4437 | if (ret < 0) | 
|  | 4438 | goto out; | 
|  | 4439 | if (ret) { | 
|  | 4440 | ret = 0; | 
|  | 4441 | break; | 
|  | 4442 | } | 
|  | 4443 | } | 
|  | 4444 |  | 
|  | 4445 | out_finish: | 
|  | 4446 | ret = finish_inode_if_needed(sctx, 1); | 
|  | 4447 |  | 
|  | 4448 | out: | 
|  | 4449 | btrfs_free_path(path); | 
|  | 4450 | if (trans) { | 
|  | 4451 | if (!ret) | 
|  | 4452 | ret = btrfs_end_transaction(trans, send_root); | 
|  | 4453 | else | 
|  | 4454 | btrfs_end_transaction(trans, send_root); | 
|  | 4455 | } | 
|  | 4456 | return ret; | 
|  | 4457 | } | 
|  | 4458 |  | 
|  | 4459 | static int send_subvol(struct send_ctx *sctx) | 
|  | 4460 | { | 
|  | 4461 | int ret; | 
|  | 4462 |  | 
|  | 4463 | ret = send_header(sctx); | 
|  | 4464 | if (ret < 0) | 
|  | 4465 | goto out; | 
|  | 4466 |  | 
|  | 4467 | ret = send_subvol_begin(sctx); | 
|  | 4468 | if (ret < 0) | 
|  | 4469 | goto out; | 
|  | 4470 |  | 
|  | 4471 | if (sctx->parent_root) { | 
|  | 4472 | ret = btrfs_compare_trees(sctx->send_root, sctx->parent_root, | 
|  | 4473 | changed_cb, sctx); | 
|  | 4474 | if (ret < 0) | 
|  | 4475 | goto out; | 
|  | 4476 | ret = finish_inode_if_needed(sctx, 1); | 
|  | 4477 | if (ret < 0) | 
|  | 4478 | goto out; | 
|  | 4479 | } else { | 
|  | 4480 | ret = full_send_tree(sctx); | 
|  | 4481 | if (ret < 0) | 
|  | 4482 | goto out; | 
|  | 4483 | } | 
|  | 4484 |  | 
|  | 4485 | out: | 
|  | 4486 | if (!ret) | 
|  | 4487 | ret = close_cur_inode_file(sctx); | 
|  | 4488 | else | 
|  | 4489 | close_cur_inode_file(sctx); | 
|  | 4490 |  | 
|  | 4491 | free_recorded_refs(sctx); | 
|  | 4492 | return ret; | 
|  | 4493 | } | 
|  | 4494 |  | 
|  | 4495 | long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_) | 
|  | 4496 | { | 
|  | 4497 | int ret = 0; | 
|  | 4498 | struct btrfs_root *send_root; | 
|  | 4499 | struct btrfs_root *clone_root; | 
|  | 4500 | struct btrfs_fs_info *fs_info; | 
|  | 4501 | struct btrfs_ioctl_send_args *arg = NULL; | 
|  | 4502 | struct btrfs_key key; | 
|  | 4503 | struct file *filp = NULL; | 
|  | 4504 | struct send_ctx *sctx = NULL; | 
|  | 4505 | u32 i; | 
|  | 4506 | u64 *clone_sources_tmp = NULL; | 
|  | 4507 |  | 
|  | 4508 | if (!capable(CAP_SYS_ADMIN)) | 
|  | 4509 | return -EPERM; | 
|  | 4510 |  | 
|  | 4511 | send_root = BTRFS_I(fdentry(mnt_file)->d_inode)->root; | 
|  | 4512 | fs_info = send_root->fs_info; | 
|  | 4513 |  | 
|  | 4514 | arg = memdup_user(arg_, sizeof(*arg)); | 
|  | 4515 | if (IS_ERR(arg)) { | 
|  | 4516 | ret = PTR_ERR(arg); | 
|  | 4517 | arg = NULL; | 
|  | 4518 | goto out; | 
|  | 4519 | } | 
|  | 4520 |  | 
|  | 4521 | if (!access_ok(VERIFY_READ, arg->clone_sources, | 
|  | 4522 | sizeof(*arg->clone_sources) * | 
|  | 4523 | arg->clone_sources_count)) { | 
|  | 4524 | ret = -EFAULT; | 
|  | 4525 | goto out; | 
|  | 4526 | } | 
|  | 4527 |  | 
|  | 4528 | sctx = kzalloc(sizeof(struct send_ctx), GFP_NOFS); | 
|  | 4529 | if (!sctx) { | 
|  | 4530 | ret = -ENOMEM; | 
|  | 4531 | goto out; | 
|  | 4532 | } | 
|  | 4533 |  | 
|  | 4534 | INIT_LIST_HEAD(&sctx->new_refs); | 
|  | 4535 | INIT_LIST_HEAD(&sctx->deleted_refs); | 
|  | 4536 | INIT_RADIX_TREE(&sctx->name_cache, GFP_NOFS); | 
|  | 4537 | INIT_LIST_HEAD(&sctx->name_cache_list); | 
|  | 4538 |  | 
|  | 4539 | sctx->send_filp = fget(arg->send_fd); | 
|  | 4540 | if (!sctx->send_filp) { | 
|  | 4541 | ret = -EBADF; | 
|  | 4542 | goto out; | 
|  | 4543 | } | 
|  | 4544 |  | 
|  | 4545 | sctx->mnt = mnt_file->f_path.mnt; | 
|  | 4546 |  | 
|  | 4547 | sctx->send_root = send_root; | 
|  | 4548 | sctx->clone_roots_cnt = arg->clone_sources_count; | 
|  | 4549 |  | 
|  | 4550 | sctx->send_max_size = BTRFS_SEND_BUF_SIZE; | 
|  | 4551 | sctx->send_buf = vmalloc(sctx->send_max_size); | 
|  | 4552 | if (!sctx->send_buf) { | 
|  | 4553 | ret = -ENOMEM; | 
|  | 4554 | goto out; | 
|  | 4555 | } | 
|  | 4556 |  | 
|  | 4557 | sctx->read_buf = vmalloc(BTRFS_SEND_READ_SIZE); | 
|  | 4558 | if (!sctx->read_buf) { | 
|  | 4559 | ret = -ENOMEM; | 
|  | 4560 | goto out; | 
|  | 4561 | } | 
|  | 4562 |  | 
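|  |  | /* One extra slot so that send_root itself can be added as a clone source below */ | 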
|  | 4563 | sctx->clone_roots = vzalloc(sizeof(struct clone_root) * | 
|  | 4564 | (arg->clone_sources_count + 1)); | 
|  | 4565 | if (!sctx->clone_roots) { | 
|  | 4566 | ret = -ENOMEM; | 
|  | 4567 | goto out; | 
|  | 4568 | } | 
|  | 4569 |  | 
|  | 4570 | if (arg->clone_sources_count) { | 
|  | 4571 | clone_sources_tmp = vmalloc(arg->clone_sources_count * | 
|  | 4572 | sizeof(*arg->clone_sources)); | 
|  | 4573 | if (!clone_sources_tmp) { | 
|  | 4574 | ret = -ENOMEM; | 
|  | 4575 | goto out; | 
|  | 4576 | } | 
|  | 4577 |  | 
|  | 4578 | ret = copy_from_user(clone_sources_tmp, arg->clone_sources, | 
|  | 4579 | arg->clone_sources_count * | 
|  | 4580 | sizeof(*arg->clone_sources)); | 
|  | 4581 | if (ret) { | 
|  | 4582 | ret = -EFAULT; | 
|  | 4583 | goto out; | 
|  | 4584 | } | 
|  | 4585 |  | 
|  | 4586 | for (i = 0; i < arg->clone_sources_count; i++) { | 
|  | 4587 | key.objectid = clone_sources_tmp[i]; | 
|  | 4588 | key.type = BTRFS_ROOT_ITEM_KEY; | 
|  | 4589 | key.offset = (u64)-1; | 
|  | 4590 | clone_root = btrfs_read_fs_root_no_name(fs_info, &key); | 
|  | 4591 | if (!clone_root) { | 
|  | 4592 | ret = -EINVAL; | 
|  | 4593 | goto out; | 
|  | 4594 | } | 
|  | 4595 | if (IS_ERR(clone_root)) { | 
|  | 4596 | ret = PTR_ERR(clone_root); | 
|  | 4597 | goto out; | 
|  | 4598 | } | 
|  | 4599 | sctx->clone_roots[i].root = clone_root; | 
|  | 4600 | } | 
|  | 4601 | vfree(clone_sources_tmp); | 
|  | 4602 | clone_sources_tmp = NULL; | 
|  | 4603 | } | 
|  | 4604 |  | 
|  | 4605 | if (arg->parent_root) { | 
|  | 4606 | key.objectid = arg->parent_root; | 
|  | 4607 | key.type = BTRFS_ROOT_ITEM_KEY; | 
|  | 4608 | key.offset = (u64)-1; | 
|  | 4609 | sctx->parent_root = btrfs_read_fs_root_no_name(fs_info, &key); | 
|  | 4610 | if (!sctx->parent_root) { | 
|  | 4611 | ret = -EINVAL; | 
|  | 4612 | goto out; | 
|  | 4613 | } | 
|  |  | if (IS_ERR(sctx->parent_root)) { | 
|  |  | ret = PTR_ERR(sctx->parent_root); | 
|  |  | goto out; | 
|  |  | } | 
|  | 4614 | } | 
|  | 4615 |  | 
|  | 4616 | /* | 
|  | 4617 | * Clones from send_root are allowed, but only if the clone source | 
|  | 4618 | * is behind the current send position. This is checked while searching | 
|  | 4619 | * for possible clone sources. | 
|  | 4620 | */ | 
|  | 4621 | sctx->clone_roots[sctx->clone_roots_cnt++].root = sctx->send_root; | 
|  | 4622 |  | 
|  | 4623 | /* We do a bsearch later */ | 
|  | 4624 | sort(sctx->clone_roots, sctx->clone_roots_cnt, | 
|  | 4625 | sizeof(*sctx->clone_roots), __clone_root_cmp_sort, | 
|  | 4626 | NULL); | 
|  | 4627 |  | 
|  | 4628 | ret = send_subvol(sctx); | 
|  | 4629 | if (ret < 0) | 
|  | 4630 | goto out; | 
|  | 4631 |  | 
|  | 4632 | ret = begin_cmd(sctx, BTRFS_SEND_C_END); | 
|  | 4633 | if (ret < 0) | 
|  | 4634 | goto out; | 
|  | 4635 | ret = send_cmd(sctx); | 
|  | 4636 | if (ret < 0) | 
|  | 4637 | goto out; | 
|  | 4638 |  | 
|  | 4639 | out: | 
|  | 4640 | if (filp) | 
|  | 4641 | fput(filp); | 
|  | 4642 | kfree(arg); | 
|  | 4643 | vfree(clone_sources_tmp); | 
|  | 4644 |  | 
|  | 4645 | if (sctx) { | 
|  | 4646 | if (sctx->send_filp) | 
|  | 4647 | fput(sctx->send_filp); | 
|  | 4648 |  | 
|  | 4649 | vfree(sctx->clone_roots); | 
|  | 4650 | vfree(sctx->send_buf); | 
|  | 4651 | vfree(sctx->read_buf); | 
|  | 4652 |  | 
|  | 4653 | name_cache_free(sctx); | 
|  | 4654 |  | 
|  | 4655 | kfree(sctx); | 
|  | 4656 | } | 
|  | 4657 |  | 
|  | 4658 | return ret; | 
|  | 4659 | } |
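|  |  |  | 
|  |  | /* | 
|  |  | * Illustrative only, not part of this file: a minimal userspace sketch of how | 
|  |  | * the ioctl implemented above is typically driven. It assumes the uapi | 
|  |  | * definitions of struct btrfs_ioctl_send_args and BTRFS_IOC_SEND from the | 
|  |  | * btrfs ioctl header of this era; only the fields the handler above actually | 
|  |  | * reads (send_fd, clone_sources, clone_sources_count, parent_root) are used, | 
|  |  | * so treat this as a sketch under those assumptions, not a verified ABI dump. | 
|  |  | */ | 
|  |  | #if 0 | 
|  |  | #include <fcntl.h> | 
|  |  | #include <string.h> | 
|  |  | #include <unistd.h> | 
|  |  | #include <sys/ioctl.h> | 
|  |  | #include <btrfs/ioctl.h>	/* assumed location of the send ioctl definitions */ | 
|  |  |  | 
|  |  | /* Write a full (non-incremental) send stream of subvol_path to out_fd. */ | 
|  |  | static int send_subvol_to_fd(const char *subvol_path, int out_fd) | 
|  |  | { | 
|  |  | struct btrfs_ioctl_send_args args; | 
|  |  | int subvol_fd, ret; | 
|  |  |  | 
|  |  | subvol_fd = open(subvol_path, O_RDONLY);	/* read-only snapshot to send */ | 
|  |  | if (subvol_fd < 0) | 
|  |  | return -1; | 
|  |  |  | 
|  |  | memset(&args, 0, sizeof(args)); | 
|  |  | args.send_fd = out_fd;	/* the stream ends up in sctx->send_filp above */ | 
|  |  | args.clone_sources = NULL;	/* no extra clone sources */ | 
|  |  | args.clone_sources_count = 0; | 
|  |  | args.parent_root = 0;	/* 0: full send, no parent snapshot */ | 
|  |  |  | 
|  |  | ret = ioctl(subvol_fd, BTRFS_IOC_SEND, &args); | 
|  |  | close(subvol_fd); | 
|  |  | return ret; | 
|  |  | } | 
|  |  | #endif | 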