2 * Copyright (C) 2012 Alexander Block. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
19 #include <linux/bsearch.h>
21 #include <linux/file.h>
22 #include <linux/sort.h>
23 #include <linux/mount.h>
24 #include <linux/xattr.h>
25 #include <linux/posix_acl_xattr.h>
26 #include <linux/radix-tree.h>
27 #include <linux/vmalloc.h>
28 #include <linux/string.h>
29 #include <linux/compat.h>
30 #include <linux/crc32c.h>
36 #include "btrfs_inode.h"
37 #include "transaction.h"
38 #include "compression.h"
41 * A fs_path is a helper to dynamically build path names with unknown size.
42 * It reallocates the internal buffer on demand.
43 * It allows fast adding of path elements on the right side (normal path) and
44 * fast adding to the left side (reversed path). A reversed path can also be
45 * unreversed if needed.
54 unsigned short buf_len:15;
55 unsigned short reversed:1;
59 * Average path length does not exceed 200 bytes, so we'll have
60 * better packing in the slab and a higher chance to satisfy
61 * an allocation later during send.
66 #define FS_PATH_INLINE_SIZE \
67 (sizeof(struct fs_path) - offsetof(struct fs_path, inline_buf))
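
/*
 * A rough usage sketch (added for illustration, not part of the original
 * code): with a reversed fs_path each added component is prepended in front
 * of p->start, so walking an inode's ancestors from leaf to root builds the
 * final path directly:
 *
 *	struct fs_path *p = fs_path_alloc_reversed();
 *	fs_path_add(p, "file", 4);	// buffer holds "file"
 *	fs_path_add(p, "dir", 3);	// buffer holds "dir/file"
 *	fs_path_add(p, "top", 3);	// buffer holds "top/dir/file"
 *	fs_path_unreverse(p);		// p->start now points at "top/dir/file"
 *
 * The separator handling lives in fs_path_prepare_for_add() further below.
 */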
70 /* reused for each extent */
72 struct btrfs_root *root;
79 #define SEND_CTX_MAX_NAME_CACHE_SIZE 128
80 #define SEND_CTX_NAME_CACHE_CLEAN_SIZE (SEND_CTX_MAX_NAME_CACHE_SIZE * 2)
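
/*
 * The name cache is trimmed lazily: name_cache_clean_unused() does nothing
 * until the cache has grown to SEND_CTX_NAME_CACHE_CLEAN_SIZE entries and
 * then evicts the least recently used entries until only
 * SEND_CTX_MAX_NAME_CACHE_SIZE remain.
 */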
83 struct file *send_filp;
89 u64 cmd_send_size[BTRFS_SEND_C_MAX + 1];
90 u64 flags; /* 'flags' member of btrfs_ioctl_send_args is u64 */
92 struct btrfs_root *send_root;
93 struct btrfs_root *parent_root;
94 struct clone_root *clone_roots;
97 /* current state of the compare_tree call */
98 struct btrfs_path *left_path;
99 struct btrfs_path *right_path;
100 struct btrfs_key *cmp_key;
103 * Info about the currently processed inode. In case of deleted inodes,
104 * these are the values from the deleted inode.
109 int cur_inode_new_gen;
110 int cur_inode_deleted;
114 u64 cur_inode_last_extent;
115 u64 cur_inode_next_write_offset;
119 struct list_head new_refs;
120 struct list_head deleted_refs;
122 struct radix_tree_root name_cache;
123 struct list_head name_cache_list;
126 struct file_ra_state ra;
131 * We process inodes in increasing order, so if before an
132 * incremental send we reverse the parent/child relationship of
133 * directories such that a directory with a lower inode number was
134 * the parent of a directory with a higher inode number, and the one
135 * becoming the new parent got renamed too, we can't rename/move the
136 * directory with lower inode number when we finish processing it - we
137 * must process the directory with higher inode number first, then
138 * rename/move it and then rename/move the directory with lower inode
139 * number. Example follows.
141 * Tree state when the first send was performed:
153 * Tree state when the second (incremental) send is performed:
162 * The sequence of steps that led to the second state was:
164 * mv /a/b/c/d /a/b/c2/d2
165 * mv /a/b/c /a/b/c2/d2/cc
167 * "c" has lower inode number, but we can't move it (2nd mv operation)
168 * before we move "d", which has higher inode number.
170 * So we just memorize which move/rename operations must be performed
171 * later when their respective parent is processed and moved/renamed.
174 /* Indexed by parent directory inode number. */
175 struct rb_root pending_dir_moves;
178 * Reverse index, indexed by the inode number of a directory that
179 * is waiting for the move/rename of its immediate parent before its
180 * own move/rename can be performed.
182 struct rb_root waiting_dir_moves;
185 * A directory that is going to be rm'ed might have a child directory
186 * which is in the pending directory moves index above. In this case,
187 * the directory can only be removed after the move/rename of its child
188 * is performed. Example:
208 * Sequence of steps that led to the send snapshot:
209 * rm -f /a/b/c/foo.txt
211 * mv /a/b/c/x /a/b/YY
214 * When the child is processed, its move/rename is delayed until its
215 * parent is processed (as explained above), but all other operations
216 * like updating utimes, chown, chgrp, etc., are performed and the paths
217 * that it uses for those operations must use the orphanized name of
218 * its parent (the directory we're going to rm later), so we need to
219 * memorize that name.
221 * Indexed by the inode number of the directory to be deleted.
223 struct rb_root orphan_dirs;
226 struct pending_dir_move {
228 struct list_head list;
232 struct list_head update_refs;
235 struct waiting_dir_move {
239 * There might be some directory that could not be removed because it
240 * was waiting for this directory inode to be moved first. Therefore
241 * after this directory is moved, we can try to rmdir the inode rmdir_ino.
247 struct orphan_dir_info {
253 struct name_cache_entry {
254 struct list_head list;
256 * radix_tree has only 32bit entries but we need to handle 64bit inums.
257 * We use the lower 32bit of the 64bit inum to store it in the tree. If
258 * more than one inum would fall into the same entry, we use radix_list
259 * to store the additional entries. radix_list is also used to store
260 * entries where two entries have the same inum but different
263 struct list_head radix_list;
269 int need_later_update;
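
	/*
	 * Concrete example of the clash handling described above: on a 32bit
	 * kernel the inums 0x100000005 and 0x200000005 both truncate to radix
	 * tree index 0x5, so both entries end up linked (via radix_list) into
	 * the same list head stored at that index, and name_cache_search()
	 * walks that list comparing the full ino/gen pair.
	 */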
275 static void inconsistent_snapshot_error(struct send_ctx *sctx,
276 enum btrfs_compare_tree_result result,
279 const char *result_string;
282 case BTRFS_COMPARE_TREE_NEW:
283 result_string = "new";
285 case BTRFS_COMPARE_TREE_DELETED:
286 result_string = "deleted";
288 case BTRFS_COMPARE_TREE_CHANGED:
289 result_string = "updated";
291 case BTRFS_COMPARE_TREE_SAME:
293 result_string = "unchanged";
297 result_string = "unexpected";
300 btrfs_err(sctx->send_root->fs_info,
301 "Send: inconsistent snapshot, found %s %s for inode %llu without updated inode item, send root is %llu, parent root is %llu",
302 result_string, what, sctx->cmp_key->objectid,
303 sctx->send_root->root_key.objectid,
305 sctx->parent_root->root_key.objectid : 0));
308 static int is_waiting_for_move(struct send_ctx *sctx, u64 ino);
310 static struct waiting_dir_move *
311 get_waiting_dir_move(struct send_ctx *sctx, u64 ino);
313 static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino);
315 static int need_send_hole(struct send_ctx *sctx)
317 return (sctx->parent_root && !sctx->cur_inode_new &&
318 !sctx->cur_inode_new_gen && !sctx->cur_inode_deleted &&
319 S_ISREG(sctx->cur_inode_mode));
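
/*
 * In other words, holes only have to be sent explicitly during an incremental
 * send (parent_root is set) for regular files that already existed in the
 * parent snapshot; files that are new (or effectively new because of a
 * changed generation) start out empty on the receiving side anyway.
 */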
322 static void fs_path_reset(struct fs_path *p)
325 p->start = p->buf + p->buf_len - 1;
335 static struct fs_path *fs_path_alloc(void)
339 p = kmalloc(sizeof(*p), GFP_KERNEL);
343 p->buf = p->inline_buf;
344 p->buf_len = FS_PATH_INLINE_SIZE;
349 static struct fs_path *fs_path_alloc_reversed(void)
361 static void fs_path_free(struct fs_path *p)
365 if (p->buf != p->inline_buf)
370 static int fs_path_len(struct fs_path *p)
372 return p->end - p->start;
375 static int fs_path_ensure_buf(struct fs_path *p, int len)
383 if (p->buf_len >= len)
386 if (len > PATH_MAX) {
391 path_len = p->end - p->start;
392 old_buf_len = p->buf_len;
395 * First time the inline_buf does not suffice
397 if (p->buf == p->inline_buf) {
398 tmp_buf = kmalloc(len, GFP_KERNEL);
400 memcpy(tmp_buf, p->buf, old_buf_len);
402 tmp_buf = krealloc(p->buf, len, GFP_KERNEL);
408 * The real size of the buffer is bigger; this will let the fast path
409 * happen most of the time.
411 p->buf_len = ksize(p->buf);
414 tmp_buf = p->buf + old_buf_len - path_len - 1;
415 p->end = p->buf + p->buf_len - 1;
416 p->start = p->end - path_len;
417 memmove(p->start, tmp_buf, path_len + 1);
420 p->end = p->start + path_len;
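
/*
 * Note on the relocation above: for a reversed path the string is kept glued
 * to the end of the buffer, so after growing the buffer the old contents are
 * memmove'd so that they end at the new buffer end again; for a normal path
 * only p->start and p->end need to be rebased onto the (possibly moved)
 * buffer.
 */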
425 static int fs_path_prepare_for_add(struct fs_path *p, int name_len,
431 new_len = p->end - p->start + name_len;
432 if (p->start != p->end)
434 ret = fs_path_ensure_buf(p, new_len);
439 if (p->start != p->end)
441 p->start -= name_len;
442 *prepared = p->start;
444 if (p->start != p->end)
455 static int fs_path_add(struct fs_path *p, const char *name, int name_len)
460 ret = fs_path_prepare_for_add(p, name_len, &prepared);
463 memcpy(prepared, name, name_len);
469 static int fs_path_add_path(struct fs_path *p, struct fs_path *p2)
474 ret = fs_path_prepare_for_add(p, p2->end - p2->start, &prepared);
477 memcpy(prepared, p2->start, p2->end - p2->start);
483 static int fs_path_add_from_extent_buffer(struct fs_path *p,
484 struct extent_buffer *eb,
485 unsigned long off, int len)
490 ret = fs_path_prepare_for_add(p, len, &prepared);
494 read_extent_buffer(eb, prepared, off, len);
500 static int fs_path_copy(struct fs_path *p, struct fs_path *from)
504 p->reversed = from->reversed;
507 ret = fs_path_add_path(p, from);
513 static void fs_path_unreverse(struct fs_path *p)
522 len = p->end - p->start;
524 p->end = p->start + len;
525 memmove(p->start, tmp, len + 1);
529 static struct btrfs_path *alloc_path_for_send(void)
531 struct btrfs_path *path;
533 path = btrfs_alloc_path();
536 path->search_commit_root = 1;
537 path->skip_locking = 1;
538 path->need_commit_sem = 1;
542 static int write_buf(struct file *filp, const void *buf, u32 len, loff_t *off)
548 ret = kernel_write(filp, buf + pos, len - pos, off);
549 /* TODO handle that correctly */
550 /*if (ret == -ERESTARTSYS) {
564 static int tlv_put(struct send_ctx *sctx, u16 attr, const void *data, int len)
566 struct btrfs_tlv_header *hdr;
567 int total_len = sizeof(*hdr) + len;
568 int left = sctx->send_max_size - sctx->send_size;
570 if (unlikely(left < total_len))
573 hdr = (struct btrfs_tlv_header *) (sctx->send_buf + sctx->send_size);
574 hdr->tlv_type = cpu_to_le16(attr);
575 hdr->tlv_len = cpu_to_le16(len);
576 memcpy(hdr + 1, data, len);
577 sctx->send_size += total_len;
582 #define TLV_PUT_DEFINE_INT(bits) \
583 static int tlv_put_u##bits(struct send_ctx *sctx, \
584 u##bits attr, u##bits value) \
586 __le##bits __tmp = cpu_to_le##bits(value); \
587 return tlv_put(sctx, attr, &__tmp, sizeof(__tmp)); \
590 TLV_PUT_DEFINE_INT(64)
592 static int tlv_put_string(struct send_ctx *sctx, u16 attr,
593 const char *str, int len)
597 return tlv_put(sctx, attr, str, len);
600 static int tlv_put_uuid(struct send_ctx *sctx, u16 attr,
603 return tlv_put(sctx, attr, uuid, BTRFS_UUID_SIZE);
606 static int tlv_put_btrfs_timespec(struct send_ctx *sctx, u16 attr,
607 struct extent_buffer *eb,
608 struct btrfs_timespec *ts)
610 struct btrfs_timespec bts;
611 read_extent_buffer(eb, &bts, (unsigned long)ts, sizeof(bts));
612 return tlv_put(sctx, attr, &bts, sizeof(bts));
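
/*
 * Rough sketch of the resulting encoding (example added for illustration):
 * every attribute appended to sctx->send_buf is a little-endian TLV, e.g.
 * tlv_put_u64(sctx, BTRFS_SEND_A_SIZE, 4096) emits
 *
 *	struct btrfs_tlv_header { .tlv_type = BTRFS_SEND_A_SIZE, .tlv_len = 8 }
 *
 * immediately followed by the eight data bytes 00 10 00 00 00 00 00 00, and
 * advances sctx->send_size by sizeof(struct btrfs_tlv_header) + 8.
 */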
616 #define TLV_PUT(sctx, attrtype, attrlen, data) \
618 ret = tlv_put(sctx, attrtype, attrlen, data); \
620 goto tlv_put_failure; \
623 #define TLV_PUT_INT(sctx, attrtype, bits, value) \
625 ret = tlv_put_u##bits(sctx, attrtype, value); \
627 goto tlv_put_failure; \
630 #define TLV_PUT_U8(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 8, data)
631 #define TLV_PUT_U16(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 16, data)
632 #define TLV_PUT_U32(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 32, data)
633 #define TLV_PUT_U64(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 64, data)
634 #define TLV_PUT_STRING(sctx, attrtype, str, len) \
636 ret = tlv_put_string(sctx, attrtype, str, len); \
638 goto tlv_put_failure; \
640 #define TLV_PUT_PATH(sctx, attrtype, p) \
642 ret = tlv_put_string(sctx, attrtype, p->start, \
643 p->end - p->start); \
645 goto tlv_put_failure; \
647 #define TLV_PUT_UUID(sctx, attrtype, uuid) \
649 ret = tlv_put_uuid(sctx, attrtype, uuid); \
651 goto tlv_put_failure; \
653 #define TLV_PUT_BTRFS_TIMESPEC(sctx, attrtype, eb, ts) \
655 ret = tlv_put_btrfs_timespec(sctx, attrtype, eb, ts); \
657 goto tlv_put_failure; \
660 static int send_header(struct send_ctx *sctx)
662 struct btrfs_stream_header hdr;
664 strcpy(hdr.magic, BTRFS_SEND_STREAM_MAGIC);
665 hdr.version = cpu_to_le32(BTRFS_SEND_STREAM_VERSION);
667 return write_buf(sctx->send_filp, &hdr, sizeof(hdr),
672 * For each command/item we want to send to userspace, we call this function.
674 static int begin_cmd(struct send_ctx *sctx, int cmd)
676 struct btrfs_cmd_header *hdr;
678 if (WARN_ON(!sctx->send_buf))
681 BUG_ON(sctx->send_size);
683 sctx->send_size += sizeof(*hdr);
684 hdr = (struct btrfs_cmd_header *)sctx->send_buf;
685 hdr->cmd = cpu_to_le16(cmd);
690 static int send_cmd(struct send_ctx *sctx)
693 struct btrfs_cmd_header *hdr;
696 hdr = (struct btrfs_cmd_header *)sctx->send_buf;
697 hdr->len = cpu_to_le32(sctx->send_size - sizeof(*hdr));
700 crc = crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size);
701 hdr->crc = cpu_to_le32(crc);
703 ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size,
706 sctx->total_send_size += sctx->send_size;
707 sctx->cmd_send_size[le16_to_cpu(hdr->cmd)] += sctx->send_size;
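
/*
 * Summary of the framing implemented above (added for clarity): the stream
 * starts with the btrfs_stream_header written by send_header(). Each command
 * then consists of a struct btrfs_cmd_header reserved by begin_cmd(),
 * followed by the attributes appended via the TLV_PUT_* macros; send_cmd()
 * fills in the payload length, stores a crc32c computed over the whole
 * command and hands the buffer to write_buf().
 */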
714 * Sends a move instruction to user space
716 static int send_rename(struct send_ctx *sctx,
717 struct fs_path *from, struct fs_path *to)
719 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
722 btrfs_debug(fs_info, "send_rename %s -> %s", from->start, to->start);
724 ret = begin_cmd(sctx, BTRFS_SEND_C_RENAME);
728 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, from);
729 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_TO, to);
731 ret = send_cmd(sctx);
739 * Sends a link instruction to user space
741 static int send_link(struct send_ctx *sctx,
742 struct fs_path *path, struct fs_path *lnk)
744 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
747 btrfs_debug(fs_info, "send_link %s -> %s", path->start, lnk->start);
749 ret = begin_cmd(sctx, BTRFS_SEND_C_LINK);
753 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
754 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, lnk);
756 ret = send_cmd(sctx);
764 * Sends an unlink instruction to user space
766 static int send_unlink(struct send_ctx *sctx, struct fs_path *path)
768 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
771 btrfs_debug(fs_info, "send_unlink %s", path->start);
773 ret = begin_cmd(sctx, BTRFS_SEND_C_UNLINK);
777 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
779 ret = send_cmd(sctx);
787 * Sends a rmdir instruction to user space
789 static int send_rmdir(struct send_ctx *sctx, struct fs_path *path)
791 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
794 btrfs_debug(fs_info, "send_rmdir %s", path->start);
796 ret = begin_cmd(sctx, BTRFS_SEND_C_RMDIR);
800 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
802 ret = send_cmd(sctx);
810 * Helper function to retrieve some fields from an inode item.
812 static int __get_inode_info(struct btrfs_root *root, struct btrfs_path *path,
813 u64 ino, u64 *size, u64 *gen, u64 *mode, u64 *uid,
817 struct btrfs_inode_item *ii;
818 struct btrfs_key key;
821 key.type = BTRFS_INODE_ITEM_KEY;
823 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
830 ii = btrfs_item_ptr(path->nodes[0], path->slots[0],
831 struct btrfs_inode_item);
833 *size = btrfs_inode_size(path->nodes[0], ii);
835 *gen = btrfs_inode_generation(path->nodes[0], ii);
837 *mode = btrfs_inode_mode(path->nodes[0], ii);
839 *uid = btrfs_inode_uid(path->nodes[0], ii);
841 *gid = btrfs_inode_gid(path->nodes[0], ii);
843 *rdev = btrfs_inode_rdev(path->nodes[0], ii);
848 static int get_inode_info(struct btrfs_root *root,
849 u64 ino, u64 *size, u64 *gen,
850 u64 *mode, u64 *uid, u64 *gid,
853 struct btrfs_path *path;
856 path = alloc_path_for_send();
859 ret = __get_inode_info(root, path, ino, size, gen, mode, uid, gid,
861 btrfs_free_path(path);
865 typedef int (*iterate_inode_ref_t)(int num, u64 dir, int index,
870 * Helper function to iterate the entries in ONE btrfs_inode_ref or
871 * btrfs_inode_extref.
872 * The iterate callback may return a non-zero value to stop iteration. This can
873 * be a negative value for error codes or 1 to simply stop it.
875 * path must point to the INODE_REF or INODE_EXTREF when called.
877 static int iterate_inode_ref(struct btrfs_root *root, struct btrfs_path *path,
878 struct btrfs_key *found_key, int resolve,
879 iterate_inode_ref_t iterate, void *ctx)
881 struct extent_buffer *eb = path->nodes[0];
882 struct btrfs_item *item;
883 struct btrfs_inode_ref *iref;
884 struct btrfs_inode_extref *extref;
885 struct btrfs_path *tmp_path;
889 int slot = path->slots[0];
896 unsigned long name_off;
897 unsigned long elem_size;
900 p = fs_path_alloc_reversed();
904 tmp_path = alloc_path_for_send();
911 if (found_key->type == BTRFS_INODE_REF_KEY) {
912 ptr = (unsigned long)btrfs_item_ptr(eb, slot,
913 struct btrfs_inode_ref);
914 item = btrfs_item_nr(slot);
915 total = btrfs_item_size(eb, item);
916 elem_size = sizeof(*iref);
918 ptr = btrfs_item_ptr_offset(eb, slot);
919 total = btrfs_item_size_nr(eb, slot);
920 elem_size = sizeof(*extref);
923 while (cur < total) {
926 if (found_key->type == BTRFS_INODE_REF_KEY) {
927 iref = (struct btrfs_inode_ref *)(ptr + cur);
928 name_len = btrfs_inode_ref_name_len(eb, iref);
929 name_off = (unsigned long)(iref + 1);
930 index = btrfs_inode_ref_index(eb, iref);
931 dir = found_key->offset;
933 extref = (struct btrfs_inode_extref *)(ptr + cur);
934 name_len = btrfs_inode_extref_name_len(eb, extref);
935 name_off = (unsigned long)&extref->name;
936 index = btrfs_inode_extref_index(eb, extref);
937 dir = btrfs_inode_extref_parent(eb, extref);
941 start = btrfs_ref_to_path(root, tmp_path, name_len,
945 ret = PTR_ERR(start);
948 if (start < p->buf) {
949 /* overflow, try again with a larger buffer */
950 ret = fs_path_ensure_buf(p,
951 p->buf_len + p->buf - start);
954 start = btrfs_ref_to_path(root, tmp_path,
959 ret = PTR_ERR(start);
962 BUG_ON(start < p->buf);
966 ret = fs_path_add_from_extent_buffer(p, eb, name_off,
972 cur += elem_size + name_len;
973 ret = iterate(num, dir, index, p, ctx);
980 btrfs_free_path(tmp_path);
985 typedef int (*iterate_dir_item_t)(int num, struct btrfs_key *di_key,
986 const char *name, int name_len,
987 const char *data, int data_len,
991 * Helper function to iterate the entries in ONE btrfs_dir_item.
992 * The iterate callback may return a non-zero value to stop iteration. This can
993 * be a negative value for error codes or 1 to simply stop it.
995 * path must point to the dir item when called.
997 static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
998 iterate_dir_item_t iterate, void *ctx)
1001 struct extent_buffer *eb;
1002 struct btrfs_item *item;
1003 struct btrfs_dir_item *di;
1004 struct btrfs_key di_key;
1017 * Start with a small buffer (1 page). If later we end up needing more
1018 * space, which can happen for xattrs on a fs with a leaf size greater
1019 * than the page size, attempt to increase the buffer. Typically xattr
1023 buf = kmalloc(buf_len, GFP_KERNEL);
1029 eb = path->nodes[0];
1030 slot = path->slots[0];
1031 item = btrfs_item_nr(slot);
1032 di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
1035 total = btrfs_item_size(eb, item);
1038 while (cur < total) {
1039 name_len = btrfs_dir_name_len(eb, di);
1040 data_len = btrfs_dir_data_len(eb, di);
1041 type = btrfs_dir_type(eb, di);
1042 btrfs_dir_item_key_to_cpu(eb, di, &di_key);
1044 if (type == BTRFS_FT_XATTR) {
1045 if (name_len > XATTR_NAME_MAX) {
1046 ret = -ENAMETOOLONG;
1049 if (name_len + data_len >
1050 BTRFS_MAX_XATTR_SIZE(root->fs_info)) {
1058 if (name_len + data_len > PATH_MAX) {
1059 ret = -ENAMETOOLONG;
1064 if (name_len + data_len > buf_len) {
1065 buf_len = name_len + data_len;
1066 if (is_vmalloc_addr(buf)) {
1070 char *tmp = krealloc(buf, buf_len,
1071 GFP_KERNEL | __GFP_NOWARN);
1078 buf = kvmalloc(buf_len, GFP_KERNEL);
1086 read_extent_buffer(eb, buf, (unsigned long)(di + 1),
1087 name_len + data_len);
1089 len = sizeof(*di) + name_len + data_len;
1090 di = (struct btrfs_dir_item *)((char *)di + len);
1093 ret = iterate(num, &di_key, buf, name_len, buf + name_len,
1094 data_len, type, ctx);
1110 static int __copy_first_ref(int num, u64 dir, int index,
1111 struct fs_path *p, void *ctx)
1114 struct fs_path *pt = ctx;
1116 ret = fs_path_copy(pt, p);
1120 /* we want the first only */
1125 * Retrieve the first path of an inode. If an inode has more than one
1126 * ref/hardlink, this is ignored.
1128 static int get_inode_path(struct btrfs_root *root,
1129 u64 ino, struct fs_path *path)
1132 struct btrfs_key key, found_key;
1133 struct btrfs_path *p;
1135 p = alloc_path_for_send();
1139 fs_path_reset(path);
1142 key.type = BTRFS_INODE_REF_KEY;
1145 ret = btrfs_search_slot_for_read(root, &key, p, 1, 0);
1152 btrfs_item_key_to_cpu(p->nodes[0], &found_key, p->slots[0]);
1153 if (found_key.objectid != ino ||
1154 (found_key.type != BTRFS_INODE_REF_KEY &&
1155 found_key.type != BTRFS_INODE_EXTREF_KEY)) {
1160 ret = iterate_inode_ref(root, p, &found_key, 1,
1161 __copy_first_ref, path);
1171 struct backref_ctx {
1172 struct send_ctx *sctx;
1174 struct btrfs_path *path;
1175 /* total number of found references */
1179 * Used for clones found in send_root. Clones found behind cur_objectid
1180 * and cur_offset are not considered allowed clones.
1185 /* may be truncated in case it's the last extent in a file */
1188 /* data offset in the file extent item */
1191 /* Just to check for bugs in backref resolving */
1195 static int __clone_root_cmp_bsearch(const void *key, const void *elt)
1197 u64 root = (u64)(uintptr_t)key;
1198 struct clone_root *cr = (struct clone_root *)elt;
1200 if (root < cr->root->objectid)
1202 if (root > cr->root->objectid)
1207 static int __clone_root_cmp_sort(const void *e1, const void *e2)
1209 struct clone_root *cr1 = (struct clone_root *)e1;
1210 struct clone_root *cr2 = (struct clone_root *)e2;
1212 if (cr1->root->objectid < cr2->root->objectid)
1214 if (cr1->root->objectid > cr2->root->objectid)
1220 * Called for every backref that is found for the current extent.
1221 * Results are collected in sctx->clone_roots->ino/offset/found_refs
1223 static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_)
1225 struct backref_ctx *bctx = ctx_;
1226 struct clone_root *found;
1230 /* First check if the root is in the list of accepted clone sources */
1231 found = bsearch((void *)(uintptr_t)root, bctx->sctx->clone_roots,
1232 bctx->sctx->clone_roots_cnt,
1233 sizeof(struct clone_root),
1234 __clone_root_cmp_bsearch);
1238 if (found->root == bctx->sctx->send_root &&
1239 ino == bctx->cur_objectid &&
1240 offset == bctx->cur_offset) {
1241 bctx->found_itself = 1;
1245 * There are inodes that have extents that lie behind their i_size. Don't
1246 * accept clones from these extents.
1248 ret = __get_inode_info(found->root, bctx->path, ino, &i_size, NULL, NULL,
1250 btrfs_release_path(bctx->path);
1254 if (offset + bctx->data_offset + bctx->extent_len > i_size)
1258 * Make sure we don't consider clones from send_root that are
1259 * behind the current inode/offset.
1261 if (found->root == bctx->sctx->send_root) {
1263 * TODO for the moment we don't accept clones from the inode
1264 * that is currently being sent. We may change this when
1265 * BTRFS_IOC_CLONE_RANGE supports cloning from and to the same
1268 if (ino >= bctx->cur_objectid)
1273 found->found_refs++;
1274 if (ino < found->ino) {
1276 found->offset = offset;
1277 } else if (found->ino == ino) {
1279 * Same extent found more than once in the same file.
1281 if (found->offset > offset + bctx->extent_len)
1282 found->offset = offset;
1289 * Given an inode, offset and extent item, it finds a good clone for a clone
1290 * instruction. Returns -ENOENT when none could be found. The function makes
1291 * sure that the returned clone is usable at the point where sending is at the
1292 * moment. This means that no clones are accepted which lie behind the current
1295 * path must point to the extent item when called.
1297 static int find_extent_clone(struct send_ctx *sctx,
1298 struct btrfs_path *path,
1299 u64 ino, u64 data_offset,
1301 struct clone_root **found)
1303 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
1309 u64 extent_item_pos;
1311 struct btrfs_file_extent_item *fi;
1312 struct extent_buffer *eb = path->nodes[0];
1313 struct backref_ctx *backref_ctx = NULL;
1314 struct clone_root *cur_clone_root;
1315 struct btrfs_key found_key;
1316 struct btrfs_path *tmp_path;
1320 tmp_path = alloc_path_for_send();
1324 /* We only use this path under the commit sem */
1325 tmp_path->need_commit_sem = 0;
1327 backref_ctx = kmalloc(sizeof(*backref_ctx), GFP_KERNEL);
1333 backref_ctx->path = tmp_path;
1335 if (data_offset >= ino_size) {
1337 * There may be extents that lie behind the file's size.
1338 * I at least had this in combination with snapshotting while
1339 * writing large files.
1345 fi = btrfs_item_ptr(eb, path->slots[0],
1346 struct btrfs_file_extent_item);
1347 extent_type = btrfs_file_extent_type(eb, fi);
1348 if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1352 compressed = btrfs_file_extent_compression(eb, fi);
1354 num_bytes = btrfs_file_extent_num_bytes(eb, fi);
1355 disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
1356 if (disk_byte == 0) {
1360 logical = disk_byte + btrfs_file_extent_offset(eb, fi);
1362 down_read(&fs_info->commit_root_sem);
1363 ret = extent_from_logical(fs_info, disk_byte, tmp_path,
1364 &found_key, &flags);
1365 up_read(&fs_info->commit_root_sem);
1366 btrfs_release_path(tmp_path);
1370 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1376 * Set up the clone roots.
1378 for (i = 0; i < sctx->clone_roots_cnt; i++) {
1379 cur_clone_root = sctx->clone_roots + i;
1380 cur_clone_root->ino = (u64)-1;
1381 cur_clone_root->offset = 0;
1382 cur_clone_root->found_refs = 0;
1385 backref_ctx->sctx = sctx;
1386 backref_ctx->found = 0;
1387 backref_ctx->cur_objectid = ino;
1388 backref_ctx->cur_offset = data_offset;
1389 backref_ctx->found_itself = 0;
1390 backref_ctx->extent_len = num_bytes;
1392 * For non-compressed extents iterate_extent_inodes() gives us extent
1393 * offsets that already take into account the data offset, but not for
1394 * compressed extents, since the offset is logical and not relative to
1395 * the physical extent locations. We must take this into account to
1396 * avoid sending clone offsets that go beyond the source file's size,
1397 * which would result in the clone ioctl failing with -EINVAL on the
1400 if (compressed == BTRFS_COMPRESS_NONE)
1401 backref_ctx->data_offset = 0;
1403 backref_ctx->data_offset = btrfs_file_extent_offset(eb, fi);
1406 * The last extent of a file may be too large due to page alignment.
1407 * We need to adjust extent_len in this case so that the checks in
1408 * __iterate_backrefs work.
1410 if (data_offset + num_bytes >= ino_size)
1411 backref_ctx->extent_len = ino_size - data_offset;
1414 * Now collect all backrefs.
1416 if (compressed == BTRFS_COMPRESS_NONE)
1417 extent_item_pos = logical - found_key.objectid;
1419 extent_item_pos = 0;
1420 ret = iterate_extent_inodes(fs_info, found_key.objectid,
1421 extent_item_pos, 1, __iterate_backrefs,
1422 backref_ctx, false);
1427 if (!backref_ctx->found_itself) {
1428 /* found a bug in backref code? */
1431 "did not find backref in send_root. inode=%llu, offset=%llu, disk_byte=%llu found extent=%llu",
1432 ino, data_offset, disk_byte, found_key.objectid);
1436 btrfs_debug(fs_info,
1437 "find_extent_clone: data_offset=%llu, ino=%llu, num_bytes=%llu, logical=%llu",
1438 data_offset, ino, num_bytes, logical);
1440 if (!backref_ctx->found)
1441 btrfs_debug(fs_info, "no clones found");
1443 cur_clone_root = NULL;
1444 for (i = 0; i < sctx->clone_roots_cnt; i++) {
1445 if (sctx->clone_roots[i].found_refs) {
1446 if (!cur_clone_root)
1447 cur_clone_root = sctx->clone_roots + i;
1448 else if (sctx->clone_roots[i].root == sctx->send_root)
1449 /* prefer clones from send_root over others */
1450 cur_clone_root = sctx->clone_roots + i;
1455 if (cur_clone_root) {
1456 *found = cur_clone_root;
1463 btrfs_free_path(tmp_path);
1468 static int read_symlink(struct btrfs_root *root,
1470 struct fs_path *dest)
1473 struct btrfs_path *path;
1474 struct btrfs_key key;
1475 struct btrfs_file_extent_item *ei;
1481 path = alloc_path_for_send();
1486 key.type = BTRFS_EXTENT_DATA_KEY;
1488 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1493 * An empty symlink inode. Can happen in rare error paths when
1494 * creating a symlink (transaction committed before the inode
1495 * eviction handler removed the symlink inode items and a crash
1496 * happened in between or the subvol was snapshotted in between).
1497 * Print an informative message to dmesg/syslog so that the user
1498 * can delete the symlink.
1500 btrfs_err(root->fs_info,
1501 "Found empty symlink inode %llu at root %llu",
1502 ino, root->root_key.objectid);
1507 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
1508 struct btrfs_file_extent_item);
1509 type = btrfs_file_extent_type(path->nodes[0], ei);
1510 compression = btrfs_file_extent_compression(path->nodes[0], ei);
1511 BUG_ON(type != BTRFS_FILE_EXTENT_INLINE);
1512 BUG_ON(compression);
1514 off = btrfs_file_extent_inline_start(ei);
1515 len = btrfs_file_extent_inline_len(path->nodes[0], path->slots[0], ei);
1517 ret = fs_path_add_from_extent_buffer(dest, path->nodes[0], off, len);
1520 btrfs_free_path(path);
1525 * Helper function to generate a file name that is unique in the root of
1526 * send_root and parent_root. This is used to generate names for orphan inodes.
1528 static int gen_unique_name(struct send_ctx *sctx,
1530 struct fs_path *dest)
1533 struct btrfs_path *path;
1534 struct btrfs_dir_item *di;
1539 path = alloc_path_for_send();
1544 len = snprintf(tmp, sizeof(tmp), "o%llu-%llu-%llu",
1546 ASSERT(len < sizeof(tmp));
1548 di = btrfs_lookup_dir_item(NULL, sctx->send_root,
1549 path, BTRFS_FIRST_FREE_OBJECTID,
1550 tmp, strlen(tmp), 0);
1551 btrfs_release_path(path);
1557 /* not unique, try again */
1562 if (!sctx->parent_root) {
1568 di = btrfs_lookup_dir_item(NULL, sctx->parent_root,
1569 path, BTRFS_FIRST_FREE_OBJECTID,
1570 tmp, strlen(tmp), 0);
1571 btrfs_release_path(path);
1577 /* not unique, try again */
1585 ret = fs_path_add(dest, tmp, strlen(tmp));
1588 btrfs_free_path(path);
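
/*
 * Example (added for illustration): the generated names have the form
 * "o<ino>-<gen>-<idx>", so for inode 257 with generation 5 the first
 * candidate is "o257-5-0"; if that name already exists in the send root or
 * the parent root, idx is incremented and "o257-5-1" is tried next.
 */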
1593 inode_state_no_change,
1594 inode_state_will_create,
1595 inode_state_did_create,
1596 inode_state_will_delete,
1597 inode_state_did_delete,
1600 static int get_cur_inode_state(struct send_ctx *sctx, u64 ino, u64 gen)
1608 ret = get_inode_info(sctx->send_root, ino, NULL, &left_gen, NULL, NULL,
1610 if (ret < 0 && ret != -ENOENT)
1614 if (!sctx->parent_root) {
1615 right_ret = -ENOENT;
1617 ret = get_inode_info(sctx->parent_root, ino, NULL, &right_gen,
1618 NULL, NULL, NULL, NULL);
1619 if (ret < 0 && ret != -ENOENT)
1624 if (!left_ret && !right_ret) {
1625 if (left_gen == gen && right_gen == gen) {
1626 ret = inode_state_no_change;
1627 } else if (left_gen == gen) {
1628 if (ino < sctx->send_progress)
1629 ret = inode_state_did_create;
1631 ret = inode_state_will_create;
1632 } else if (right_gen == gen) {
1633 if (ino < sctx->send_progress)
1634 ret = inode_state_did_delete;
1636 ret = inode_state_will_delete;
1640 } else if (!left_ret) {
1641 if (left_gen == gen) {
1642 if (ino < sctx->send_progress)
1643 ret = inode_state_did_create;
1645 ret = inode_state_will_create;
1649 } else if (!right_ret) {
1650 if (right_gen == gen) {
1651 if (ino < sctx->send_progress)
1652 ret = inode_state_did_delete;
1654 ret = inode_state_will_delete;
1666 static int is_inode_existent(struct send_ctx *sctx, u64 ino, u64 gen)
1670 if (ino == BTRFS_FIRST_FREE_OBJECTID)
1673 ret = get_cur_inode_state(sctx, ino, gen);
1677 if (ret == inode_state_no_change ||
1678 ret == inode_state_did_create ||
1679 ret == inode_state_will_delete)
1689 * Helper function to look up a dir item in a dir.
1691 static int lookup_dir_item_inode(struct btrfs_root *root,
1692 u64 dir, const char *name, int name_len,
1697 struct btrfs_dir_item *di;
1698 struct btrfs_key key;
1699 struct btrfs_path *path;
1701 path = alloc_path_for_send();
1705 di = btrfs_lookup_dir_item(NULL, root, path,
1706 dir, name, name_len, 0);
1715 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
1716 if (key.type == BTRFS_ROOT_ITEM_KEY) {
1720 *found_inode = key.objectid;
1721 *found_type = btrfs_dir_type(path->nodes[0], di);
1724 btrfs_free_path(path);
1729 * Looks up the first btrfs_inode_ref of a given ino. It returns the parent dir,
1730 * generation of the parent dir and the name of the dir entry.
1732 static int get_first_ref(struct btrfs_root *root, u64 ino,
1733 u64 *dir, u64 *dir_gen, struct fs_path *name)
1736 struct btrfs_key key;
1737 struct btrfs_key found_key;
1738 struct btrfs_path *path;
1742 path = alloc_path_for_send();
1747 key.type = BTRFS_INODE_REF_KEY;
1750 ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
1754 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1756 if (ret || found_key.objectid != ino ||
1757 (found_key.type != BTRFS_INODE_REF_KEY &&
1758 found_key.type != BTRFS_INODE_EXTREF_KEY)) {
1763 if (found_key.type == BTRFS_INODE_REF_KEY) {
1764 struct btrfs_inode_ref *iref;
1765 iref = btrfs_item_ptr(path->nodes[0], path->slots[0],
1766 struct btrfs_inode_ref);
1767 len = btrfs_inode_ref_name_len(path->nodes[0], iref);
1768 ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
1769 (unsigned long)(iref + 1),
1771 parent_dir = found_key.offset;
1773 struct btrfs_inode_extref *extref;
1774 extref = btrfs_item_ptr(path->nodes[0], path->slots[0],
1775 struct btrfs_inode_extref);
1776 len = btrfs_inode_extref_name_len(path->nodes[0], extref);
1777 ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
1778 (unsigned long)&extref->name, len);
1779 parent_dir = btrfs_inode_extref_parent(path->nodes[0], extref);
1783 btrfs_release_path(path);
1786 ret = get_inode_info(root, parent_dir, NULL, dir_gen, NULL,
1795 btrfs_free_path(path);
1799 static int is_first_ref(struct btrfs_root *root,
1801 const char *name, int name_len)
1804 struct fs_path *tmp_name;
1807 tmp_name = fs_path_alloc();
1811 ret = get_first_ref(root, ino, &tmp_dir, NULL, tmp_name);
1815 if (dir != tmp_dir || name_len != fs_path_len(tmp_name)) {
1820 ret = !memcmp(tmp_name->start, name, name_len);
1823 fs_path_free(tmp_name);
1828 * Used by process_recorded_refs to determine if a new ref would overwrite an
1829 * already existing ref. In case it detects an overwrite, it returns the
1830 * inode/gen in who_ino/who_gen.
1831 * When an overwrite is detected, process_recorded_refs does proper orphanizing
1832 * to make sure later references to the overwritten inode are possible.
1833 * Orphanizing is however only required for the first ref of an inode.
1834 * process_recorded_refs does an additional is_first_ref check to see if
1835 * orphanizing is really required.
1837 static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,
1838 const char *name, int name_len,
1839 u64 *who_ino, u64 *who_gen, u64 *who_mode)
1843 u64 other_inode = 0;
1846 if (!sctx->parent_root)
1849 ret = is_inode_existent(sctx, dir, dir_gen);
1854 * If we have a parent root we need to verify that the parent dir was
1855 * not deleted and then re-created; if it was, then we have no overwrite
1856 * and we can just unlink this entry.
1858 if (sctx->parent_root && dir != BTRFS_FIRST_FREE_OBJECTID) {
1859 ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL,
1861 if (ret < 0 && ret != -ENOENT)
1871 ret = lookup_dir_item_inode(sctx->parent_root, dir, name, name_len,
1872 &other_inode, &other_type);
1873 if (ret < 0 && ret != -ENOENT)
1881 * Check if the overwritten ref was already processed. If yes, the ref
1882 * was already unlinked/moved, so we can safely assume that we will not
1883 * overwrite anything at this point in time.
1885 if (other_inode > sctx->send_progress ||
1886 is_waiting_for_move(sctx, other_inode)) {
1887 ret = get_inode_info(sctx->parent_root, other_inode, NULL,
1888 who_gen, who_mode, NULL, NULL, NULL);
1893 *who_ino = other_inode;
1903 * Checks if the ref was overwritten by an already processed inode. This is
1904 * used by __get_cur_name_and_parent to find out if the ref was orphanized and
1905 * thus the orphan name needs to be used.
1906 * process_recorded_refs also uses it to avoid unlinking of refs that were
1909 static int did_overwrite_ref(struct send_ctx *sctx,
1910 u64 dir, u64 dir_gen,
1911 u64 ino, u64 ino_gen,
1912 const char *name, int name_len)
1919 if (!sctx->parent_root)
1922 ret = is_inode_existent(sctx, dir, dir_gen);
1926 if (dir != BTRFS_FIRST_FREE_OBJECTID) {
1927 ret = get_inode_info(sctx->send_root, dir, NULL, &gen, NULL,
1929 if (ret < 0 && ret != -ENOENT)
1939 /* check if the ref was overwritten by another ref */
1940 ret = lookup_dir_item_inode(sctx->send_root, dir, name, name_len,
1941 &ow_inode, &other_type);
1942 if (ret < 0 && ret != -ENOENT)
1945 /* was never and will never be overwritten */
1950 ret = get_inode_info(sctx->send_root, ow_inode, NULL, &gen, NULL, NULL,
1955 if (ow_inode == ino && gen == ino_gen) {
1961 * We know that it is or will be overwritten. Check this now.
1962 * The current inode being processed might have been the one that caused
1963 * inode 'ino' to be orphanized, therefore check if ow_inode matches
1964 * the current inode being processed.
1966 if ((ow_inode < sctx->send_progress) ||
1967 (ino != sctx->cur_ino && ow_inode == sctx->cur_ino &&
1968 gen == sctx->cur_inode_gen))
1978 * Same as did_overwrite_ref, but also checks if it is the first ref of an inode
1979 * that got overwritten. This is used by process_recorded_refs to determine
1980 * if it has to use the path as returned by get_cur_path or the orphan name.
1982 static int did_overwrite_first_ref(struct send_ctx *sctx, u64 ino, u64 gen)
1985 struct fs_path *name = NULL;
1989 if (!sctx->parent_root)
1992 name = fs_path_alloc();
1996 ret = get_first_ref(sctx->parent_root, ino, &dir, &dir_gen, name);
2000 ret = did_overwrite_ref(sctx, dir, dir_gen, ino, gen,
2001 name->start, fs_path_len(name));
2009 * Insert a name cache entry. On 32bit kernels the radix tree index is 32bit,
2010 * so we need to do some special handling in case we have clashes. This function
2011 * takes care of this with the help of name_cache_entry::radix_list.
2012 * In case of error, nce is kfreed.
2014 static int name_cache_insert(struct send_ctx *sctx,
2015 struct name_cache_entry *nce)
2018 struct list_head *nce_head;
2020 nce_head = radix_tree_lookup(&sctx->name_cache,
2021 (unsigned long)nce->ino);
2023 nce_head = kmalloc(sizeof(*nce_head), GFP_KERNEL);
2028 INIT_LIST_HEAD(nce_head);
2030 ret = radix_tree_insert(&sctx->name_cache, nce->ino, nce_head);
2037 list_add_tail(&nce->radix_list, nce_head);
2038 list_add_tail(&nce->list, &sctx->name_cache_list);
2039 sctx->name_cache_size++;
2044 static void name_cache_delete(struct send_ctx *sctx,
2045 struct name_cache_entry *nce)
2047 struct list_head *nce_head;
2049 nce_head = radix_tree_lookup(&sctx->name_cache,
2050 (unsigned long)nce->ino);
2052 btrfs_err(sctx->send_root->fs_info,
2053 "name_cache_delete lookup failed ino %llu cache size %d, leaking memory",
2054 nce->ino, sctx->name_cache_size);
2057 list_del(&nce->radix_list);
2058 list_del(&nce->list);
2059 sctx->name_cache_size--;
2062 * We may not get to the final release of nce_head if the lookup fails
2064 if (nce_head && list_empty(nce_head)) {
2065 radix_tree_delete(&sctx->name_cache, (unsigned long)nce->ino);
2070 static struct name_cache_entry *name_cache_search(struct send_ctx *sctx,
2073 struct list_head *nce_head;
2074 struct name_cache_entry *cur;
2076 nce_head = radix_tree_lookup(&sctx->name_cache, (unsigned long)ino);
2080 list_for_each_entry(cur, nce_head, radix_list) {
2081 if (cur->ino == ino && cur->gen == gen)
2088 * Removes the entry from the list and adds it back to the end. This marks the
2089 * entry as recently used so that name_cache_clean_unused does not remove it.
2091 static void name_cache_used(struct send_ctx *sctx, struct name_cache_entry *nce)
2093 list_del(&nce->list);
2094 list_add_tail(&nce->list, &sctx->name_cache_list);
2098 * Remove some entries from the beginning of name_cache_list.
2100 static void name_cache_clean_unused(struct send_ctx *sctx)
2102 struct name_cache_entry *nce;
2104 if (sctx->name_cache_size < SEND_CTX_NAME_CACHE_CLEAN_SIZE)
2107 while (sctx->name_cache_size > SEND_CTX_MAX_NAME_CACHE_SIZE) {
2108 nce = list_entry(sctx->name_cache_list.next,
2109 struct name_cache_entry, list);
2110 name_cache_delete(sctx, nce);
2115 static void name_cache_free(struct send_ctx *sctx)
2117 struct name_cache_entry *nce;
2119 while (!list_empty(&sctx->name_cache_list)) {
2120 nce = list_entry(sctx->name_cache_list.next,
2121 struct name_cache_entry, list);
2122 name_cache_delete(sctx, nce);
2128 * Used by get_cur_path for each ref up to the root.
2129 * Returns 0 if it succeeded.
2130 * Returns 1 if the inode does not exist or got overwritten. In that case, the
2131 * name is an orphan name. This instructs get_cur_path to stop iterating. If 1
2132 * is returned, parent_ino/parent_gen are not guaranteed to be valid.
2133 * Returns <0 in case of error.
2135 static int __get_cur_name_and_parent(struct send_ctx *sctx,
2139 struct fs_path *dest)
2143 struct name_cache_entry *nce = NULL;
2146 * First check if we already did a call to this function with the same
2147 * ino/gen. If yes, check if the cache entry is still up-to-date. If yes
2148 * return the cached result.
2150 nce = name_cache_search(sctx, ino, gen);
2152 if (ino < sctx->send_progress && nce->need_later_update) {
2153 name_cache_delete(sctx, nce);
2157 name_cache_used(sctx, nce);
2158 *parent_ino = nce->parent_ino;
2159 *parent_gen = nce->parent_gen;
2160 ret = fs_path_add(dest, nce->name, nce->name_len);
2169 * If the inode does not exist yet, add the orphan name and return 1.
2170 * This should only happen for the parent dir that we determine in
2173 ret = is_inode_existent(sctx, ino, gen);
2178 ret = gen_unique_name(sctx, ino, gen, dest);
2186 * Depending on whether the inode was already processed or not, use
2187 * send_root or parent_root for ref lookup.
2189 if (ino < sctx->send_progress)
2190 ret = get_first_ref(sctx->send_root, ino,
2191 parent_ino, parent_gen, dest);
2193 ret = get_first_ref(sctx->parent_root, ino,
2194 parent_ino, parent_gen, dest);
2199 * Check if the ref was overwritten by an inode's ref that was processed
2200 * earlier. If yes, treat as orphan and return 1.
2202 ret = did_overwrite_ref(sctx, *parent_ino, *parent_gen, ino, gen,
2203 dest->start, dest->end - dest->start);
2207 fs_path_reset(dest);
2208 ret = gen_unique_name(sctx, ino, gen, dest);
2216 * Store the result of the lookup in the name cache.
2218 nce = kmalloc(sizeof(*nce) + fs_path_len(dest) + 1, GFP_KERNEL);
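	/*
	 * The entry and its name share a single allocation: the extra
	 * fs_path_len(dest) + 1 bytes hold the name, which is copied into
	 * place by the strcpy() below.
	 */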
2226 nce->parent_ino = *parent_ino;
2227 nce->parent_gen = *parent_gen;
2228 nce->name_len = fs_path_len(dest);
2230 strcpy(nce->name, dest->start);
2232 if (ino < sctx->send_progress)
2233 nce->need_later_update = 0;
2235 nce->need_later_update = 1;
2237 nce_ret = name_cache_insert(sctx, nce);
2240 name_cache_clean_unused(sctx);
2247 * Magic happens here. This function returns the first ref to an inode as it
2248 * would look while receiving the stream at this point in time.
2249 * We walk the path up to the root. For every inode in between, we check if it
2250 * was already processed/sent. If yes, we continue with the parent as found
2251 * in send_root. If not, we continue with the parent as found in parent_root.
2252 * If we encounter an inode that was deleted at this point in time, we use the
2253 * inodes "orphan" name instead of the real name and stop. Same with new inodes
2254 * that were not created yet and overwritten inodes/refs.
2256 * When do we have orphan inodes:
2257 * 1. When an inode is freshly created and thus no valid refs are available yet
2258 * 2. When a directory lost all its refs (deleted) but still has dir items
2259 * inside which were not processed yet (pending for move/delete). If anyone
2260 * tried to get the path to the dir items, it would get a path inside that
2262 * 3. When an inode is moved around or gets new links, it may overwrite the ref
2263 * of an unprocessed inode. If in that case the first ref would be
2264 * overwritten, the overwritten inode gets "orphanized". Later when we
2265 * process this overwritten inode, it is restored at a new place by moving
2268 * sctx->send_progress tells this function at which point in time receiving
2271 static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen,
2272 struct fs_path *dest)
2275 struct fs_path *name = NULL;
2276 u64 parent_inode = 0;
2280 name = fs_path_alloc();
2287 fs_path_reset(dest);
2289 while (!stop && ino != BTRFS_FIRST_FREE_OBJECTID) {
2290 struct waiting_dir_move *wdm;
2292 fs_path_reset(name);
2294 if (is_waiting_for_rm(sctx, ino)) {
2295 ret = gen_unique_name(sctx, ino, gen, name);
2298 ret = fs_path_add_path(dest, name);
2302 wdm = get_waiting_dir_move(sctx, ino);
2303 if (wdm && wdm->orphanized) {
2304 ret = gen_unique_name(sctx, ino, gen, name);
2307 ret = get_first_ref(sctx->parent_root, ino,
2308 &parent_inode, &parent_gen, name);
2310 ret = __get_cur_name_and_parent(sctx, ino, gen,
2320 ret = fs_path_add_path(dest, name);
2331 fs_path_unreverse(dest);
2336 * Sends a BTRFS_SEND_C_SUBVOL command/item to userspace
2338 static int send_subvol_begin(struct send_ctx *sctx)
2341 struct btrfs_root *send_root = sctx->send_root;
2342 struct btrfs_root *parent_root = sctx->parent_root;
2343 struct btrfs_path *path;
2344 struct btrfs_key key;
2345 struct btrfs_root_ref *ref;
2346 struct extent_buffer *leaf;
2350 path = btrfs_alloc_path();
2354 name = kmalloc(BTRFS_PATH_NAME_MAX, GFP_KERNEL);
2356 btrfs_free_path(path);
2360 key.objectid = send_root->objectid;
2361 key.type = BTRFS_ROOT_BACKREF_KEY;
2364 ret = btrfs_search_slot_for_read(send_root->fs_info->tree_root,
2373 leaf = path->nodes[0];
2374 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2375 if (key.type != BTRFS_ROOT_BACKREF_KEY ||
2376 key.objectid != send_root->objectid) {
2380 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
2381 namelen = btrfs_root_ref_name_len(leaf, ref);
2382 read_extent_buffer(leaf, name, (unsigned long)(ref + 1), namelen);
2383 btrfs_release_path(path);
2386 ret = begin_cmd(sctx, BTRFS_SEND_C_SNAPSHOT);
2390 ret = begin_cmd(sctx, BTRFS_SEND_C_SUBVOL);
2395 TLV_PUT_STRING(sctx, BTRFS_SEND_A_PATH, name, namelen);
2397 if (!btrfs_is_empty_uuid(sctx->send_root->root_item.received_uuid))
2398 TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
2399 sctx->send_root->root_item.received_uuid);
2401 TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
2402 sctx->send_root->root_item.uuid);
2404 TLV_PUT_U64(sctx, BTRFS_SEND_A_CTRANSID,
2405 le64_to_cpu(sctx->send_root->root_item.ctransid));
2407 if (!btrfs_is_empty_uuid(parent_root->root_item.received_uuid))
2408 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
2409 parent_root->root_item.received_uuid);
2411 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
2412 parent_root->root_item.uuid);
2413 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
2414 le64_to_cpu(sctx->parent_root->root_item.ctransid));
2417 ret = send_cmd(sctx);
2421 btrfs_free_path(path);
2426 static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size)
2428 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2432 btrfs_debug(fs_info, "send_truncate %llu size=%llu", ino, size);
2434 p = fs_path_alloc();
2438 ret = begin_cmd(sctx, BTRFS_SEND_C_TRUNCATE);
2442 ret = get_cur_path(sctx, ino, gen, p);
2445 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2446 TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, size);
2448 ret = send_cmd(sctx);
2456 static int send_chmod(struct send_ctx *sctx, u64 ino, u64 gen, u64 mode)
2458 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2462 btrfs_debug(fs_info, "send_chmod %llu mode=%llu", ino, mode);
2464 p = fs_path_alloc();
2468 ret = begin_cmd(sctx, BTRFS_SEND_C_CHMOD);
2472 ret = get_cur_path(sctx, ino, gen, p);
2475 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2476 TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode & 07777);
2478 ret = send_cmd(sctx);
2486 static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 uid, u64 gid)
2488 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2492 btrfs_debug(fs_info, "send_chown %llu uid=%llu, gid=%llu",
2495 p = fs_path_alloc();
2499 ret = begin_cmd(sctx, BTRFS_SEND_C_CHOWN);
2503 ret = get_cur_path(sctx, ino, gen, p);
2506 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2507 TLV_PUT_U64(sctx, BTRFS_SEND_A_UID, uid);
2508 TLV_PUT_U64(sctx, BTRFS_SEND_A_GID, gid);
2510 ret = send_cmd(sctx);
2518 static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen)
2520 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2522 struct fs_path *p = NULL;
2523 struct btrfs_inode_item *ii;
2524 struct btrfs_path *path = NULL;
2525 struct extent_buffer *eb;
2526 struct btrfs_key key;
2529 btrfs_debug(fs_info, "send_utimes %llu", ino);
2531 p = fs_path_alloc();
2535 path = alloc_path_for_send();
2542 key.type = BTRFS_INODE_ITEM_KEY;
2544 ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
2550 eb = path->nodes[0];
2551 slot = path->slots[0];
2552 ii = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
2554 ret = begin_cmd(sctx, BTRFS_SEND_C_UTIMES);
2558 ret = get_cur_path(sctx, ino, gen, p);
2561 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2562 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb, &ii->atime);
2563 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb, &ii->mtime);
2564 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb, &ii->ctime);
2565 /* TODO Add otime support when the otime patches get into upstream */
2567 ret = send_cmd(sctx);
2572 btrfs_free_path(path);
2577 * Sends a BTRFS_SEND_C_MKXXX or SYMLINK command to user space. We don't have
2578 * a valid path yet because we did not process the refs yet. So, the inode
2579 * is created as an orphan.
2581 static int send_create_inode(struct send_ctx *sctx, u64 ino)
2583 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2591 btrfs_debug(fs_info, "send_create_inode %llu", ino);
2593 p = fs_path_alloc();
2597 if (ino != sctx->cur_ino) {
2598 ret = get_inode_info(sctx->send_root, ino, NULL, &gen, &mode,
2603 gen = sctx->cur_inode_gen;
2604 mode = sctx->cur_inode_mode;
2605 rdev = sctx->cur_inode_rdev;
2608 if (S_ISREG(mode)) {
2609 cmd = BTRFS_SEND_C_MKFILE;
2610 } else if (S_ISDIR(mode)) {
2611 cmd = BTRFS_SEND_C_MKDIR;
2612 } else if (S_ISLNK(mode)) {
2613 cmd = BTRFS_SEND_C_SYMLINK;
2614 } else if (S_ISCHR(mode) || S_ISBLK(mode)) {
2615 cmd = BTRFS_SEND_C_MKNOD;
2616 } else if (S_ISFIFO(mode)) {
2617 cmd = BTRFS_SEND_C_MKFIFO;
2618 } else if (S_ISSOCK(mode)) {
2619 cmd = BTRFS_SEND_C_MKSOCK;
2621 btrfs_warn(sctx->send_root->fs_info, "unexpected inode type %o",
2622 (int)(mode & S_IFMT));
2627 ret = begin_cmd(sctx, cmd);
2631 ret = gen_unique_name(sctx, ino, gen, p);
2635 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2636 TLV_PUT_U64(sctx, BTRFS_SEND_A_INO, ino);
2638 if (S_ISLNK(mode)) {
2640 ret = read_symlink(sctx->send_root, ino, p);
2643 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, p);
2644 } else if (S_ISCHR(mode) || S_ISBLK(mode) ||
2645 S_ISFIFO(mode) || S_ISSOCK(mode)) {
2646 TLV_PUT_U64(sctx, BTRFS_SEND_A_RDEV, new_encode_dev(rdev));
2647 TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode);
2650 ret = send_cmd(sctx);
2662 * We need some special handling for inodes that get processed before the parent
2663 * directory got created. See process_recorded_refs for details.
2664 * This function checks whether we already created the dir out of order.
2666 static int did_create_dir(struct send_ctx *sctx, u64 dir)
2669 struct btrfs_path *path = NULL;
2670 struct btrfs_key key;
2671 struct btrfs_key found_key;
2672 struct btrfs_key di_key;
2673 struct extent_buffer *eb;
2674 struct btrfs_dir_item *di;
2677 path = alloc_path_for_send();
2684 key.type = BTRFS_DIR_INDEX_KEY;
2686 ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
2691 eb = path->nodes[0];
2692 slot = path->slots[0];
2693 if (slot >= btrfs_header_nritems(eb)) {
2694 ret = btrfs_next_leaf(sctx->send_root, path);
2697 } else if (ret > 0) {
2704 btrfs_item_key_to_cpu(eb, &found_key, slot);
2705 if (found_key.objectid != key.objectid ||
2706 found_key.type != key.type) {
2711 di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
2712 btrfs_dir_item_key_to_cpu(eb, di, &di_key);
2714 if (di_key.type != BTRFS_ROOT_ITEM_KEY &&
2715 di_key.objectid < sctx->send_progress) {
2724 btrfs_free_path(path);
2729 * Only creates the inode if it is:
2730 * 1. Not a directory
2731 * 2. Or a directory which was not created already due to out of order
2732 * directories. See did_create_dir and process_recorded_refs for details.
2734 static int send_create_inode_if_needed(struct send_ctx *sctx)
2738 if (S_ISDIR(sctx->cur_inode_mode)) {
2739 ret = did_create_dir(sctx, sctx->cur_ino);
2748 ret = send_create_inode(sctx, sctx->cur_ino);
2756 struct recorded_ref {
2757 struct list_head list;
2759 struct fs_path *full_path;
2765 static void set_ref_path(struct recorded_ref *ref, struct fs_path *path)
2767 ref->full_path = path;
2768 ref->name = (char *)kbasename(ref->full_path->start);
2769 ref->name_len = ref->full_path->end - ref->name;
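
/*
 * Example (added for illustration): for a full_path of "a/b/c", kbasename()
 * makes name point at the trailing "c", so name_len is 1 while full_path
 * still holds the complete path used when emitting commands.
 */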
2773 * We need to process new refs before deleted refs, but compare_tree gives us
2774 * everything mixed. So we first record all refs and later process them.
2775 * This function is a helper to record one ref.
2777 static int __record_ref(struct list_head *head, u64 dir,
2778 u64 dir_gen, struct fs_path *path)
2780 struct recorded_ref *ref;
2782 ref = kmalloc(sizeof(*ref), GFP_KERNEL);
2787 ref->dir_gen = dir_gen;
2788 set_ref_path(ref, path);
2789 list_add_tail(&ref->list, head);
2793 static int dup_ref(struct recorded_ref *ref, struct list_head *list)
2795 struct recorded_ref *new;
2797 new = kmalloc(sizeof(*ref), GFP_KERNEL);
2801 new->dir = ref->dir;
2802 new->dir_gen = ref->dir_gen;
2803 new->full_path = NULL;
2804 INIT_LIST_HEAD(&new->list);
2805 list_add_tail(&new->list, list);
2809 static void __free_recorded_refs(struct list_head *head)
2811 struct recorded_ref *cur;
2813 while (!list_empty(head)) {
2814 cur = list_entry(head->next, struct recorded_ref, list);
2815 fs_path_free(cur->full_path);
2816 list_del(&cur->list);
2821 static void free_recorded_refs(struct send_ctx *sctx)
2823 __free_recorded_refs(&sctx->new_refs);
2824 __free_recorded_refs(&sctx->deleted_refs);
2828 * Renames/moves a file/dir to its orphan name. Used when the first
2829 * ref of an unprocessed inode gets overwritten and for all non-empty
2832 static int orphanize_inode(struct send_ctx *sctx, u64 ino, u64 gen,
2833 struct fs_path *path)
2836 struct fs_path *orphan;
2838 orphan = fs_path_alloc();
2842 ret = gen_unique_name(sctx, ino, gen, orphan);
2846 ret = send_rename(sctx, path, orphan);
2849 fs_path_free(orphan);
2853 static struct orphan_dir_info *
2854 add_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
2856 struct rb_node **p = &sctx->orphan_dirs.rb_node;
2857 struct rb_node *parent = NULL;
2858 struct orphan_dir_info *entry, *odi;
2860 odi = kmalloc(sizeof(*odi), GFP_KERNEL);
2862 return ERR_PTR(-ENOMEM);
2868 entry = rb_entry(parent, struct orphan_dir_info, node);
2869 if (dir_ino < entry->ino) {
2871 } else if (dir_ino > entry->ino) {
2872 p = &(*p)->rb_right;
2879 rb_link_node(&odi->node, parent, p);
2880 rb_insert_color(&odi->node, &sctx->orphan_dirs);
2884 static struct orphan_dir_info *
2885 get_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
2887 struct rb_node *n = sctx->orphan_dirs.rb_node;
2888 struct orphan_dir_info *entry;
2891 entry = rb_entry(n, struct orphan_dir_info, node);
2892 if (dir_ino < entry->ino)
2894 else if (dir_ino > entry->ino)
2902 static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino)
2904 struct orphan_dir_info *odi = get_orphan_dir_info(sctx, dir_ino);
2909 static void free_orphan_dir_info(struct send_ctx *sctx,
2910 struct orphan_dir_info *odi)
2914 rb_erase(&odi->node, &sctx->orphan_dirs);
2919 * Returns 1 if a directory can be removed at this point in time.
2920 * We check this by iterating all dir items and checking if the inode behind
2921 * the dir item was already processed.
2923 static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen,
2927 struct btrfs_root *root = sctx->parent_root;
2928 struct btrfs_path *path;
2929 struct btrfs_key key;
2930 struct btrfs_key found_key;
2931 struct btrfs_key loc;
2932 struct btrfs_dir_item *di;
2935 * Don't try to rmdir the top/root subvolume dir.
2937 if (dir == BTRFS_FIRST_FREE_OBJECTID)
2940 path = alloc_path_for_send();
2945 key.type = BTRFS_DIR_INDEX_KEY;
2947 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2952 struct waiting_dir_move *dm;
2954 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
2955 ret = btrfs_next_leaf(root, path);
2962 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2964 if (found_key.objectid != key.objectid ||
2965 found_key.type != key.type)
2968 di = btrfs_item_ptr(path->nodes[0], path->slots[0],
2969 struct btrfs_dir_item);
2970 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &loc);
2972 dm = get_waiting_dir_move(sctx, loc.objectid);
2974 struct orphan_dir_info *odi;
2976 odi = add_orphan_dir_info(sctx, dir);
2982 dm->rmdir_ino = dir;
2987 if (loc.objectid > send_progress) {
2988 struct orphan_dir_info *odi;
2990 odi = get_orphan_dir_info(sctx, dir);
2991 free_orphan_dir_info(sctx, odi);
3002 btrfs_free_path(path);
3006 static int is_waiting_for_move(struct send_ctx *sctx, u64 ino)
3008 struct waiting_dir_move *entry = get_waiting_dir_move(sctx, ino);
3010 return entry != NULL;
3013 static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino, bool orphanized)
3015 struct rb_node **p = &sctx->waiting_dir_moves.rb_node;
3016 struct rb_node *parent = NULL;
3017 struct waiting_dir_move *entry, *dm;
3019 dm = kmalloc(sizeof(*dm), GFP_KERNEL);
3024 dm->orphanized = orphanized;
3028 entry = rb_entry(parent, struct waiting_dir_move, node);
3029 if (ino < entry->ino) {
3031 } else if (ino > entry->ino) {
3032 p = &(*p)->rb_right;
3039 rb_link_node(&dm->node, parent, p);
3040 rb_insert_color(&dm->node, &sctx->waiting_dir_moves);
3044 static struct waiting_dir_move *
3045 get_waiting_dir_move(struct send_ctx *sctx, u64 ino)
3047 struct rb_node *n = sctx->waiting_dir_moves.rb_node;
3048 struct waiting_dir_move *entry;
3051 entry = rb_entry(n, struct waiting_dir_move, node);
3052 if (ino < entry->ino)
3054 else if (ino > entry->ino)
3062 static void free_waiting_dir_move(struct send_ctx *sctx,
3063 struct waiting_dir_move *dm)
3067 rb_erase(&dm->node, &sctx->waiting_dir_moves);
3071 static int add_pending_dir_move(struct send_ctx *sctx,
3075 struct list_head *new_refs,
3076 struct list_head *deleted_refs,
3077 const bool is_orphan)
3079 struct rb_node **p = &sctx->pending_dir_moves.rb_node;
3080 struct rb_node *parent = NULL;
3081 struct pending_dir_move *entry = NULL, *pm;
3082 struct recorded_ref *cur;
3086 pm = kmalloc(sizeof(*pm), GFP_KERNEL);
3089 pm->parent_ino = parent_ino;
3092 INIT_LIST_HEAD(&pm->list);
3093 INIT_LIST_HEAD(&pm->update_refs);
3094 RB_CLEAR_NODE(&pm->node);
3098 entry = rb_entry(parent, struct pending_dir_move, node);
3099 if (parent_ino < entry->parent_ino) {
3101 } else if (parent_ino > entry->parent_ino) {
3102 p = &(*p)->rb_right;
3109 list_for_each_entry(cur, deleted_refs, list) {
3110 ret = dup_ref(cur, &pm->update_refs);
3114 list_for_each_entry(cur, new_refs, list) {
3115 ret = dup_ref(cur, &pm->update_refs);
3120 ret = add_waiting_dir_move(sctx, pm->ino, is_orphan);
3125 list_add_tail(&pm->list, &entry->list);
3127 rb_link_node(&pm->node, parent, p);
3128 rb_insert_color(&pm->node, &sctx->pending_dir_moves);
3133 __free_recorded_refs(&pm->update_refs);
3139 static struct pending_dir_move *get_pending_dir_moves(struct send_ctx *sctx,
3142 struct rb_node *n = sctx->pending_dir_moves.rb_node;
3143 struct pending_dir_move *entry;
3146 entry = rb_entry(n, struct pending_dir_move, node);
3147 if (parent_ino < entry->parent_ino)
3149 else if (parent_ino > entry->parent_ino)
3157 static int path_loop(struct send_ctx *sctx, struct fs_path *name,
3158 u64 ino, u64 gen, u64 *ancestor_ino)
3161 u64 parent_inode = 0;
3163 u64 start_ino = ino;
3166 while (ino != BTRFS_FIRST_FREE_OBJECTID) {
3167 fs_path_reset(name);
3169 if (is_waiting_for_rm(sctx, ino))
3171 if (is_waiting_for_move(sctx, ino)) {
3172 if (*ancestor_ino == 0)
3173 *ancestor_ino = ino;
3174 ret = get_first_ref(sctx->parent_root, ino,
3175 &parent_inode, &parent_gen, name);
3177 ret = __get_cur_name_and_parent(sctx, ino, gen,
3187 if (parent_inode == start_ino) {
3189 if (*ancestor_ino == 0)
3190 *ancestor_ino = ino;
3199 static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
3201 struct fs_path *from_path = NULL;
3202 struct fs_path *to_path = NULL;
3203 struct fs_path *name = NULL;
3204 u64 orig_progress = sctx->send_progress;
3205 struct recorded_ref *cur;
3206 u64 parent_ino, parent_gen;
3207 struct waiting_dir_move *dm = NULL;
3213 name = fs_path_alloc();
3214 from_path = fs_path_alloc();
3215 if (!name || !from_path) {
3220 dm = get_waiting_dir_move(sctx, pm->ino);
3222 rmdir_ino = dm->rmdir_ino;
3223 is_orphan = dm->orphanized;
3224 free_waiting_dir_move(sctx, dm);
3227 ret = gen_unique_name(sctx, pm->ino,
3228 pm->gen, from_path);
3230 ret = get_first_ref(sctx->parent_root, pm->ino,
3231 &parent_ino, &parent_gen, name);
3234 ret = get_cur_path(sctx, parent_ino, parent_gen,
3238 ret = fs_path_add_path(from_path, name);
3243 sctx->send_progress = sctx->cur_ino + 1;
3244 ret = path_loop(sctx, name, pm->ino, pm->gen, &ancestor);
3248 LIST_HEAD(deleted_refs);
3249 ASSERT(ancestor > BTRFS_FIRST_FREE_OBJECTID);
3250 ret = add_pending_dir_move(sctx, pm->ino, pm->gen, ancestor,
3251 &pm->update_refs, &deleted_refs,
3256 dm = get_waiting_dir_move(sctx, pm->ino);
3258 dm->rmdir_ino = rmdir_ino;
3262 fs_path_reset(name);
3265 ret = get_cur_path(sctx, pm->ino, pm->gen, to_path);
3269 ret = send_rename(sctx, from_path, to_path);
3274 struct orphan_dir_info *odi;
3276 odi = get_orphan_dir_info(sctx, rmdir_ino);
3278 /* already deleted */
3281 ret = can_rmdir(sctx, rmdir_ino, odi->gen, sctx->cur_ino);
3287 name = fs_path_alloc();
3292 ret = get_cur_path(sctx, rmdir_ino, odi->gen, name);
3295 ret = send_rmdir(sctx, name);
3298 free_orphan_dir_info(sctx, odi);
3302 ret = send_utimes(sctx, pm->ino, pm->gen);
3307 * After rename/move, need to update the utimes of both new parent(s)
3308 * and old parent(s).
3310 list_for_each_entry(cur, &pm->update_refs, list) {
3312 * The parent inode might have been deleted in the send snapshot
3314 ret = get_inode_info(sctx->send_root, cur->dir, NULL,
3315 NULL, NULL, NULL, NULL, NULL);
3316 if (ret == -ENOENT) {
3323 ret = send_utimes(sctx, cur->dir, cur->dir_gen);
3330 fs_path_free(from_path);
3331 fs_path_free(to_path);
3332 sctx->send_progress = orig_progress;
3337 static void free_pending_move(struct send_ctx *sctx, struct pending_dir_move *m)
3339 if (!list_empty(&m->list))
3341 if (!RB_EMPTY_NODE(&m->node))
3342 rb_erase(&m->node, &sctx->pending_dir_moves);
3343 __free_recorded_refs(&m->update_refs);
3347 static void tail_append_pending_moves(struct pending_dir_move *moves,
3348 struct list_head *stack)
3350 if (list_empty(&moves->list)) {
3351 list_add_tail(&moves->list, stack);
3354 list_splice_init(&moves->list, &list);
3355 list_add_tail(&moves->list, stack);
3356 list_splice_tail(&list, stack);
3360 static int apply_children_dir_moves(struct send_ctx *sctx)
3362 struct pending_dir_move *pm;
3363 struct list_head stack;
3364 u64 parent_ino = sctx->cur_ino;
3367 pm = get_pending_dir_moves(sctx, parent_ino);
3371 INIT_LIST_HEAD(&stack);
3372 tail_append_pending_moves(pm, &stack);
3374 while (!list_empty(&stack)) {
3375 pm = list_first_entry(&stack, struct pending_dir_move, list);
3376 parent_ino = pm->ino;
3377 ret = apply_dir_move(sctx, pm);
3378 free_pending_move(sctx, pm);
3381 pm = get_pending_dir_moves(sctx, parent_ino);
3383 tail_append_pending_moves(pm, &stack);
3388 while (!list_empty(&stack)) {
3389 pm = list_first_entry(&stack, struct pending_dir_move, list);
3390 free_pending_move(sctx, pm);
3396 * We might need to delay a directory rename even when no ancestor directory
3397 * (in the send root) with a higher inode number than ours (sctx->cur_ino) was
3398 * renamed. This happens when we rename a directory to the old name (the name
3399 * in the parent root) of some other unrelated directory that got its rename
3400 * delayed due to some ancestor with a higher inode number that got renamed.
3406 * |---- a/ (ino 257)
3407 * | |---- file (ino 260)
3409 * |---- b/ (ino 258)
3410 * |---- c/ (ino 259)
3414 * |---- a/ (ino 258)
3415 * |---- x/ (ino 259)
3416 * |---- y/ (ino 257)
3417 * |----- file (ino 260)
3419 * Here we can not rename 258 from 'b' to 'a' without the rename of inode 257
3420 * from 'a' to 'x/y' happening first, which in turn depends on the rename of
3421 * inode 259 from 'c' to 'x'. So the order of rename commands the send stream must use is:
3424 * 1 - rename 259 from 'c' to 'x'
3425 * 2 - rename 257 from 'a' to 'x/y'
3426 * 3 - rename 258 from 'b' to 'a'
3428 * Returns 1 if the rename of sctx->cur_ino needs to be delayed, 0 if it can
3429 * be done right away and < 0 on error.
3431 static int wait_for_dest_dir_move(struct send_ctx *sctx,
3432 struct recorded_ref *parent_ref,
3433 const bool is_orphan)
3435 struct btrfs_fs_info *fs_info = sctx->parent_root->fs_info;
3436 struct btrfs_path *path;
3437 struct btrfs_key key;
3438 struct btrfs_key di_key;
3439 struct btrfs_dir_item *di;
3443 struct waiting_dir_move *wdm;
3445 if (RB_EMPTY_ROOT(&sctx->waiting_dir_moves))
3448 path = alloc_path_for_send();
3452 key.objectid = parent_ref->dir;
3453 key.type = BTRFS_DIR_ITEM_KEY;
3454 key.offset = btrfs_name_hash(parent_ref->name, parent_ref->name_len);
3456 ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0);
3459 } else if (ret > 0) {
3464 di = btrfs_match_dir_item_name(fs_info, path, parent_ref->name,
3465 parent_ref->name_len);
3471 * di_key.objectid has the number of the inode that has a dentry in the
3472 * parent directory with the same name that sctx->cur_ino is being
3473 * renamed to. We need to check if that inode is in the send root as
3474 * well and, if so, whether it is currently marked as an inode with a
3475 * pending rename. If it is, we need to delay the rename of sctx->cur_ino
3476 * as well, so that it happens after that other inode is renamed.
3478 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &di_key);
3479 if (di_key.type != BTRFS_INODE_ITEM_KEY) {
3484 ret = get_inode_info(sctx->parent_root, di_key.objectid, NULL,
3485 &left_gen, NULL, NULL, NULL, NULL);
3488 ret = get_inode_info(sctx->send_root, di_key.objectid, NULL,
3489 &right_gen, NULL, NULL, NULL, NULL);
3496 /* Different inode, no need to delay the rename of sctx->cur_ino */
3497 if (right_gen != left_gen) {
3502 wdm = get_waiting_dir_move(sctx, di_key.objectid);
3503 if (wdm && !wdm->orphanized) {
3504 ret = add_pending_dir_move(sctx,
3506 sctx->cur_inode_gen,
3509 &sctx->deleted_refs,
3515 btrfs_free_path(path);
3520 * Check if inode ino2, or any of its ancestors, is inode ino1.
3521 * Return 1 if true, 0 if false and < 0 on error.
3523 static int check_ino_in_path(struct btrfs_root *root,
3528 struct fs_path *fs_path)
3533 return ino1_gen == ino2_gen;
3535 while (ino > BTRFS_FIRST_FREE_OBJECTID) {
3540 fs_path_reset(fs_path);
3541 ret = get_first_ref(root, ino, &parent, &parent_gen, fs_path);
3545 return parent_gen == ino1_gen;
3552 * Check if ino ino1 is an ancestor of inode ino2 in the given root for any
3553 * possible path (in case ino2 is not a directory and has multiple hard links).
3554 * Return 1 if true, 0 if false and < 0 on error.
3556 static int is_ancestor(struct btrfs_root *root,
3560 struct fs_path *fs_path)
3562 bool free_fs_path = false;
3564 struct btrfs_path *path = NULL;
3565 struct btrfs_key key;
3568 fs_path = fs_path_alloc();
3571 free_fs_path = true;
3574 path = alloc_path_for_send();
3580 key.objectid = ino2;
3581 key.type = BTRFS_INODE_REF_KEY;
3584 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3589 struct extent_buffer *leaf = path->nodes[0];
3590 int slot = path->slots[0];
3594 if (slot >= btrfs_header_nritems(leaf)) {
3595 ret = btrfs_next_leaf(root, path);
3603 btrfs_item_key_to_cpu(leaf, &key, slot);
3604 if (key.objectid != ino2)
3606 if (key.type != BTRFS_INODE_REF_KEY &&
3607 key.type != BTRFS_INODE_EXTREF_KEY)
3610 item_size = btrfs_item_size_nr(leaf, slot);
3611 while (cur_offset < item_size) {
3615 if (key.type == BTRFS_INODE_EXTREF_KEY) {
3617 struct btrfs_inode_extref *extref;
3619 ptr = btrfs_item_ptr_offset(leaf, slot);
3620 extref = (struct btrfs_inode_extref *)
3622 parent = btrfs_inode_extref_parent(leaf,
3624 cur_offset += sizeof(*extref);
3625 cur_offset += btrfs_inode_extref_name_len(leaf,
3628 parent = key.offset;
3629 cur_offset = item_size;
3632 ret = get_inode_info(root, parent, NULL, &parent_gen,
3633 NULL, NULL, NULL, NULL);
3636 ret = check_ino_in_path(root, ino1, ino1_gen,
3637 parent, parent_gen, fs_path);
3645 btrfs_free_path(path);
3647 fs_path_free(fs_path);
3651 static int wait_for_parent_move(struct send_ctx *sctx,
3652 struct recorded_ref *parent_ref,
3653 const bool is_orphan)
3656 u64 ino = parent_ref->dir;
3657 u64 ino_gen = parent_ref->dir_gen;
3658 u64 parent_ino_before, parent_ino_after;
3659 struct fs_path *path_before = NULL;
3660 struct fs_path *path_after = NULL;
3663 path_after = fs_path_alloc();
3664 path_before = fs_path_alloc();
3665 if (!path_after || !path_before) {
3671 * Our current directory inode may not yet be renamed/moved because some
3672 * ancestor (immediate or not) has to be renamed/moved first. So find if
3673 * such an ancestor exists and make sure our own rename/move happens after
3674 * that ancestor is processed to avoid path build infinite loops (done
3675 * at get_cur_path()).
3677 while (ino > BTRFS_FIRST_FREE_OBJECTID) {
3678 u64 parent_ino_after_gen;
3680 if (is_waiting_for_move(sctx, ino)) {
3682 * If the current inode is an ancestor of ino in the
3683 * parent root, we need to delay the rename of the
3684 * current inode, otherwise don't delay the rename
3685 * because we can end up with a circular dependency
3686 * of renames, resulting in some directories never
3687 * getting the respective rename operations issued in
3688 * the send stream or getting into infinite path build loops.
3691 ret = is_ancestor(sctx->parent_root,
3692 sctx->cur_ino, sctx->cur_inode_gen,
3698 fs_path_reset(path_before);
3699 fs_path_reset(path_after);
3701 ret = get_first_ref(sctx->send_root, ino, &parent_ino_after,
3702 &parent_ino_after_gen, path_after);
3705 ret = get_first_ref(sctx->parent_root, ino, &parent_ino_before,
3707 if (ret < 0 && ret != -ENOENT) {
3709 } else if (ret == -ENOENT) {
3714 len1 = fs_path_len(path_before);
3715 len2 = fs_path_len(path_after);
3716 if (ino > sctx->cur_ino &&
3717 (parent_ino_before != parent_ino_after || len1 != len2 ||
3718 memcmp(path_before->start, path_after->start, len1))) {
3721 ret = get_inode_info(sctx->parent_root, ino, NULL,
3722 &parent_ino_gen, NULL, NULL, NULL,
3726 if (ino_gen == parent_ino_gen) {
3731 ino = parent_ino_after;
3732 ino_gen = parent_ino_after_gen;
3736 fs_path_free(path_before);
3737 fs_path_free(path_after);
3740 ret = add_pending_dir_move(sctx,
3742 sctx->cur_inode_gen,
3745 &sctx->deleted_refs,
3754 static int update_ref_path(struct send_ctx *sctx, struct recorded_ref *ref)
3757 struct fs_path *new_path;
3760 * Our reference's name member points to its full_path member string, so
3761 * we allocate a new path here.
3763 new_path = fs_path_alloc();
3767 ret = get_cur_path(sctx, ref->dir, ref->dir_gen, new_path);
3769 fs_path_free(new_path);
3772 ret = fs_path_add(new_path, ref->name, ref->name_len);
3774 fs_path_free(new_path);
3778 fs_path_free(ref->full_path);
3779 set_ref_path(ref, new_path);
3785 * This does all the move/link/unlink/rmdir magic.
3787 static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
3789 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
3791 struct recorded_ref *cur;
3792 struct recorded_ref *cur2;
3793 struct list_head check_dirs;
3794 struct fs_path *valid_path = NULL;
3798 int did_overwrite = 0;
3800 u64 last_dir_ino_rm = 0;
3801 bool can_rename = true;
3802 bool orphanized_dir = false;
3803 bool orphanized_ancestor = false;
3805 btrfs_debug(fs_info, "process_recorded_refs %llu", sctx->cur_ino);
3808 * This should never happen as the root dir always has the same ref
3809 * which is always '..'
3811 BUG_ON(sctx->cur_ino <= BTRFS_FIRST_FREE_OBJECTID);
3812 INIT_LIST_HEAD(&check_dirs);
3814 valid_path = fs_path_alloc();
3821 * First, check if the first ref of the current inode was overwritten
3822 * before. If yes, we know that the current inode was already orphanized
3823 * and thus use the orphan name. If not, we can use get_cur_path to
3824 * get the path of the first ref as it would look like while receiving at
3825 * this point in time.
3826 * New inodes are always orphan at the beginning, so we force the use of
3827 * the orphan name in this case.
3828 * The first ref is stored in valid_path and will be updated if it
3829 * gets moved around.
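 *
 * Illustrative example (added; inode numbers are hypothetical): if the
 * first ref of inode 259 in the parent snapshot was "foo" and an
 * earlier-processed inode already claimed that name, inode 259 was
 * orphanized to a unique name of the form "o259-<gen>-<idx>", so
 * valid_path must start out as that orphan name rather than "foo".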
3831 if (!sctx->cur_inode_new) {
3832 ret = did_overwrite_first_ref(sctx, sctx->cur_ino,
3833 sctx->cur_inode_gen);
3839 if (sctx->cur_inode_new || did_overwrite) {
3840 ret = gen_unique_name(sctx, sctx->cur_ino,
3841 sctx->cur_inode_gen, valid_path);
3846 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen,
3852 list_for_each_entry(cur, &sctx->new_refs, list) {
3854 * We may have refs where the parent directory does not exist
3855 * yet. This happens if the parent directory's inum is higher
3856 * than the current inum. To handle this case, we create the
3857 * parent directory out of order. But we need to check if this
3858 * did already happen before due to other refs in the same dir.
3860 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
3863 if (ret == inode_state_will_create) {
3866 * First check if any of the current inodes refs did
3867 * already create the dir.
3869 list_for_each_entry(cur2, &sctx->new_refs, list) {
3872 if (cur2->dir == cur->dir) {
3879 * If that did not happen, check if a previous inode
3880 * did already create the dir.
3883 ret = did_create_dir(sctx, cur->dir);
3887 ret = send_create_inode(sctx, cur->dir);
3894 * Check if this new ref would overwrite the first ref of
3895 * another unprocessed inode. If yes, orphanize the
3896 * overwritten inode. If we find an overwritten ref that is
3897 * not the first ref, simply unlink it.
3899 ret = will_overwrite_ref(sctx, cur->dir, cur->dir_gen,
3900 cur->name, cur->name_len,
3901 &ow_inode, &ow_gen, &ow_mode);
3905 ret = is_first_ref(sctx->parent_root,
3906 ow_inode, cur->dir, cur->name,
3911 struct name_cache_entry *nce;
3912 struct waiting_dir_move *wdm;
3914 ret = orphanize_inode(sctx, ow_inode, ow_gen,
3918 if (S_ISDIR(ow_mode))
3919 orphanized_dir = true;
3922 * If ow_inode has its rename operation delayed,
3923 * make sure that its orphanized name is used in
3924 * the source path when performing its rename operation.
3927 if (is_waiting_for_move(sctx, ow_inode)) {
3928 wdm = get_waiting_dir_move(sctx,
3931 wdm->orphanized = true;
3935 * Make sure we clear our orphanized inode's
3936 * name from the name cache. This is because the
3937 * inode ow_inode might be an ancestor of some
3938 * other inode that will be orphanized as well
3939 * later and has an inode number greater than
3940 * sctx->send_progress. We need to prevent
3941 * future name lookups from using the old name
3942 * and make them use the orphan name instead.
3944 nce = name_cache_search(sctx, ow_inode, ow_gen);
3946 name_cache_delete(sctx, nce);
3951 * ow_inode might currently be an ancestor of
3952 * cur_ino, therefore compute valid_path (the
3953 * current path of cur_ino) again because it
3954 * might contain the pre-orphanization name of
3955 * ow_inode, which is no longer valid.
3957 ret = is_ancestor(sctx->parent_root,
3959 sctx->cur_ino, NULL);
3961 orphanized_ancestor = true;
3962 fs_path_reset(valid_path);
3963 ret = get_cur_path(sctx, sctx->cur_ino,
3964 sctx->cur_inode_gen,
3970 ret = send_unlink(sctx, cur->full_path);
3976 if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root) {
3977 ret = wait_for_dest_dir_move(sctx, cur, is_orphan);
3986 if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root &&
3988 ret = wait_for_parent_move(sctx, cur, is_orphan);
3998 * link/move the ref to the new place. If we have an orphan
3999 * inode, move it and update valid_path. If not, link or move
4000 * it depending on the inode mode.
4002 if (is_orphan && can_rename) {
4003 ret = send_rename(sctx, valid_path, cur->full_path);
4007 ret = fs_path_copy(valid_path, cur->full_path);
4010 } else if (can_rename) {
4011 if (S_ISDIR(sctx->cur_inode_mode)) {
4013 * Dirs can't be linked, so move it. For moved
4014 * dirs, we always have one new and one deleted
4015 * ref. The deleted ref is ignored later.
4017 ret = send_rename(sctx, valid_path,
4020 ret = fs_path_copy(valid_path,
4026 * We might have previously orphanized an inode
4027 * which is an ancestor of our current inode,
4028 * so our reference's full path, which was
4029 * computed before any such orphanizations, must be updated.
4032 if (orphanized_dir) {
4033 ret = update_ref_path(sctx, cur);
4037 ret = send_link(sctx, cur->full_path,
4043 ret = dup_ref(cur, &check_dirs);
4048 if (S_ISDIR(sctx->cur_inode_mode) && sctx->cur_inode_deleted) {
4050 * Check if we can already rmdir the directory. If not,
4051 * orphanize it. For every dir item inside that gets deleted
4052 * later, we do this check again and rmdir it then if possible.
4053 * See the use of check_dirs for more details.
4055 ret = can_rmdir(sctx, sctx->cur_ino, sctx->cur_inode_gen,
4060 ret = send_rmdir(sctx, valid_path);
4063 } else if (!is_orphan) {
4064 ret = orphanize_inode(sctx, sctx->cur_ino,
4065 sctx->cur_inode_gen, valid_path);
4071 list_for_each_entry(cur, &sctx->deleted_refs, list) {
4072 ret = dup_ref(cur, &check_dirs);
4076 } else if (S_ISDIR(sctx->cur_inode_mode) &&
4077 !list_empty(&sctx->deleted_refs)) {
4079 * We have a moved dir. Add the old parent to check_dirs
4081 cur = list_entry(sctx->deleted_refs.next, struct recorded_ref,
4083 ret = dup_ref(cur, &check_dirs);
4086 } else if (!S_ISDIR(sctx->cur_inode_mode)) {
4088 * We have a non-dir inode. Go through all deleted refs and
4089 * unlink them if they were not already overwritten by other inodes.
4092 list_for_each_entry(cur, &sctx->deleted_refs, list) {
4093 ret = did_overwrite_ref(sctx, cur->dir, cur->dir_gen,
4094 sctx->cur_ino, sctx->cur_inode_gen,
4095 cur->name, cur->name_len);
4100 * If we orphanized any ancestor before, we need
4101 * to recompute the full path for deleted names,
4102 * since any such path was computed before we
4103 * processed any references and orphanized any ancestor inode.
4106 if (orphanized_ancestor) {
4107 ret = update_ref_path(sctx, cur);
4111 ret = send_unlink(sctx, cur->full_path);
4115 ret = dup_ref(cur, &check_dirs);
4120 * If the inode is still orphan, unlink the orphan. This may
4121 * happen when a previous inode did overwrite the first ref
4122 * of this inode and no new refs were added for the current
4123 * inode. Unlinking does not mean that the inode is deleted in
4124 * all cases. There may still be links to this inode in other places.
4128 ret = send_unlink(sctx, valid_path);
4135 * We did collect all parent dirs where cur_inode was once located. We
4136 * now go through all these dirs and check if they are pending for
4137 * deletion and if it's finally possible to perform the rmdir now.
4138 * We also update the inode stats of the parent dirs here.
4140 list_for_each_entry(cur, &check_dirs, list) {
4142 * In case we had refs into dirs that were not processed yet,
4143 * we don't need to do the utime and rmdir logic for these dirs.
4144 * The dir will be processed later.
4146 if (cur->dir > sctx->cur_ino)
4149 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
4153 if (ret == inode_state_did_create ||
4154 ret == inode_state_no_change) {
4155 /* TODO delayed utimes */
4156 ret = send_utimes(sctx, cur->dir, cur->dir_gen);
4159 } else if (ret == inode_state_did_delete &&
4160 cur->dir != last_dir_ino_rm) {
4161 ret = can_rmdir(sctx, cur->dir, cur->dir_gen,
4166 ret = get_cur_path(sctx, cur->dir,
4167 cur->dir_gen, valid_path);
4170 ret = send_rmdir(sctx, valid_path);
4173 last_dir_ino_rm = cur->dir;
4181 __free_recorded_refs(&check_dirs);
4182 free_recorded_refs(sctx);
4183 fs_path_free(valid_path);
4187 static int record_ref(struct btrfs_root *root, u64 dir, struct fs_path *name,
4188 void *ctx, struct list_head *refs)
4191 struct send_ctx *sctx = ctx;
4195 p = fs_path_alloc();
4199 ret = get_inode_info(root, dir, NULL, &gen, NULL, NULL,
4204 ret = get_cur_path(sctx, dir, gen, p);
4207 ret = fs_path_add_path(p, name);
4211 ret = __record_ref(refs, dir, gen, p);
4219 static int __record_new_ref(int num, u64 dir, int index,
4220 struct fs_path *name,
4223 struct send_ctx *sctx = ctx;
4224 return record_ref(sctx->send_root, dir, name, ctx, &sctx->new_refs);
4228 static int __record_deleted_ref(int num, u64 dir, int index,
4229 struct fs_path *name,
4232 struct send_ctx *sctx = ctx;
4233 return record_ref(sctx->parent_root, dir, name, ctx,
4234 &sctx->deleted_refs);
4237 static int record_new_ref(struct send_ctx *sctx)
4241 ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
4242 sctx->cmp_key, 0, __record_new_ref, sctx);
4251 static int record_deleted_ref(struct send_ctx *sctx)
4255 ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
4256 sctx->cmp_key, 0, __record_deleted_ref, sctx);
4265 struct find_ref_ctx {
4268 struct btrfs_root *root;
4269 struct fs_path *name;
4273 static int __find_iref(int num, u64 dir, int index,
4274 struct fs_path *name,
4277 struct find_ref_ctx *ctx = ctx_;
4281 if (dir == ctx->dir && fs_path_len(name) == fs_path_len(ctx->name) &&
4282 strncmp(name->start, ctx->name->start, fs_path_len(name)) == 0) {
4284 * To avoid doing extra lookups we'll only do this if everything
4287 ret = get_inode_info(ctx->root, dir, NULL, &dir_gen, NULL,
4291 if (dir_gen != ctx->dir_gen)
4293 ctx->found_idx = num;
4299 static int find_iref(struct btrfs_root *root,
4300 struct btrfs_path *path,
4301 struct btrfs_key *key,
4302 u64 dir, u64 dir_gen, struct fs_path *name)
4305 struct find_ref_ctx ctx;
4309 ctx.dir_gen = dir_gen;
4313 ret = iterate_inode_ref(root, path, key, 0, __find_iref, &ctx);
4317 if (ctx.found_idx == -1)
4320 return ctx.found_idx;
4323 static int __record_changed_new_ref(int num, u64 dir, int index,
4324 struct fs_path *name,
4329 struct send_ctx *sctx = ctx;
4331 ret = get_inode_info(sctx->send_root, dir, NULL, &dir_gen, NULL,
4336 ret = find_iref(sctx->parent_root, sctx->right_path,
4337 sctx->cmp_key, dir, dir_gen, name);
4339 ret = __record_new_ref(num, dir, index, name, sctx);
4346 static int __record_changed_deleted_ref(int num, u64 dir, int index,
4347 struct fs_path *name,
4352 struct send_ctx *sctx = ctx;
4354 ret = get_inode_info(sctx->parent_root, dir, NULL, &dir_gen, NULL,
4359 ret = find_iref(sctx->send_root, sctx->left_path, sctx->cmp_key,
4360 dir, dir_gen, name);
4362 ret = __record_deleted_ref(num, dir, index, name, sctx);
4369 static int record_changed_ref(struct send_ctx *sctx)
4373 ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
4374 sctx->cmp_key, 0, __record_changed_new_ref, sctx);
4377 ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
4378 sctx->cmp_key, 0, __record_changed_deleted_ref, sctx);
4388 * Record and process all refs at once. Needed when an inode changes the
4389 * generation number, which means that it was deleted and recreated.
4391 static int process_all_refs(struct send_ctx *sctx,
4392 enum btrfs_compare_tree_result cmd)
4395 struct btrfs_root *root;
4396 struct btrfs_path *path;
4397 struct btrfs_key key;
4398 struct btrfs_key found_key;
4399 struct extent_buffer *eb;
4401 iterate_inode_ref_t cb;
4402 int pending_move = 0;
4404 path = alloc_path_for_send();
4408 if (cmd == BTRFS_COMPARE_TREE_NEW) {
4409 root = sctx->send_root;
4410 cb = __record_new_ref;
4411 } else if (cmd == BTRFS_COMPARE_TREE_DELETED) {
4412 root = sctx->parent_root;
4413 cb = __record_deleted_ref;
4415 btrfs_err(sctx->send_root->fs_info,
4416 "Wrong command %d in process_all_refs", cmd);
4421 key.objectid = sctx->cmp_key->objectid;
4422 key.type = BTRFS_INODE_REF_KEY;
4424 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4429 eb = path->nodes[0];
4430 slot = path->slots[0];
4431 if (slot >= btrfs_header_nritems(eb)) {
4432 ret = btrfs_next_leaf(root, path);
4440 btrfs_item_key_to_cpu(eb, &found_key, slot);
4442 if (found_key.objectid != key.objectid ||
4443 (found_key.type != BTRFS_INODE_REF_KEY &&
4444 found_key.type != BTRFS_INODE_EXTREF_KEY))
4447 ret = iterate_inode_ref(root, path, &found_key, 0, cb, sctx);
4453 btrfs_release_path(path);
4456 * We don't actually care about pending_move as we are simply
4457 * re-creating this inode and will be rename'ing it into place once we
4458 * rename the parent directory.
4460 ret = process_recorded_refs(sctx, &pending_move);
4462 btrfs_free_path(path);
4466 static int send_set_xattr(struct send_ctx *sctx,
4467 struct fs_path *path,
4468 const char *name, int name_len,
4469 const char *data, int data_len)
4473 ret = begin_cmd(sctx, BTRFS_SEND_C_SET_XATTR);
4477 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
4478 TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
4479 TLV_PUT(sctx, BTRFS_SEND_A_XATTR_DATA, data, data_len);
4481 ret = send_cmd(sctx);
4488 static int send_remove_xattr(struct send_ctx *sctx,
4489 struct fs_path *path,
4490 const char *name, int name_len)
4494 ret = begin_cmd(sctx, BTRFS_SEND_C_REMOVE_XATTR);
4498 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
4499 TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
4501 ret = send_cmd(sctx);
4508 static int __process_new_xattr(int num, struct btrfs_key *di_key,
4509 const char *name, int name_len,
4510 const char *data, int data_len,
4514 struct send_ctx *sctx = ctx;
4516 struct posix_acl_xattr_header dummy_acl;
4518 p = fs_path_alloc();
4523 * This hack is needed because empty acls are stored as zero byte
4524 * data in xattrs. The problem is that receiving these zero byte
4525 * acls will fail later. To fix this, we send a dummy acl list that
4526 * only contains the version number and no entries.
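 *
 * (Added note: the dummy value sent in that case is just the 4-byte
 * posix_acl_xattr_header, i.e. the little-endian version number, with
 * no posix_acl_xattr_entry records following it.)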
4528 if (!strncmp(name, XATTR_NAME_POSIX_ACL_ACCESS, name_len) ||
4529 !strncmp(name, XATTR_NAME_POSIX_ACL_DEFAULT, name_len)) {
4530 if (data_len == 0) {
4531 dummy_acl.a_version =
4532 cpu_to_le32(POSIX_ACL_XATTR_VERSION);
4533 data = (char *)&dummy_acl;
4534 data_len = sizeof(dummy_acl);
4538 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4542 ret = send_set_xattr(sctx, p, name, name_len, data, data_len);
4549 static int __process_deleted_xattr(int num, struct btrfs_key *di_key,
4550 const char *name, int name_len,
4551 const char *data, int data_len,
4555 struct send_ctx *sctx = ctx;
4558 p = fs_path_alloc();
4562 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4566 ret = send_remove_xattr(sctx, p, name, name_len);
4573 static int process_new_xattr(struct send_ctx *sctx)
4577 ret = iterate_dir_item(sctx->send_root, sctx->left_path,
4578 __process_new_xattr, sctx);
4583 static int process_deleted_xattr(struct send_ctx *sctx)
4585 return iterate_dir_item(sctx->parent_root, sctx->right_path,
4586 __process_deleted_xattr, sctx);
4589 struct find_xattr_ctx {
4597 static int __find_xattr(int num, struct btrfs_key *di_key,
4598 const char *name, int name_len,
4599 const char *data, int data_len,
4600 u8 type, void *vctx)
4602 struct find_xattr_ctx *ctx = vctx;
4604 if (name_len == ctx->name_len &&
4605 strncmp(name, ctx->name, name_len) == 0) {
4606 ctx->found_idx = num;
4607 ctx->found_data_len = data_len;
4608 ctx->found_data = kmemdup(data, data_len, GFP_KERNEL);
4609 if (!ctx->found_data)
4616 static int find_xattr(struct btrfs_root *root,
4617 struct btrfs_path *path,
4618 struct btrfs_key *key,
4619 const char *name, int name_len,
4620 char **data, int *data_len)
4623 struct find_xattr_ctx ctx;
4626 ctx.name_len = name_len;
4628 ctx.found_data = NULL;
4629 ctx.found_data_len = 0;
4631 ret = iterate_dir_item(root, path, __find_xattr, &ctx);
4635 if (ctx.found_idx == -1)
4638 *data = ctx.found_data;
4639 *data_len = ctx.found_data_len;
4641 kfree(ctx.found_data);
4643 return ctx.found_idx;
4647 static int __process_changed_new_xattr(int num, struct btrfs_key *di_key,
4648 const char *name, int name_len,
4649 const char *data, int data_len,
4653 struct send_ctx *sctx = ctx;
4654 char *found_data = NULL;
4655 int found_data_len = 0;
4657 ret = find_xattr(sctx->parent_root, sctx->right_path,
4658 sctx->cmp_key, name, name_len, &found_data,
4660 if (ret == -ENOENT) {
4661 ret = __process_new_xattr(num, di_key, name, name_len, data,
4662 data_len, type, ctx);
4663 } else if (ret >= 0) {
4664 if (data_len != found_data_len ||
4665 memcmp(data, found_data, data_len)) {
4666 ret = __process_new_xattr(num, di_key, name, name_len,
4667 data, data_len, type, ctx);
4677 static int __process_changed_deleted_xattr(int num, struct btrfs_key *di_key,
4678 const char *name, int name_len,
4679 const char *data, int data_len,
4683 struct send_ctx *sctx = ctx;
4685 ret = find_xattr(sctx->send_root, sctx->left_path, sctx->cmp_key,
4686 name, name_len, NULL, NULL);
4688 ret = __process_deleted_xattr(num, di_key, name, name_len, data,
4689 data_len, type, ctx);
4696 static int process_changed_xattr(struct send_ctx *sctx)
4700 ret = iterate_dir_item(sctx->send_root, sctx->left_path,
4701 __process_changed_new_xattr, sctx);
4704 ret = iterate_dir_item(sctx->parent_root, sctx->right_path,
4705 __process_changed_deleted_xattr, sctx);
4711 static int process_all_new_xattrs(struct send_ctx *sctx)
4714 struct btrfs_root *root;
4715 struct btrfs_path *path;
4716 struct btrfs_key key;
4717 struct btrfs_key found_key;
4718 struct extent_buffer *eb;
4721 path = alloc_path_for_send();
4725 root = sctx->send_root;
4727 key.objectid = sctx->cmp_key->objectid;
4728 key.type = BTRFS_XATTR_ITEM_KEY;
4730 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4735 eb = path->nodes[0];
4736 slot = path->slots[0];
4737 if (slot >= btrfs_header_nritems(eb)) {
4738 ret = btrfs_next_leaf(root, path);
4741 } else if (ret > 0) {
4748 btrfs_item_key_to_cpu(eb, &found_key, slot);
4749 if (found_key.objectid != key.objectid ||
4750 found_key.type != key.type) {
4755 ret = iterate_dir_item(root, path, __process_new_xattr, sctx);
4763 btrfs_free_path(path);
4767 static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
4769 struct btrfs_root *root = sctx->send_root;
4770 struct btrfs_fs_info *fs_info = root->fs_info;
4771 struct inode *inode;
4774 struct btrfs_key key;
4775 pgoff_t index = offset >> PAGE_SHIFT;
4777 unsigned pg_offset = offset & ~PAGE_MASK;
4780 key.objectid = sctx->cur_ino;
4781 key.type = BTRFS_INODE_ITEM_KEY;
4784 inode = btrfs_iget(fs_info->sb, &key, root, NULL);
4786 return PTR_ERR(inode);
4788 if (offset + len > i_size_read(inode)) {
4789 if (offset > i_size_read(inode))
4792 len = offset - i_size_read(inode);
4797 last_index = (offset + len - 1) >> PAGE_SHIFT;
4799 /* initial readahead */
4800 memset(&sctx->ra, 0, sizeof(struct file_ra_state));
4801 file_ra_state_init(&sctx->ra, inode->i_mapping);
4803 while (index <= last_index) {
4804 unsigned cur_len = min_t(unsigned, len,
4805 PAGE_SIZE - pg_offset);
4807 page = find_lock_page(inode->i_mapping, index);
4809 page_cache_sync_readahead(inode->i_mapping, &sctx->ra,
4810 NULL, index, last_index + 1 - index);
4812 page = find_or_create_page(inode->i_mapping, index,
4820 if (PageReadahead(page)) {
4821 page_cache_async_readahead(inode->i_mapping, &sctx->ra,
4822 NULL, page, index, last_index + 1 - index);
4825 if (!PageUptodate(page)) {
4826 btrfs_readpage(NULL, page);
4828 if (!PageUptodate(page)) {
4837 memcpy(sctx->read_buf + ret, addr + pg_offset, cur_len);
4852 * Read some bytes from the current inode/file and send a write command to user space.
4855 static int send_write(struct send_ctx *sctx, u64 offset, u32 len)
4857 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
4860 ssize_t num_read = 0;
4862 p = fs_path_alloc();
4866 btrfs_debug(fs_info, "send_write offset=%llu, len=%d", offset, len);
4868 num_read = fill_read_buf(sctx, offset, len);
4869 if (num_read <= 0) {
4875 ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
4879 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4883 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
4884 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
4885 TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, num_read);
4887 ret = send_cmd(sctx);
4898 * Send a clone command to user space.
4900 static int send_clone(struct send_ctx *sctx,
4901 u64 offset, u32 len,
4902 struct clone_root *clone_root)
4908 btrfs_debug(sctx->send_root->fs_info,
4909 "send_clone offset=%llu, len=%d, clone_root=%llu, clone_inode=%llu, clone_offset=%llu",
4910 offset, len, clone_root->root->objectid, clone_root->ino,
4911 clone_root->offset);
4913 p = fs_path_alloc();
4917 ret = begin_cmd(sctx, BTRFS_SEND_C_CLONE);
4921 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4925 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
4926 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_LEN, len);
4927 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
4929 if (clone_root->root == sctx->send_root) {
4930 ret = get_inode_info(sctx->send_root, clone_root->ino, NULL,
4931 &gen, NULL, NULL, NULL, NULL);
4934 ret = get_cur_path(sctx, clone_root->ino, gen, p);
4936 ret = get_inode_path(clone_root->root, clone_root->ino, p);
4942 * If the parent we're using has a received_uuid set then use that as
4943 * our clone source as that is what we will look for when doing a
4946 * This covers the case that we create a snapshot off of a received
4947 * subvolume and then use that as the parent and try to receive on a
4950 if (!btrfs_is_empty_uuid(clone_root->root->root_item.received_uuid))
4951 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
4952 clone_root->root->root_item.received_uuid);
4954 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
4955 clone_root->root->root_item.uuid);
4956 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
4957 le64_to_cpu(clone_root->root->root_item.ctransid));
4958 TLV_PUT_PATH(sctx, BTRFS_SEND_A_CLONE_PATH, p);
4959 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_OFFSET,
4960 clone_root->offset);
4962 ret = send_cmd(sctx);
4971 * Send an update extent command to user space.
4973 static int send_update_extent(struct send_ctx *sctx,
4974 u64 offset, u32 len)
4979 p = fs_path_alloc();
4983 ret = begin_cmd(sctx, BTRFS_SEND_C_UPDATE_EXTENT);
4987 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4991 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
4992 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
4993 TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, len);
4995 ret = send_cmd(sctx);
5003 static int send_hole(struct send_ctx *sctx, u64 end)
5005 struct fs_path *p = NULL;
5006 u64 offset = sctx->cur_inode_last_extent;
5010 if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
5011 return send_update_extent(sctx, offset, end - offset);
5013 p = fs_path_alloc();
5016 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
5018 goto tlv_put_failure;
5019 memset(sctx->read_buf, 0, BTRFS_SEND_READ_SIZE);
5020 while (offset < end) {
5021 len = min_t(u64, end - offset, BTRFS_SEND_READ_SIZE);
5023 ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
5026 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
5027 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
5028 TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, len);
5029 ret = send_cmd(sctx);
5034 sctx->cur_inode_next_write_offset = offset;
5040 static int send_extent_data(struct send_ctx *sctx,
5046 if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
5047 return send_update_extent(sctx, offset, len);
5049 while (sent < len) {
5050 u64 size = len - sent;
5053 if (size > BTRFS_SEND_READ_SIZE)
5054 size = BTRFS_SEND_READ_SIZE;
5055 ret = send_write(sctx, offset + sent, size);
5065 static int clone_range(struct send_ctx *sctx,
5066 struct clone_root *clone_root,
5067 const u64 disk_byte,
5072 struct btrfs_path *path;
5073 struct btrfs_key key;
5077 * Prevent cloning from a zero offset with a length matching the sector
5078 * size because in some scenarios this will make the receiver fail.
5080 * For example, if in the source filesystem the extent at offset 0
5081 * has a length of sectorsize and it was written using direct IO, then
5082 * it can never be an inline extent (even if compression is enabled).
5083 * Then this extent can be cloned in the original filesystem to a non
5084 * zero file offset, but it may not be possible to clone in the
5085 * destination filesystem because it can be inlined due to compression
5086 * on the destination filesystem (as the receiver's write operations are
5087 * always done using buffered IO). The same happens when the original
5088 * filesystem does not have compression enabled but the destination filesystem does.
5091 if (clone_root->offset == 0 &&
5092 len == sctx->send_root->fs_info->sectorsize)
5093 return send_extent_data(sctx, offset, len);
5095 path = alloc_path_for_send();
5100 * We can't send a clone operation for the entire range if we find
5101 * extent items in the respective range in the source file that
5102 * refer to different extents or if we find holes.
5103 * So check for that and do a mix of clone and regular write/copy
5104 * operations if needed.
5108 * mkfs.btrfs -f /dev/sda
5109 * mount /dev/sda /mnt
5110 * xfs_io -f -c "pwrite -S 0xaa 0K 100K" /mnt/foo
5111 * cp --reflink=always /mnt/foo /mnt/bar
5112 * xfs_io -c "pwrite -S 0xbb 50K 50K" /mnt/foo
5113 * btrfs subvolume snapshot -r /mnt /mnt/snap
5115 * If when we send the snapshot and we are processing file bar (which
5116 * has a higher inode number than foo) we blindly send a clone operation
5117 * for the [0, 100K[ range from foo to bar, the receiver ends up getting
5118 * a file bar that matches the content of file foo - iow, doesn't match
5119 * the content from bar in the original filesystem.
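 *
 * (Added summary of the loop below: we walk the clone source's extent
 * items and only emit a clone operation for sub-ranges whose disk bytenr
 * and extent offset still match the extent being processed; holes and
 * mismatching extents fall back to send_extent_data().)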
5121 key.objectid = clone_root->ino;
5122 key.type = BTRFS_EXTENT_DATA_KEY;
5123 key.offset = clone_root->offset;
5124 ret = btrfs_search_slot(NULL, clone_root->root, &key, path, 0, 0);
5127 if (ret > 0 && path->slots[0] > 0) {
5128 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1);
5129 if (key.objectid == clone_root->ino &&
5130 key.type == BTRFS_EXTENT_DATA_KEY)
5135 struct extent_buffer *leaf = path->nodes[0];
5136 int slot = path->slots[0];
5137 struct btrfs_file_extent_item *ei;
5142 if (slot >= btrfs_header_nritems(leaf)) {
5143 ret = btrfs_next_leaf(clone_root->root, path);
5151 btrfs_item_key_to_cpu(leaf, &key, slot);
5154 * We might have an implicit trailing hole (NO_HOLES feature
5155 * enabled). We deal with it after leaving this loop.
5157 if (key.objectid != clone_root->ino ||
5158 key.type != BTRFS_EXTENT_DATA_KEY)
5161 ei = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
5162 type = btrfs_file_extent_type(leaf, ei);
5163 if (type == BTRFS_FILE_EXTENT_INLINE) {
5164 ext_len = btrfs_file_extent_inline_len(leaf, slot, ei);
5165 ext_len = PAGE_ALIGN(ext_len);
5167 ext_len = btrfs_file_extent_num_bytes(leaf, ei);
5170 if (key.offset + ext_len <= clone_root->offset)
5173 if (key.offset > clone_root->offset) {
5174 /* Implicit hole, NO_HOLES feature enabled. */
5175 u64 hole_len = key.offset - clone_root->offset;
5179 ret = send_extent_data(sctx, offset, hole_len);
5187 clone_root->offset += hole_len;
5188 data_offset += hole_len;
5191 if (key.offset >= clone_root->offset + len)
5194 clone_len = min_t(u64, ext_len, len);
5196 if (btrfs_file_extent_disk_bytenr(leaf, ei) == disk_byte &&
5197 btrfs_file_extent_offset(leaf, ei) == data_offset)
5198 ret = send_clone(sctx, offset, clone_len, clone_root);
5200 ret = send_extent_data(sctx, offset, clone_len);
5208 offset += clone_len;
5209 clone_root->offset += clone_len;
5210 data_offset += clone_len;
5216 ret = send_extent_data(sctx, offset, len);
5220 btrfs_free_path(path);
5224 static int send_write_or_clone(struct send_ctx *sctx,
5225 struct btrfs_path *path,
5226 struct btrfs_key *key,
5227 struct clone_root *clone_root)
5230 struct btrfs_file_extent_item *ei;
5231 u64 offset = key->offset;
5234 u64 bs = sctx->send_root->fs_info->sb->s_blocksize;
5236 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
5237 struct btrfs_file_extent_item);
5238 type = btrfs_file_extent_type(path->nodes[0], ei);
5239 if (type == BTRFS_FILE_EXTENT_INLINE) {
5240 len = btrfs_file_extent_inline_len(path->nodes[0],
5241 path->slots[0], ei);
5243 * it is possible the inline item won't cover the whole page,
5244 * but there may be items after this page. Make
5245 * sure to send the whole thing
5247 len = PAGE_ALIGN(len);
5249 len = btrfs_file_extent_num_bytes(path->nodes[0], ei);
5252 if (offset + len > sctx->cur_inode_size)
5253 len = sctx->cur_inode_size - offset;
5259 if (clone_root && IS_ALIGNED(offset + len, bs)) {
5263 disk_byte = btrfs_file_extent_disk_bytenr(path->nodes[0], ei);
5264 data_offset = btrfs_file_extent_offset(path->nodes[0], ei);
5265 ret = clone_range(sctx, clone_root, disk_byte, data_offset,
5268 ret = send_extent_data(sctx, offset, len);
5270 sctx->cur_inode_next_write_offset = offset + len;
5275 static int is_extent_unchanged(struct send_ctx *sctx,
5276 struct btrfs_path *left_path,
5277 struct btrfs_key *ekey)
5280 struct btrfs_key key;
5281 struct btrfs_path *path = NULL;
5282 struct extent_buffer *eb;
5284 struct btrfs_key found_key;
5285 struct btrfs_file_extent_item *ei;
5290 u64 left_offset_fixed;
5298 path = alloc_path_for_send();
5302 eb = left_path->nodes[0];
5303 slot = left_path->slots[0];
5304 ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
5305 left_type = btrfs_file_extent_type(eb, ei);
5307 if (left_type != BTRFS_FILE_EXTENT_REG) {
5311 left_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
5312 left_len = btrfs_file_extent_num_bytes(eb, ei);
5313 left_offset = btrfs_file_extent_offset(eb, ei);
5314 left_gen = btrfs_file_extent_generation(eb, ei);
5317 * Following comments will refer to these graphics. L is the left
5318 * extent which we are checking at the moment. 1-8 are the right
5319 * extents that we iterate.
5322 * |-1-|-2a-|-3-|-4-|-5-|-6-|
5325 * |--1--|-2b-|...(same as above)
5327 * Alternative situation. Happens on files where extents got split.
5329 * |-----------7-----------|-6-|
5331 * Alternative situation. Happens on files which got larger.
5334 * Nothing follows after 8.
5337 key.objectid = ekey->objectid;
5338 key.type = BTRFS_EXTENT_DATA_KEY;
5339 key.offset = ekey->offset;
5340 ret = btrfs_search_slot_for_read(sctx->parent_root, &key, path, 0, 0);
5349 * Handle special case where the right side has no extents at all.
5351 eb = path->nodes[0];
5352 slot = path->slots[0];
5353 btrfs_item_key_to_cpu(eb, &found_key, slot);
5354 if (found_key.objectid != key.objectid ||
5355 found_key.type != key.type) {
5356 /* If we're a hole then just pretend nothing changed */
5357 ret = (left_disknr) ? 0 : 1;
5362 * We're now on 2a, 2b or 7.
5365 while (key.offset < ekey->offset + left_len) {
5366 ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
5367 right_type = btrfs_file_extent_type(eb, ei);
5368 if (right_type != BTRFS_FILE_EXTENT_REG &&
5369 right_type != BTRFS_FILE_EXTENT_INLINE) {
5374 if (right_type == BTRFS_FILE_EXTENT_INLINE) {
5375 right_len = btrfs_file_extent_inline_len(eb, slot, ei);
5376 right_len = PAGE_ALIGN(right_len);
5378 right_len = btrfs_file_extent_num_bytes(eb, ei);
5382 * Are we at extent 8? If yes, we know the extent is changed.
5383 * This may only happen on the first iteration.
5385 if (found_key.offset + right_len <= ekey->offset) {
5386 /* If we're a hole just pretend nothing changed */
5387 ret = (left_disknr) ? 0 : 1;
5392 * We just wanted to see if when we have an inline extent, what
5393 * follows it is a regular extent (wanted to check the above
5394 * condition for inline extents too). This should normally not
5395 * happen but it's possible for example when we have an inline
5396 * compressed extent representing data with a size matching
5397 * the page size (currently the same as sector size).
5399 if (right_type == BTRFS_FILE_EXTENT_INLINE) {
5404 right_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
5405 right_offset = btrfs_file_extent_offset(eb, ei);
5406 right_gen = btrfs_file_extent_generation(eb, ei);
5408 left_offset_fixed = left_offset;
5409 if (key.offset < ekey->offset) {
5410 /* Fix the right offset for 2a and 7. */
5411 right_offset += ekey->offset - key.offset;
5413 /* Fix the left offset for all behind 2a and 2b */
5414 left_offset_fixed += key.offset - ekey->offset;
5418 * Check if we have the same extent.
5420 if (left_disknr != right_disknr ||
5421 left_offset_fixed != right_offset ||
5422 left_gen != right_gen) {
5428 * Go to the next extent.
5430 ret = btrfs_next_item(sctx->parent_root, path);
5434 eb = path->nodes[0];
5435 slot = path->slots[0];
5436 btrfs_item_key_to_cpu(eb, &found_key, slot);
5438 if (ret || found_key.objectid != key.objectid ||
5439 found_key.type != key.type) {
5440 key.offset += right_len;
5443 if (found_key.offset != key.offset + right_len) {
5451 * We're now behind the left extent (treat as unchanged) or at the end
5452 * of the right side (treat as changed).
5454 if (key.offset >= ekey->offset + left_len)
5461 btrfs_free_path(path);
5465 static int get_last_extent(struct send_ctx *sctx, u64 offset)
5467 struct btrfs_path *path;
5468 struct btrfs_root *root = sctx->send_root;
5469 struct btrfs_file_extent_item *fi;
5470 struct btrfs_key key;
5475 path = alloc_path_for_send();
5479 sctx->cur_inode_last_extent = 0;
5481 key.objectid = sctx->cur_ino;
5482 key.type = BTRFS_EXTENT_DATA_KEY;
5483 key.offset = offset;
5484 ret = btrfs_search_slot_for_read(root, &key, path, 0, 1);
5488 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
5489 if (key.objectid != sctx->cur_ino || key.type != BTRFS_EXTENT_DATA_KEY)
5492 fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
5493 struct btrfs_file_extent_item);
5494 type = btrfs_file_extent_type(path->nodes[0], fi);
5495 if (type == BTRFS_FILE_EXTENT_INLINE) {
5496 u64 size = btrfs_file_extent_inline_len(path->nodes[0],
5497 path->slots[0], fi);
5498 extent_end = ALIGN(key.offset + size,
5499 sctx->send_root->fs_info->sectorsize);
5501 extent_end = key.offset +
5502 btrfs_file_extent_num_bytes(path->nodes[0], fi);
5504 sctx->cur_inode_last_extent = extent_end;
5506 btrfs_free_path(path);
5510 static int range_is_hole_in_parent(struct send_ctx *sctx,
5514 struct btrfs_path *path;
5515 struct btrfs_key key;
5516 struct btrfs_root *root = sctx->parent_root;
5517 u64 search_start = start;
5520 path = alloc_path_for_send();
5524 key.objectid = sctx->cur_ino;
5525 key.type = BTRFS_EXTENT_DATA_KEY;
5526 key.offset = search_start;
5527 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5530 if (ret > 0 && path->slots[0] > 0)
5533 while (search_start < end) {
5534 struct extent_buffer *leaf = path->nodes[0];
5535 int slot = path->slots[0];
5536 struct btrfs_file_extent_item *fi;
5539 if (slot >= btrfs_header_nritems(leaf)) {
5540 ret = btrfs_next_leaf(root, path);
5548 btrfs_item_key_to_cpu(leaf, &key, slot);
5549 if (key.objectid < sctx->cur_ino ||
5550 key.type < BTRFS_EXTENT_DATA_KEY)
5552 if (key.objectid > sctx->cur_ino ||
5553 key.type > BTRFS_EXTENT_DATA_KEY ||
5557 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
5558 if (btrfs_file_extent_type(leaf, fi) ==
5559 BTRFS_FILE_EXTENT_INLINE) {
5560 u64 size = btrfs_file_extent_inline_len(leaf, slot, fi);
5562 extent_end = ALIGN(key.offset + size,
5563 root->fs_info->sectorsize);
5565 extent_end = key.offset +
5566 btrfs_file_extent_num_bytes(leaf, fi);
5568 if (extent_end <= start)
5570 if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0) {
5571 search_start = extent_end;
5581 btrfs_free_path(path);
5585 static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path,
5586 struct btrfs_key *key)
5588 struct btrfs_file_extent_item *fi;
5593 if (sctx->cur_ino != key->objectid || !need_send_hole(sctx))
5596 if (sctx->cur_inode_last_extent == (u64)-1) {
5597 ret = get_last_extent(sctx, key->offset - 1);
5602 fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
5603 struct btrfs_file_extent_item);
5604 type = btrfs_file_extent_type(path->nodes[0], fi);
5605 if (type == BTRFS_FILE_EXTENT_INLINE) {
5606 u64 size = btrfs_file_extent_inline_len(path->nodes[0],
5607 path->slots[0], fi);
5608 extent_end = ALIGN(key->offset + size,
5609 sctx->send_root->fs_info->sectorsize);
5611 extent_end = key->offset +
5612 btrfs_file_extent_num_bytes(path->nodes[0], fi);
5615 if (path->slots[0] == 0 &&
5616 sctx->cur_inode_last_extent < key->offset) {
5618 * We might have skipped entire leafs that contained only
5619 * file extent items for our current inode. These leafs have
5620 * a generation number smaller (older) than the one in the
5621 * current leaf and the leaf our last extent came from, and
5622 * are located between these 2 leafs.
5624 ret = get_last_extent(sctx, key->offset - 1);
5629 if (sctx->cur_inode_last_extent < key->offset) {
5630 ret = range_is_hole_in_parent(sctx,
5631 sctx->cur_inode_last_extent,
5636 ret = send_hole(sctx, key->offset);
5640 sctx->cur_inode_last_extent = extent_end;
5644 static int process_extent(struct send_ctx *sctx,
5645 struct btrfs_path *path,
5646 struct btrfs_key *key)
5648 struct clone_root *found_clone = NULL;
5651 if (S_ISLNK(sctx->cur_inode_mode))
5654 if (sctx->parent_root && !sctx->cur_inode_new) {
5655 ret = is_extent_unchanged(sctx, path, key);
5663 struct btrfs_file_extent_item *ei;
5666 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
5667 struct btrfs_file_extent_item);
5668 type = btrfs_file_extent_type(path->nodes[0], ei);
5669 if (type == BTRFS_FILE_EXTENT_PREALLOC ||
5670 type == BTRFS_FILE_EXTENT_REG) {
5672 * The send spec does not have a prealloc command yet,
5673 * so just leave a hole for prealloc'ed extents until
5674 * we have enough commands queued up to justify rev'ing the send spec.
5677 if (type == BTRFS_FILE_EXTENT_PREALLOC) {
5682 /* Have a hole, just skip it. */
5683 if (btrfs_file_extent_disk_bytenr(path->nodes[0], ei) == 0) {
5690 ret = find_extent_clone(sctx, path, key->objectid, key->offset,
5691 sctx->cur_inode_size, &found_clone);
5692 if (ret != -ENOENT && ret < 0)
5695 ret = send_write_or_clone(sctx, path, key, found_clone);
5699 ret = maybe_send_hole(sctx, path, key);
5704 static int process_all_extents(struct send_ctx *sctx)
5707 struct btrfs_root *root;
5708 struct btrfs_path *path;
5709 struct btrfs_key key;
5710 struct btrfs_key found_key;
5711 struct extent_buffer *eb;
5714 root = sctx->send_root;
5715 path = alloc_path_for_send();
5719 key.objectid = sctx->cmp_key->objectid;
5720 key.type = BTRFS_EXTENT_DATA_KEY;
5722 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5727 eb = path->nodes[0];
5728 slot = path->slots[0];
5730 if (slot >= btrfs_header_nritems(eb)) {
5731 ret = btrfs_next_leaf(root, path);
5734 } else if (ret > 0) {
5741 btrfs_item_key_to_cpu(eb, &found_key, slot);
5743 if (found_key.objectid != key.objectid ||
5744 found_key.type != key.type) {
5749 ret = process_extent(sctx, path, &found_key);
5757 btrfs_free_path(path);
5761 static int process_recorded_refs_if_needed(struct send_ctx *sctx, int at_end,
5763 int *refs_processed)
5767 if (sctx->cur_ino == 0)
5769 if (!at_end && sctx->cur_ino == sctx->cmp_key->objectid &&
5770 sctx->cmp_key->type <= BTRFS_INODE_EXTREF_KEY)
5772 if (list_empty(&sctx->new_refs) && list_empty(&sctx->deleted_refs))
5775 ret = process_recorded_refs(sctx, pending_move);
5779 *refs_processed = 1;
5784 static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
5795 int need_truncate = 1;
5796 int pending_move = 0;
5797 int refs_processed = 0;
5799 ret = process_recorded_refs_if_needed(sctx, at_end, &pending_move,
5805 * We have processed the refs and thus need to advance send_progress.
5806 * Now, calls to get_cur_xxx will take the updated refs of the current
5807 * inode into account.
5809 * On the other hand, if our current inode is a directory and couldn't
5810 * be moved/renamed because its parent was renamed/moved too and it has
5811 * a higher inode number, we can only move/rename our current inode
5812 * after we moved/renamed its parent. Therefore in this case operate on
5813 * the old path (pre move/rename) of our current inode, and the
5814 * move/rename will be performed later.
5816 if (refs_processed && !pending_move)
5817 sctx->send_progress = sctx->cur_ino + 1;
5819 if (sctx->cur_ino == 0 || sctx->cur_inode_deleted)
5821 if (!at_end && sctx->cmp_key->objectid == sctx->cur_ino)
5824 ret = get_inode_info(sctx->send_root, sctx->cur_ino, NULL, NULL,
5825 &left_mode, &left_uid, &left_gid, NULL);
5829 if (!sctx->parent_root || sctx->cur_inode_new) {
5831 if (!S_ISLNK(sctx->cur_inode_mode))
5833 if (sctx->cur_inode_next_write_offset == sctx->cur_inode_size)
5838 ret = get_inode_info(sctx->parent_root, sctx->cur_ino,
5839 &old_size, NULL, &right_mode, &right_uid,
5844 if (left_uid != right_uid || left_gid != right_gid)
5846 if (!S_ISLNK(sctx->cur_inode_mode) && left_mode != right_mode)
5848 if ((old_size == sctx->cur_inode_size) ||
5849 (sctx->cur_inode_size > old_size &&
5850 sctx->cur_inode_next_write_offset == sctx->cur_inode_size))
5854 if (S_ISREG(sctx->cur_inode_mode)) {
5855 if (need_send_hole(sctx)) {
5856 if (sctx->cur_inode_last_extent == (u64)-1 ||
5857 sctx->cur_inode_last_extent <
5858 sctx->cur_inode_size) {
5859 ret = get_last_extent(sctx, (u64)-1);
5863 if (sctx->cur_inode_last_extent <
5864 sctx->cur_inode_size) {
5865 ret = send_hole(sctx, sctx->cur_inode_size);
5870 if (need_truncate) {
5871 ret = send_truncate(sctx, sctx->cur_ino,
5872 sctx->cur_inode_gen,
5873 sctx->cur_inode_size);
5880 ret = send_chown(sctx, sctx->cur_ino, sctx->cur_inode_gen,
5881 left_uid, left_gid);
5886 ret = send_chmod(sctx, sctx->cur_ino, sctx->cur_inode_gen,
5893 * If other directory inodes depended on our current directory
5894 * inode's move/rename, now do their move/rename operations.
5896 if (!is_waiting_for_move(sctx, sctx->cur_ino)) {
5897 ret = apply_children_dir_moves(sctx);
5901 * Need to send that every time, regardless of whether it actually
5902 * changed between the two trees, as we have made changes to
5903 * the inode before. If our inode is a directory and it's
5904 * waiting to be moved/renamed, we will send its utimes when
5905 * it's moved/renamed, therefore we don't need to do it here.
5907 sctx->send_progress = sctx->cur_ino + 1;
5908 ret = send_utimes(sctx, sctx->cur_ino, sctx->cur_inode_gen);
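/*
 * Called by changed_cb() for inode items. Records the state of the current
 * inode (generation, size, mode, rdev) in sctx. If the inode number was
 * reused with a new generation, the old inode is handled as deleted and the
 * new one as created right here, including all of its refs, extents and
 * xattrs.
 */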
5917 static int changed_inode(struct send_ctx *sctx,
5918 enum btrfs_compare_tree_result result)
5921 struct btrfs_key *key = sctx->cmp_key;
5922 struct btrfs_inode_item *left_ii = NULL;
5923 struct btrfs_inode_item *right_ii = NULL;
5927 sctx->cur_ino = key->objectid;
5928 sctx->cur_inode_new_gen = 0;
5929 sctx->cur_inode_last_extent = (u64)-1;
5930 sctx->cur_inode_next_write_offset = 0;
5933 * Set send_progress to current inode. This will tell all get_cur_xxx
5934 * functions that the current inode's refs are not updated yet. Later,
5935 * when process_recorded_refs is finished, it is set to cur_ino + 1.
5937 sctx->send_progress = sctx->cur_ino;
5939 if (result == BTRFS_COMPARE_TREE_NEW ||
5940 result == BTRFS_COMPARE_TREE_CHANGED) {
5941 left_ii = btrfs_item_ptr(sctx->left_path->nodes[0],
5942 sctx->left_path->slots[0],
5943 struct btrfs_inode_item);
5944 left_gen = btrfs_inode_generation(sctx->left_path->nodes[0],
5947 right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
5948 sctx->right_path->slots[0],
5949 struct btrfs_inode_item);
5950 right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
5953 if (result == BTRFS_COMPARE_TREE_CHANGED) {
5954 right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
5955 sctx->right_path->slots[0],
5956 struct btrfs_inode_item);
5958 right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
5962 * The cur_ino = root dir case is special here. We can't treat
5963 * the inode as deleted+reused because it would generate a
5964 * stream that tries to delete/mkdir the root dir.
5966 if (left_gen != right_gen &&
5967 sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
5968 sctx->cur_inode_new_gen = 1;
5971 if (result == BTRFS_COMPARE_TREE_NEW) {
5972 sctx->cur_inode_gen = left_gen;
5973 sctx->cur_inode_new = 1;
5974 sctx->cur_inode_deleted = 0;
5975 sctx->cur_inode_size = btrfs_inode_size(
5976 sctx->left_path->nodes[0], left_ii);
5977 sctx->cur_inode_mode = btrfs_inode_mode(
5978 sctx->left_path->nodes[0], left_ii);
5979 sctx->cur_inode_rdev = btrfs_inode_rdev(
5980 sctx->left_path->nodes[0], left_ii);
5981 if (sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
5982 ret = send_create_inode_if_needed(sctx);
5983 } else if (result == BTRFS_COMPARE_TREE_DELETED) {
5984 sctx->cur_inode_gen = right_gen;
5985 sctx->cur_inode_new = 0;
5986 sctx->cur_inode_deleted = 1;
5987 sctx->cur_inode_size = btrfs_inode_size(
5988 sctx->right_path->nodes[0], right_ii);
5989 sctx->cur_inode_mode = btrfs_inode_mode(
5990 sctx->right_path->nodes[0], right_ii);
5991 } else if (result == BTRFS_COMPARE_TREE_CHANGED) {
5993 * We need to do some special handling in case the inode was
5994 * reported as changed with a changed generation number. This
5995 * means that the original inode was deleted and a new inode
5996 * reused the same inum. So we have to treat the old inode as
5997 * deleted and the new one as new.
5999 if (sctx->cur_inode_new_gen) {
6001 * First, process the inode as if it was deleted.
6003 sctx->cur_inode_gen = right_gen;
6004 sctx->cur_inode_new = 0;
6005 sctx->cur_inode_deleted = 1;
6006 sctx->cur_inode_size = btrfs_inode_size(
6007 sctx->right_path->nodes[0], right_ii);
6008 sctx->cur_inode_mode = btrfs_inode_mode(
6009 sctx->right_path->nodes[0], right_ii);
6010 ret = process_all_refs(sctx,
6011 BTRFS_COMPARE_TREE_DELETED);
6016 * Now process the inode as if it was new.
6018 sctx->cur_inode_gen = left_gen;
6019 sctx->cur_inode_new = 1;
6020 sctx->cur_inode_deleted = 0;
6021 sctx->cur_inode_size = btrfs_inode_size(
6022 sctx->left_path->nodes[0], left_ii);
6023 sctx->cur_inode_mode = btrfs_inode_mode(
6024 sctx->left_path->nodes[0], left_ii);
6025 sctx->cur_inode_rdev = btrfs_inode_rdev(
6026 sctx->left_path->nodes[0], left_ii);
6027 ret = send_create_inode_if_needed(sctx);
6031 ret = process_all_refs(sctx, BTRFS_COMPARE_TREE_NEW);
6035 * Advance send_progress now as we did not get into
6036 * process_recorded_refs_if_needed in the new_gen case.
6038 sctx->send_progress = sctx->cur_ino + 1;
6041 * Now process all extents and xattrs of the inode as if
6042 * they were all new.
6044 ret = process_all_extents(sctx);
6047 ret = process_all_new_xattrs(sctx);
6051 sctx->cur_inode_gen = left_gen;
6052 sctx->cur_inode_new = 0;
6053 sctx->cur_inode_new_gen = 0;
6054 sctx->cur_inode_deleted = 0;
6055 sctx->cur_inode_size = btrfs_inode_size(
6056 sctx->left_path->nodes[0], left_ii);
6057 sctx->cur_inode_mode = btrfs_inode_mode(
6058 sctx->left_path->nodes[0], left_ii);
6067 * We have to process new refs before deleted refs, but compare_trees gives us
6068 * the new and deleted refs mixed. To fix this, we record the new/deleted refs
6069 * first and later process them in process_recorded_refs.
6070 * For the cur_inode_new_gen case, we skip recording completely because
6071 * changed_inode already initiated processing of refs. The reason for this is
6072 * that in this case, compare_tree actually compares the refs of 2 different
6073 * inodes. To fix this, process_all_refs is used in changed_inode to handle all
6074 * refs of the right tree as deleted and all refs of the left tree as new.
6076 static int changed_ref(struct send_ctx *sctx,
6077 enum btrfs_compare_tree_result result)
6081 if (sctx->cur_ino != sctx->cmp_key->objectid) {
6082 inconsistent_snapshot_error(sctx, result, "reference");
6086 if (!sctx->cur_inode_new_gen &&
6087 sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) {
6088 if (result == BTRFS_COMPARE_TREE_NEW)
6089 ret = record_new_ref(sctx);
6090 else if (result == BTRFS_COMPARE_TREE_DELETED)
6091 ret = record_deleted_ref(sctx);
6092 else if (result == BTRFS_COMPARE_TREE_CHANGED)
6093 ret = record_changed_ref(sctx);
6100 * Process new/deleted/changed xattrs. We skip processing in the
6101 * cur_inode_new_gen case because changed_inode already initiated processing
6102 * of xattrs. The reason is the same as in changed_ref.
6104 static int changed_xattr(struct send_ctx *sctx,
6105 enum btrfs_compare_tree_result result)
6109 if (sctx->cur_ino != sctx->cmp_key->objectid) {
6110 inconsistent_snapshot_error(sctx, result, "xattr");
6114 if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
6115 if (result == BTRFS_COMPARE_TREE_NEW)
6116 ret = process_new_xattr(sctx);
6117 else if (result == BTRFS_COMPARE_TREE_DELETED)
6118 ret = process_deleted_xattr(sctx);
6119 else if (result == BTRFS_COMPARE_TREE_CHANGED)
6120 ret = process_changed_xattr(sctx);
6127 * Process new/deleted/changed extents. We skip processing in the
6128 * cur_inode_new_gen case because changed_inode already initiated processing
6129 * of extents. The reason is the same as in changed_ref.
6131 static int changed_extent(struct send_ctx *sctx,
6132 enum btrfs_compare_tree_result result)
6136 if (sctx->cur_ino != sctx->cmp_key->objectid) {
6138 if (result == BTRFS_COMPARE_TREE_CHANGED) {
6139 struct extent_buffer *leaf_l;
6140 struct extent_buffer *leaf_r;
6141 struct btrfs_file_extent_item *ei_l;
6142 struct btrfs_file_extent_item *ei_r;
6144 leaf_l = sctx->left_path->nodes[0];
6145 leaf_r = sctx->right_path->nodes[0];
6146 ei_l = btrfs_item_ptr(leaf_l,
6147 sctx->left_path->slots[0],
6148 struct btrfs_file_extent_item);
6149 ei_r = btrfs_item_ptr(leaf_r,
6150 sctx->right_path->slots[0],
6151 struct btrfs_file_extent_item);
6154 * We may have found an extent item that has changed
6155 * only its disk_bytenr field and the corresponding
6156 * inode item was not updated. This case happens due to
6157 * very specific timings during relocation when a leaf
6158 * that contains file extent items is COWed while
6159 * relocation is ongoing and it's in the stage where it
6160 * updates data pointers. So when this happens we can
6161 * safely ignore it since we know it's the same extent,
6162 * but just at different logical and physical locations
6163 * (when an extent is fully replaced with a new one, we
6164 * know the generation number must have changed too,
6165 * since snapshot creation implies committing the current
6166 * transaction, and the inode item must have been updated too).
6168 * This replacement of the disk_bytenr happens at
6169 * relocation.c:replace_file_extents() through
6170 * relocation.c:btrfs_reloc_cow_block().
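 *
 * So if every field of the left and right file extent items matches except
 * the disk_bytenr, treat the item as unchanged and skip it.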
6172 if (btrfs_file_extent_generation(leaf_l, ei_l) ==
6173 btrfs_file_extent_generation(leaf_r, ei_r) &&
6174 btrfs_file_extent_ram_bytes(leaf_l, ei_l) ==
6175 btrfs_file_extent_ram_bytes(leaf_r, ei_r) &&
6176 btrfs_file_extent_compression(leaf_l, ei_l) ==
6177 btrfs_file_extent_compression(leaf_r, ei_r) &&
6178 btrfs_file_extent_encryption(leaf_l, ei_l) ==
6179 btrfs_file_extent_encryption(leaf_r, ei_r) &&
6180 btrfs_file_extent_other_encoding(leaf_l, ei_l) ==
6181 btrfs_file_extent_other_encoding(leaf_r, ei_r) &&
6182 btrfs_file_extent_type(leaf_l, ei_l) ==
6183 btrfs_file_extent_type(leaf_r, ei_r) &&
6184 btrfs_file_extent_disk_bytenr(leaf_l, ei_l) !=
6185 btrfs_file_extent_disk_bytenr(leaf_r, ei_r) &&
6186 btrfs_file_extent_disk_num_bytes(leaf_l, ei_l) ==
6187 btrfs_file_extent_disk_num_bytes(leaf_r, ei_r) &&
6188 btrfs_file_extent_offset(leaf_l, ei_l) ==
6189 btrfs_file_extent_offset(leaf_r, ei_r) &&
6190 btrfs_file_extent_num_bytes(leaf_l, ei_l) ==
6191 btrfs_file_extent_num_bytes(leaf_r, ei_r))
6195 inconsistent_snapshot_error(sctx, result, "extent");
6199 if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
6200 if (result != BTRFS_COMPARE_TREE_DELETED)
6201 ret = process_extent(sctx, sctx->left_path,
6208 static int dir_changed(struct send_ctx *sctx, u64 dir)
6210 u64 orig_gen, new_gen;
6213 ret = get_inode_info(sctx->send_root, dir, NULL, &new_gen, NULL, NULL,
6218 ret = get_inode_info(sctx->parent_root, dir, NULL, &orig_gen, NULL,
6223 return (orig_gen != new_gen) ? 1 : 0;
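/*
 * For inode ref/extref items that compare_trees() reports as unchanged, check
 * via dir_changed() whether any parent directory they reference was deleted
 * and recreated (different generation). An extref item can carry several
 * parent references, so walk all of them.
 */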
6226 static int compare_refs(struct send_ctx *sctx, struct btrfs_path *path,
6227 struct btrfs_key *key)
6229 struct btrfs_inode_extref *extref;
6230 struct extent_buffer *leaf;
6231 u64 dirid = 0, last_dirid = 0;
6238 /* Easy case, just check this one dirid */
6239 if (key->type == BTRFS_INODE_REF_KEY) {
6240 dirid = key->offset;
6242 ret = dir_changed(sctx, dirid);
6246 leaf = path->nodes[0];
6247 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
6248 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
6249 while (cur_offset < item_size) {
6250 extref = (struct btrfs_inode_extref *)(ptr +
6252 dirid = btrfs_inode_extref_parent(leaf, extref);
6253 ref_name_len = btrfs_inode_extref_name_len(leaf, extref);
6254 cur_offset += ref_name_len + sizeof(*extref);
6255 if (dirid == last_dirid)
6257 ret = dir_changed(sctx, dirid);
6267 * Updates compare-related fields in sctx and simply forwards to the actual
6268 * changed_xxx functions.
6270 static int changed_cb(struct btrfs_path *left_path,
6271 struct btrfs_path *right_path,
6272 struct btrfs_key *key,
6273 enum btrfs_compare_tree_result result,
6277 struct send_ctx *sctx = ctx;
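/*
 * Items reported as identical may still need work: a ref may point into a
 * directory that was deleted and recreated, and an unchanged file extent item
 * may still imply a hole that has to be sent. In those cases fall through and
 * treat the item as changed.
 */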
6279 if (result == BTRFS_COMPARE_TREE_SAME) {
6280 if (key->type == BTRFS_INODE_REF_KEY ||
6281 key->type == BTRFS_INODE_EXTREF_KEY) {
6282 ret = compare_refs(sctx, left_path, key);
6287 } else if (key->type == BTRFS_EXTENT_DATA_KEY) {
6288 return maybe_send_hole(sctx, left_path, key);
6292 result = BTRFS_COMPARE_TREE_CHANGED;
6296 sctx->left_path = left_path;
6297 sctx->right_path = right_path;
6298 sctx->cmp_key = key;
6300 ret = finish_inode_if_needed(sctx, 0);
6304 /* Ignore non-FS objects */
6305 if (key->objectid == BTRFS_FREE_INO_OBJECTID ||
6306 key->objectid == BTRFS_FREE_SPACE_OBJECTID)
6309 if (key->type == BTRFS_INODE_ITEM_KEY)
6310 ret = changed_inode(sctx, result);
6311 else if (key->type == BTRFS_INODE_REF_KEY ||
6312 key->type == BTRFS_INODE_EXTREF_KEY)
6313 ret = changed_ref(sctx, result);
6314 else if (key->type == BTRFS_XATTR_ITEM_KEY)
6315 ret = changed_xattr(sctx, result);
6316 else if (key->type == BTRFS_EXTENT_DATA_KEY)
6317 ret = changed_extent(sctx, result);
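/*
 * Used when there is no parent snapshot: visit every item of the send root in
 * key order, starting at the first regular inode, and feed each one to
 * changed_cb() as BTRFS_COMPARE_TREE_NEW.
 */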
6323 static int full_send_tree(struct send_ctx *sctx)
6326 struct btrfs_root *send_root = sctx->send_root;
6327 struct btrfs_key key;
6328 struct btrfs_key found_key;
6329 struct btrfs_path *path;
6330 struct extent_buffer *eb;
6333 path = alloc_path_for_send();
6337 key.objectid = BTRFS_FIRST_FREE_OBJECTID;
6338 key.type = BTRFS_INODE_ITEM_KEY;
6341 ret = btrfs_search_slot_for_read(send_root, &key, path, 1, 0);
6348 eb = path->nodes[0];
6349 slot = path->slots[0];
6350 btrfs_item_key_to_cpu(eb, &found_key, slot);
6352 ret = changed_cb(path, NULL, &found_key,
6353 BTRFS_COMPARE_TREE_NEW, sctx);
6357 key.objectid = found_key.objectid;
6358 key.type = found_key.type;
6359 key.offset = found_key.offset + 1;
6361 ret = btrfs_next_item(send_root, path);
6371 ret = finish_inode_if_needed(sctx, 1);
6374 btrfs_free_path(path);
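/*
 * Top level of stream generation: emit the stream header (unless omitted via
 * the flags) and the subvol begin command, then either diff the send root
 * against the parent root with btrfs_compare_trees() or fall back to
 * full_send_tree(), finishing the last in-progress inode at the end.
 */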
6378 static int send_subvol(struct send_ctx *sctx)
6382 if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_STREAM_HEADER)) {
6383 ret = send_header(sctx);
6388 ret = send_subvol_begin(sctx);
6392 if (sctx->parent_root) {
6393 ret = btrfs_compare_trees(sctx->send_root, sctx->parent_root,
6397 ret = finish_inode_if_needed(sctx, 1);
6401 ret = full_send_tree(sctx);
6407 free_recorded_refs(sctx);
6412 * If orphan cleanup removed any orphans from a root, it means the tree
6413 * was modified and therefore the commit root is not the same as the current
6414 * root anymore. This is a problem, because send uses the commit root and
6415 * therefore can see inode items that don't exist in the current root anymore,
6416 * and for example make calls to btrfs_iget, which will do tree lookups based
6417 * on the current root and not on the commit root. Those lookups will fail,
6418 * returning a -ESTALE error, and making send fail with that error. So make
6419 * sure a send does not see any orphans we have just removed, and that it will
6420 * see the same inodes regardless of whether a transaction commit happened
6421 * before it started (meaning that the commit root will be the same as the
6422 * current root) or not.
6424 static int ensure_commit_roots_uptodate(struct send_ctx *sctx)
6427 struct btrfs_trans_handle *trans = NULL;
6430 if (sctx->parent_root &&
6431 sctx->parent_root->node != sctx->parent_root->commit_root)
6434 for (i = 0; i < sctx->clone_roots_cnt; i++)
6435 if (sctx->clone_roots[i].root->node !=
6436 sctx->clone_roots[i].root->commit_root)
6440 return btrfs_end_transaction(trans);
6445 /* Use any root, all fs roots will get their commit roots updated. */
6447 trans = btrfs_join_transaction(sctx->send_root);
6449 return PTR_ERR(trans);
6453 return btrfs_commit_transaction(trans);
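/* Drop the send_in_progress reference taken on a root for this send. */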
6456 static void btrfs_root_dec_send_in_progress(struct btrfs_root* root)
6458 spin_lock(&root->root_item_lock);
6459 root->send_in_progress--;
6461 * Not much left to do, we don't know why it's unbalanced and
6462 * can't blindly reset it to 0.
6464 if (root->send_in_progress < 0)
6465 btrfs_err(root->fs_info,
6466 "send_in_progres unbalanced %d root %llu",
6467 root->send_in_progress, root->root_key.objectid);
6468 spin_unlock(&root->root_item_lock);
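/*
 * Entry point for the send ioctl: makes sure the subvolume stays read-only,
 * sets up the send context and the clone/parent roots, generates the stream
 * via send_subvol() and tears everything down again.
 *
 * Rough userspace sketch (hypothetical descriptors, error handling omitted):
 *
 *	struct btrfs_ioctl_send_args args = {
 *		.send_fd = out_fd,		/* stream destination */
 *		.parent_root = parent_id,	/* 0 for a full send */
 *	};
 *	ioctl(subvol_fd, BTRFS_IOC_SEND, &args);
 */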
6471 long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg)
6474 struct btrfs_root *send_root = BTRFS_I(file_inode(mnt_file))->root;
6475 struct btrfs_fs_info *fs_info = send_root->fs_info;
6476 struct btrfs_root *clone_root;
6477 struct btrfs_key key;
6478 struct send_ctx *sctx = NULL;
6480 u64 *clone_sources_tmp = NULL;
6481 int clone_sources_to_rollback = 0;
6482 unsigned alloc_size;
6483 int sort_clone_roots = 0;
6486 if (!capable(CAP_SYS_ADMIN))
6490 * The subvolume must remain read-only during send; protect against
6491 * making it RW. This also protects against deletion.
6493 spin_lock(&send_root->root_item_lock);
6494 send_root->send_in_progress++;
6495 spin_unlock(&send_root->root_item_lock);
6498 * This is done when we look up the root; it should already be complete
6499 * by the time we get here.
6501 WARN_ON(send_root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE);
6504 * Userspace tools do the checks and warn the user if it's not RO.
6507 if (!btrfs_root_readonly(send_root)) {
6513 * Check that we don't overflow at later allocations, we request
6514 * clone_sources_count + 1 items, and compare to unsigned long inside access_ok().
6517 if (arg->clone_sources_count >
6518 ULONG_MAX / sizeof(struct clone_root) - 1) {
6523 if (!access_ok(VERIFY_READ, arg->clone_sources,
6524 sizeof(*arg->clone_sources) *
6525 arg->clone_sources_count)) {
6530 if (arg->flags & ~BTRFS_SEND_FLAG_MASK) {
6535 sctx = kzalloc(sizeof(struct send_ctx), GFP_KERNEL);
6541 INIT_LIST_HEAD(&sctx->new_refs);
6542 INIT_LIST_HEAD(&sctx->deleted_refs);
6543 INIT_RADIX_TREE(&sctx->name_cache, GFP_KERNEL);
6544 INIT_LIST_HEAD(&sctx->name_cache_list);
6546 sctx->flags = arg->flags;
6548 sctx->send_filp = fget(arg->send_fd);
6549 if (!sctx->send_filp) {
6554 sctx->send_root = send_root;
6556 * Unlikely but possible, if the subvolume is marked for deletion but
6557 * is slow to remove the directory entry, send can still be started.
6559 if (btrfs_root_dead(sctx->send_root)) {
6564 sctx->clone_roots_cnt = arg->clone_sources_count;
6566 sctx->send_max_size = BTRFS_SEND_BUF_SIZE;
6567 sctx->send_buf = kvmalloc(sctx->send_max_size, GFP_KERNEL);
6568 if (!sctx->send_buf) {
6573 sctx->read_buf = kvmalloc(BTRFS_SEND_READ_SIZE, GFP_KERNEL);
6574 if (!sctx->read_buf) {
6579 sctx->pending_dir_moves = RB_ROOT;
6580 sctx->waiting_dir_moves = RB_ROOT;
6581 sctx->orphan_dirs = RB_ROOT;
6583 alloc_size = sizeof(struct clone_root) * (arg->clone_sources_count + 1);
6585 sctx->clone_roots = kzalloc(alloc_size, GFP_KERNEL);
6586 if (!sctx->clone_roots) {
6591 alloc_size = arg->clone_sources_count * sizeof(*arg->clone_sources);
6593 if (arg->clone_sources_count) {
6594 clone_sources_tmp = kvmalloc(alloc_size, GFP_KERNEL);
6595 if (!clone_sources_tmp) {
6600 ret = copy_from_user(clone_sources_tmp, arg->clone_sources,
6607 for (i = 0; i < arg->clone_sources_count; i++) {
6608 key.objectid = clone_sources_tmp[i];
6609 key.type = BTRFS_ROOT_ITEM_KEY;
6610 key.offset = (u64)-1;
6612 index = srcu_read_lock(&fs_info->subvol_srcu);
6614 clone_root = btrfs_read_fs_root_no_name(fs_info, &key);
6615 if (IS_ERR(clone_root)) {
6616 srcu_read_unlock(&fs_info->subvol_srcu, index);
6617 ret = PTR_ERR(clone_root);
6620 spin_lock(&clone_root->root_item_lock);
6621 if (!btrfs_root_readonly(clone_root) ||
6622 btrfs_root_dead(clone_root)) {
6623 spin_unlock(&clone_root->root_item_lock);
6624 srcu_read_unlock(&fs_info->subvol_srcu, index);
6628 clone_root->send_in_progress++;
6629 spin_unlock(&clone_root->root_item_lock);
6630 srcu_read_unlock(&fs_info->subvol_srcu, index);
6632 sctx->clone_roots[i].root = clone_root;
6633 clone_sources_to_rollback = i + 1;
6635 kvfree(clone_sources_tmp);
6636 clone_sources_tmp = NULL;
6639 if (arg->parent_root) {
6640 key.objectid = arg->parent_root;
6641 key.type = BTRFS_ROOT_ITEM_KEY;
6642 key.offset = (u64)-1;
6644 index = srcu_read_lock(&fs_info->subvol_srcu);
6646 sctx->parent_root = btrfs_read_fs_root_no_name(fs_info, &key);
6647 if (IS_ERR(sctx->parent_root)) {
6648 srcu_read_unlock(&fs_info->subvol_srcu, index);
6649 ret = PTR_ERR(sctx->parent_root);
6653 spin_lock(&sctx->parent_root->root_item_lock);
6654 sctx->parent_root->send_in_progress++;
6655 if (!btrfs_root_readonly(sctx->parent_root) ||
6656 btrfs_root_dead(sctx->parent_root)) {
6657 spin_unlock(&sctx->parent_root->root_item_lock);
6658 srcu_read_unlock(&fs_info->subvol_srcu, index);
6662 spin_unlock(&sctx->parent_root->root_item_lock);
6664 srcu_read_unlock(&fs_info->subvol_srcu, index);
6668 * Clones from send_root are allowed, but only if the clone source
6669 * is behind the current send position. This is checked while searching
6670 * for possible clone sources.
6672 sctx->clone_roots[sctx->clone_roots_cnt++].root = sctx->send_root;
6674 /* We do a bsearch later */
6675 sort(sctx->clone_roots, sctx->clone_roots_cnt,
6676 sizeof(*sctx->clone_roots), __clone_root_cmp_sort,
6678 sort_clone_roots = 1;
6680 ret = ensure_commit_roots_uptodate(sctx);
6684 current->journal_info = BTRFS_SEND_TRANS_STUB;
6685 ret = send_subvol(sctx);
6686 current->journal_info = NULL;
6690 if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_END_CMD)) {
6691 ret = begin_cmd(sctx, BTRFS_SEND_C_END);
6694 ret = send_cmd(sctx);
6700 WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->pending_dir_moves));
6701 while (sctx && !RB_EMPTY_ROOT(&sctx->pending_dir_moves)) {
6703 struct pending_dir_move *pm;
6705 n = rb_first(&sctx->pending_dir_moves);
6706 pm = rb_entry(n, struct pending_dir_move, node);
6707 while (!list_empty(&pm->list)) {
6708 struct pending_dir_move *pm2;
6710 pm2 = list_first_entry(&pm->list,
6711 struct pending_dir_move, list);
6712 free_pending_move(sctx, pm2);
6714 free_pending_move(sctx, pm);
6717 WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves));
6718 while (sctx && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves)) {
6720 struct waiting_dir_move *dm;
6722 n = rb_first(&sctx->waiting_dir_moves);
6723 dm = rb_entry(n, struct waiting_dir_move, node);
6724 rb_erase(&dm->node, &sctx->waiting_dir_moves);
6728 WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->orphan_dirs));
6729 while (sctx && !RB_EMPTY_ROOT(&sctx->orphan_dirs)) {
6731 struct orphan_dir_info *odi;
6733 n = rb_first(&sctx->orphan_dirs);
6734 odi = rb_entry(n, struct orphan_dir_info, node);
6735 free_orphan_dir_info(sctx, odi);
6738 if (sort_clone_roots) {
6739 for (i = 0; i < sctx->clone_roots_cnt; i++)
6740 btrfs_root_dec_send_in_progress(
6741 sctx->clone_roots[i].root);
6743 for (i = 0; sctx && i < clone_sources_to_rollback; i++)
6744 btrfs_root_dec_send_in_progress(
6745 sctx->clone_roots[i].root);
6747 btrfs_root_dec_send_in_progress(send_root);
6749 if (sctx && !IS_ERR_OR_NULL(sctx->parent_root))
6750 btrfs_root_dec_send_in_progress(sctx->parent_root);
6752 kvfree(clone_sources_tmp);
6755 if (sctx->send_filp)
6756 fput(sctx->send_filp);
6758 kvfree(sctx->clone_roots);
6759 kvfree(sctx->send_buf);
6760 kvfree(sctx->read_buf);
6762 name_cache_free(sctx);