/*
 * Copyright (C) 2012 Alexander Block.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/bsearch.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/sort.h>
#include <linux/mount.h>
#include <linux/xattr.h>
#include <linux/posix_acl_xattr.h>
#include <linux/radix-tree.h>
#include <linux/vmalloc.h>
#include <linux/string.h>

#include "send.h"
#include "backref.h"
#include "hash.h"
#include "ctree.h"
#include "disk-io.h"
#include "btrfs_inode.h"
#include "transaction.h"

static int g_verbose = 0;

/* Wrapped in do/while so the macro is safe to use in if/else bodies. */
#define verbose_printk(...) do { if (g_verbose) printk(__VA_ARGS__); } while (0)

/*
 * An fs_path is a helper to dynamically build path names with unknown size.
 * It reallocates the internal buffer on demand.
 * It allows fast adding of path elements on the right side (normal path) and
 * fast adding to the left side (reversed path). A reversed path can also be
 * unreversed if needed.
 */
struct fs_path {
	union {
		struct {
			char *start;
			char *end;

			char *buf;
			unsigned short buf_len:15;
			unsigned short reversed:1;
			char inline_buf[];
		};
		/*
		 * Average path length does not exceed 200 bytes, so we'll
		 * have better packing in the slab and a higher chance to
		 * satisfy an allocation later during send.
		 */
		char pad[256];
	};
};
#define FS_PATH_INLINE_SIZE \
	(sizeof(struct fs_path) - offsetof(struct fs_path, inline_buf))

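/*
 * Illustrative usage sketch (not part of the original file; error handling
 * omitted): building a path right to left with a reversed fs_path. With
 * p->reversed set, fs_path_add() prepends components, so they are pushed
 * from leaf to root and the buffer is flipped once at the end.
 *
 *	struct fs_path *p = fs_path_alloc_reversed();
 *
 *	fs_path_add(p, "file", 4);	// p holds "file"
 *	fs_path_add(p, "dir", 3);	// p holds "dir/file"
 *	fs_path_unreverse(p);		// p->start now points at "dir/file"
 *	fs_path_free(p);
 */
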
/* reused for each extent */
struct clone_root {
	struct btrfs_root *root;
	u64 ino;
	u64 offset;

	u64 found_refs;
};

#define SEND_CTX_MAX_NAME_CACHE_SIZE 128
#define SEND_CTX_NAME_CACHE_CLEAN_SIZE (SEND_CTX_MAX_NAME_CACHE_SIZE * 2)

struct send_ctx {
	struct file *send_filp;
	loff_t send_off;
	char *send_buf;
	u32 send_size;
	u32 send_max_size;
	u64 total_send_size;
	u64 cmd_send_size[BTRFS_SEND_C_MAX + 1];
	u64 flags;	/* 'flags' member of btrfs_ioctl_send_args is u64 */

	struct btrfs_root *send_root;
	struct btrfs_root *parent_root;
	struct clone_root *clone_roots;
	int clone_roots_cnt;

	/* current state of the compare_tree call */
	struct btrfs_path *left_path;
	struct btrfs_path *right_path;
	struct btrfs_key *cmp_key;

	/*
	 * Info about the currently processed inode. In case of deleted
	 * inodes, these are the values from the deleted inode.
	 */
	u64 cur_ino;
	u64 cur_inode_gen;
	int cur_inode_new;
	int cur_inode_new_gen;
	int cur_inode_deleted;
	u64 cur_inode_size;
	u64 cur_inode_mode;
	u64 cur_inode_rdev;
	u64 cur_inode_last_extent;

	u64 send_progress;

	struct list_head new_refs;
	struct list_head deleted_refs;

	struct radix_tree_root name_cache;
	struct list_head name_cache_list;
	int name_cache_size;

	struct file_ra_state ra;

	/*
	 * We process inodes by their increasing order, so if before an
	 * incremental send we reverse the parent/child relationship of
	 * directories such that a directory with a lower inode number was
	 * the parent of a directory with a higher inode number, and the one
	 * becoming the new parent got renamed too, we can't rename/move the
	 * directory with lower inode number when we finish processing it - we
	 * must process the directory with higher inode number first, then
	 * rename/move it and then rename/move the directory with lower inode
	 * number. Example follows.
	 *
	 * Tree state when the first send was performed:
	 *
	 * .
	 * |-- a                   (ino 257)
	 *     |-- b               (ino 258)
	 *         |
	 *         |-- c           (ino 259)
	 *         |   |-- d       (ino 260)
	 *         |
	 *         |-- c2          (ino 261)
	 *
	 * Tree state when the second (incremental) send is performed:
	 *
	 * .
	 * |-- a                   (ino 257)
	 *     |-- b               (ino 258)
	 *         |-- c2          (ino 261)
	 *             |-- d2      (ino 260)
	 *                 |-- cc  (ino 259)
	 *
	 * The sequence of steps that led to the second state was:
	 *
	 * mv /a/b/c/d /a/b/c2/d2
	 * mv /a/b/c /a/b/c2/d2/cc
	 *
	 * "c" has the lower inode number, but we can't move it (the 2nd mv
	 * operation) before we move "d", which has the higher inode number.
	 *
	 * So we just memorize which move/rename operations must be performed
	 * later when their respective parent is processed and moved/renamed.
	 */

	/* Indexed by parent directory inode number. */
	struct rb_root pending_dir_moves;

	/*
	 * Reverse index, indexed by the inode number of a directory that
	 * is waiting for the move/rename of its immediate parent before its
	 * own move/rename can be performed.
	 */
	struct rb_root waiting_dir_moves;

	/*
	 * A directory that is going to be rm'ed might have a child directory
	 * which is in the pending directory moves index above. In this case,
	 * the directory can only be removed after the move/rename of its child
	 * is performed. Example:
	 *
	 * Parent snapshot:
	 *
	 * .                        (ino 256)
	 * |-- a/                   (ino 257)
	 *     |-- b/               (ino 258)
	 *         |-- c/           (ino 259)
	 *         |   |-- foo.txt  (ino 260)
	 *         |   |-- x/       (ino 261)
	 *         |
	 *         |-- y/           (ino 262)
	 *
	 * Send snapshot:
	 *
	 * .                        (ino 256)
	 * |-- a/                   (ino 257)
	 *     |-- b/               (ino 258)
	 *         |-- YY/          (ino 262)
	 *              |-- x/      (ino 261)
	 *
	 * Sequence of steps that led to the send snapshot:
	 * rm -f /a/b/c/foo.txt
	 * mv /a/b/y /a/b/YY
	 * mv /a/b/c/x /a/b/YY
	 * rmdir /a/b/c
	 *
	 * When the child is processed, its move/rename is delayed until its
	 * parent is processed (as explained above), but all other operations
	 * like update utimes, chown, chgrp, etc, are performed and the paths
	 * that it uses for those operations must use the orphanized name of
	 * its parent (the directory we're going to rm later), so we need to
	 * memorize that name.
	 *
	 * Indexed by the inode number of the directory to be deleted.
	 */
	struct rb_root orphan_dirs;
};

struct pending_dir_move {
	struct rb_node node;
	struct list_head list;
	u64 parent_ino;
	u64 ino;
	u64 gen;
	struct list_head update_refs;
};

struct waiting_dir_move {
	struct rb_node node;
	u64 ino;
	/*
	 * There might be some directory that could not be removed because it
	 * was waiting for this directory inode to be moved first. Therefore
	 * after this directory is moved, we can try to rmdir the inode
	 * rmdir_ino.
	 */
	u64 rmdir_ino;
};

struct orphan_dir_info {
	struct rb_node node;
	u64 ino;
	u64 gen;
};

struct name_cache_entry {
	struct list_head list;
	/*
	 * The radix_tree has only 32bit entries, but we need to handle 64bit
	 * inums. We use the lower 32bit of the 64bit inum to store it in the
	 * tree. If more than one inum would fall into the same entry, we use
	 * radix_list to store the additional entries. radix_list is also
	 * used to store entries where two entries have the same inum but
	 * different generations.
	 */
	struct list_head radix_list;
	u64 ino;
	u64 gen;
	u64 parent_ino;
	u64 parent_gen;
	int ret;
	int need_later_update;
	int name_len;
	char name[];
};

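/*
 * Worked example for the clash handling above (made-up inode numbers):
 * on a 32bit kernel, inums 0x100000042 and 0x200000042 both map to radix
 * tree index 0x42, so their entries end up on the same nce_head list and
 * name_cache_search() tells them apart by comparing both ino and gen.
 */
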
static int is_waiting_for_move(struct send_ctx *sctx, u64 ino);

static struct waiting_dir_move *
get_waiting_dir_move(struct send_ctx *sctx, u64 ino);

static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino);

static int need_send_hole(struct send_ctx *sctx)
{
	return (sctx->parent_root && !sctx->cur_inode_new &&
		!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted &&
		S_ISREG(sctx->cur_inode_mode));
}

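/*
 * Illustrative summary: holes only need to be transmitted for incremental
 * sends (a parent root exists) of regular files that already existed with
 * the same generation, because only then can the receiver still hold
 * stale data in the hole's range.
 */
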
static void fs_path_reset(struct fs_path *p)
{
	if (p->reversed) {
		p->start = p->buf + p->buf_len - 1;
		p->end = p->start;
		*p->start = 0;
	} else {
		p->start = p->buf;
		p->end = p->start;
		*p->start = 0;
	}
}

static struct fs_path *fs_path_alloc(void)
{
	struct fs_path *p;

	p = kmalloc(sizeof(*p), GFP_NOFS);
	if (!p)
		return NULL;
	p->reversed = 0;
	p->buf = p->inline_buf;
	p->buf_len = FS_PATH_INLINE_SIZE;
	fs_path_reset(p);
	return p;
}

static struct fs_path *fs_path_alloc_reversed(void)
{
	struct fs_path *p;

	p = fs_path_alloc();
	if (!p)
		return NULL;
	p->reversed = 1;
	fs_path_reset(p);
	return p;
}

static void fs_path_free(struct fs_path *p)
{
	if (!p)
		return;
	if (p->buf != p->inline_buf)
		kfree(p->buf);
	kfree(p);
}

static int fs_path_len(struct fs_path *p)
{
	return p->end - p->start;
}

static int fs_path_ensure_buf(struct fs_path *p, int len)
{
	char *tmp_buf;
	int path_len;
	int old_buf_len;

	len++;

	if (p->buf_len >= len)
		return 0;

	if (len > PATH_MAX) {
		WARN_ON(1);
		return -ENOMEM;
	}

	path_len = p->end - p->start;
	old_buf_len = p->buf_len;

	/*
	 * First time the inline_buf does not suffice
	 */
	if (p->buf == p->inline_buf) {
		tmp_buf = kmalloc(len, GFP_NOFS);
		if (tmp_buf)
			memcpy(tmp_buf, p->buf, old_buf_len);
	} else {
		tmp_buf = krealloc(p->buf, len, GFP_NOFS);
	}
	if (!tmp_buf)
		return -ENOMEM;
	p->buf = tmp_buf;
	/*
	 * The real size of the buffer is bigger; this will let the fast path
	 * happen most of the time.
	 */
	p->buf_len = ksize(p->buf);

	if (p->reversed) {
		tmp_buf = p->buf + old_buf_len - path_len - 1;
		p->end = p->buf + p->buf_len - 1;
		p->start = p->end - path_len;
		memmove(p->start, tmp_buf, path_len + 1);
	} else {
		p->start = p->buf;
		p->end = p->start + path_len;
	}

	return 0;
}

static int fs_path_prepare_for_add(struct fs_path *p, int name_len,
				   char **prepared)
{
	int ret;
	int new_len;

	new_len = p->end - p->start + name_len;
	if (p->start != p->end)
		new_len++;
	ret = fs_path_ensure_buf(p, new_len);
	if (ret < 0)
		goto out;

	if (p->reversed) {
		if (p->start != p->end)
			*--p->start = '/';
		p->start -= name_len;
		*prepared = p->start;
	} else {
		if (p->start != p->end)
			*p->end++ = '/';
		*prepared = p->end;
		p->end += name_len;
		*p->end = 0;
	}

out:
	return ret;
}

static int fs_path_add(struct fs_path *p, const char *name, int name_len)
{
	int ret;
	char *prepared;

	ret = fs_path_prepare_for_add(p, name_len, &prepared);
	if (ret < 0)
		goto out;
	memcpy(prepared, name, name_len);

out:
	return ret;
}

static int fs_path_add_path(struct fs_path *p, struct fs_path *p2)
{
	int ret;
	char *prepared;

	ret = fs_path_prepare_for_add(p, p2->end - p2->start, &prepared);
	if (ret < 0)
		goto out;
	memcpy(prepared, p2->start, p2->end - p2->start);

out:
	return ret;
}

static int fs_path_add_from_extent_buffer(struct fs_path *p,
					  struct extent_buffer *eb,
					  unsigned long off, int len)
{
	int ret;
	char *prepared;

	ret = fs_path_prepare_for_add(p, len, &prepared);
	if (ret < 0)
		goto out;

	read_extent_buffer(eb, prepared, off, len);

out:
	return ret;
}

static int fs_path_copy(struct fs_path *p, struct fs_path *from)
{
	int ret;

	p->reversed = from->reversed;
	fs_path_reset(p);

	ret = fs_path_add_path(p, from);

	return ret;
}

static void fs_path_unreverse(struct fs_path *p)
{
	char *tmp;
	int len;

	if (!p->reversed)
		return;

	tmp = p->start;
	len = p->end - p->start;
	p->start = p->buf;
	p->end = p->start + len;
	memmove(p->start, tmp, len + 1);
	p->reversed = 0;
}

static struct btrfs_path *alloc_path_for_send(void)
{
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return NULL;
	path->search_commit_root = 1;
	path->skip_locking = 1;
	path->need_commit_sem = 1;
	return path;
}

static int write_buf(struct file *filp, const void *buf, u32 len, loff_t *off)
{
	int ret;
	mm_segment_t old_fs;
	u32 pos = 0;

	old_fs = get_fs();
	set_fs(KERNEL_DS);

	while (pos < len) {
		ret = vfs_write(filp, (__force const char __user *)buf + pos,
				len - pos, off);
		/* TODO handle that correctly */
		/*if (ret == -ERESTARTSYS) {
			continue;
		}*/
		if (ret < 0)
			goto out;
		if (ret == 0) {
			ret = -EIO;
			goto out;
		}
		pos += ret;
	}

	ret = 0;

out:
	set_fs(old_fs);
	return ret;
}

static int tlv_put(struct send_ctx *sctx, u16 attr, const void *data, int len)
{
	struct btrfs_tlv_header *hdr;
	int total_len = sizeof(*hdr) + len;
	int left = sctx->send_max_size - sctx->send_size;

	if (unlikely(left < total_len))
		return -EOVERFLOW;

	hdr = (struct btrfs_tlv_header *) (sctx->send_buf + sctx->send_size);
	hdr->tlv_type = cpu_to_le16(attr);
	hdr->tlv_len = cpu_to_le16(len);
	memcpy(hdr + 1, data, len);
	sctx->send_size += total_len;

	return 0;
}

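/*
 * Wire layout produced by tlv_put() (illustrative): a little-endian
 * btrfs_tlv_header immediately followed by the raw attribute bytes.
 *
 *	+----------------+---------------+------------------+
 *	| tlv_type (u16) | tlv_len (u16) | data (tlv_len B) |
 *	+----------------+---------------+------------------+
 *
 * E.g. tlv_put_u64(sctx, BTRFS_SEND_A_SIZE, 4096) appends a 4 byte header
 * plus an 8 byte little-endian payload to sctx->send_buf.
 */
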
#define TLV_PUT_DEFINE_INT(bits) \
	static int tlv_put_u##bits(struct send_ctx *sctx,		\
				   u##bits attr, u##bits value)		\
	{								\
		__le##bits __tmp = cpu_to_le##bits(value);		\
		return tlv_put(sctx, attr, &__tmp, sizeof(__tmp));	\
	}

TLV_PUT_DEFINE_INT(64)

static int tlv_put_string(struct send_ctx *sctx, u16 attr,
			  const char *str, int len)
{
	if (len == -1)
		len = strlen(str);
	return tlv_put(sctx, attr, str, len);
}

static int tlv_put_uuid(struct send_ctx *sctx, u16 attr,
			const u8 *uuid)
{
	return tlv_put(sctx, attr, uuid, BTRFS_UUID_SIZE);
}

static int tlv_put_btrfs_timespec(struct send_ctx *sctx, u16 attr,
				  struct extent_buffer *eb,
				  struct btrfs_timespec *ts)
{
	struct btrfs_timespec bts;
	read_extent_buffer(eb, &bts, (unsigned long)ts, sizeof(bts));
	return tlv_put(sctx, attr, &bts, sizeof(bts));
}


#define TLV_PUT(sctx, attrtype, attrlen, data) \
	do { \
		ret = tlv_put(sctx, attrtype, attrlen, data); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)

#define TLV_PUT_INT(sctx, attrtype, bits, value) \
	do { \
		ret = tlv_put_u##bits(sctx, attrtype, value); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)

#define TLV_PUT_U8(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 8, data)
#define TLV_PUT_U16(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 16, data)
#define TLV_PUT_U32(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 32, data)
#define TLV_PUT_U64(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 64, data)
#define TLV_PUT_STRING(sctx, attrtype, str, len) \
	do { \
		ret = tlv_put_string(sctx, attrtype, str, len); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_PATH(sctx, attrtype, p) \
	do { \
		ret = tlv_put_string(sctx, attrtype, p->start, \
				     p->end - p->start); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_UUID(sctx, attrtype, uuid) \
	do { \
		ret = tlv_put_uuid(sctx, attrtype, uuid); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_BTRFS_TIMESPEC(sctx, attrtype, eb, ts) \
	do { \
		ret = tlv_put_btrfs_timespec(sctx, attrtype, eb, ts); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)

static int send_header(struct send_ctx *sctx)
{
	struct btrfs_stream_header hdr;

	strcpy(hdr.magic, BTRFS_SEND_STREAM_MAGIC);
	hdr.version = cpu_to_le32(BTRFS_SEND_STREAM_VERSION);

	return write_buf(sctx->send_filp, &hdr, sizeof(hdr),
			 &sctx->send_off);
}

/*
 * For each command/item we want to send to userspace, we call this function.
 */
static int begin_cmd(struct send_ctx *sctx, int cmd)
{
	struct btrfs_cmd_header *hdr;

	if (WARN_ON(!sctx->send_buf))
		return -EINVAL;

	BUG_ON(sctx->send_size);

	sctx->send_size += sizeof(*hdr);
	hdr = (struct btrfs_cmd_header *)sctx->send_buf;
	hdr->cmd = cpu_to_le16(cmd);

	return 0;
}

static int send_cmd(struct send_ctx *sctx)
{
	int ret;
	struct btrfs_cmd_header *hdr;
	u32 crc;

	hdr = (struct btrfs_cmd_header *)sctx->send_buf;
	hdr->len = cpu_to_le32(sctx->send_size - sizeof(*hdr));
	hdr->crc = 0;

	crc = btrfs_crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size);
	hdr->crc = cpu_to_le32(crc);

	ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size,
			&sctx->send_off);

	sctx->total_send_size += sctx->send_size;
	sctx->cmd_send_size[le16_to_cpu(hdr->cmd)] += sctx->send_size;
	sctx->send_size = 0;

	return ret;
}

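/*
 * Resulting command framing (illustrative): begin_cmd() reserves room for
 * the header, TLV_PUT*() append attributes, and send_cmd() fills in len
 * (TLV payload only, header excluded) and the crc32c, which is computed
 * over the whole command with the crc field zeroed first.
 *
 *	+-----------+-----------+-----------+=================+
 *	| len (u32) | cmd (u16) | crc (u32) |  TLV, TLV, ...  |
 *	+-----------+-----------+-----------+=================+
 */
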
/*
 * Sends a move instruction to user space
 */
static int send_rename(struct send_ctx *sctx,
		       struct fs_path *from, struct fs_path *to)
{
	int ret;

verbose_printk("btrfs: send_rename %s -> %s\n", from->start, to->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_RENAME);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, from);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_TO, to);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

/*
 * Sends a link instruction to user space
 */
static int send_link(struct send_ctx *sctx,
		     struct fs_path *path, struct fs_path *lnk)
{
	int ret;

verbose_printk("btrfs: send_link %s -> %s\n", path->start, lnk->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_LINK);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, lnk);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

/*
 * Sends an unlink instruction to user space
 */
static int send_unlink(struct send_ctx *sctx, struct fs_path *path)
{
	int ret;

verbose_printk("btrfs: send_unlink %s\n", path->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_UNLINK);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

/*
 * Sends a rmdir instruction to user space
 */
static int send_rmdir(struct send_ctx *sctx, struct fs_path *path)
{
	int ret;

verbose_printk("btrfs: send_rmdir %s\n", path->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_RMDIR);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

/*
 * Helper function to retrieve some fields from an inode item.
 */
static int __get_inode_info(struct btrfs_root *root, struct btrfs_path *path,
			    u64 ino, u64 *size, u64 *gen, u64 *mode, u64 *uid,
			    u64 *gid, u64 *rdev)
{
	int ret;
	struct btrfs_inode_item *ii;
	struct btrfs_key key;

	key.objectid = ino;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		return ret;
	}

	ii = btrfs_item_ptr(path->nodes[0], path->slots[0],
			struct btrfs_inode_item);
	if (size)
		*size = btrfs_inode_size(path->nodes[0], ii);
	if (gen)
		*gen = btrfs_inode_generation(path->nodes[0], ii);
	if (mode)
		*mode = btrfs_inode_mode(path->nodes[0], ii);
	if (uid)
		*uid = btrfs_inode_uid(path->nodes[0], ii);
	if (gid)
		*gid = btrfs_inode_gid(path->nodes[0], ii);
	if (rdev)
		*rdev = btrfs_inode_rdev(path->nodes[0], ii);

	return ret;
}

static int get_inode_info(struct btrfs_root *root,
			  u64 ino, u64 *size, u64 *gen,
			  u64 *mode, u64 *uid, u64 *gid,
			  u64 *rdev)
{
	struct btrfs_path *path;
	int ret;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;
	ret = __get_inode_info(root, path, ino, size, gen, mode, uid, gid,
			       rdev);
	btrfs_free_path(path);
	return ret;
}

typedef int (*iterate_inode_ref_t)(int num, u64 dir, int index,
				   struct fs_path *p,
				   void *ctx);

/*
 * Helper function to iterate the entries in ONE btrfs_inode_ref or
 * btrfs_inode_extref.
 * The iterate callback may return a non-zero value to stop iteration. This
 * can be a negative value for error codes or 1 to simply stop it.
 *
 * path must point to the INODE_REF or INODE_EXTREF when called.
 */
static int iterate_inode_ref(struct btrfs_root *root, struct btrfs_path *path,
			     struct btrfs_key *found_key, int resolve,
			     iterate_inode_ref_t iterate, void *ctx)
{
	struct extent_buffer *eb = path->nodes[0];
	struct btrfs_item *item;
	struct btrfs_inode_ref *iref;
	struct btrfs_inode_extref *extref;
	struct btrfs_path *tmp_path;
	struct fs_path *p;
	u32 cur = 0;
	u32 total;
	int slot = path->slots[0];
	u32 name_len;
	char *start;
	int ret = 0;
	int num = 0;
	int index;
	u64 dir;
	unsigned long name_off;
	unsigned long elem_size;
	unsigned long ptr;

	p = fs_path_alloc_reversed();
	if (!p)
		return -ENOMEM;

	tmp_path = alloc_path_for_send();
	if (!tmp_path) {
		fs_path_free(p);
		return -ENOMEM;
	}


	if (found_key->type == BTRFS_INODE_REF_KEY) {
		ptr = (unsigned long)btrfs_item_ptr(eb, slot,
						    struct btrfs_inode_ref);
		item = btrfs_item_nr(slot);
		total = btrfs_item_size(eb, item);
		elem_size = sizeof(*iref);
	} else {
		ptr = btrfs_item_ptr_offset(eb, slot);
		total = btrfs_item_size_nr(eb, slot);
		elem_size = sizeof(*extref);
	}

	while (cur < total) {
		fs_path_reset(p);

		if (found_key->type == BTRFS_INODE_REF_KEY) {
			iref = (struct btrfs_inode_ref *)(ptr + cur);
			name_len = btrfs_inode_ref_name_len(eb, iref);
			name_off = (unsigned long)(iref + 1);
			index = btrfs_inode_ref_index(eb, iref);
			dir = found_key->offset;
		} else {
			extref = (struct btrfs_inode_extref *)(ptr + cur);
			name_len = btrfs_inode_extref_name_len(eb, extref);
			name_off = (unsigned long)&extref->name;
			index = btrfs_inode_extref_index(eb, extref);
			dir = btrfs_inode_extref_parent(eb, extref);
		}

		if (resolve) {
			start = btrfs_ref_to_path(root, tmp_path, name_len,
						  name_off, eb, dir,
						  p->buf, p->buf_len);
			if (IS_ERR(start)) {
				ret = PTR_ERR(start);
				goto out;
			}
			if (start < p->buf) {
				/* overflow, try again with a larger buffer */
				ret = fs_path_ensure_buf(p,
						p->buf_len + p->buf - start);
				if (ret < 0)
					goto out;
				start = btrfs_ref_to_path(root, tmp_path,
							  name_len, name_off,
							  eb, dir,
							  p->buf, p->buf_len);
				if (IS_ERR(start)) {
					ret = PTR_ERR(start);
					goto out;
				}
				BUG_ON(start < p->buf);
			}
			p->start = start;
		} else {
			ret = fs_path_add_from_extent_buffer(p, eb, name_off,
							     name_len);
			if (ret < 0)
				goto out;
		}

		cur += elem_size + name_len;
		ret = iterate(num, dir, index, p, ctx);
		if (ret)
			goto out;
		num++;
	}

out:
	btrfs_free_path(tmp_path);
	fs_path_free(p);
	return ret;
}

typedef int (*iterate_dir_item_t)(int num, struct btrfs_key *di_key,
				  const char *name, int name_len,
				  const char *data, int data_len,
				  u8 type, void *ctx);

/*
 * Helper function to iterate the entries in ONE btrfs_dir_item.
 * The iterate callback may return a non-zero value to stop iteration. This
 * can be a negative value for error codes or 1 to simply stop it.
 *
 * path must point to the dir item when called.
 */
static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
			    struct btrfs_key *found_key,
			    iterate_dir_item_t iterate, void *ctx)
{
	int ret = 0;
	struct extent_buffer *eb;
	struct btrfs_item *item;
	struct btrfs_dir_item *di;
	struct btrfs_key di_key;
	char *buf = NULL;
	int buf_len;
	u32 name_len;
	u32 data_len;
	u32 cur;
	u32 len;
	u32 total;
	int slot;
	int num;
	u8 type;

	/*
	 * Start with a small buffer (1 page). If later we end up needing more
	 * space, which can happen for xattrs on a fs with a leaf size greater
	 * than the page size, attempt to increase the buffer. Typically xattr
	 * values are small.
	 */
	buf_len = PATH_MAX;
	buf = kmalloc(buf_len, GFP_NOFS);
	if (!buf) {
		ret = -ENOMEM;
		goto out;
	}

	eb = path->nodes[0];
	slot = path->slots[0];
	item = btrfs_item_nr(slot);
	di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
	cur = 0;
	len = 0;
	total = btrfs_item_size(eb, item);

	num = 0;
	while (cur < total) {
		name_len = btrfs_dir_name_len(eb, di);
		data_len = btrfs_dir_data_len(eb, di);
		type = btrfs_dir_type(eb, di);
		btrfs_dir_item_key_to_cpu(eb, di, &di_key);

		if (type == BTRFS_FT_XATTR) {
			if (name_len > XATTR_NAME_MAX) {
				ret = -ENAMETOOLONG;
				goto out;
			}
			if (name_len + data_len > BTRFS_MAX_XATTR_SIZE(root)) {
				ret = -E2BIG;
				goto out;
			}
		} else {
			/*
			 * Path too long
			 */
			if (name_len + data_len > PATH_MAX) {
				ret = -ENAMETOOLONG;
				goto out;
			}
		}

		if (name_len + data_len > buf_len) {
			buf_len = name_len + data_len;
			if (is_vmalloc_addr(buf)) {
				vfree(buf);
				buf = NULL;
			} else {
				char *tmp = krealloc(buf, buf_len,
						     GFP_NOFS | __GFP_NOWARN);

				if (!tmp)
					kfree(buf);
				buf = tmp;
			}
			if (!buf) {
				buf = vmalloc(buf_len);
				if (!buf) {
					ret = -ENOMEM;
					goto out;
				}
			}
		}

		read_extent_buffer(eb, buf, (unsigned long)(di + 1),
				name_len + data_len);

		len = sizeof(*di) + name_len + data_len;
		di = (struct btrfs_dir_item *)((char *)di + len);
		cur += len;

		ret = iterate(num, &di_key, buf, name_len, buf + name_len,
			      data_len, type, ctx);
		if (ret < 0)
			goto out;
		if (ret) {
			ret = 1;
			goto out;
		}

		num++;
	}

out:
	kvfree(buf);
	return ret;
}

static int __copy_first_ref(int num, u64 dir, int index,
			    struct fs_path *p, void *ctx)
{
	int ret;
	struct fs_path *pt = ctx;

	ret = fs_path_copy(pt, p);
	if (ret < 0)
		return ret;

	/* we want the first only */
	return 1;
}

/*
 * Retrieve the first path of an inode. If an inode has more than one
 * ref/hardlink, this is ignored.
 */
static int get_inode_path(struct btrfs_root *root,
			  u64 ino, struct fs_path *path)
{
	int ret;
	struct btrfs_key key, found_key;
	struct btrfs_path *p;

	p = alloc_path_for_send();
	if (!p)
		return -ENOMEM;

	fs_path_reset(path);

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = 0;

	ret = btrfs_search_slot_for_read(root, &key, p, 1, 0);
	if (ret < 0)
		goto out;
	if (ret) {
		ret = 1;
		goto out;
	}
	btrfs_item_key_to_cpu(p->nodes[0], &found_key, p->slots[0]);
	if (found_key.objectid != ino ||
	    (found_key.type != BTRFS_INODE_REF_KEY &&
	     found_key.type != BTRFS_INODE_EXTREF_KEY)) {
		ret = -ENOENT;
		goto out;
	}

	ret = iterate_inode_ref(root, p, &found_key, 1,
				__copy_first_ref, path);
	if (ret < 0)
		goto out;
	ret = 0;

out:
	btrfs_free_path(p);
	return ret;
}

struct backref_ctx {
	struct send_ctx *sctx;

	struct btrfs_path *path;

	/* number of total found references */
	u64 found;

	/*
	 * Used for clones found in send_root. Clones found behind
	 * cur_objectid and cur_offset are not considered as allowed clones.
	 */
	u64 cur_objectid;
	u64 cur_offset;

	/* may be truncated in case it's the last extent in a file */
	u64 extent_len;

	/* Just to check for bugs in backref resolving */
	int found_itself;
};

static int __clone_root_cmp_bsearch(const void *key, const void *elt)
{
	u64 root = (u64)(uintptr_t)key;
	struct clone_root *cr = (struct clone_root *)elt;

	if (root < cr->root->objectid)
		return -1;
	if (root > cr->root->objectid)
		return 1;
	return 0;
}

static int __clone_root_cmp_sort(const void *e1, const void *e2)
{
	struct clone_root *cr1 = (struct clone_root *)e1;
	struct clone_root *cr2 = (struct clone_root *)e2;

	if (cr1->root->objectid < cr2->root->objectid)
		return -1;
	if (cr1->root->objectid > cr2->root->objectid)
		return 1;
	return 0;
}

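/*
 * These comparators back the sort()/bsearch() pair used on the
 * sctx->clone_roots array; usage sketch:
 *
 *	sort(sctx->clone_roots, sctx->clone_roots_cnt,
 *	     sizeof(struct clone_root), __clone_root_cmp_sort, NULL);
 *	found = bsearch((void *)(uintptr_t)root_id, sctx->clone_roots,
 *			sctx->clone_roots_cnt, sizeof(struct clone_root),
 *			__clone_root_cmp_bsearch);
 */
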
/*
 * Called for every backref that is found for the current extent.
 * Results are collected in sctx->clone_roots->ino/offset/found_refs
 */
static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_)
{
	struct backref_ctx *bctx = ctx_;
	struct clone_root *found;
	int ret;
	u64 i_size;

	/* First check if the root is in the list of accepted clone sources */
	found = bsearch((void *)(uintptr_t)root, bctx->sctx->clone_roots,
			bctx->sctx->clone_roots_cnt,
			sizeof(struct clone_root),
			__clone_root_cmp_bsearch);
	if (!found)
		return 0;

	if (found->root == bctx->sctx->send_root &&
	    ino == bctx->cur_objectid &&
	    offset == bctx->cur_offset) {
		bctx->found_itself = 1;
	}

	/*
	 * There are inodes that have extents that lie behind their i_size.
	 * Don't accept clones from these extents.
	 */
	ret = __get_inode_info(found->root, bctx->path, ino, &i_size, NULL, NULL,
			       NULL, NULL, NULL);
	btrfs_release_path(bctx->path);
	if (ret < 0)
		return ret;

	if (offset + bctx->extent_len > i_size)
		return 0;

	/*
	 * Make sure we don't consider clones from send_root that are
	 * behind the current inode/offset.
	 */
	if (found->root == bctx->sctx->send_root) {
		/*
		 * TODO for the moment we don't accept clones from the inode
		 * that is currently being sent. We may change this when
		 * BTRFS_IOC_CLONE_RANGE supports cloning from and to the same
		 * file.
		 */
		if (ino >= bctx->cur_objectid)
			return 0;
#if 0
		if (ino > bctx->cur_objectid)
			return 0;
		if (offset + bctx->extent_len > bctx->cur_offset)
			return 0;
#endif
	}

	bctx->found++;
	found->found_refs++;
	if (ino < found->ino) {
		found->ino = ino;
		found->offset = offset;
	} else if (found->ino == ino) {
		/*
		 * same extent found more than once in the same file.
		 */
		if (found->offset > offset + bctx->extent_len)
			found->offset = offset;
	}

	return 0;
}

/*
 * Given an inode, offset and extent item, it finds a good clone for a clone
 * instruction. Returns -ENOENT when none could be found. The function makes
 * sure that the returned clone is usable at the point where sending is at the
 * moment. This means that no clones are accepted which lie behind the current
 * inode+offset.
 *
 * path must point to the extent item when called.
 */
static int find_extent_clone(struct send_ctx *sctx,
			     struct btrfs_path *path,
			     u64 ino, u64 data_offset,
			     u64 ino_size,
			     struct clone_root **found)
{
	int ret;
	int extent_type;
	u64 logical;
	u64 disk_byte;
	u64 num_bytes;
	u64 extent_item_pos;
	u64 flags = 0;
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *eb = path->nodes[0];
	struct backref_ctx *backref_ctx = NULL;
	struct clone_root *cur_clone_root;
	struct btrfs_key found_key;
	struct btrfs_path *tmp_path;
	int compressed;
	u32 i;

	tmp_path = alloc_path_for_send();
	if (!tmp_path)
		return -ENOMEM;

	/* We only use this path under the commit sem */
	tmp_path->need_commit_sem = 0;

	backref_ctx = kmalloc(sizeof(*backref_ctx), GFP_NOFS);
	if (!backref_ctx) {
		ret = -ENOMEM;
		goto out;
	}

	backref_ctx->path = tmp_path;

	if (data_offset >= ino_size) {
		/*
		 * There may be extents that lie behind the file's size.
		 * I at least had this in combination with snapshotting while
		 * writing large files.
		 */
		ret = 0;
		goto out;
	}

	fi = btrfs_item_ptr(eb, path->slots[0],
			struct btrfs_file_extent_item);
	extent_type = btrfs_file_extent_type(eb, fi);
	if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
		ret = -ENOENT;
		goto out;
	}
	compressed = btrfs_file_extent_compression(eb, fi);

	num_bytes = btrfs_file_extent_num_bytes(eb, fi);
	disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
	if (disk_byte == 0) {
		ret = -ENOENT;
		goto out;
	}
	logical = disk_byte + btrfs_file_extent_offset(eb, fi);

	down_read(&sctx->send_root->fs_info->commit_root_sem);
	ret = extent_from_logical(sctx->send_root->fs_info, disk_byte, tmp_path,
				  &found_key, &flags);
	up_read(&sctx->send_root->fs_info->commit_root_sem);
	btrfs_release_path(tmp_path);

	if (ret < 0)
		goto out;
	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		ret = -EIO;
		goto out;
	}

	/*
	 * Setup the clone roots.
	 */
	for (i = 0; i < sctx->clone_roots_cnt; i++) {
		cur_clone_root = sctx->clone_roots + i;
		cur_clone_root->ino = (u64)-1;
		cur_clone_root->offset = 0;
		cur_clone_root->found_refs = 0;
	}

	backref_ctx->sctx = sctx;
	backref_ctx->found = 0;
	backref_ctx->cur_objectid = ino;
	backref_ctx->cur_offset = data_offset;
	backref_ctx->found_itself = 0;
	backref_ctx->extent_len = num_bytes;

	/*
	 * The last extent of a file may be too large due to page alignment.
	 * We need to adjust extent_len in this case so that the checks in
	 * __iterate_backrefs work.
	 */
	if (data_offset + num_bytes >= ino_size)
		backref_ctx->extent_len = ino_size - data_offset;

	/*
	 * Now collect all backrefs.
	 */
	if (compressed == BTRFS_COMPRESS_NONE)
		extent_item_pos = logical - found_key.objectid;
	else
		extent_item_pos = 0;
	ret = iterate_extent_inodes(sctx->send_root->fs_info,
				    found_key.objectid, extent_item_pos, 1,
				    __iterate_backrefs, backref_ctx);

	if (ret < 0)
		goto out;

	if (!backref_ctx->found_itself) {
		/* found a bug in backref code? */
		ret = -EIO;
		btrfs_err(sctx->send_root->fs_info, "did not find backref in "
				"send_root. inode=%llu, offset=%llu, "
				"disk_byte=%llu found extent=%llu",
				ino, data_offset, disk_byte, found_key.objectid);
		goto out;
	}

verbose_printk(KERN_DEBUG "btrfs: find_extent_clone: data_offset=%llu, "
		"ino=%llu, "
		"num_bytes=%llu, logical=%llu\n",
		data_offset, ino, num_bytes, logical);

	if (!backref_ctx->found)
		verbose_printk("btrfs: no clones found\n");

	cur_clone_root = NULL;
	for (i = 0; i < sctx->clone_roots_cnt; i++) {
		if (sctx->clone_roots[i].found_refs) {
			if (!cur_clone_root)
				cur_clone_root = sctx->clone_roots + i;
			else if (sctx->clone_roots[i].root == sctx->send_root)
				/* prefer clones from send_root over others */
				cur_clone_root = sctx->clone_roots + i;
		}
	}

	if (cur_clone_root) {
		if (compressed != BTRFS_COMPRESS_NONE) {
			/*
			 * Offsets given by iterate_extent_inodes() are relative
			 * to the start of the extent; we need to add the
			 * logical offset from the file extent item.
			 * (See why at backref.c:check_extent_in_eb())
			 */
			cur_clone_root->offset += btrfs_file_extent_offset(eb,
									   fi);
		}
		*found = cur_clone_root;
		ret = 0;
	} else {
		ret = -ENOENT;
	}

out:
	btrfs_free_path(tmp_path);
	kfree(backref_ctx);
	return ret;
}

static int read_symlink(struct btrfs_root *root,
			u64 ino,
			struct fs_path *dest)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_file_extent_item *ei;
	u8 type;
	u8 compression;
	unsigned long off;
	int len;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret);

	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
			struct btrfs_file_extent_item);
	type = btrfs_file_extent_type(path->nodes[0], ei);
	compression = btrfs_file_extent_compression(path->nodes[0], ei);
	BUG_ON(type != BTRFS_FILE_EXTENT_INLINE);
	BUG_ON(compression);

	off = btrfs_file_extent_inline_start(ei);
	len = btrfs_file_extent_inline_len(path->nodes[0], path->slots[0], ei);

	ret = fs_path_add_from_extent_buffer(dest, path->nodes[0], off, len);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Helper function to generate a file name that is unique in the root of
 * send_root and parent_root. This is used to generate names for orphan inodes.
 */
static int gen_unique_name(struct send_ctx *sctx,
			   u64 ino, u64 gen,
			   struct fs_path *dest)
{
	int ret = 0;
	struct btrfs_path *path;
	struct btrfs_dir_item *di;
	char tmp[64];
	int len;
	u64 idx = 0;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	while (1) {
		len = snprintf(tmp, sizeof(tmp), "o%llu-%llu-%llu",
			       ino, gen, idx);
		ASSERT(len < sizeof(tmp));

		di = btrfs_lookup_dir_item(NULL, sctx->send_root,
				path, BTRFS_FIRST_FREE_OBJECTID,
				tmp, strlen(tmp), 0);
		btrfs_release_path(path);
		if (IS_ERR(di)) {
			ret = PTR_ERR(di);
			goto out;
		}
		if (di) {
			/* not unique, try again */
			idx++;
			continue;
		}

		if (!sctx->parent_root) {
			/* unique */
			ret = 0;
			break;
		}

		di = btrfs_lookup_dir_item(NULL, sctx->parent_root,
				path, BTRFS_FIRST_FREE_OBJECTID,
				tmp, strlen(tmp), 0);
		btrfs_release_path(path);
		if (IS_ERR(di)) {
			ret = PTR_ERR(di);
			goto out;
		}
		if (di) {
			/* not unique, try again */
			idx++;
			continue;
		}
		/* unique */
		break;
	}

	ret = fs_path_add(dest, tmp, strlen(tmp));

out:
	btrfs_free_path(path);
	return ret;
}

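/*
 * The generated names have the form "o<ino>-<gen>-<idx>", e.g. "o261-5-0"
 * for inode 261 with generation 5 on the first attempt; idx is bumped
 * until the candidate collides with nothing in either the send root or
 * the parent root.
 */
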
enum inode_state {
	inode_state_no_change,
	inode_state_will_create,
	inode_state_did_create,
	inode_state_will_delete,
	inode_state_did_delete,
};

static int get_cur_inode_state(struct send_ctx *sctx, u64 ino, u64 gen)
{
	int ret;
	int left_ret;
	int right_ret;
	u64 left_gen;
	u64 right_gen;

	ret = get_inode_info(sctx->send_root, ino, NULL, &left_gen, NULL, NULL,
			NULL, NULL);
	if (ret < 0 && ret != -ENOENT)
		goto out;
	left_ret = ret;

	if (!sctx->parent_root) {
		right_ret = -ENOENT;
	} else {
		ret = get_inode_info(sctx->parent_root, ino, NULL, &right_gen,
				NULL, NULL, NULL, NULL);
		if (ret < 0 && ret != -ENOENT)
			goto out;
		right_ret = ret;
	}

	if (!left_ret && !right_ret) {
		if (left_gen == gen && right_gen == gen) {
			ret = inode_state_no_change;
		} else if (left_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_create;
			else
				ret = inode_state_will_create;
		} else if (right_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_delete;
			else
				ret = inode_state_will_delete;
		} else {
			ret = -ENOENT;
		}
	} else if (!left_ret) {
		if (left_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_create;
			else
				ret = inode_state_will_create;
		} else {
			ret = -ENOENT;
		}
	} else if (!right_ret) {
		if (right_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_delete;
			else
				ret = inode_state_will_delete;
		} else {
			ret = -ENOENT;
		}
	} else {
		ret = -ENOENT;
	}

out:
	return ret;
}

static int is_inode_existent(struct send_ctx *sctx, u64 ino, u64 gen)
{
	int ret;

	ret = get_cur_inode_state(sctx, ino, gen);
	if (ret < 0)
		goto out;

	if (ret == inode_state_no_change ||
	    ret == inode_state_did_create ||
	    ret == inode_state_will_delete)
		ret = 1;
	else
		ret = 0;

out:
	return ret;
}

/*
 * Helper function to lookup a dir item in a dir.
 */
static int lookup_dir_item_inode(struct btrfs_root *root,
				 u64 dir, const char *name, int name_len,
				 u64 *found_inode,
				 u8 *found_type)
{
	int ret = 0;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	di = btrfs_lookup_dir_item(NULL, root, path,
			dir, name, name_len, 0);
	if (!di) {
		ret = -ENOENT;
		goto out;
	}
	if (IS_ERR(di)) {
		ret = PTR_ERR(di);
		goto out;
	}
	btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
	if (key.type == BTRFS_ROOT_ITEM_KEY) {
		ret = -ENOENT;
		goto out;
	}
	*found_inode = key.objectid;
	*found_type = btrfs_dir_type(path->nodes[0], di);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Looks up the first btrfs_inode_ref of a given ino. It returns the parent
 * dir, the generation of the parent dir and the name of the dir entry.
 */
static int get_first_ref(struct btrfs_root *root, u64 ino,
			 u64 *dir, u64 *dir_gen, struct fs_path *name)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	int len;
	u64 parent_dir;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = 0;

	ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
	if (ret < 0)
		goto out;
	if (!ret)
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
	if (ret || found_key.objectid != ino ||
	    (found_key.type != BTRFS_INODE_REF_KEY &&
	     found_key.type != BTRFS_INODE_EXTREF_KEY)) {
		ret = -ENOENT;
		goto out;
	}

	if (found_key.type == BTRFS_INODE_REF_KEY) {
		struct btrfs_inode_ref *iref;
		iref = btrfs_item_ptr(path->nodes[0], path->slots[0],
				      struct btrfs_inode_ref);
		len = btrfs_inode_ref_name_len(path->nodes[0], iref);
		ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
						     (unsigned long)(iref + 1),
						     len);
		parent_dir = found_key.offset;
	} else {
		struct btrfs_inode_extref *extref;
		extref = btrfs_item_ptr(path->nodes[0], path->slots[0],
					struct btrfs_inode_extref);
		len = btrfs_inode_extref_name_len(path->nodes[0], extref);
		ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
					(unsigned long)&extref->name, len);
		parent_dir = btrfs_inode_extref_parent(path->nodes[0], extref);
	}
	if (ret < 0)
		goto out;
	btrfs_release_path(path);

	if (dir_gen) {
		ret = get_inode_info(root, parent_dir, NULL, dir_gen, NULL,
				     NULL, NULL, NULL);
		if (ret < 0)
			goto out;
	}

	*dir = parent_dir;

out:
	btrfs_free_path(path);
	return ret;
}

static int is_first_ref(struct btrfs_root *root,
			u64 ino, u64 dir,
			const char *name, int name_len)
{
	int ret;
	struct fs_path *tmp_name;
	u64 tmp_dir;

	tmp_name = fs_path_alloc();
	if (!tmp_name)
		return -ENOMEM;

	ret = get_first_ref(root, ino, &tmp_dir, NULL, tmp_name);
	if (ret < 0)
		goto out;

	if (dir != tmp_dir || name_len != fs_path_len(tmp_name)) {
		ret = 0;
		goto out;
	}

	ret = !memcmp(tmp_name->start, name, name_len);

out:
	fs_path_free(tmp_name);
	return ret;
}

/*
 * Used by process_recorded_refs to determine if a new ref would overwrite an
 * already existing ref. In case it detects an overwrite, it returns the
 * inode/gen in who_ino/who_gen.
 * When an overwrite is detected, process_recorded_refs does proper orphanizing
 * to make sure later references to the overwritten inode are possible.
 * Orphanizing is however only required for the first ref of an inode.
 * process_recorded_refs does an additional is_first_ref check to see if
 * orphanizing is really required.
 */
static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,
			      const char *name, int name_len,
			      u64 *who_ino, u64 *who_gen)
{
	int ret = 0;
	u64 gen;
	u64 other_inode = 0;
	u8 other_type = 0;

	if (!sctx->parent_root)
		goto out;

	ret = is_inode_existent(sctx, dir, dir_gen);
	if (ret <= 0)
		goto out;

	/*
	 * If we have a parent root we need to verify that the parent dir was
	 * not deleted and then re-created. If it was, then we have no
	 * overwrite and we can just unlink this entry.
	 */
	if (sctx->parent_root) {
		ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL,
				     NULL, NULL, NULL);
		if (ret < 0 && ret != -ENOENT)
			goto out;
		if (ret) {
			ret = 0;
			goto out;
		}
		if (gen != dir_gen)
			goto out;
	}

	ret = lookup_dir_item_inode(sctx->parent_root, dir, name, name_len,
			&other_inode, &other_type);
	if (ret < 0 && ret != -ENOENT)
		goto out;
	if (ret) {
		ret = 0;
		goto out;
	}

	/*
	 * Check if the overwritten ref was already processed. If yes, the ref
	 * was already unlinked/moved, so we can safely assume that we will not
	 * overwrite anything at this point in time.
	 */
	if (other_inode > sctx->send_progress) {
		ret = get_inode_info(sctx->parent_root, other_inode, NULL,
				who_gen, NULL, NULL, NULL, NULL);
		if (ret < 0)
			goto out;

		ret = 1;
		*who_ino = other_inode;
	} else {
		ret = 0;
	}

out:
	return ret;
}

/*
 * Checks if the ref was overwritten by an already processed inode. This is
 * used by __get_cur_name_and_parent to find out if the ref was orphanized and
 * thus the orphan name needs to be used.
 * process_recorded_refs also uses it to avoid unlinking of refs that were
 * overwritten.
 */
static int did_overwrite_ref(struct send_ctx *sctx,
			    u64 dir, u64 dir_gen,
			    u64 ino, u64 ino_gen,
			    const char *name, int name_len)
{
	int ret = 0;
	u64 gen;
	u64 ow_inode;
	u8 other_type;

	if (!sctx->parent_root)
		goto out;

	ret = is_inode_existent(sctx, dir, dir_gen);
	if (ret <= 0)
		goto out;

	/* check if the ref was overwritten by another ref */
	ret = lookup_dir_item_inode(sctx->send_root, dir, name, name_len,
			&ow_inode, &other_type);
	if (ret < 0 && ret != -ENOENT)
		goto out;
	if (ret) {
		/* was never and will never be overwritten */
		ret = 0;
		goto out;
	}

	ret = get_inode_info(sctx->send_root, ow_inode, NULL, &gen, NULL, NULL,
			NULL, NULL);
	if (ret < 0)
		goto out;

	if (ow_inode == ino && gen == ino_gen) {
		ret = 0;
		goto out;
	}

	/* we know that it is or will be overwritten. check this now */
	if (ow_inode < sctx->send_progress)
		ret = 1;
	else
		ret = 0;

out:
	return ret;
}

/*
 * Same as did_overwrite_ref, but also checks if it is the first ref of an
 * inode that got overwritten. This is used by process_recorded_refs to
 * determine if it has to use the path as returned by get_cur_path or the
 * orphan name.
 */
static int did_overwrite_first_ref(struct send_ctx *sctx, u64 ino, u64 gen)
{
	int ret = 0;
	struct fs_path *name = NULL;
	u64 dir;
	u64 dir_gen;

	if (!sctx->parent_root)
		goto out;

	name = fs_path_alloc();
	if (!name)
		return -ENOMEM;

	ret = get_first_ref(sctx->parent_root, ino, &dir, &dir_gen, name);
	if (ret < 0)
		goto out;

	ret = did_overwrite_ref(sctx, dir, dir_gen, ino, gen,
			name->start, fs_path_len(name));

out:
	fs_path_free(name);
	return ret;
}

/*
 * Insert a name cache entry. On 32bit kernels the radix tree index is 32bit,
 * so we need to do some special handling in case we have clashes. This function
 * takes care of this with the help of name_cache_entry::radix_list.
 * In case of error, nce is kfreed.
 */
static int name_cache_insert(struct send_ctx *sctx,
			     struct name_cache_entry *nce)
{
	int ret = 0;
	struct list_head *nce_head;

	nce_head = radix_tree_lookup(&sctx->name_cache,
			(unsigned long)nce->ino);
	if (!nce_head) {
		nce_head = kmalloc(sizeof(*nce_head), GFP_NOFS);
		if (!nce_head) {
			kfree(nce);
			return -ENOMEM;
		}
		INIT_LIST_HEAD(nce_head);

		ret = radix_tree_insert(&sctx->name_cache, nce->ino, nce_head);
		if (ret < 0) {
			kfree(nce_head);
			kfree(nce);
			return ret;
		}
	}
	list_add_tail(&nce->radix_list, nce_head);
	list_add_tail(&nce->list, &sctx->name_cache_list);
	sctx->name_cache_size++;

	return ret;
}

static void name_cache_delete(struct send_ctx *sctx,
			      struct name_cache_entry *nce)
{
	struct list_head *nce_head;

	nce_head = radix_tree_lookup(&sctx->name_cache,
			(unsigned long)nce->ino);
	if (!nce_head) {
		btrfs_err(sctx->send_root->fs_info,
	      "name_cache_delete lookup failed ino %llu cache size %d, leaking memory",
			nce->ino, sctx->name_cache_size);
	}

	list_del(&nce->radix_list);
	list_del(&nce->list);
	sctx->name_cache_size--;

	/*
	 * We may not get to the final release of nce_head if the lookup fails
	 */
	if (nce_head && list_empty(nce_head)) {
		radix_tree_delete(&sctx->name_cache, (unsigned long)nce->ino);
		kfree(nce_head);
	}
}

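/*
 * Typical cache lifecycle (illustrative): __get_cur_name_and_parent()
 * allocates an entry and hands it to name_cache_insert(); hits found via
 * name_cache_search() are refreshed with name_cache_used(); once the
 * cache grows past SEND_CTX_NAME_CACHE_CLEAN_SIZE,
 * name_cache_clean_unused() trims the least recently used entries back
 * down to SEND_CTX_MAX_NAME_CACHE_SIZE.
 */
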
static struct name_cache_entry *name_cache_search(struct send_ctx *sctx,
						  u64 ino, u64 gen)
{
	struct list_head *nce_head;
	struct name_cache_entry *cur;

	nce_head = radix_tree_lookup(&sctx->name_cache, (unsigned long)ino);
	if (!nce_head)
		return NULL;

	list_for_each_entry(cur, nce_head, radix_list) {
		if (cur->ino == ino && cur->gen == gen)
			return cur;
	}
	return NULL;
}

/*
 * Removes the entry from the list and adds it back to the end. This marks the
 * entry as recently used so that name_cache_clean_unused does not remove it.
 */
static void name_cache_used(struct send_ctx *sctx, struct name_cache_entry *nce)
{
	list_del(&nce->list);
	list_add_tail(&nce->list, &sctx->name_cache_list);
}

/*
 * Remove some entries from the beginning of name_cache_list.
 */
static void name_cache_clean_unused(struct send_ctx *sctx)
{
	struct name_cache_entry *nce;

	if (sctx->name_cache_size < SEND_CTX_NAME_CACHE_CLEAN_SIZE)
		return;

	while (sctx->name_cache_size > SEND_CTX_MAX_NAME_CACHE_SIZE) {
		nce = list_entry(sctx->name_cache_list.next,
				 struct name_cache_entry, list);
		name_cache_delete(sctx, nce);
		kfree(nce);
	}
}

static void name_cache_free(struct send_ctx *sctx)
{
	struct name_cache_entry *nce;

	while (!list_empty(&sctx->name_cache_list)) {
		nce = list_entry(sctx->name_cache_list.next,
				 struct name_cache_entry, list);
		name_cache_delete(sctx, nce);
		kfree(nce);
	}
}

/*
 * Used by get_cur_path for each ref up to the root.
 * Returns 0 if it succeeded.
 * Returns 1 if the inode is not existent or got overwritten. In that case, the
 * name is an orphan name. This instructs get_cur_path to stop iterating. If 1
 * is returned, parent_ino/parent_gen are not guaranteed to be valid.
 * Returns <0 in case of error.
 */
static int __get_cur_name_and_parent(struct send_ctx *sctx,
				     u64 ino, u64 gen,
				     u64 *parent_ino,
				     u64 *parent_gen,
				     struct fs_path *dest)
{
	int ret;
	int nce_ret;
	struct name_cache_entry *nce = NULL;

	/*
	 * First check if we already did a call to this function with the same
	 * ino/gen. If yes, check if the cache entry is still up-to-date. If
	 * yes, return the cached result.
	 */
	nce = name_cache_search(sctx, ino, gen);
	if (nce) {
		if (ino < sctx->send_progress && nce->need_later_update) {
			name_cache_delete(sctx, nce);
			kfree(nce);
			nce = NULL;
		} else {
			name_cache_used(sctx, nce);
			*parent_ino = nce->parent_ino;
			*parent_gen = nce->parent_gen;
			ret = fs_path_add(dest, nce->name, nce->name_len);
			if (ret < 0)
				goto out;
			ret = nce->ret;
			goto out;
		}
	}

	/*
	 * If the inode is not existent yet, add the orphan name and return 1.
	 * This should only happen for the parent dir that we determine in
	 * __record_new_ref
	 */
	ret = is_inode_existent(sctx, ino, gen);
	if (ret < 0)
		goto out;

	if (!ret) {
		ret = gen_unique_name(sctx, ino, gen, dest);
		if (ret < 0)
			goto out;
		ret = 1;
		goto out_cache;
	}

	/*
	 * Depending on whether the inode was already processed or not, use
	 * send_root or parent_root for ref lookup.
	 */
	if (ino < sctx->send_progress)
		ret = get_first_ref(sctx->send_root, ino,
				    parent_ino, parent_gen, dest);
	else
		ret = get_first_ref(sctx->parent_root, ino,
				    parent_ino, parent_gen, dest);
	if (ret < 0)
		goto out;

	/*
	 * Check if the ref was overwritten by an inode's ref that was processed
	 * earlier. If yes, treat as orphan and return 1.
	 */
	ret = did_overwrite_ref(sctx, *parent_ino, *parent_gen, ino, gen,
			dest->start, dest->end - dest->start);
	if (ret < 0)
		goto out;
	if (ret) {
		fs_path_reset(dest);
		ret = gen_unique_name(sctx, ino, gen, dest);
		if (ret < 0)
			goto out;
		ret = 1;
	}

out_cache:
	/*
	 * Store the result of the lookup in the name cache.
	 */
	nce = kmalloc(sizeof(*nce) + fs_path_len(dest) + 1, GFP_NOFS);
	if (!nce) {
		ret = -ENOMEM;
		goto out;
	}

	nce->ino = ino;
	nce->gen = gen;
	nce->parent_ino = *parent_ino;
	nce->parent_gen = *parent_gen;
	nce->name_len = fs_path_len(dest);
	nce->ret = ret;
	strcpy(nce->name, dest->start);

	if (ino < sctx->send_progress)
		nce->need_later_update = 0;
	else
		nce->need_later_update = 1;

	nce_ret = name_cache_insert(sctx, nce);
	if (nce_ret < 0)
		ret = nce_ret;
	name_cache_clean_unused(sctx);

out:
	return ret;
}

/*
 * Magic happens here. This function returns the first ref to an inode as it
 * would look like while receiving the stream at this point in time.
 * We walk the path up to the root. For every inode in between, we check if it
 * was already processed/sent. If yes, we continue with the parent as found
 * in send_root. If not, we continue with the parent as found in parent_root.
 * If we encounter an inode that was deleted at this point in time, we use the
 * inodes "orphan" name instead of the real name and stop. Same with new inodes
 * that were not created yet and overwritten inodes/refs.
 *
 * When do we have orphan inodes:
 * 1. When an inode is freshly created and thus no valid refs are available yet
 * 2. When a directory lost all its refs (deleted) but still has dir items
 *    inside which were not processed yet (pending for move/delete). If anyone
 *    tried to get the path to the dir items, it would get a path inside that
 *    orphan directory.
 * 3. When an inode is moved around or gets new links, it may overwrite the ref
 *    of an unprocessed inode. If in that case the first ref would be
 *    overwritten, the overwritten inode gets "orphanized". Later when we
 *    process this overwritten inode, it is restored at a new place by moving
 *    it.
 *
 * sctx->send_progress tells this function at which point in time receiving
 * would be.
 */
static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen,
			struct fs_path *dest)
{
	int ret = 0;
	struct fs_path *name = NULL;
	u64 parent_inode = 0;
	u64 parent_gen = 0;
	int stop = 0;

	name = fs_path_alloc();
	if (!name) {
		ret = -ENOMEM;
		goto out;
	}

	dest->reversed = 1;
	fs_path_reset(dest);

	while (!stop && ino != BTRFS_FIRST_FREE_OBJECTID) {
		fs_path_reset(name);

		if (is_waiting_for_rm(sctx, ino)) {
			ret = gen_unique_name(sctx, ino, gen, name);
			if (ret < 0)
				goto out;
			ret = fs_path_add_path(dest, name);
			break;
		}

		if (is_waiting_for_move(sctx, ino)) {
			ret = get_first_ref(sctx->parent_root, ino,
					    &parent_inode, &parent_gen, name);
		} else {
			ret = __get_cur_name_and_parent(sctx, ino, gen,
							&parent_inode,
							&parent_gen, name);
			if (ret)
				stop = 1;
		}

		if (ret < 0)
			goto out;

		ret = fs_path_add_path(dest, name);
		if (ret < 0)
			goto out;

		ino = parent_inode;
		gen = parent_gen;
	}

out:
	fs_path_free(name);
	if (!ret)
		fs_path_unreverse(dest);
	return ret;
}

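/*
 * Walk-through (illustrative): for an inode at /a/b/c where "b" was
 * already processed and renamed to "b2", but "a" was not processed yet,
 * the loop adds "c", then "b2" (first ref taken from send_root since b
 * was already processed), then "a" (taken from parent_root), and the
 * final fs_path_unreverse() yields "a/b2/c".
 */
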
/*
 * Sends a BTRFS_SEND_C_SUBVOL command/item to userspace
 */
static int send_subvol_begin(struct send_ctx *sctx)
{
	int ret;
	struct btrfs_root *send_root = sctx->send_root;
	struct btrfs_root *parent_root = sctx->parent_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_root_ref *ref;
	struct extent_buffer *leaf;
	char *name = NULL;
	int namelen;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	name = kmalloc(BTRFS_PATH_NAME_MAX, GFP_NOFS);
	if (!name) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	key.objectid = send_root->objectid;
	key.type = BTRFS_ROOT_BACKREF_KEY;
	key.offset = 0;

	ret = btrfs_search_slot_for_read(send_root->fs_info->tree_root,
				&key, path, 1, 0);
	if (ret < 0)
		goto out;
	if (ret) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.type != BTRFS_ROOT_BACKREF_KEY ||
	    key.objectid != send_root->objectid) {
		ret = -ENOENT;
		goto out;
	}
	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
	namelen = btrfs_root_ref_name_len(leaf, ref);
	read_extent_buffer(leaf, name, (unsigned long)(ref + 1), namelen);
	btrfs_release_path(path);

	if (parent_root) {
		ret = begin_cmd(sctx, BTRFS_SEND_C_SNAPSHOT);
		if (ret < 0)
			goto out;
	} else {
		ret = begin_cmd(sctx, BTRFS_SEND_C_SUBVOL);
		if (ret < 0)
			goto out;
	}

	TLV_PUT_STRING(sctx, BTRFS_SEND_A_PATH, name, namelen);
	TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
		     sctx->send_root->root_item.uuid);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_CTRANSID,
		    le64_to_cpu(sctx->send_root->root_item.ctransid));
	if (parent_root) {
		TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
			     sctx->parent_root->root_item.uuid);
		TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
			    le64_to_cpu(sctx->parent_root->root_item.ctransid));
	}

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	btrfs_free_path(path);
	kfree(name);
	return ret;
}

static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size)
{
	int ret = 0;
	struct fs_path *p;

verbose_printk("btrfs: send_truncate %llu size=%llu\n", ino, size);

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	ret = begin_cmd(sctx, BTRFS_SEND_C_TRUNCATE);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, ino, gen, p);
	if (ret < 0)
		goto out;
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, size);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(p);
	return ret;
}

static int send_chmod(struct send_ctx *sctx, u64 ino, u64 gen, u64 mode)
{
	int ret = 0;
	struct fs_path *p;

verbose_printk("btrfs: send_chmod %llu mode=%llu\n", ino, mode);

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	ret = begin_cmd(sctx, BTRFS_SEND_C_CHMOD);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, ino, gen, p);
	if (ret < 0)
		goto out;
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode & 07777);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(p);
	return ret;
}

static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 uid, u64 gid)
{
	int ret = 0;
	struct fs_path *p;

verbose_printk("btrfs: send_chown %llu uid=%llu, gid=%llu\n", ino, uid, gid);

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	ret = begin_cmd(sctx, BTRFS_SEND_C_CHOWN);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, ino, gen, p);
	if (ret < 0)
		goto out;
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_UID, uid);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_GID, gid);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(p);
	return ret;
}

static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen)
{
	int ret = 0;
	struct fs_path *p = NULL;
	struct btrfs_inode_item *ii;
	struct btrfs_path *path = NULL;
	struct extent_buffer *eb;
	struct btrfs_key key;
	int slot;

verbose_printk("btrfs: send_utimes %llu\n", ino);

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	path = alloc_path_for_send();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	key.objectid = ino;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	eb = path->nodes[0];
	slot = path->slots[0];
	ii = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);

	ret = begin_cmd(sctx, BTRFS_SEND_C_UTIMES);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, ino, gen, p);
	if (ret < 0)
		goto out;
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb, &ii->atime);
	TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb, &ii->mtime);
	TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb, &ii->ctime);
	/* TODO Add otime support when the otime patches get into upstream */

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(p);
	btrfs_free_path(path);
	return ret;
}

/*
 * Sends a BTRFS_SEND_C_MKXXX or SYMLINK command to user space. We don't have
 * a valid path yet because we did not process the refs yet. So, the inode
 * is created as an orphan.
 */
static int send_create_inode(struct send_ctx *sctx, u64 ino)
{
	int ret = 0;
	struct fs_path *p;
	int cmd;
	u64 gen;
	u64 mode;
	u64 rdev;

verbose_printk("btrfs: send_create_inode %llu\n", ino);

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	if (ino != sctx->cur_ino) {
		ret = get_inode_info(sctx->send_root, ino, NULL, &gen, &mode,
				     NULL, NULL, &rdev);
		if (ret < 0)
			goto out;
	} else {
		gen = sctx->cur_inode_gen;
		mode = sctx->cur_inode_mode;
		rdev = sctx->cur_inode_rdev;
	}

	if (S_ISREG(mode)) {
		cmd = BTRFS_SEND_C_MKFILE;
	} else if (S_ISDIR(mode)) {
		cmd = BTRFS_SEND_C_MKDIR;
	} else if (S_ISLNK(mode)) {
		cmd = BTRFS_SEND_C_SYMLINK;
	} else if (S_ISCHR(mode) || S_ISBLK(mode)) {
		cmd = BTRFS_SEND_C_MKNOD;
	} else if (S_ISFIFO(mode)) {
		cmd = BTRFS_SEND_C_MKFIFO;
	} else if (S_ISSOCK(mode)) {
		cmd = BTRFS_SEND_C_MKSOCK;
	} else {
		printk(KERN_WARNING "btrfs: unexpected inode type %o",
				(int)(mode & S_IFMT));
		ret = -ENOTSUPP;
		goto out;
	}

	ret = begin_cmd(sctx, cmd);
	if (ret < 0)
		goto out;

	ret = gen_unique_name(sctx, ino, gen, p);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_INO, ino);

	if (S_ISLNK(mode)) {
		fs_path_reset(p);
		ret = read_symlink(sctx->send_root, ino, p);
		if (ret < 0)
			goto out;
		TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, p);
	} else if (S_ISCHR(mode) || S_ISBLK(mode) ||
		   S_ISFIFO(mode) || S_ISSOCK(mode)) {
		TLV_PUT_U64(sctx, BTRFS_SEND_A_RDEV, new_encode_dev(rdev));
		TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode);
	}

	ret = send_cmd(sctx);
	if (ret < 0)
		goto out;

tlv_put_failure:
out:
	fs_path_free(p);
	return ret;
}

/*
 * We need some special handling for inodes that get processed before the parent
 * directory got created. See process_recorded_refs for details.
 * This function does the check if we already created the dir out of order.
 */
static int did_create_dir(struct send_ctx *sctx, u64 dir)
{
	int ret = 0;
	struct btrfs_path *path = NULL;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_key di_key;
	struct extent_buffer *eb;
	struct btrfs_dir_item *di;
	int slot;

	path = alloc_path_for_send();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	key.objectid = dir;
	key.type = BTRFS_DIR_INDEX_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		eb = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(eb)) {
			ret = btrfs_next_leaf(sctx->send_root, path);
			if (ret < 0) {
				goto out;
			} else if (ret > 0) {
				ret = 0;
				break;
			}
			continue;
		}

		btrfs_item_key_to_cpu(eb, &found_key, slot);
		if (found_key.objectid != key.objectid ||
		    found_key.type != key.type) {
			ret = 0;
			goto out;
		}

		di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
		btrfs_dir_item_key_to_cpu(eb, di, &di_key);

		if (di_key.type != BTRFS_ROOT_ITEM_KEY &&
		    di_key.objectid < sctx->send_progress) {
			ret = 1;
			goto out;
		}

		path->slots[0]++;
	}

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Only creates the inode if it is:
 * 1. Not a directory
 * 2. Or a directory which was not created already due to out of order
 *    directories. See did_create_dir and process_recorded_refs for details.
 */
static int send_create_inode_if_needed(struct send_ctx *sctx)
{
	int ret;

	if (S_ISDIR(sctx->cur_inode_mode)) {
		ret = did_create_dir(sctx, sctx->cur_ino);
		if (ret < 0)
			goto out;
		if (ret) {
			ret = 0;
			goto out;
		}
	}

	ret = send_create_inode(sctx, sctx->cur_ino);
	if (ret < 0)
		goto out;

out:
	return ret;
}

struct recorded_ref {
	struct list_head list;
	char *dir_path;
	char *name;
	struct fs_path *full_path;
	u64 dir;
	u64 dir_gen;
	int dir_path_len;
	int name_len;
};

/*
 * We need to process new refs before deleted refs, but compare_tree gives us
 * everything mixed. So we first record all refs and later process them.
 * This function is a helper to record one ref.
 */
static int __record_ref(struct list_head *head, u64 dir,
			u64 dir_gen, struct fs_path *path)
{
	struct recorded_ref *ref;

	ref = kmalloc(sizeof(*ref), GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	ref->dir = dir;
	ref->dir_gen = dir_gen;
	ref->full_path = path;

	ref->name = (char *)kbasename(ref->full_path->start);
	ref->name_len = ref->full_path->end - ref->name;
	ref->dir_path = ref->full_path->start;
	if (ref->name == ref->full_path->start)
		ref->dir_path_len = 0;
	else
		ref->dir_path_len = ref->full_path->end -
				ref->full_path->start - 1 - ref->name_len;

	list_add_tail(&ref->list, head);
	return 0;
}

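/*
 * Example of the split above (illustrative): for a full_path of
 * "a/b/file", kbasename() points name at "file", so name_len is 4,
 * dir_path is the start of the buffer and dir_path_len is 3 ("a/b",
 * excluding the '/' separator and the name itself).
 */
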
static int dup_ref(struct recorded_ref *ref, struct list_head *list)
{
	struct recorded_ref *new;

	new = kmalloc(sizeof(*new), GFP_NOFS);
	if (!new)
		return -ENOMEM;

	new->dir = ref->dir;
	new->dir_gen = ref->dir_gen;
	new->full_path = NULL;
	INIT_LIST_HEAD(&new->list);
	list_add_tail(&new->list, list);
	return 0;
}

static void __free_recorded_refs(struct list_head *head)
{
	struct recorded_ref *cur;

	while (!list_empty(head)) {
		cur = list_entry(head->next, struct recorded_ref, list);
		fs_path_free(cur->full_path);
		list_del(&cur->list);
		kfree(cur);
	}
}

static void free_recorded_refs(struct send_ctx *sctx)
{
	__free_recorded_refs(&sctx->new_refs);
	__free_recorded_refs(&sctx->deleted_refs);
}

/*
 * Renames/moves a file/dir to its orphan name. Used when the first
 * ref of an unprocessed inode gets overwritten and for all non-empty
 * directories.
 */
static int orphanize_inode(struct send_ctx *sctx, u64 ino, u64 gen,
			   struct fs_path *path)
{
	int ret;
	struct fs_path *orphan;

	orphan = fs_path_alloc();
	if (!orphan)
		return -ENOMEM;

	ret = gen_unique_name(sctx, ino, gen, orphan);
	if (ret < 0)
		goto out;

	ret = send_rename(sctx, path, orphan);

out:
	fs_path_free(orphan);
	return ret;
}

static struct orphan_dir_info *
add_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
{
	struct rb_node **p = &sctx->orphan_dirs.rb_node;
	struct rb_node *parent = NULL;
	struct orphan_dir_info *entry, *odi;

	odi = kmalloc(sizeof(*odi), GFP_NOFS);
	if (!odi)
		return ERR_PTR(-ENOMEM);
	odi->ino = dir_ino;
	odi->gen = 0;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct orphan_dir_info, node);
		if (dir_ino < entry->ino) {
			p = &(*p)->rb_left;
		} else if (dir_ino > entry->ino) {
			p = &(*p)->rb_right;
		} else {
			kfree(odi);
			return entry;
		}
	}

	rb_link_node(&odi->node, parent, p);
	rb_insert_color(&odi->node, &sctx->orphan_dirs);
	return odi;
}

static struct orphan_dir_info *
get_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
{
	struct rb_node *n = sctx->orphan_dirs.rb_node;
	struct orphan_dir_info *entry;

	while (n) {
		entry = rb_entry(n, struct orphan_dir_info, node);
		if (dir_ino < entry->ino)
			n = n->rb_left;
		else if (dir_ino > entry->ino)
			n = n->rb_right;
		else
			return entry;
	}
	return NULL;
}

static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino)
{
	struct orphan_dir_info *odi = get_orphan_dir_info(sctx, dir_ino);

	return odi != NULL;
}

static void free_orphan_dir_info(struct send_ctx *sctx,
				 struct orphan_dir_info *odi)
{
	if (!odi)
		return;
	rb_erase(&odi->node, &sctx->orphan_dirs);
	kfree(odi);
}

/*
 * Returns 1 if a directory can be removed at this point in time.
 * We check this by iterating all dir items and checking if the inode behind
 * the dir item was already processed.
 */
static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen,
		     u64 send_progress)
{
	int ret = 0;
	struct btrfs_root *root = sctx->parent_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_key loc;
	struct btrfs_dir_item *di;

	/*
	 * Don't try to rmdir the top/root subvolume dir.
	 */
	if (dir == BTRFS_FIRST_FREE_OBJECTID)
		return 0;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = dir;
	key.type = BTRFS_DIR_INDEX_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		struct waiting_dir_move *dm;

		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				break;
			continue;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != key.objectid ||
		    found_key.type != key.type)
			break;

		di = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_dir_item);
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &loc);

		dm = get_waiting_dir_move(sctx, loc.objectid);
		if (dm) {
			struct orphan_dir_info *odi;

			odi = add_orphan_dir_info(sctx, dir);
			if (IS_ERR(odi)) {
				ret = PTR_ERR(odi);
				goto out;
			}
			odi->gen = dir_gen;
			dm->rmdir_ino = dir;
			ret = 0;
			goto out;
		}

		if (loc.objectid > send_progress) {
			ret = 0;
			goto out;
		}

		path->slots[0]++;
	}

	ret = 1;

out:
	btrfs_free_path(path);
	return ret;
}

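/*
 * Summary (illustrative): the loop walks every DIR_INDEX item of "dir"
 * in the parent root. If a child is still waiting for its own move,
 * "dir" is recorded as that child's rmdir_ino and 0 is returned;
 * likewise if a child inode was not processed yet (loc.objectid >
 * send_progress). Only when no blocking entry is found does can_rmdir()
 * return 1.
 */
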
static int is_waiting_for_move(struct send_ctx *sctx, u64 ino)
{
	struct waiting_dir_move *entry = get_waiting_dir_move(sctx, ino);

	return entry != NULL;
}

static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino)
{
	struct rb_node **p = &sctx->waiting_dir_moves.rb_node;
	struct rb_node *parent = NULL;
	struct waiting_dir_move *entry, *dm;

	dm = kmalloc(sizeof(*dm), GFP_NOFS);
	if (!dm)
		return -ENOMEM;
	dm->ino = ino;
	dm->rmdir_ino = 0;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct waiting_dir_move, node);
		if (ino < entry->ino) {
			p = &(*p)->rb_left;
		} else if (ino > entry->ino) {
			p = &(*p)->rb_right;
		} else {
			kfree(dm);
			return -EEXIST;
		}
	}

	rb_link_node(&dm->node, parent, p);
	rb_insert_color(&dm->node, &sctx->waiting_dir_moves);
	return 0;
}

static struct waiting_dir_move *
get_waiting_dir_move(struct send_ctx *sctx, u64 ino)
{
	struct rb_node *n = sctx->waiting_dir_moves.rb_node;
	struct waiting_dir_move *entry;

	while (n) {
		entry = rb_entry(n, struct waiting_dir_move, node);
		if (ino < entry->ino)
			n = n->rb_left;
		else if (ino > entry->ino)
			n = n->rb_right;
		else
			return entry;
	}
	return NULL;
}

static void free_waiting_dir_move(struct send_ctx *sctx,
				  struct waiting_dir_move *dm)
{
	if (!dm)
		return;
	rb_erase(&dm->node, &sctx->waiting_dir_moves);
	kfree(dm);
}

static int add_pending_dir_move(struct send_ctx *sctx,
				u64 ino,
				u64 ino_gen,
				u64 parent_ino,
				struct list_head *new_refs,
				struct list_head *deleted_refs)
{
	struct rb_node **p = &sctx->pending_dir_moves.rb_node;
	struct rb_node *parent = NULL;
	struct pending_dir_move *entry = NULL, *pm;
	struct recorded_ref *cur;
	int exists = 0;
	int ret;

	pm = kmalloc(sizeof(*pm), GFP_NOFS);
	if (!pm)
		return -ENOMEM;
	pm->parent_ino = parent_ino;
	pm->ino = ino;
	pm->gen = ino_gen;
	INIT_LIST_HEAD(&pm->list);
	INIT_LIST_HEAD(&pm->update_refs);
	RB_CLEAR_NODE(&pm->node);

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct pending_dir_move, node);
		if (parent_ino < entry->parent_ino) {
			p = &(*p)->rb_left;
		} else if (parent_ino > entry->parent_ino) {
			p = &(*p)->rb_right;
		} else {
			exists = 1;
			break;
		}
	}

	list_for_each_entry(cur, deleted_refs, list) {
		ret = dup_ref(cur, &pm->update_refs);
		if (ret < 0)
			goto out;
	}
	list_for_each_entry(cur, new_refs, list) {
		ret = dup_ref(cur, &pm->update_refs);
		if (ret < 0)
			goto out;
	}

	ret = add_waiting_dir_move(sctx, pm->ino);
	if (ret)
		goto out;

	if (exists) {
		list_add_tail(&pm->list, &entry->list);
	} else {
		rb_link_node(&pm->node, parent, p);
		rb_insert_color(&pm->node, &sctx->pending_dir_moves);
	}
	ret = 0;
out:
	if (ret) {
		__free_recorded_refs(&pm->update_refs);
		kfree(pm);
	}
	return ret;
}

3049 static struct pending_dir_move *get_pending_dir_moves(struct send_ctx *sctx,
3052 struct rb_node *n = sctx->pending_dir_moves.rb_node;
3053 struct pending_dir_move *entry;
3056 entry = rb_entry(n, struct pending_dir_move, node);
3057 if (parent_ino < entry->parent_ino)
3059 else if (parent_ino > entry->parent_ino)
3067 static int path_loop(struct send_ctx *sctx, struct fs_path *name,
3068 u64 ino, u64 gen, u64 *ancestor_ino)
3071 u64 parent_inode = 0;
3073 u64 start_ino = ino;
3076 while (ino != BTRFS_FIRST_FREE_OBJECTID) {
3077 fs_path_reset(name);
3079 if (is_waiting_for_rm(sctx, ino))
3081 if (is_waiting_for_move(sctx, ino)) {
3082 if (*ancestor_ino == 0)
3083 *ancestor_ino = ino;
3084 ret = get_first_ref(sctx->parent_root, ino,
3085 &parent_inode, &parent_gen, name);
3087 ret = __get_cur_name_and_parent(sctx, ino, gen,
3097 if (parent_inode == start_ino) {
3099 if (*ancestor_ino == 0)
3100 *ancestor_ino = ino;
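/*
 * A hedged example of the cycle path_loop() detects (names and inode
 * numbers invented): the parent snapshot has /a (ino 258) with child
 * /a/b (ino 259); in the send snapshot they were swapped to /b (259)
 * with child /b/a (258). Building the new path of 258 walks up through
 * 259, which is itself waiting to be moved under 258, so the walk
 * arrives back at start_ino. The caller then defers the move of 258
 * by queueing it as pending below the blocking ancestor.
 */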
3109 static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
3111 struct fs_path *from_path = NULL;
3112 struct fs_path *to_path = NULL;
3113 struct fs_path *name = NULL;
3114 u64 orig_progress = sctx->send_progress;
3115 struct recorded_ref *cur;
3116 u64 parent_ino, parent_gen;
3117 struct waiting_dir_move *dm = NULL;
3122 name = fs_path_alloc();
3123 from_path = fs_path_alloc();
3124 if (!name || !from_path) {
3129 dm = get_waiting_dir_move(sctx, pm->ino);
3131 rmdir_ino = dm->rmdir_ino;
3132 free_waiting_dir_move(sctx, dm);
3134 ret = get_first_ref(sctx->parent_root, pm->ino,
3135 &parent_ino, &parent_gen, name);
3139 ret = get_cur_path(sctx, parent_ino, parent_gen,
3143 ret = fs_path_add_path(from_path, name);
3147 sctx->send_progress = sctx->cur_ino + 1;
3148 ret = path_loop(sctx, name, pm->ino, pm->gen, &ancestor);
3150 LIST_HEAD(deleted_refs);
3151 ASSERT(ancestor > BTRFS_FIRST_FREE_OBJECTID);
3152 ret = add_pending_dir_move(sctx, pm->ino, pm->gen, ancestor,
3153 &pm->update_refs, &deleted_refs);
3157 dm = get_waiting_dir_move(sctx, pm->ino);
3159 dm->rmdir_ino = rmdir_ino;
3163 fs_path_reset(name);
3166 ret = get_cur_path(sctx, pm->ino, pm->gen, to_path);
3170 ret = send_rename(sctx, from_path, to_path);
3175 struct orphan_dir_info *odi;
3177 odi = get_orphan_dir_info(sctx, rmdir_ino);
3179 /* already deleted */
3182 ret = can_rmdir(sctx, rmdir_ino, odi->gen, sctx->cur_ino + 1);
3188 name = fs_path_alloc();
3193 ret = get_cur_path(sctx, rmdir_ino, odi->gen, name);
3196 ret = send_rmdir(sctx, name);
3199 free_orphan_dir_info(sctx, odi);
3203 ret = send_utimes(sctx, pm->ino, pm->gen);
3208 * After rename/move, we need to update the utimes of both new parent(s)

3209 * and old parent(s).
3211 list_for_each_entry(cur, &pm->update_refs, list) {
3212 if (cur->dir == rmdir_ino)
3214 ret = send_utimes(sctx, cur->dir, cur->dir_gen);
3221 fs_path_free(from_path);
3222 fs_path_free(to_path);
3223 sctx->send_progress = orig_progress;
3228 static void free_pending_move(struct send_ctx *sctx, struct pending_dir_move *m)
3230 if (!list_empty(&m->list))
3232 if (!RB_EMPTY_NODE(&m->node))
3233 rb_erase(&m->node, &sctx->pending_dir_moves);
3234 __free_recorded_refs(&m->update_refs);
3238 static void tail_append_pending_moves(struct pending_dir_move *moves,
3239 struct list_head *stack)
3241 if (list_empty(&moves->list)) {
3242 list_add_tail(&moves->list, stack);
3245 list_splice_init(&moves->list, &list);
3246 list_add_tail(&moves->list, stack);
3247 list_splice_tail(&list, stack);
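/*
 * Example of the splice above (entries invented): if 'moves' m still
 * chains siblings [s1, s2] on its own list, the three list operations
 * leave the stack ordered [..., m, s1, s2], i.e. m is appended first
 * and its siblings keep their relative order behind it.
 */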
3251 static int apply_children_dir_moves(struct send_ctx *sctx)
3253 struct pending_dir_move *pm;
3254 struct list_head stack;
3255 u64 parent_ino = sctx->cur_ino;
3258 pm = get_pending_dir_moves(sctx, parent_ino);
3262 INIT_LIST_HEAD(&stack);
3263 tail_append_pending_moves(pm, &stack);
3265 while (!list_empty(&stack)) {
3266 pm = list_first_entry(&stack, struct pending_dir_move, list);
3267 parent_ino = pm->ino;
3268 ret = apply_dir_move(sctx, pm);
3269 free_pending_move(sctx, pm);
3272 pm = get_pending_dir_moves(sctx, parent_ino);
3274 tail_append_pending_moves(pm, &stack);
3279 while (!list_empty(&stack)) {
3280 pm = list_first_entry(&stack, struct pending_dir_move, list);
3281 free_pending_move(sctx, pm);
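/*
 * Illustrative drain order (inode numbers invented): if dirs 261 and
 * 262 were both pending on our current dir, and 263 in turn pending on
 * 261, the stack drains as 261, 262, 263 -- when 261 is applied, its
 * own pending children are appended to the tail of the stack, so the
 * moves are applied in breadth-first order.
 */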
3286 static int wait_for_parent_move(struct send_ctx *sctx,
3287 struct recorded_ref *parent_ref)
3290 u64 ino = parent_ref->dir;
3291 u64 parent_ino_before, parent_ino_after;
3292 struct fs_path *path_before = NULL;
3293 struct fs_path *path_after = NULL;
3296 path_after = fs_path_alloc();
3297 path_before = fs_path_alloc();
3298 if (!path_after || !path_before) {
3304 * Our current directory inode may not yet be renamed/moved because some
3305 * ancestor (immediate or not) has to be renamed/moved first. So find if
3306 * such ancestor exists and make sure our own rename/move happens after
3307 * that ancestor is processed.
3309 while (ino > BTRFS_FIRST_FREE_OBJECTID) {
3310 if (is_waiting_for_move(sctx, ino)) {
3315 fs_path_reset(path_before);
3316 fs_path_reset(path_after);
3318 ret = get_first_ref(sctx->send_root, ino, &parent_ino_after,
3322 ret = get_first_ref(sctx->parent_root, ino, &parent_ino_before,
3324 if (ret < 0 && ret != -ENOENT) {
3326 } else if (ret == -ENOENT) {
3331 len1 = fs_path_len(path_before);
3332 len2 = fs_path_len(path_after);
3333 if (ino > sctx->cur_ino &&
3334 (parent_ino_before != parent_ino_after || len1 != len2 ||
3335 memcmp(path_before->start, path_after->start, len1))) {
3339 ino = parent_ino_after;
3343 fs_path_free(path_before);
3344 fs_path_free(path_after);
3347 ret = add_pending_dir_move(sctx,
3349 sctx->cur_inode_gen,
3352 &sctx->deleted_refs);
3361 * This does all the move/link/unlink/rmdir magic.
3363 static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
3366 struct recorded_ref *cur;
3367 struct recorded_ref *cur2;
3368 struct list_head check_dirs;
3369 struct fs_path *valid_path = NULL;
3372 int did_overwrite = 0;
3374 u64 last_dir_ino_rm = 0;
3376 verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino);
3379 * This should never happen as the root dir always has the same ref
3380 * which is always '..'
3382 BUG_ON(sctx->cur_ino <= BTRFS_FIRST_FREE_OBJECTID);
3383 INIT_LIST_HEAD(&check_dirs);
3385 valid_path = fs_path_alloc();
3392 * First, check if the first ref of the current inode was overwritten
3393 * before. If yes, we know that the current inode was already orphanized
3394 * and thus use the orphan name. If not, we can use get_cur_path to
3395 * get the path of the first ref as it would look while receiving at
3396 * this point in time.
3397 * New inodes are always orphan at the beginning, so force the use of the
3398 * orphan name in this case.
3399 * The first ref is stored in valid_path and will be updated if it
3400 * gets moved around.
3402 if (!sctx->cur_inode_new) {
3403 ret = did_overwrite_first_ref(sctx, sctx->cur_ino,
3404 sctx->cur_inode_gen);
3410 if (sctx->cur_inode_new || did_overwrite) {
3411 ret = gen_unique_name(sctx, sctx->cur_ino,
3412 sctx->cur_inode_gen, valid_path);
3417 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen,
3423 list_for_each_entry(cur, &sctx->new_refs, list) {
3425 * We may have refs where the parent directory does not exist
3426 * yet. This happens if the parent directory's inum is higher
3427 * than the current inum. To handle this case, we create the
3428 * parent directory out of order. But we need to check if this
3429 * did already happen before due to other refs in the same dir.
3431 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
3434 if (ret == inode_state_will_create) {
3437 * First check if any of the current inodes refs did
3438 * already create the dir.
3440 list_for_each_entry(cur2, &sctx->new_refs, list) {
3443 if (cur2->dir == cur->dir) {
3450 * If that did not happen, check if a previous inode
3451 * did already create the dir.
3454 ret = did_create_dir(sctx, cur->dir);
3458 ret = send_create_inode(sctx, cur->dir);
3465 * Check if this new ref would overwrite the first ref of
3466 * another unprocessed inode. If yes, orphanize the
3467 * overwritten inode. If we find an overwritten ref that is
3468 * not the first ref, simply unlink it.
3470 ret = will_overwrite_ref(sctx, cur->dir, cur->dir_gen,
3471 cur->name, cur->name_len,
3472 &ow_inode, &ow_gen);
3476 ret = is_first_ref(sctx->parent_root,
3477 ow_inode, cur->dir, cur->name,
3482 ret = orphanize_inode(sctx, ow_inode, ow_gen,
3487 ret = send_unlink(sctx, cur->full_path);
3494 * link/move the ref to the new place. If we have an orphan
3495 * inode, move it and update valid_path. If not, link or move
3496 * it depending on the inode mode.
3499 ret = send_rename(sctx, valid_path, cur->full_path);
3503 ret = fs_path_copy(valid_path, cur->full_path);
3507 if (S_ISDIR(sctx->cur_inode_mode)) {
3509 * Dirs can't be linked, so we move them. For moved
3510 * dirs, we always have one new and one deleted
3511 * ref. The deleted ref is ignored later.
3513 ret = wait_for_parent_move(sctx, cur);
3519 ret = send_rename(sctx, valid_path,
3522 ret = fs_path_copy(valid_path,
3528 ret = send_link(sctx, cur->full_path,
3534 ret = dup_ref(cur, &check_dirs);
3539 if (S_ISDIR(sctx->cur_inode_mode) && sctx->cur_inode_deleted) {
3541 * Check if we can already rmdir the directory. If not,
3542 * orphanize it. For every dir item inside that gets deleted
3543 * later, we do this check again and rmdir it then if possible.
3544 * See the use of check_dirs for more details.
3546 ret = can_rmdir(sctx, sctx->cur_ino, sctx->cur_inode_gen,
3551 ret = send_rmdir(sctx, valid_path);
3554 } else if (!is_orphan) {
3555 ret = orphanize_inode(sctx, sctx->cur_ino,
3556 sctx->cur_inode_gen, valid_path);
3562 list_for_each_entry(cur, &sctx->deleted_refs, list) {
3563 ret = dup_ref(cur, &check_dirs);
3567 } else if (S_ISDIR(sctx->cur_inode_mode) &&
3568 !list_empty(&sctx->deleted_refs)) {
3570 * We have a moved dir. Add the old parent to check_dirs
3572 cur = list_entry(sctx->deleted_refs.next, struct recorded_ref,
3574 ret = dup_ref(cur, &check_dirs);
3577 } else if (!S_ISDIR(sctx->cur_inode_mode)) {
3579 * We have a non-dir inode. Go through all deleted refs and
3580 * unlink them if they were not already overwritten by other inodes.
3583 list_for_each_entry(cur, &sctx->deleted_refs, list) {
3584 ret = did_overwrite_ref(sctx, cur->dir, cur->dir_gen,
3585 sctx->cur_ino, sctx->cur_inode_gen,
3586 cur->name, cur->name_len);
3590 ret = send_unlink(sctx, cur->full_path);
3594 ret = dup_ref(cur, &check_dirs);
3599 * If the inode is still orphan, unlink the orphan. This may
3600 * happen when a previous inode did overwrite the first ref
3601 * of this inode and no new refs were added for the current
3602 * inode. Unlinking does not mean that the inode is deleted in
3603 * all cases. There may still be links to this inode in other places.
3607 ret = send_unlink(sctx, valid_path);
3614 * We did collect all parent dirs where cur_inode was once located. We
3615 * now go through all these dirs and check if they are pending for
3616 * deletion and if it's finally possible to perform the rmdir now.
3617 * We also update the inode stats of the parent dirs here.
3619 list_for_each_entry(cur, &check_dirs, list) {
3621 * In case we had refs into dirs that were not processed yet,
3622 * we don't need to do the utime and rmdir logic for these dirs.
3623 * The dir will be processed later.
3625 if (cur->dir > sctx->cur_ino)
3628 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
3632 if (ret == inode_state_did_create ||
3633 ret == inode_state_no_change) {
3634 /* TODO delayed utimes */
3635 ret = send_utimes(sctx, cur->dir, cur->dir_gen);
3638 } else if (ret == inode_state_did_delete &&
3639 cur->dir != last_dir_ino_rm) {
3640 ret = can_rmdir(sctx, cur->dir, cur->dir_gen,
3645 ret = get_cur_path(sctx, cur->dir,
3646 cur->dir_gen, valid_path);
3649 ret = send_rmdir(sctx, valid_path);
3652 last_dir_ino_rm = cur->dir;
3660 __free_recorded_refs(&check_dirs);
3661 free_recorded_refs(sctx);
3662 fs_path_free(valid_path);
3666 static int record_ref(struct btrfs_root *root, int num, u64 dir, int index,
3667 struct fs_path *name, void *ctx, struct list_head *refs)
3670 struct send_ctx *sctx = ctx;
3674 p = fs_path_alloc();
3678 ret = get_inode_info(root, dir, NULL, &gen, NULL, NULL,
3683 ret = get_cur_path(sctx, dir, gen, p);
3686 ret = fs_path_add_path(p, name);
3690 ret = __record_ref(refs, dir, gen, p);
3698 static int __record_new_ref(int num, u64 dir, int index,
3699 struct fs_path *name,
3702 struct send_ctx *sctx = ctx;
3703 return record_ref(sctx->send_root, num, dir, index, name,
3704 ctx, &sctx->new_refs);
3708 static int __record_deleted_ref(int num, u64 dir, int index,
3709 struct fs_path *name,
3712 struct send_ctx *sctx = ctx;
3713 return record_ref(sctx->parent_root, num, dir, index, name,
3714 ctx, &sctx->deleted_refs);
3717 static int record_new_ref(struct send_ctx *sctx)
3721 ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
3722 sctx->cmp_key, 0, __record_new_ref, sctx);
3731 static int record_deleted_ref(struct send_ctx *sctx)
3735 ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
3736 sctx->cmp_key, 0, __record_deleted_ref, sctx);
3745 struct find_ref_ctx {
3748 struct btrfs_root *root;
3749 struct fs_path *name;
3753 static int __find_iref(int num, u64 dir, int index,
3754 struct fs_path *name,
3757 struct find_ref_ctx *ctx = ctx_;
3761 if (dir == ctx->dir && fs_path_len(name) == fs_path_len(ctx->name) &&
3762 strncmp(name->start, ctx->name->start, fs_path_len(name)) == 0) {
3764 * To avoid doing extra lookups we'll only do this if everything else matched.
3767 ret = get_inode_info(ctx->root, dir, NULL, &dir_gen, NULL,
3771 if (dir_gen != ctx->dir_gen)
3773 ctx->found_idx = num;
3779 static int find_iref(struct btrfs_root *root,
3780 struct btrfs_path *path,
3781 struct btrfs_key *key,
3782 u64 dir, u64 dir_gen, struct fs_path *name)
3785 struct find_ref_ctx ctx;
3789 ctx.dir_gen = dir_gen;
3793 ret = iterate_inode_ref(root, path, key, 0, __find_iref, &ctx);
3797 if (ctx.found_idx == -1)
3800 return ctx.found_idx;
3803 static int __record_changed_new_ref(int num, u64 dir, int index,
3804 struct fs_path *name,
3809 struct send_ctx *sctx = ctx;
3811 ret = get_inode_info(sctx->send_root, dir, NULL, &dir_gen, NULL,
3816 ret = find_iref(sctx->parent_root, sctx->right_path,
3817 sctx->cmp_key, dir, dir_gen, name);
3819 ret = __record_new_ref(num, dir, index, name, sctx);
3826 static int __record_changed_deleted_ref(int num, u64 dir, int index,
3827 struct fs_path *name,
3832 struct send_ctx *sctx = ctx;
3834 ret = get_inode_info(sctx->parent_root, dir, NULL, &dir_gen, NULL,
3839 ret = find_iref(sctx->send_root, sctx->left_path, sctx->cmp_key,
3840 dir, dir_gen, name);
3842 ret = __record_deleted_ref(num, dir, index, name, sctx);
3849 static int record_changed_ref(struct send_ctx *sctx)
3853 ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
3854 sctx->cmp_key, 0, __record_changed_new_ref, sctx);
3857 ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
3858 sctx->cmp_key, 0, __record_changed_deleted_ref, sctx);
3868 * Record and process all refs at once. Needed when an inode changes the
3869 * generation number, which means that it was deleted and recreated.
3871 static int process_all_refs(struct send_ctx *sctx,
3872 enum btrfs_compare_tree_result cmd)
3875 struct btrfs_root *root;
3876 struct btrfs_path *path;
3877 struct btrfs_key key;
3878 struct btrfs_key found_key;
3879 struct extent_buffer *eb;
3881 iterate_inode_ref_t cb;
3882 int pending_move = 0;
3884 path = alloc_path_for_send();
3888 if (cmd == BTRFS_COMPARE_TREE_NEW) {
3889 root = sctx->send_root;
3890 cb = __record_new_ref;
3891 } else if (cmd == BTRFS_COMPARE_TREE_DELETED) {
3892 root = sctx->parent_root;
3893 cb = __record_deleted_ref;
3895 btrfs_err(sctx->send_root->fs_info,
3896 "Wrong command %d in process_all_refs", cmd);
3901 key.objectid = sctx->cmp_key->objectid;
3902 key.type = BTRFS_INODE_REF_KEY;
3904 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3909 eb = path->nodes[0];
3910 slot = path->slots[0];
3911 if (slot >= btrfs_header_nritems(eb)) {
3912 ret = btrfs_next_leaf(root, path);
3920 btrfs_item_key_to_cpu(eb, &found_key, slot);
3922 if (found_key.objectid != key.objectid ||
3923 (found_key.type != BTRFS_INODE_REF_KEY &&
3924 found_key.type != BTRFS_INODE_EXTREF_KEY))
3927 ret = iterate_inode_ref(root, path, &found_key, 0, cb, sctx);
3933 btrfs_release_path(path);
3935 ret = process_recorded_refs(sctx, &pending_move);
3936 /* Only applicable to an incremental send. */
3937 ASSERT(pending_move == 0);
3940 btrfs_free_path(path);
3944 static int send_set_xattr(struct send_ctx *sctx,
3945 struct fs_path *path,
3946 const char *name, int name_len,
3947 const char *data, int data_len)
3951 ret = begin_cmd(sctx, BTRFS_SEND_C_SET_XATTR);
3955 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
3956 TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
3957 TLV_PUT(sctx, BTRFS_SEND_A_XATTR_DATA, data, data_len);
3959 ret = send_cmd(sctx);
3966 static int send_remove_xattr(struct send_ctx *sctx,
3967 struct fs_path *path,
3968 const char *name, int name_len)
3972 ret = begin_cmd(sctx, BTRFS_SEND_C_REMOVE_XATTR);
3976 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
3977 TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
3979 ret = send_cmd(sctx);
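/*
 * For context: commands built with begin_cmd()/TLV_PUT*()/send_cmd()
 * serialize into the stream as a packed command header followed by
 * type-length-value attributes (layout per send.h of this era):
 *
 *   btrfs_cmd_header:  __le32 len  - payload length, header excluded
 *                      __le16 cmd  - a BTRFS_SEND_C_* value
 *                      __le32 crc  - crc32c over the whole command,
 *                                    computed with the crc field zeroed
 *   btrfs_tlv_header:  __le16 tlv_type - a BTRFS_SEND_A_* value
 *                      __le16 tlv_len  - value length, header excluded
 *                      followed by tlv_len bytes of attribute value
 */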
3986 static int __process_new_xattr(int num, struct btrfs_key *di_key,
3987 const char *name, int name_len,
3988 const char *data, int data_len,
3992 struct send_ctx *sctx = ctx;
3994 posix_acl_xattr_header dummy_acl;
3996 p = fs_path_alloc();
4001 * This hack is needed because empty ACLs are stored as zero-byte
4002 * data in xattrs. The problem is that receiving such zero-byte
4003 * ACLs will fail later. To fix this, we send a dummy ACL that
4004 * only contains the version number and no entries.
4006 if (!strncmp(name, XATTR_NAME_POSIX_ACL_ACCESS, name_len) ||
4007 !strncmp(name, XATTR_NAME_POSIX_ACL_DEFAULT, name_len)) {
4008 if (data_len == 0) {
4009 dummy_acl.a_version =
4010 cpu_to_le32(POSIX_ACL_XATTR_VERSION);
4011 data = (char *)&dummy_acl;
4012 data_len = sizeof(dummy_acl);
4016 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4020 ret = send_set_xattr(sctx, p, name, name_len, data, data_len);
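/*
 * For reference: posix_acl_xattr_header is just a __le32 version field
 * and POSIX_ACL_XATTR_VERSION is 0x0002, so the dummy payload built
 * above is the four bytes 02 00 00 00 -- a valid ACL blob with a
 * version number and zero entries, which receivers accept.
 */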
4027 static int __process_deleted_xattr(int num, struct btrfs_key *di_key,
4028 const char *name, int name_len,
4029 const char *data, int data_len,
4033 struct send_ctx *sctx = ctx;
4036 p = fs_path_alloc();
4040 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4044 ret = send_remove_xattr(sctx, p, name, name_len);
4051 static int process_new_xattr(struct send_ctx *sctx)
4055 ret = iterate_dir_item(sctx->send_root, sctx->left_path,
4056 sctx->cmp_key, __process_new_xattr, sctx);
4061 static int process_deleted_xattr(struct send_ctx *sctx)
4065 ret = iterate_dir_item(sctx->parent_root, sctx->right_path,
4066 sctx->cmp_key, __process_deleted_xattr, sctx);
4071 struct find_xattr_ctx {
4079 static int __find_xattr(int num, struct btrfs_key *di_key,
4080 const char *name, int name_len,
4081 const char *data, int data_len,
4082 u8 type, void *vctx)
4084 struct find_xattr_ctx *ctx = vctx;
4086 if (name_len == ctx->name_len &&
4087 strncmp(name, ctx->name, name_len) == 0) {
4088 ctx->found_idx = num;
4089 ctx->found_data_len = data_len;
4090 ctx->found_data = kmemdup(data, data_len, GFP_NOFS);
4091 if (!ctx->found_data)
4098 static int find_xattr(struct btrfs_root *root,
4099 struct btrfs_path *path,
4100 struct btrfs_key *key,
4101 const char *name, int name_len,
4102 char **data, int *data_len)
4105 struct find_xattr_ctx ctx;
4108 ctx.name_len = name_len;
4110 ctx.found_data = NULL;
4111 ctx.found_data_len = 0;
4113 ret = iterate_dir_item(root, path, key, __find_xattr, &ctx);
4117 if (ctx.found_idx == -1)
4120 *data = ctx.found_data;
4121 *data_len = ctx.found_data_len;
4123 kfree(ctx.found_data);
4125 return ctx.found_idx;
4129 static int __process_changed_new_xattr(int num, struct btrfs_key *di_key,
4130 const char *name, int name_len,
4131 const char *data, int data_len,
4135 struct send_ctx *sctx = ctx;
4136 char *found_data = NULL;
4137 int found_data_len = 0;
4139 ret = find_xattr(sctx->parent_root, sctx->right_path,
4140 sctx->cmp_key, name, name_len, &found_data,
4142 if (ret == -ENOENT) {
4143 ret = __process_new_xattr(num, di_key, name, name_len, data,
4144 data_len, type, ctx);
4145 } else if (ret >= 0) {
4146 if (data_len != found_data_len ||
4147 memcmp(data, found_data, data_len)) {
4148 ret = __process_new_xattr(num, di_key, name, name_len,
4149 data, data_len, type, ctx);
4159 static int __process_changed_deleted_xattr(int num, struct btrfs_key *di_key,
4160 const char *name, int name_len,
4161 const char *data, int data_len,
4165 struct send_ctx *sctx = ctx;
4167 ret = find_xattr(sctx->send_root, sctx->left_path, sctx->cmp_key,
4168 name, name_len, NULL, NULL);
4170 ret = __process_deleted_xattr(num, di_key, name, name_len, data,
4171 data_len, type, ctx);
4178 static int process_changed_xattr(struct send_ctx *sctx)
4182 ret = iterate_dir_item(sctx->send_root, sctx->left_path,
4183 sctx->cmp_key, __process_changed_new_xattr, sctx);
4186 ret = iterate_dir_item(sctx->parent_root, sctx->right_path,
4187 sctx->cmp_key, __process_changed_deleted_xattr, sctx);
4193 static int process_all_new_xattrs(struct send_ctx *sctx)
4196 struct btrfs_root *root;
4197 struct btrfs_path *path;
4198 struct btrfs_key key;
4199 struct btrfs_key found_key;
4200 struct extent_buffer *eb;
4203 path = alloc_path_for_send();
4207 root = sctx->send_root;
4209 key.objectid = sctx->cmp_key->objectid;
4210 key.type = BTRFS_XATTR_ITEM_KEY;
4212 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4217 eb = path->nodes[0];
4218 slot = path->slots[0];
4219 if (slot >= btrfs_header_nritems(eb)) {
4220 ret = btrfs_next_leaf(root, path);
4223 } else if (ret > 0) {
4230 btrfs_item_key_to_cpu(eb, &found_key, slot);
4231 if (found_key.objectid != key.objectid ||
4232 found_key.type != key.type) {
4237 ret = iterate_dir_item(root, path, &found_key,
4238 __process_new_xattr, sctx);
4246 btrfs_free_path(path);
4250 static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
4252 struct btrfs_root *root = sctx->send_root;
4253 struct btrfs_fs_info *fs_info = root->fs_info;
4254 struct inode *inode;
4257 struct btrfs_key key;
4258 pgoff_t index = offset >> PAGE_CACHE_SHIFT;
4260 unsigned pg_offset = offset & ~PAGE_CACHE_MASK;
4263 key.objectid = sctx->cur_ino;
4264 key.type = BTRFS_INODE_ITEM_KEY;
4267 inode = btrfs_iget(fs_info->sb, &key, root, NULL);
4269 return PTR_ERR(inode);
4271 if (offset + len > i_size_read(inode)) {
4272 if (offset > i_size_read(inode))
4275 len = i_size_read(inode) - offset;
4280 last_index = (offset + len - 1) >> PAGE_CACHE_SHIFT;
4282 /* initial readahead */
4283 memset(&sctx->ra, 0, sizeof(struct file_ra_state));
4284 file_ra_state_init(&sctx->ra, inode->i_mapping);
4285 btrfs_force_ra(inode->i_mapping, &sctx->ra, NULL, index,
4286 last_index - index + 1);
4288 while (index <= last_index) {
4289 unsigned cur_len = min_t(unsigned, len,
4290 PAGE_CACHE_SIZE - pg_offset);
4291 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
4297 if (!PageUptodate(page)) {
4298 btrfs_readpage(NULL, page);
4300 if (!PageUptodate(page)) {
4302 page_cache_release(page);
4309 memcpy(sctx->read_buf + ret, addr + pg_offset, cur_len);
4312 page_cache_release(page);
4324 * Read some bytes from the current inode/file and send a write command to
4327 static int send_write(struct send_ctx *sctx, u64 offset, u32 len)
4331 ssize_t num_read = 0;
4333 p = fs_path_alloc();
4337 verbose_printk("btrfs: send_write offset=%llu, len=%d\n", offset, len);
4339 num_read = fill_read_buf(sctx, offset, len);
4340 if (num_read <= 0) {
4346 ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
4350 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4354 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
4355 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
4356 TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, num_read);
4358 ret = send_cmd(sctx);
4369 * Send a clone command to user space.
4371 static int send_clone(struct send_ctx *sctx,
4372 u64 offset, u32 len,
4373 struct clone_root *clone_root)
4379 verbose_printk("btrfs: send_clone offset=%llu, len=%d, clone_root=%llu, "
4380 "clone_inode=%llu, clone_offset=%llu\n", offset, len,
4381 clone_root->root->objectid, clone_root->ino,
4382 clone_root->offset);
4384 p = fs_path_alloc();
4388 ret = begin_cmd(sctx, BTRFS_SEND_C_CLONE);
4392 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4396 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
4397 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_LEN, len);
4398 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
4400 if (clone_root->root == sctx->send_root) {
4401 ret = get_inode_info(sctx->send_root, clone_root->ino, NULL,
4402 &gen, NULL, NULL, NULL, NULL);
4405 ret = get_cur_path(sctx, clone_root->ino, gen, p);
4407 ret = get_inode_path(clone_root->root, clone_root->ino, p);
4412 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
4413 clone_root->root->root_item.uuid);
4414 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
4415 le64_to_cpu(clone_root->root->root_item.ctransid));
4416 TLV_PUT_PATH(sctx, BTRFS_SEND_A_CLONE_PATH, p);
4417 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_OFFSET,
4418 clone_root->offset);
4420 ret = send_cmd(sctx);
4429 * Send an update extent command to user space.
4431 static int send_update_extent(struct send_ctx *sctx,
4432 u64 offset, u32 len)
4437 p = fs_path_alloc();
4441 ret = begin_cmd(sctx, BTRFS_SEND_C_UPDATE_EXTENT);
4445 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4449 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
4450 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
4451 TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, len);
4453 ret = send_cmd(sctx);
4461 static int send_hole(struct send_ctx *sctx, u64 end)
4463 struct fs_path *p = NULL;
4464 u64 offset = sctx->cur_inode_last_extent;
4468 p = fs_path_alloc();
4471 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4473 goto tlv_put_failure;
4474 memset(sctx->read_buf, 0, BTRFS_SEND_READ_SIZE);
4475 while (offset < end) {
4476 len = min_t(u64, end - offset, BTRFS_SEND_READ_SIZE);
4478 ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
4481 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
4482 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
4483 TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, len);
4484 ret = send_cmd(sctx);
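/*
 * Note that holes are materialized on the receiving side as plain
 * zero-filled BTRFS_SEND_C_WRITE commands of at most
 * BTRFS_SEND_READ_SIZE (48K here) each; e.g. punching out a 1 MiB
 * hole costs ceil(1024/48) = 22 write commands in the stream.
 */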
4494 static int send_write_or_clone(struct send_ctx *sctx,
4495 struct btrfs_path *path,
4496 struct btrfs_key *key,
4497 struct clone_root *clone_root)
4500 struct btrfs_file_extent_item *ei;
4501 u64 offset = key->offset;
4506 u64 bs = sctx->send_root->fs_info->sb->s_blocksize;
4508 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
4509 struct btrfs_file_extent_item);
4510 type = btrfs_file_extent_type(path->nodes[0], ei);
4511 if (type == BTRFS_FILE_EXTENT_INLINE) {
4512 len = btrfs_file_extent_inline_len(path->nodes[0],
4513 path->slots[0], ei);
4515 * It is possible the inline item won't cover the whole page,
4516 * but there may be items after this page. Make
4517 * sure to send the whole thing.
4519 len = PAGE_CACHE_ALIGN(len);
4521 len = btrfs_file_extent_num_bytes(path->nodes[0], ei);
4524 if (offset + len > sctx->cur_inode_size)
4525 len = sctx->cur_inode_size - offset;
4531 if (clone_root && IS_ALIGNED(offset + len, bs)) {
4532 ret = send_clone(sctx, offset, len, clone_root);
4533 } else if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA) {
4534 ret = send_update_extent(sctx, offset, len);
4538 if (l > BTRFS_SEND_READ_SIZE)
4539 l = BTRFS_SEND_READ_SIZE;
4540 ret = send_write(sctx, pos + offset, l);
4553 static int is_extent_unchanged(struct send_ctx *sctx,
4554 struct btrfs_path *left_path,
4555 struct btrfs_key *ekey)
4558 struct btrfs_key key;
4559 struct btrfs_path *path = NULL;
4560 struct extent_buffer *eb;
4562 struct btrfs_key found_key;
4563 struct btrfs_file_extent_item *ei;
4568 u64 left_offset_fixed;
4576 path = alloc_path_for_send();
4580 eb = left_path->nodes[0];
4581 slot = left_path->slots[0];
4582 ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
4583 left_type = btrfs_file_extent_type(eb, ei);
4585 if (left_type != BTRFS_FILE_EXTENT_REG) {
4589 left_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
4590 left_len = btrfs_file_extent_num_bytes(eb, ei);
4591 left_offset = btrfs_file_extent_offset(eb, ei);
4592 left_gen = btrfs_file_extent_generation(eb, ei);
4595 * Following comments will refer to these graphics. L is the left
4596 * extents which we are checking at the moment. 1-8 are the right
4597 * extents that we iterate.
4600 * |-1-|-2a-|-3-|-4-|-5-|-6-|
4603 * |--1--|-2b-|...(same as above)
4605 * Alternative situation. Happens on files where extents got split.
4607 * |-----------7-----------|-6-|
4609 * Alternative situation. Happens on files which got larger.
4612 * Nothing follows after 8.
4615 key.objectid = ekey->objectid;
4616 key.type = BTRFS_EXTENT_DATA_KEY;
4617 key.offset = ekey->offset;
4618 ret = btrfs_search_slot_for_read(sctx->parent_root, &key, path, 0, 0);
4627 * Handle special case where the right side has no extents at all.
4629 eb = path->nodes[0];
4630 slot = path->slots[0];
4631 btrfs_item_key_to_cpu(eb, &found_key, slot);
4632 if (found_key.objectid != key.objectid ||
4633 found_key.type != key.type) {
4634 /* If we're a hole then just pretend nothing changed */
4635 ret = (left_disknr) ? 0 : 1;
4640 * We're now on 2a, 2b or 7.
4643 while (key.offset < ekey->offset + left_len) {
4644 ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
4645 right_type = btrfs_file_extent_type(eb, ei);
4646 if (right_type != BTRFS_FILE_EXTENT_REG) {
4651 right_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
4652 right_len = btrfs_file_extent_num_bytes(eb, ei);
4653 right_offset = btrfs_file_extent_offset(eb, ei);
4654 right_gen = btrfs_file_extent_generation(eb, ei);
4657 * Are we at extent 8? If yes, we know the extent is changed.
4658 * This may only happen on the first iteration.
4660 if (found_key.offset + right_len <= ekey->offset) {
4661 /* If we're a hole just pretend nothing changed */
4662 ret = (left_disknr) ? 0 : 1;
4666 left_offset_fixed = left_offset;
4667 if (key.offset < ekey->offset) {
4668 /* Fix the right offset for 2a and 7. */
4669 right_offset += ekey->offset - key.offset;
4671 /* Fix the left offset for all behind 2a and 2b */
4672 left_offset_fixed += key.offset - ekey->offset;
4676 * Check if we have the same extent.
4678 if (left_disknr != right_disknr ||
4679 left_offset_fixed != right_offset ||
4680 left_gen != right_gen) {
4686 * Go to the next extent.
4688 ret = btrfs_next_item(sctx->parent_root, path);
4692 eb = path->nodes[0];
4693 slot = path->slots[0];
4694 btrfs_item_key_to_cpu(eb, &found_key, slot);
4696 if (ret || found_key.objectid != key.objectid ||
4697 found_key.type != key.type) {
4698 key.offset += right_len;
4701 if (found_key.offset != key.offset + right_len) {
4709 * We're now behind the left extent (treat as unchanged) or at the end
4710 * of the right side (treat as changed).
4712 if (key.offset >= ekey->offset + left_len)
4719 btrfs_free_path(path);
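/*
 * Worked example of the offset fix-up above (numbers invented): the
 * left extent L maps file range [100k, 200k) to disk extent X at
 * extent offset 100k. On the right side, extent 7 maps file range
 * [0, 150k) to the same disk extent X at offset 0 (the extent was
 * merely split differently). Since key.offset (0) < ekey->offset
 * (100k), right_offset is raised by 100k and now matches
 * left_offset_fixed, so the overlap [100k, 150k) compares as
 * unchanged and iteration continues with the next right extent.
 */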
4723 static int get_last_extent(struct send_ctx *sctx, u64 offset)
4725 struct btrfs_path *path;
4726 struct btrfs_root *root = sctx->send_root;
4727 struct btrfs_file_extent_item *fi;
4728 struct btrfs_key key;
4733 path = alloc_path_for_send();
4737 sctx->cur_inode_last_extent = 0;
4739 key.objectid = sctx->cur_ino;
4740 key.type = BTRFS_EXTENT_DATA_KEY;
4741 key.offset = offset;
4742 ret = btrfs_search_slot_for_read(root, &key, path, 0, 1);
4746 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
4747 if (key.objectid != sctx->cur_ino || key.type != BTRFS_EXTENT_DATA_KEY)
4750 fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
4751 struct btrfs_file_extent_item);
4752 type = btrfs_file_extent_type(path->nodes[0], fi);
4753 if (type == BTRFS_FILE_EXTENT_INLINE) {
4754 u64 size = btrfs_file_extent_inline_len(path->nodes[0],
4755 path->slots[0], fi);
4756 extent_end = ALIGN(key.offset + size,
4757 sctx->send_root->sectorsize);
4759 extent_end = key.offset +
4760 btrfs_file_extent_num_bytes(path->nodes[0], fi);
4762 sctx->cur_inode_last_extent = extent_end;
4764 btrfs_free_path(path);
4768 static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path,
4769 struct btrfs_key *key)
4771 struct btrfs_file_extent_item *fi;
4776 if (sctx->cur_ino != key->objectid || !need_send_hole(sctx))
4779 if (sctx->cur_inode_last_extent == (u64)-1) {
4780 ret = get_last_extent(sctx, key->offset - 1);
4785 fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
4786 struct btrfs_file_extent_item);
4787 type = btrfs_file_extent_type(path->nodes[0], fi);
4788 if (type == BTRFS_FILE_EXTENT_INLINE) {
4789 u64 size = btrfs_file_extent_inline_len(path->nodes[0],
4790 path->slots[0], fi);
4791 extent_end = ALIGN(key->offset + size,
4792 sctx->send_root->sectorsize);
4794 extent_end = key->offset +
4795 btrfs_file_extent_num_bytes(path->nodes[0], fi);
4798 if (path->slots[0] == 0 &&
4799 sctx->cur_inode_last_extent < key->offset) {
4801 * We might have skipped entire leaves that contained only
4802 * file extent items for our current inode. These leaves have
4803 * a generation number smaller (older) than the one in the
4804 * current leaf and the leaf our last extent came from, and
4805 * are located between those two leaves.
4807 ret = get_last_extent(sctx, key->offset - 1);
4812 if (sctx->cur_inode_last_extent < key->offset)
4813 ret = send_hole(sctx, key->offset);
4814 sctx->cur_inode_last_extent = extent_end;
4818 static int process_extent(struct send_ctx *sctx,
4819 struct btrfs_path *path,
4820 struct btrfs_key *key)
4822 struct clone_root *found_clone = NULL;
4825 if (S_ISLNK(sctx->cur_inode_mode))
4828 if (sctx->parent_root && !sctx->cur_inode_new) {
4829 ret = is_extent_unchanged(sctx, path, key);
4837 struct btrfs_file_extent_item *ei;
4840 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
4841 struct btrfs_file_extent_item);
4842 type = btrfs_file_extent_type(path->nodes[0], ei);
4843 if (type == BTRFS_FILE_EXTENT_PREALLOC ||
4844 type == BTRFS_FILE_EXTENT_REG) {
4846 * The send spec does not have a prealloc command yet,
4847 * so just leave a hole for prealloc'ed extents until
4848 * we have enough commands queued up to justify rev'ing the send spec.
4851 if (type == BTRFS_FILE_EXTENT_PREALLOC) {
4856 /* Have a hole, just skip it. */
4857 if (btrfs_file_extent_disk_bytenr(path->nodes[0], ei) == 0) {
4864 ret = find_extent_clone(sctx, path, key->objectid, key->offset,
4865 sctx->cur_inode_size, &found_clone);
4866 if (ret != -ENOENT && ret < 0)
4869 ret = send_write_or_clone(sctx, path, key, found_clone);
4873 ret = maybe_send_hole(sctx, path, key);
4878 static int process_all_extents(struct send_ctx *sctx)
4881 struct btrfs_root *root;
4882 struct btrfs_path *path;
4883 struct btrfs_key key;
4884 struct btrfs_key found_key;
4885 struct extent_buffer *eb;
4888 root = sctx->send_root;
4889 path = alloc_path_for_send();
4893 key.objectid = sctx->cmp_key->objectid;
4894 key.type = BTRFS_EXTENT_DATA_KEY;
4896 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4901 eb = path->nodes[0];
4902 slot = path->slots[0];
4904 if (slot >= btrfs_header_nritems(eb)) {
4905 ret = btrfs_next_leaf(root, path);
4908 } else if (ret > 0) {
4915 btrfs_item_key_to_cpu(eb, &found_key, slot);
4917 if (found_key.objectid != key.objectid ||
4918 found_key.type != key.type) {
4923 ret = process_extent(sctx, path, &found_key);
4931 btrfs_free_path(path);
4935 static int process_recorded_refs_if_needed(struct send_ctx *sctx, int at_end,
4937 int *refs_processed)
4941 if (sctx->cur_ino == 0)
4943 if (!at_end && sctx->cur_ino == sctx->cmp_key->objectid &&
4944 sctx->cmp_key->type <= BTRFS_INODE_EXTREF_KEY)
4946 if (list_empty(&sctx->new_refs) && list_empty(&sctx->deleted_refs))
4949 ret = process_recorded_refs(sctx, pending_move);
4953 *refs_processed = 1;
4958 static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
4969 int pending_move = 0;
4970 int refs_processed = 0;
4972 ret = process_recorded_refs_if_needed(sctx, at_end, &pending_move,
4978 * We have processed the refs and thus need to advance send_progress.
4979 * Now, calls to get_cur_xxx will take the updated refs of the current
4980 * inode into account.
4982 * On the other hand, if our current inode is a directory and couldn't
4983 * be moved/renamed because its parent was renamed/moved too and it has
4984 * a higher inode number, we can only move/rename our current inode
4985 * after we moved/renamed its parent. Therefore in this case operate on
4986 * the old path (pre move/rename) of our current inode, and the
4987 * move/rename will be performed later.
4989 if (refs_processed && !pending_move)
4990 sctx->send_progress = sctx->cur_ino + 1;
4992 if (sctx->cur_ino == 0 || sctx->cur_inode_deleted)
4994 if (!at_end && sctx->cmp_key->objectid == sctx->cur_ino)
4997 ret = get_inode_info(sctx->send_root, sctx->cur_ino, NULL, NULL,
4998 &left_mode, &left_uid, &left_gid, NULL);
5002 if (!sctx->parent_root || sctx->cur_inode_new) {
5004 if (!S_ISLNK(sctx->cur_inode_mode))
5007 ret = get_inode_info(sctx->parent_root, sctx->cur_ino,
5008 NULL, NULL, &right_mode, &right_uid,
5013 if (left_uid != right_uid || left_gid != right_gid)
5015 if (!S_ISLNK(sctx->cur_inode_mode) && left_mode != right_mode)
5019 if (S_ISREG(sctx->cur_inode_mode)) {
5020 if (need_send_hole(sctx)) {
5021 if (sctx->cur_inode_last_extent == (u64)-1 ||
5022 sctx->cur_inode_last_extent <
5023 sctx->cur_inode_size) {
5024 ret = get_last_extent(sctx, (u64)-1);
5028 if (sctx->cur_inode_last_extent <
5029 sctx->cur_inode_size) {
5030 ret = send_hole(sctx, sctx->cur_inode_size);
5035 ret = send_truncate(sctx, sctx->cur_ino, sctx->cur_inode_gen,
5036 sctx->cur_inode_size);
5042 ret = send_chown(sctx, sctx->cur_ino, sctx->cur_inode_gen,
5043 left_uid, left_gid);
5048 ret = send_chmod(sctx, sctx->cur_ino, sctx->cur_inode_gen,
5055 * If other directory inodes depended on our current directory
5056 * inode's move/rename, now do their move/rename operations.
5058 if (!is_waiting_for_move(sctx, sctx->cur_ino)) {
5059 ret = apply_children_dir_moves(sctx);
5063 * We need to send the utimes every time, no matter whether anything
5064 * actually changed between the two trees, as we may have made changes
5065 * to the inode before. If our inode is a directory and it's
5066 * waiting to be moved/renamed, we will send its utimes when
5067 * it's moved/renamed, therefore we don't need to do it here.
5069 sctx->send_progress = sctx->cur_ino + 1;
5070 ret = send_utimes(sctx, sctx->cur_ino, sctx->cur_inode_gen);
5079 static int changed_inode(struct send_ctx *sctx,
5080 enum btrfs_compare_tree_result result)
5083 struct btrfs_key *key = sctx->cmp_key;
5084 struct btrfs_inode_item *left_ii = NULL;
5085 struct btrfs_inode_item *right_ii = NULL;
5089 sctx->cur_ino = key->objectid;
5090 sctx->cur_inode_new_gen = 0;
5091 sctx->cur_inode_last_extent = (u64)-1;
5094 * Set send_progress to current inode. This will tell all get_cur_xxx
5095 * functions that the current inode's refs are not updated yet. Later,
5096 * when process_recorded_refs is finished, it is set to cur_ino + 1.
5098 sctx->send_progress = sctx->cur_ino;
5100 if (result == BTRFS_COMPARE_TREE_NEW ||
5101 result == BTRFS_COMPARE_TREE_CHANGED) {
5102 left_ii = btrfs_item_ptr(sctx->left_path->nodes[0],
5103 sctx->left_path->slots[0],
5104 struct btrfs_inode_item);
5105 left_gen = btrfs_inode_generation(sctx->left_path->nodes[0],
5108 right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
5109 sctx->right_path->slots[0],
5110 struct btrfs_inode_item);
5111 right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
5114 if (result == BTRFS_COMPARE_TREE_CHANGED) {
5115 right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
5116 sctx->right_path->slots[0],
5117 struct btrfs_inode_item);
5119 right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
5123 * The cur_ino = root dir case is special here. We can't treat
5124 * the inode as deleted+reused because it would generate a
5125 * stream that tries to delete/mkdir the root dir.
5127 if (left_gen != right_gen &&
5128 sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
5129 sctx->cur_inode_new_gen = 1;
5132 if (result == BTRFS_COMPARE_TREE_NEW) {
5133 sctx->cur_inode_gen = left_gen;
5134 sctx->cur_inode_new = 1;
5135 sctx->cur_inode_deleted = 0;
5136 sctx->cur_inode_size = btrfs_inode_size(
5137 sctx->left_path->nodes[0], left_ii);
5138 sctx->cur_inode_mode = btrfs_inode_mode(
5139 sctx->left_path->nodes[0], left_ii);
5140 sctx->cur_inode_rdev = btrfs_inode_rdev(
5141 sctx->left_path->nodes[0], left_ii);
5142 if (sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
5143 ret = send_create_inode_if_needed(sctx);
5144 } else if (result == BTRFS_COMPARE_TREE_DELETED) {
5145 sctx->cur_inode_gen = right_gen;
5146 sctx->cur_inode_new = 0;
5147 sctx->cur_inode_deleted = 1;
5148 sctx->cur_inode_size = btrfs_inode_size(
5149 sctx->right_path->nodes[0], right_ii);
5150 sctx->cur_inode_mode = btrfs_inode_mode(
5151 sctx->right_path->nodes[0], right_ii);
5152 } else if (result == BTRFS_COMPARE_TREE_CHANGED) {
5154 * We need to do some special handling in case the inode was
5155 * reported as changed with a changed generation number. This
5156 * means that the original inode was deleted and a new inode
5157 * reused the same inum. So we have to treat the old inode as
5158 * deleted and the new one as new.
5160 if (sctx->cur_inode_new_gen) {
5162 * First, process the inode as if it was deleted.
5164 sctx->cur_inode_gen = right_gen;
5165 sctx->cur_inode_new = 0;
5166 sctx->cur_inode_deleted = 1;
5167 sctx->cur_inode_size = btrfs_inode_size(
5168 sctx->right_path->nodes[0], right_ii);
5169 sctx->cur_inode_mode = btrfs_inode_mode(
5170 sctx->right_path->nodes[0], right_ii);
5171 ret = process_all_refs(sctx,
5172 BTRFS_COMPARE_TREE_DELETED);
5177 * Now process the inode as if it was new.
5179 sctx->cur_inode_gen = left_gen;
5180 sctx->cur_inode_new = 1;
5181 sctx->cur_inode_deleted = 0;
5182 sctx->cur_inode_size = btrfs_inode_size(
5183 sctx->left_path->nodes[0], left_ii);
5184 sctx->cur_inode_mode = btrfs_inode_mode(
5185 sctx->left_path->nodes[0], left_ii);
5186 sctx->cur_inode_rdev = btrfs_inode_rdev(
5187 sctx->left_path->nodes[0], left_ii);
5188 ret = send_create_inode_if_needed(sctx);
5192 ret = process_all_refs(sctx, BTRFS_COMPARE_TREE_NEW);
5196 * Advance send_progress now as we did not get into
5197 * process_recorded_refs_if_needed in the new_gen case.
5199 sctx->send_progress = sctx->cur_ino + 1;
5202 * Now process all extents and xattrs of the inode as if
5203 * they were all new.
5205 ret = process_all_extents(sctx);
5208 ret = process_all_new_xattrs(sctx);
5212 sctx->cur_inode_gen = left_gen;
5213 sctx->cur_inode_new = 0;
5214 sctx->cur_inode_new_gen = 0;
5215 sctx->cur_inode_deleted = 0;
5216 sctx->cur_inode_size = btrfs_inode_size(
5217 sctx->left_path->nodes[0], left_ii);
5218 sctx->cur_inode_mode = btrfs_inode_mode(
5219 sctx->left_path->nodes[0], left_ii);
5228 * We have to process new refs before deleted refs, but compare_trees gives us
5229 * the new and deleted refs mixed. To fix this, we record the new/deleted refs
5230 * first and later process them in process_recorded_refs.
5231 * For the cur_inode_new_gen case, we skip recording completely because
5232 * changed_inode already initiated processing of refs. The reason for this is
5233 * that in this case, compare_tree actually compares the refs of 2 different
5234 * inodes. To fix this, process_all_refs is used in changed_inode to handle all
5235 * refs of the right tree as deleted and all refs of the left tree as new.
5237 static int changed_ref(struct send_ctx *sctx,
5238 enum btrfs_compare_tree_result result)
5242 BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);
5244 if (!sctx->cur_inode_new_gen &&
5245 sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) {
5246 if (result == BTRFS_COMPARE_TREE_NEW)
5247 ret = record_new_ref(sctx);
5248 else if (result == BTRFS_COMPARE_TREE_DELETED)
5249 ret = record_deleted_ref(sctx);
5250 else if (result == BTRFS_COMPARE_TREE_CHANGED)
5251 ret = record_changed_ref(sctx);
5258 * Process new/deleted/changed xattrs. We skip processing in the
5259 * cur_inode_new_gen case because changed_inode already initiated processing
5260 * of xattrs. The reason is the same as in changed_ref.
5262 static int changed_xattr(struct send_ctx *sctx,
5263 enum btrfs_compare_tree_result result)
5267 BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);
5269 if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
5270 if (result == BTRFS_COMPARE_TREE_NEW)
5271 ret = process_new_xattr(sctx);
5272 else if (result == BTRFS_COMPARE_TREE_DELETED)
5273 ret = process_deleted_xattr(sctx);
5274 else if (result == BTRFS_COMPARE_TREE_CHANGED)
5275 ret = process_changed_xattr(sctx);
5282 * Process new/deleted/changed extents. We skip processing in the
5283 * cur_inode_new_gen case because changed_inode already initiated processing
5284 * of extents. The reason is the same as in changed_ref.
5286 static int changed_extent(struct send_ctx *sctx,
5287 enum btrfs_compare_tree_result result)
5291 BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);
5293 if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
5294 if (result != BTRFS_COMPARE_TREE_DELETED)
5295 ret = process_extent(sctx, sctx->left_path,
5302 static int dir_changed(struct send_ctx *sctx, u64 dir)
5304 u64 orig_gen, new_gen;
5307 ret = get_inode_info(sctx->send_root, dir, NULL, &new_gen, NULL, NULL,
5312 ret = get_inode_info(sctx->parent_root, dir, NULL, &orig_gen, NULL,
5317 return (orig_gen != new_gen) ? 1 : 0;
5320 static int compare_refs(struct send_ctx *sctx, struct btrfs_path *path,
5321 struct btrfs_key *key)
5323 struct btrfs_inode_extref *extref;
5324 struct extent_buffer *leaf;
5325 u64 dirid = 0, last_dirid = 0;
5332 /* Easy case, just check this one dirid */
5333 if (key->type == BTRFS_INODE_REF_KEY) {
5334 dirid = key->offset;
5336 ret = dir_changed(sctx, dirid);
5340 leaf = path->nodes[0];
5341 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
5342 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
5343 while (cur_offset < item_size) {
5344 extref = (struct btrfs_inode_extref *)(ptr +
5346 dirid = btrfs_inode_extref_parent(leaf, extref);
5347 ref_name_len = btrfs_inode_extref_name_len(leaf, extref);
5348 cur_offset += ref_name_len + sizeof(*extref);
5349 if (dirid == last_dirid)
5351 ret = dir_changed(sctx, dirid);
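/*
 * The walk above relies on the layout of BTRFS_INODE_EXTREF_KEY items:
 * each btrfs_inode_extref (parent objectid, index, name_len) is
 * immediately followed by name_len bytes of name, so the next record
 * starts at cur_offset + sizeof(*extref) + ref_name_len.
 */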
5361 * Updates compare-related fields in sctx and simply forwards to the actual
5362 * changed_xxx functions.
5364 static int changed_cb(struct btrfs_root *left_root,
5365 struct btrfs_root *right_root,
5366 struct btrfs_path *left_path,
5367 struct btrfs_path *right_path,
5368 struct btrfs_key *key,
5369 enum btrfs_compare_tree_result result,
5373 struct send_ctx *sctx = ctx;
5375 if (result == BTRFS_COMPARE_TREE_SAME) {
5376 if (key->type == BTRFS_INODE_REF_KEY ||
5377 key->type == BTRFS_INODE_EXTREF_KEY) {
5378 ret = compare_refs(sctx, left_path, key);
5383 } else if (key->type == BTRFS_EXTENT_DATA_KEY) {
5384 return maybe_send_hole(sctx, left_path, key);
5388 result = BTRFS_COMPARE_TREE_CHANGED;
5392 sctx->left_path = left_path;
5393 sctx->right_path = right_path;
5394 sctx->cmp_key = key;
5396 ret = finish_inode_if_needed(sctx, 0);
5400 /* Ignore non-FS objects */
5401 if (key->objectid == BTRFS_FREE_INO_OBJECTID ||
5402 key->objectid == BTRFS_FREE_SPACE_OBJECTID)
5405 if (key->type == BTRFS_INODE_ITEM_KEY)
5406 ret = changed_inode(sctx, result);
5407 else if (key->type == BTRFS_INODE_REF_KEY ||
5408 key->type == BTRFS_INODE_EXTREF_KEY)
5409 ret = changed_ref(sctx, result);
5410 else if (key->type == BTRFS_XATTR_ITEM_KEY)
5411 ret = changed_xattr(sctx, result);
5412 else if (key->type == BTRFS_EXTENT_DATA_KEY)
5413 ret = changed_extent(sctx, result);
5419 static int full_send_tree(struct send_ctx *sctx)
5422 struct btrfs_root *send_root = sctx->send_root;
5423 struct btrfs_key key;
5424 struct btrfs_key found_key;
5425 struct btrfs_path *path;
5426 struct extent_buffer *eb;
5429 path = alloc_path_for_send();
5433 key.objectid = BTRFS_FIRST_FREE_OBJECTID;
5434 key.type = BTRFS_INODE_ITEM_KEY;
5437 ret = btrfs_search_slot_for_read(send_root, &key, path, 1, 0);
5444 eb = path->nodes[0];
5445 slot = path->slots[0];
5446 btrfs_item_key_to_cpu(eb, &found_key, slot);
5448 ret = changed_cb(send_root, NULL, path, NULL,
5449 &found_key, BTRFS_COMPARE_TREE_NEW, sctx);
5453 key.objectid = found_key.objectid;
5454 key.type = found_key.type;
5455 key.offset = found_key.offset + 1;
5457 ret = btrfs_next_item(send_root, path);
5467 ret = finish_inode_if_needed(sctx, 1);
5470 btrfs_free_path(path);
5474 static int send_subvol(struct send_ctx *sctx)
5478 if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_STREAM_HEADER)) {
5479 ret = send_header(sctx);
5484 ret = send_subvol_begin(sctx);
5488 if (sctx->parent_root) {
5489 ret = btrfs_compare_trees(sctx->send_root, sctx->parent_root,
5493 ret = finish_inode_if_needed(sctx, 1);
5497 ret = full_send_tree(sctx);
5503 free_recorded_refs(sctx);
5508 * If orphan cleanup did remove any orphans from a root, it means the tree
5509 * was modified and therefore the commit root is not the same as the current
5510 * root anymore. This is a problem, because send uses the commit root and
5511 * therefore can see inode items that don't exist in the current root anymore,
5512 * and for example make calls to btrfs_iget, which will do tree lookups based
5513 * on the current root and not on the commit root. Those lookups will fail,
5514 * returning a -ESTALE error, and making send fail with that error. So make
5515 * sure a send does not see any orphans we have just removed, and that it will
5516 * see the same inodes regardless of whether a transaction commit happened
5517 * before it started (meaning that the commit root will be the same as the
5518 * current root) or not.
5520 static int ensure_commit_roots_uptodate(struct send_ctx *sctx)
5523 struct btrfs_trans_handle *trans = NULL;
5526 if (sctx->parent_root &&
5527 sctx->parent_root->node != sctx->parent_root->commit_root)
5530 for (i = 0; i < sctx->clone_roots_cnt; i++)
5531 if (sctx->clone_roots[i].root->node !=
5532 sctx->clone_roots[i].root->commit_root)
5536 return btrfs_end_transaction(trans, sctx->send_root);
5541 /* Use any root, all fs roots will get their commit roots updated. */
5543 trans = btrfs_join_transaction(sctx->send_root);
5545 return PTR_ERR(trans);
5549 return btrfs_commit_transaction(trans, sctx->send_root);
5552 static void btrfs_root_dec_send_in_progress(struct btrfs_root* root)
5554 spin_lock(&root->root_item_lock);
5555 root->send_in_progress--;
5557 * Not much left to do, we don't know why it's unbalanced and
5558 * can't blindly reset it to 0.
5560 if (root->send_in_progress < 0)
5561 btrfs_err(root->fs_info,
5562 "send_in_progres unbalanced %d root %llu",
5563 root->send_in_progress, root->root_key.objectid);
5564 spin_unlock(&root->root_item_lock);
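/*
 * For orientation, a minimal userspace sketch of how this ioctl is
 * typically driven (error handling omitted; do_full_send() is an
 * invented helper name, not part of btrfs-progs):
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/btrfs.h>
 *
 *	int do_full_send(int subvol_fd, int out_fd)
 *	{
 *		struct btrfs_ioctl_send_args args = { 0 };
 *
 *		args.send_fd = out_fd;   (the stream is written here)
 *		args.parent_root = 0;    (0 means full send, no parent)
 *		args.clone_sources_count = 0;
 *		return ioctl(subvol_fd, BTRFS_IOC_SEND, &args);
 *	}
 *
 * The fd the ioctl is issued on selects the read-only subvolume to
 * send; args.send_fd is the pipe or file that receives the stream.
 */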
5567 long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
5570 struct btrfs_root *send_root;
5571 struct btrfs_root *clone_root;
5572 struct btrfs_fs_info *fs_info;
5573 struct btrfs_ioctl_send_args *arg = NULL;
5574 struct btrfs_key key;
5575 struct send_ctx *sctx = NULL;
5577 u64 *clone_sources_tmp = NULL;
5578 int clone_sources_to_rollback = 0;
5579 int sort_clone_roots = 0;
5582 if (!capable(CAP_SYS_ADMIN))
5585 send_root = BTRFS_I(file_inode(mnt_file))->root;
5586 fs_info = send_root->fs_info;
5589 * The subvolume must remain read-only during send, protect against
5590 * making it RW. This also protects against deletion.
5592 spin_lock(&send_root->root_item_lock);
5593 send_root->send_in_progress++;
5594 spin_unlock(&send_root->root_item_lock);
5597 * This is done when we lookup the root, it should already be complete
5598 * by the time we get here.
5600 WARN_ON(send_root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE);
5603 * Userspace tools do the checks and warn the user if it's not read-only.
5606 if (!btrfs_root_readonly(send_root)) {
5611 arg = memdup_user(arg_, sizeof(*arg));
5618 if (!access_ok(VERIFY_READ, arg->clone_sources,
5619 sizeof(*arg->clone_sources) *
5620 arg->clone_sources_count)) {
5625 if (arg->flags & ~BTRFS_SEND_FLAG_MASK) {
5630 sctx = kzalloc(sizeof(struct send_ctx), GFP_NOFS);
5636 INIT_LIST_HEAD(&sctx->new_refs);
5637 INIT_LIST_HEAD(&sctx->deleted_refs);
5638 INIT_RADIX_TREE(&sctx->name_cache, GFP_NOFS);
5639 INIT_LIST_HEAD(&sctx->name_cache_list);
5641 sctx->flags = arg->flags;
5643 sctx->send_filp = fget(arg->send_fd);
5644 if (!sctx->send_filp) {
5649 sctx->send_root = send_root;
5651 * Unlikely but possible, if the subvolume is marked for deletion but
5652 * is slow to remove the directory entry, send can still be started.
5654 if (btrfs_root_dead(sctx->send_root)) {
5659 sctx->clone_roots_cnt = arg->clone_sources_count;
5661 sctx->send_max_size = BTRFS_SEND_BUF_SIZE;
5662 sctx->send_buf = vmalloc(sctx->send_max_size);
5663 if (!sctx->send_buf) {
5668 sctx->read_buf = vmalloc(BTRFS_SEND_READ_SIZE);
5669 if (!sctx->read_buf) {
5674 sctx->pending_dir_moves = RB_ROOT;
5675 sctx->waiting_dir_moves = RB_ROOT;
5676 sctx->orphan_dirs = RB_ROOT;
5678 sctx->clone_roots = vzalloc(sizeof(struct clone_root) *
5679 (arg->clone_sources_count + 1));
5680 if (!sctx->clone_roots) {
5685 if (arg->clone_sources_count) {
5686 clone_sources_tmp = vmalloc(arg->clone_sources_count *
5687 sizeof(*arg->clone_sources));
5688 if (!clone_sources_tmp) {
5693 ret = copy_from_user(clone_sources_tmp, arg->clone_sources,
5694 arg->clone_sources_count *
5695 sizeof(*arg->clone_sources));
5701 for (i = 0; i < arg->clone_sources_count; i++) {
5702 key.objectid = clone_sources_tmp[i];
5703 key.type = BTRFS_ROOT_ITEM_KEY;
5704 key.offset = (u64)-1;
5706 index = srcu_read_lock(&fs_info->subvol_srcu);
5708 clone_root = btrfs_read_fs_root_no_name(fs_info, &key);
5709 if (IS_ERR(clone_root)) {
5710 srcu_read_unlock(&fs_info->subvol_srcu, index);
5711 ret = PTR_ERR(clone_root);
5714 clone_sources_to_rollback = i + 1;
5715 spin_lock(&clone_root->root_item_lock);
5716 clone_root->send_in_progress++;
5717 if (!btrfs_root_readonly(clone_root)) {
5718 spin_unlock(&clone_root->root_item_lock);
5719 srcu_read_unlock(&fs_info->subvol_srcu, index);
5723 spin_unlock(&clone_root->root_item_lock);
5724 srcu_read_unlock(&fs_info->subvol_srcu, index);
5726 sctx->clone_roots[i].root = clone_root;
5728 vfree(clone_sources_tmp);
5729 clone_sources_tmp = NULL;
5732 if (arg->parent_root) {
5733 key.objectid = arg->parent_root;
5734 key.type = BTRFS_ROOT_ITEM_KEY;
5735 key.offset = (u64)-1;
5737 index = srcu_read_lock(&fs_info->subvol_srcu);
5739 sctx->parent_root = btrfs_read_fs_root_no_name(fs_info, &key);
5740 if (IS_ERR(sctx->parent_root)) {
5741 srcu_read_unlock(&fs_info->subvol_srcu, index);
5742 ret = PTR_ERR(sctx->parent_root);
5746 spin_lock(&sctx->parent_root->root_item_lock);
5747 sctx->parent_root->send_in_progress++;
5748 if (!btrfs_root_readonly(sctx->parent_root) ||
5749 btrfs_root_dead(sctx->parent_root)) {
5750 spin_unlock(&sctx->parent_root->root_item_lock);
5751 srcu_read_unlock(&fs_info->subvol_srcu, index);
5755 spin_unlock(&sctx->parent_root->root_item_lock);
5757 srcu_read_unlock(&fs_info->subvol_srcu, index);
5761 * Clones from send_root are allowed, but only if the clone source
5762 * is behind the current send position. This is checked while searching
5763 * for possible clone sources.
5765 sctx->clone_roots[sctx->clone_roots_cnt++].root = sctx->send_root;
5767 /* We do a bsearch later */
5768 sort(sctx->clone_roots, sctx->clone_roots_cnt,
5769 sizeof(*sctx->clone_roots), __clone_root_cmp_sort,
5771 sort_clone_roots = 1;
5773 ret = ensure_commit_roots_uptodate(sctx);
5777 current->journal_info = BTRFS_SEND_TRANS_STUB;
5778 ret = send_subvol(sctx);
5779 current->journal_info = NULL;
5783 if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_END_CMD)) {
5784 ret = begin_cmd(sctx, BTRFS_SEND_C_END);
5787 ret = send_cmd(sctx);
5793 WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->pending_dir_moves));
5794 while (sctx && !RB_EMPTY_ROOT(&sctx->pending_dir_moves)) {
5796 struct pending_dir_move *pm;
5798 n = rb_first(&sctx->pending_dir_moves);
5799 pm = rb_entry(n, struct pending_dir_move, node);
5800 while (!list_empty(&pm->list)) {
5801 struct pending_dir_move *pm2;
5803 pm2 = list_first_entry(&pm->list,
5804 struct pending_dir_move, list);
5805 free_pending_move(sctx, pm2);
5807 free_pending_move(sctx, pm);
5810 WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves));
5811 while (sctx && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves)) {
5813 struct waiting_dir_move *dm;
5815 n = rb_first(&sctx->waiting_dir_moves);
5816 dm = rb_entry(n, struct waiting_dir_move, node);
5817 rb_erase(&dm->node, &sctx->waiting_dir_moves);
5821 WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->orphan_dirs));
5822 while (sctx && !RB_EMPTY_ROOT(&sctx->orphan_dirs)) {
5824 struct orphan_dir_info *odi;
5826 n = rb_first(&sctx->orphan_dirs);
5827 odi = rb_entry(n, struct orphan_dir_info, node);
5828 free_orphan_dir_info(sctx, odi);
5831 if (sort_clone_roots) {
5832 for (i = 0; i < sctx->clone_roots_cnt; i++)
5833 btrfs_root_dec_send_in_progress(
5834 sctx->clone_roots[i].root);
5836 for (i = 0; sctx && i < clone_sources_to_rollback; i++)
5837 btrfs_root_dec_send_in_progress(
5838 sctx->clone_roots[i].root);
5840 btrfs_root_dec_send_in_progress(send_root);
5842 if (sctx && !IS_ERR_OR_NULL(sctx->parent_root))
5843 btrfs_root_dec_send_in_progress(sctx->parent_root);
5846 vfree(clone_sources_tmp);
5849 if (sctx->send_filp)
5850 fput(sctx->send_filp);
5852 vfree(sctx->clone_roots);
5853 vfree(sctx->send_buf);
5854 vfree(sctx->read_buf);
5856 name_cache_free(sctx);