2 * Copyright (C) 2007,2008 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
19 #include <linux/sched.h>
20 #include <linux/slab.h>
21 #include <linux/rbtree.h>
24 #include "transaction.h"
25 #include "print-tree.h"
28 static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
29 *root, struct btrfs_path *path, int level);
30 static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
31 *root, struct btrfs_key *ins_key,
32 struct btrfs_path *path, int data_size, int extend);
33 static int push_node_left(struct btrfs_trans_handle *trans,
34 struct btrfs_root *root, struct extent_buffer *dst,
35 struct extent_buffer *src, int empty);
36 static int balance_node_right(struct btrfs_trans_handle *trans,
37 struct btrfs_root *root,
38 struct extent_buffer *dst_buf,
39 struct extent_buffer *src_buf);
40 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
42 static int tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
43 struct extent_buffer *eb);
45 struct btrfs_path *btrfs_alloc_path(void)
47 struct btrfs_path *path;
48 path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
53 * set all locked nodes in the path to blocking locks. This should
54 * be done before scheduling
56 noinline void btrfs_set_path_blocking(struct btrfs_path *p)
59 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
60 if (!p->nodes[i] || !p->locks[i])
62 btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
63 if (p->locks[i] == BTRFS_READ_LOCK)
64 p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
65 else if (p->locks[i] == BTRFS_WRITE_LOCK)
66 p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
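/*
 * Illustrative usage sketch, not part of the original file: callers in
 * this file that are about to do something that may sleep (allocate
 * memory, read a block from disk) switch the whole path to blocking
 * locks first, then switch it back once the sleeping work is done.
 * example_path_blocking_roundtrip() is a hypothetical helper showing
 * that round trip with the two functions defined here.
 */
static void example_path_blocking_roundtrip(struct btrfs_path *p)
{
	btrfs_set_path_blocking(p);
	/* ... work that may schedule(), e.g. a disk read ... */
	btrfs_clear_path_blocking(p, NULL, 0);
}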
71 * reset all the locked nodes in the path to spinning locks.
73 * held is used to keep lockdep happy; when lockdep is enabled
74 * we set held to a blocking lock before we go around and
75 * retake all the spinlocks in the path. You can safely use NULL
78 noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
79 struct extent_buffer *held, int held_rw)
83 #ifdef CONFIG_DEBUG_LOCK_ALLOC
84 /* lockdep really cares that we take all of these spinlocks
85 * in the right order. If any of the locks in the path are not
86 * currently blocking, it is going to complain. So, make really
87 * really sure by forcing the path to blocking before we clear
91 btrfs_set_lock_blocking_rw(held, held_rw);
92 if (held_rw == BTRFS_WRITE_LOCK)
93 held_rw = BTRFS_WRITE_LOCK_BLOCKING;
94 else if (held_rw == BTRFS_READ_LOCK)
95 held_rw = BTRFS_READ_LOCK_BLOCKING;
97 btrfs_set_path_blocking(p);
100 for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
101 if (p->nodes[i] && p->locks[i]) {
102 btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
103 if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
104 p->locks[i] = BTRFS_WRITE_LOCK;
105 else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
106 p->locks[i] = BTRFS_READ_LOCK;
110 #ifdef CONFIG_DEBUG_LOCK_ALLOC
112 btrfs_clear_lock_blocking_rw(held, held_rw);
116 /* this also releases the path */
117 void btrfs_free_path(struct btrfs_path *p)
121 btrfs_release_path(p);
122 kmem_cache_free(btrfs_path_cachep, p);
126 * path release drops references on the extent buffers in the path
127 * and it drops any locks held by this path
129 * It is safe to call this on paths that have no locks or extent buffers held.
131 noinline void btrfs_release_path(struct btrfs_path *p)
135 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
140 btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
143 free_extent_buffer(p->nodes[i]);
149 * safely gets a reference on the root node of a tree. A lock
150 * is not taken, so a concurrent writer may put a different node
151 * at the root of the tree. See btrfs_lock_root_node for the
154 * The extent buffer returned by this has a reference taken, so
155 * it won't disappear. It may stop being the root of the tree
156 * at any time because there are no locks held.
158 struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
160 struct extent_buffer *eb;
164 eb = rcu_dereference(root->node);
167 * RCU really hurts here; the root node could already be freed because
168 * it was cow'ed, while we may not see the new root node yet, so do
169 * the inc_not_zero dance and, if that doesn't work,
170 * synchronize_rcu and try again.
172 if (atomic_inc_not_zero(&eb->refs)) {
182 /* loop around taking references on and locking the root node of the
183 * tree until you end up with a lock on the root. A locked buffer
184 * is returned, with a reference held.
186 struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
188 struct extent_buffer *eb;
191 eb = btrfs_root_node(root);
193 if (eb == root->node)
195 btrfs_tree_unlock(eb);
196 free_extent_buffer(eb);
201 /* loop around taking references on and locking the root node of the
202 * tree until you end up with a lock on the root. A locked buffer
203 * is returned, with a reference held.
205 static struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
207 struct extent_buffer *eb;
210 eb = btrfs_root_node(root);
211 btrfs_tree_read_lock(eb);
212 if (eb == root->node)
214 btrfs_tree_read_unlock(eb);
215 free_extent_buffer(eb);
220 /* cowonly roots (everything not a reference-counted cow subvolume) just get
221 * put onto a simple dirty list. transaction.c walks this list to make sure they
222 * get properly updated on disk.
224 static void add_root_to_dirty_list(struct btrfs_root *root)
226 spin_lock(&root->fs_info->trans_lock);
227 if (test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state) &&
228 list_empty(&root->dirty_list)) {
229 list_add(&root->dirty_list,
230 &root->fs_info->dirty_cowonly_roots);
232 spin_unlock(&root->fs_info->trans_lock);
236 * used by snapshot creation to make a copy of a root for a tree with
237 * a given objectid. The buffer with the new root node is returned in
238 * cow_ret, and this func returns zero on success or a negative error code.
240 int btrfs_copy_root(struct btrfs_trans_handle *trans,
241 struct btrfs_root *root,
242 struct extent_buffer *buf,
243 struct extent_buffer **cow_ret, u64 new_root_objectid)
245 struct extent_buffer *cow;
248 struct btrfs_disk_key disk_key;
250 WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
251 trans->transid != root->fs_info->running_transaction->transid);
252 WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
253 trans->transid != root->last_trans);
255 level = btrfs_header_level(buf);
257 btrfs_item_key(buf, &disk_key, 0);
259 btrfs_node_key(buf, &disk_key, 0);
261 cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
262 &disk_key, level, buf->start, 0);
266 copy_extent_buffer(cow, buf, 0, 0, cow->len);
267 btrfs_set_header_bytenr(cow, cow->start);
268 btrfs_set_header_generation(cow, trans->transid);
269 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
270 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
271 BTRFS_HEADER_FLAG_RELOC);
272 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
273 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
275 btrfs_set_header_owner(cow, new_root_objectid);
277 write_extent_buffer(cow, root->fs_info->fsid, btrfs_header_fsid(),
280 WARN_ON(btrfs_header_generation(buf) > trans->transid);
281 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
282 ret = btrfs_inc_ref(trans, root, cow, 1);
284 ret = btrfs_inc_ref(trans, root, cow, 0);
289 btrfs_mark_buffer_dirty(cow);
298 MOD_LOG_KEY_REMOVE_WHILE_FREEING,
299 MOD_LOG_KEY_REMOVE_WHILE_MOVING,
301 MOD_LOG_ROOT_REPLACE,
304 struct tree_mod_move {
309 struct tree_mod_root {
314 struct tree_mod_elem {
316 u64 index; /* shifted logical */
320 /* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
323 /* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
326 /* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
327 struct btrfs_disk_key key;
330 /* this is used for op == MOD_LOG_MOVE_KEYS */
331 struct tree_mod_move move;
333 /* this is used for op == MOD_LOG_ROOT_REPLACE */
334 struct tree_mod_root old_root;
337 static inline void tree_mod_log_read_lock(struct btrfs_fs_info *fs_info)
339 read_lock(&fs_info->tree_mod_log_lock);
342 static inline void tree_mod_log_read_unlock(struct btrfs_fs_info *fs_info)
344 read_unlock(&fs_info->tree_mod_log_lock);
347 static inline void tree_mod_log_write_lock(struct btrfs_fs_info *fs_info)
349 write_lock(&fs_info->tree_mod_log_lock);
352 static inline void tree_mod_log_write_unlock(struct btrfs_fs_info *fs_info)
354 write_unlock(&fs_info->tree_mod_log_lock);
358 * Pull a new tree mod seq number for our operation.
360 static inline u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)
362 return atomic64_inc_return(&fs_info->tree_mod_seq);
366 * This adds a new blocker to the tree mod log's blocker list if the @elem
367 * passed does not already have a sequence number set. So when a caller expects
368 * to record tree modifications, it should ensure that elem->seq is zero
369 * before calling btrfs_get_tree_mod_seq.
370 * Returns a fresh, unused tree log modification sequence number, even if no new
373 u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
374 struct seq_list *elem)
376 tree_mod_log_write_lock(fs_info);
377 spin_lock(&fs_info->tree_mod_seq_lock);
379 elem->seq = btrfs_inc_tree_mod_seq(fs_info);
380 list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
382 spin_unlock(&fs_info->tree_mod_seq_lock);
383 tree_mod_log_write_unlock(fs_info);
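/*
 * Illustrative usage sketch, not part of the original file: a reader
 * that wants a stable view of past tree states (e.g. backref walking)
 * registers itself as a blocker with a fresh sequence number, does its
 * work against that sequence number, then drops the blocker so older
 * log entries can be reclaimed. example_hold_tree_mod_seq() is a
 * hypothetical caller; elem starts out zeroed so a new seq is assigned.
 */
static void example_hold_tree_mod_seq(struct btrfs_fs_info *fs_info)
{
	struct seq_list elem = {};
	u64 seq;

	seq = btrfs_get_tree_mod_seq(fs_info, &elem);
	/* ... walk old tree versions using 'seq' as time_seq ... */
	btrfs_put_tree_mod_seq(fs_info, &elem);
}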
388 void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
389 struct seq_list *elem)
391 struct rb_root *tm_root;
392 struct rb_node *node;
393 struct rb_node *next;
394 struct seq_list *cur_elem;
395 struct tree_mod_elem *tm;
396 u64 min_seq = (u64)-1;
397 u64 seq_putting = elem->seq;
402 spin_lock(&fs_info->tree_mod_seq_lock);
403 list_del(&elem->list);
406 list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
407 if (cur_elem->seq < min_seq) {
408 if (seq_putting > cur_elem->seq) {
410 * a blocker with a lower sequence number exists; we
411 * cannot remove anything from the log
413 spin_unlock(&fs_info->tree_mod_seq_lock);
416 min_seq = cur_elem->seq;
419 spin_unlock(&fs_info->tree_mod_seq_lock);
422 * anything that's lower than the lowest existing (read: blocked)
423 * sequence number can be removed from the tree.
425 tree_mod_log_write_lock(fs_info);
426 tm_root = &fs_info->tree_mod_log;
427 for (node = rb_first(tm_root); node; node = next) {
428 next = rb_next(node);
429 tm = container_of(node, struct tree_mod_elem, node);
430 if (tm->seq > min_seq)
432 rb_erase(node, tm_root);
435 tree_mod_log_write_unlock(fs_info);
439 * key order of the log:
442 * the index is the shifted logical of the *new* root node for root replace
443 * operations, or the shifted logical of the affected block for all other
446 * Note: must be called with write lock (tree_mod_log_write_lock).
449 __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
451 struct rb_root *tm_root;
452 struct rb_node **new;
453 struct rb_node *parent = NULL;
454 struct tree_mod_elem *cur;
458 tm->seq = btrfs_inc_tree_mod_seq(fs_info);
460 tm_root = &fs_info->tree_mod_log;
461 new = &tm_root->rb_node;
463 cur = container_of(*new, struct tree_mod_elem, node);
465 if (cur->index < tm->index)
466 new = &((*new)->rb_left);
467 else if (cur->index > tm->index)
468 new = &((*new)->rb_right);
469 else if (cur->seq < tm->seq)
470 new = &((*new)->rb_left);
471 else if (cur->seq > tm->seq)
472 new = &((*new)->rb_right);
477 rb_link_node(&tm->node, parent, new);
478 rb_insert_color(&tm->node, tm_root);
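/*
 * Illustrative sketch, not part of the original file: the rb-tree above
 * is keyed on (index, seq), where index is the block's logical address
 * shifted down by PAGE_CACHE_SHIFT, exactly as done when tm->index is
 * filled in below. example_tree_mod_index() is a hypothetical helper
 * repeating that computation; with 4KiB pages a block at logical
 * 0x40000000 maps to index 0x40000.
 */
static u64 example_tree_mod_index(u64 logical)
{
	return logical >> PAGE_CACHE_SHIFT;
}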
483 * Determines if logging can be omitted. Returns 1 if it can. Otherwise, it
484 * returns zero with the tree_mod_log_lock acquired. The caller must hold
485 * this until all tree mod log insertions are recorded in the rb tree and then
486 * call tree_mod_log_write_unlock() to release.
488 static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
489 struct extent_buffer *eb) {
491 if (list_empty(&(fs_info)->tree_mod_seq_list))
493 if (eb && btrfs_header_level(eb) == 0)
496 tree_mod_log_write_lock(fs_info);
497 if (list_empty(&(fs_info)->tree_mod_seq_list)) {
498 tree_mod_log_write_unlock(fs_info);
505 /* Similar to tree_mod_dont_log, but doesn't acquire any locks. */
506 static inline int tree_mod_need_log(const struct btrfs_fs_info *fs_info,
507 struct extent_buffer *eb)
510 if (list_empty(&(fs_info)->tree_mod_seq_list))
512 if (eb && btrfs_header_level(eb) == 0)
518 static struct tree_mod_elem *
519 alloc_tree_mod_elem(struct extent_buffer *eb, int slot,
520 enum mod_log_op op, gfp_t flags)
522 struct tree_mod_elem *tm;
524 tm = kzalloc(sizeof(*tm), flags);
528 tm->index = eb->start >> PAGE_CACHE_SHIFT;
529 if (op != MOD_LOG_KEY_ADD) {
530 btrfs_node_key(eb, &tm->key, slot);
531 tm->blockptr = btrfs_node_blockptr(eb, slot);
535 tm->generation = btrfs_node_ptr_generation(eb, slot);
536 RB_CLEAR_NODE(&tm->node);
542 tree_mod_log_insert_key(struct btrfs_fs_info *fs_info,
543 struct extent_buffer *eb, int slot,
544 enum mod_log_op op, gfp_t flags)
546 struct tree_mod_elem *tm;
549 if (!tree_mod_need_log(fs_info, eb))
552 tm = alloc_tree_mod_elem(eb, slot, op, flags);
556 if (tree_mod_dont_log(fs_info, eb)) {
561 ret = __tree_mod_log_insert(fs_info, tm);
562 tree_mod_log_write_unlock(fs_info);
570 tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
571 struct extent_buffer *eb, int dst_slot, int src_slot,
572 int nr_items, gfp_t flags)
574 struct tree_mod_elem *tm = NULL;
575 struct tree_mod_elem **tm_list = NULL;
580 if (!tree_mod_need_log(fs_info, eb))
583 tm_list = kzalloc(nr_items * sizeof(struct tree_mod_elem *), flags);
587 tm = kzalloc(sizeof(*tm), flags);
593 tm->index = eb->start >> PAGE_CACHE_SHIFT;
595 tm->move.dst_slot = dst_slot;
596 tm->move.nr_items = nr_items;
597 tm->op = MOD_LOG_MOVE_KEYS;
599 for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
600 tm_list[i] = alloc_tree_mod_elem(eb, i + dst_slot,
601 MOD_LOG_KEY_REMOVE_WHILE_MOVING, flags);
608 if (tree_mod_dont_log(fs_info, eb))
613 * When we overwrite something during the move, we log these removals.
614 * This can only happen when we move towards the beginning of the
615 * buffer, i.e. dst_slot < src_slot.
617 for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
618 ret = __tree_mod_log_insert(fs_info, tm_list[i]);
623 ret = __tree_mod_log_insert(fs_info, tm);
626 tree_mod_log_write_unlock(fs_info);
631 for (i = 0; i < nr_items; i++) {
632 if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
633 rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
637 tree_mod_log_write_unlock(fs_info);
645 __tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
646 struct tree_mod_elem **tm_list,
652 for (i = nritems - 1; i >= 0; i--) {
653 ret = __tree_mod_log_insert(fs_info, tm_list[i]);
655 for (j = nritems - 1; j > i; j--)
656 rb_erase(&tm_list[j]->node,
657 &fs_info->tree_mod_log);
666 tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
667 struct extent_buffer *old_root,
668 struct extent_buffer *new_root, gfp_t flags,
671 struct tree_mod_elem *tm = NULL;
672 struct tree_mod_elem **tm_list = NULL;
677 if (!tree_mod_need_log(fs_info, NULL))
680 if (log_removal && btrfs_header_level(old_root) > 0) {
681 nritems = btrfs_header_nritems(old_root);
682 tm_list = kzalloc(nritems * sizeof(struct tree_mod_elem *),
688 for (i = 0; i < nritems; i++) {
689 tm_list[i] = alloc_tree_mod_elem(old_root, i,
690 MOD_LOG_KEY_REMOVE_WHILE_FREEING, flags);
698 tm = kzalloc(sizeof(*tm), flags);
704 tm->index = new_root->start >> PAGE_CACHE_SHIFT;
705 tm->old_root.logical = old_root->start;
706 tm->old_root.level = btrfs_header_level(old_root);
707 tm->generation = btrfs_header_generation(old_root);
708 tm->op = MOD_LOG_ROOT_REPLACE;
710 if (tree_mod_dont_log(fs_info, NULL))
714 ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
716 ret = __tree_mod_log_insert(fs_info, tm);
718 tree_mod_log_write_unlock(fs_info);
727 for (i = 0; i < nritems; i++)
736 static struct tree_mod_elem *
737 __tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
740 struct rb_root *tm_root;
741 struct rb_node *node;
742 struct tree_mod_elem *cur = NULL;
743 struct tree_mod_elem *found = NULL;
744 u64 index = start >> PAGE_CACHE_SHIFT;
746 tree_mod_log_read_lock(fs_info);
747 tm_root = &fs_info->tree_mod_log;
748 node = tm_root->rb_node;
750 cur = container_of(node, struct tree_mod_elem, node);
751 if (cur->index < index) {
752 node = node->rb_left;
753 } else if (cur->index > index) {
754 node = node->rb_right;
755 } else if (cur->seq < min_seq) {
756 node = node->rb_left;
757 } else if (!smallest) {
758 /* we want the node with the highest seq */
760 BUG_ON(found->seq > cur->seq);
762 node = node->rb_left;
763 } else if (cur->seq > min_seq) {
764 /* we want the node with the smallest seq */
766 BUG_ON(found->seq < cur->seq);
768 node = node->rb_right;
774 tree_mod_log_read_unlock(fs_info);
780 * this returns the element from the log with the smallest time sequence
781 * value that's in the log (the oldest log item). any element with a time
782 * sequence lower than min_seq will be ignored.
784 static struct tree_mod_elem *
785 tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
788 return __tree_mod_log_search(fs_info, start, min_seq, 1);
792 * this returns the element from the log with the largest time sequence
793 * value that's in the log (the most recent log item). any element with
794 * a time sequence lower than min_seq will be ignored.
796 static struct tree_mod_elem *
797 tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
799 return __tree_mod_log_search(fs_info, start, min_seq, 0);
803 tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
804 struct extent_buffer *src, unsigned long dst_offset,
805 unsigned long src_offset, int nr_items)
808 struct tree_mod_elem **tm_list = NULL;
809 struct tree_mod_elem **tm_list_add, **tm_list_rem;
813 if (!tree_mod_need_log(fs_info, NULL))
816 if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
819 tm_list = kzalloc(nr_items * 2 * sizeof(struct tree_mod_elem *),
824 tm_list_add = tm_list;
825 tm_list_rem = tm_list + nr_items;
826 for (i = 0; i < nr_items; i++) {
827 tm_list_rem[i] = alloc_tree_mod_elem(src, i + src_offset,
828 MOD_LOG_KEY_REMOVE, GFP_NOFS);
829 if (!tm_list_rem[i]) {
834 tm_list_add[i] = alloc_tree_mod_elem(dst, i + dst_offset,
835 MOD_LOG_KEY_ADD, GFP_NOFS);
836 if (!tm_list_add[i]) {
842 if (tree_mod_dont_log(fs_info, NULL))
846 for (i = 0; i < nr_items; i++) {
847 ret = __tree_mod_log_insert(fs_info, tm_list_rem[i]);
850 ret = __tree_mod_log_insert(fs_info, tm_list_add[i]);
855 tree_mod_log_write_unlock(fs_info);
861 for (i = 0; i < nr_items * 2; i++) {
862 if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
863 rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
867 tree_mod_log_write_unlock(fs_info);
874 tree_mod_log_eb_move(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
875 int dst_offset, int src_offset, int nr_items)
878 ret = tree_mod_log_insert_move(fs_info, dst, dst_offset, src_offset,
884 tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info,
885 struct extent_buffer *eb, int slot, int atomic)
889 ret = tree_mod_log_insert_key(fs_info, eb, slot,
891 atomic ? GFP_ATOMIC : GFP_NOFS);
896 tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
898 struct tree_mod_elem **tm_list = NULL;
903 if (btrfs_header_level(eb) == 0)
906 if (!tree_mod_need_log(fs_info, NULL))
909 nritems = btrfs_header_nritems(eb);
910 tm_list = kzalloc(nritems * sizeof(struct tree_mod_elem *),
915 for (i = 0; i < nritems; i++) {
916 tm_list[i] = alloc_tree_mod_elem(eb, i,
917 MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
924 if (tree_mod_dont_log(fs_info, eb))
927 ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
928 tree_mod_log_write_unlock(fs_info);
936 for (i = 0; i < nritems; i++)
944 tree_mod_log_set_root_pointer(struct btrfs_root *root,
945 struct extent_buffer *new_root_node,
949 ret = tree_mod_log_insert_root(root->fs_info, root->node,
950 new_root_node, GFP_NOFS, log_removal);
955 * check if the tree block can be shared by multiple trees
957 int btrfs_block_can_be_shared(struct btrfs_root *root,
958 struct extent_buffer *buf)
961 * Tree blocks not in reference counted trees and tree roots
962 * are never shared. If a block was allocated after the last
963 * snapshot and the block was not allocated by tree relocation,
964 * we know the block is not shared.
966 if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
967 buf != root->node && buf != root->commit_root &&
968 (btrfs_header_generation(buf) <=
969 btrfs_root_last_snapshot(&root->root_item) ||
970 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
972 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
973 if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
974 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
980 static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
981 struct btrfs_root *root,
982 struct extent_buffer *buf,
983 struct extent_buffer *cow,
993 * Backrefs update rules:
995 * Always use full backrefs for extent pointers in tree blocks
996 * allocated by tree relocation.
998 * If a shared tree block is no longer referenced by its owner
999 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
1000 * use full backrefs for extent pointers in the tree block.
1002 * If a tree block is being relocated
1003 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
1004 * use full backrefs for extent pointers in the tree block.
1005 * The reason for this is that some operations (such as drop tree)
1006 * are only allowed for blocks that use full backrefs.
1009 if (btrfs_block_can_be_shared(root, buf)) {
1010 ret = btrfs_lookup_extent_info(trans, root, buf->start,
1011 btrfs_header_level(buf), 1,
1017 btrfs_std_error(root->fs_info, ret);
1022 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
1023 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
1024 flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
1029 owner = btrfs_header_owner(buf);
1030 BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
1031 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
1034 if ((owner == root->root_key.objectid ||
1035 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
1036 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
1037 ret = btrfs_inc_ref(trans, root, buf, 1);
1038 BUG_ON(ret); /* -ENOMEM */
1040 if (root->root_key.objectid ==
1041 BTRFS_TREE_RELOC_OBJECTID) {
1042 ret = btrfs_dec_ref(trans, root, buf, 0);
1043 BUG_ON(ret); /* -ENOMEM */
1044 ret = btrfs_inc_ref(trans, root, cow, 1);
1045 BUG_ON(ret); /* -ENOMEM */
1047 new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
1050 if (root->root_key.objectid ==
1051 BTRFS_TREE_RELOC_OBJECTID)
1052 ret = btrfs_inc_ref(trans, root, cow, 1);
1054 ret = btrfs_inc_ref(trans, root, cow, 0);
1055 BUG_ON(ret); /* -ENOMEM */
1057 if (new_flags != 0) {
1058 int level = btrfs_header_level(buf);
1060 ret = btrfs_set_disk_extent_flags(trans, root,
1063 new_flags, level, 0);
1068 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
1069 if (root->root_key.objectid ==
1070 BTRFS_TREE_RELOC_OBJECTID)
1071 ret = btrfs_inc_ref(trans, root, cow, 1);
1073 ret = btrfs_inc_ref(trans, root, cow, 0);
1074 BUG_ON(ret); /* -ENOMEM */
1075 ret = btrfs_dec_ref(trans, root, buf, 1);
1076 BUG_ON(ret); /* -ENOMEM */
1078 clean_tree_block(trans, root, buf);
1085 * does the dirty work in cow of a single block. The parent block (if
1086 * supplied) is updated to point to the new cow copy. The new buffer is marked
1087 * dirty and returned locked. If you modify the block it needs to be marked
1090 * search_start -- an allocation hint for the new block
1092 * empty_size -- a hint that you plan on doing more cow. This is the size in
1093 * bytes the allocator should try to find free next to the block it returns.
1094 * This is just a hint and may be ignored by the allocator.
1096 static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
1097 struct btrfs_root *root,
1098 struct extent_buffer *buf,
1099 struct extent_buffer *parent, int parent_slot,
1100 struct extent_buffer **cow_ret,
1101 u64 search_start, u64 empty_size)
1103 struct btrfs_disk_key disk_key;
1104 struct extent_buffer *cow;
1107 int unlock_orig = 0;
1110 if (*cow_ret == buf)
1113 btrfs_assert_tree_locked(buf);
1115 WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
1116 trans->transid != root->fs_info->running_transaction->transid);
1117 WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
1118 trans->transid != root->last_trans);
1120 level = btrfs_header_level(buf);
1123 btrfs_item_key(buf, &disk_key, 0);
1125 btrfs_node_key(buf, &disk_key, 0);
1127 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
1129 parent_start = parent->start;
1135 cow = btrfs_alloc_tree_block(trans, root, parent_start,
1136 root->root_key.objectid, &disk_key, level,
1137 search_start, empty_size);
1139 return PTR_ERR(cow);
1141 /* cow is set to blocking by btrfs_init_new_buffer */
1143 copy_extent_buffer(cow, buf, 0, 0, cow->len);
1144 btrfs_set_header_bytenr(cow, cow->start);
1145 btrfs_set_header_generation(cow, trans->transid);
1146 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
1147 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
1148 BTRFS_HEADER_FLAG_RELOC);
1149 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1150 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
1152 btrfs_set_header_owner(cow, root->root_key.objectid);
1154 write_extent_buffer(cow, root->fs_info->fsid, btrfs_header_fsid(),
1157 ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
1159 btrfs_abort_transaction(trans, root, ret);
1163 if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
1164 ret = btrfs_reloc_cow_block(trans, root, buf, cow);
1169 if (buf == root->node) {
1170 WARN_ON(parent && parent != buf);
1171 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
1172 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
1173 parent_start = buf->start;
1177 extent_buffer_get(cow);
1178 tree_mod_log_set_root_pointer(root, cow, 1);
1179 rcu_assign_pointer(root->node, cow);
1181 btrfs_free_tree_block(trans, root, buf, parent_start,
1183 free_extent_buffer(buf);
1184 add_root_to_dirty_list(root);
1186 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1187 parent_start = parent->start;
1191 WARN_ON(trans->transid != btrfs_header_generation(parent));
1192 tree_mod_log_insert_key(root->fs_info, parent, parent_slot,
1193 MOD_LOG_KEY_REPLACE, GFP_NOFS);
1194 btrfs_set_node_blockptr(parent, parent_slot,
1196 btrfs_set_node_ptr_generation(parent, parent_slot,
1198 btrfs_mark_buffer_dirty(parent);
1200 ret = tree_mod_log_free_eb(root->fs_info, buf);
1202 btrfs_abort_transaction(trans, root, ret);
1206 btrfs_free_tree_block(trans, root, buf, parent_start,
1210 btrfs_tree_unlock(buf);
1211 free_extent_buffer_stale(buf);
1212 btrfs_mark_buffer_dirty(cow);
1218 * returns the logical address of the oldest predecessor of the given root.
1219 * entries older than time_seq are ignored.
1221 static struct tree_mod_elem *
1222 __tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
1223 struct extent_buffer *eb_root, u64 time_seq)
1225 struct tree_mod_elem *tm;
1226 struct tree_mod_elem *found = NULL;
1227 u64 root_logical = eb_root->start;
1234 * the very last operation that's logged for a root is the replacement
1235 * operation (if it is replaced at all). this has the index of the *new*
1236 * root, making it the very first operation that's logged for this root.
1239 tm = tree_mod_log_search_oldest(fs_info, root_logical,
1244 * if there are no tree operations for the oldest root, we simply
1245 * return it. this should only happen if that (old) root is at
1252 * if there's an operation that's not a root replacement, we
1253 * found the oldest version of our root. normally, we'll find a
1254 * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
1256 if (tm->op != MOD_LOG_ROOT_REPLACE)
1260 root_logical = tm->old_root.logical;
1264 /* if there's no old root to return, return what we found instead */
1272 * tm is a pointer to the first operation to rewind within eb. then, all
1273 * previous operations will be rewound (until we reach something older than
1277 __tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
1278 u64 time_seq, struct tree_mod_elem *first_tm)
1281 struct rb_node *next;
1282 struct tree_mod_elem *tm = first_tm;
1283 unsigned long o_dst;
1284 unsigned long o_src;
1285 unsigned long p_size = sizeof(struct btrfs_key_ptr);
1287 n = btrfs_header_nritems(eb);
1288 tree_mod_log_read_lock(fs_info);
1289 while (tm && tm->seq >= time_seq) {
1291 * all the operations are recorded with the operator used for
1292 * the modification. as we're going backwards, we do the
1293 * opposite of each operation here.
1296 case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
1297 BUG_ON(tm->slot < n);
1299 case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
1300 case MOD_LOG_KEY_REMOVE:
1301 btrfs_set_node_key(eb, &tm->key, tm->slot);
1302 btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1303 btrfs_set_node_ptr_generation(eb, tm->slot,
1307 case MOD_LOG_KEY_REPLACE:
1308 BUG_ON(tm->slot >= n);
1309 btrfs_set_node_key(eb, &tm->key, tm->slot);
1310 btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1311 btrfs_set_node_ptr_generation(eb, tm->slot,
1314 case MOD_LOG_KEY_ADD:
1315 /* if a move operation is needed it's in the log */
1318 case MOD_LOG_MOVE_KEYS:
1319 o_dst = btrfs_node_key_ptr_offset(tm->slot);
1320 o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
1321 memmove_extent_buffer(eb, o_dst, o_src,
1322 tm->move.nr_items * p_size);
1324 case MOD_LOG_ROOT_REPLACE:
1326 * this operation is special. for roots, this must be
1327 * handled explicitly before rewinding.
1328 * for non-roots, this operation may exist if the node
1330 * was a root: root A -> child B; then A becomes empty and
1331 * B is promoted to the new root. in the mod log, we'll
1332 * have a root-replace operation for B, a tree block
1333 * that is not a root. we simply ignore that operation.
1336 next = rb_next(&tm->node);
1339 tm = container_of(next, struct tree_mod_elem, node);
1340 if (tm->index != first_tm->index)
1343 tree_mod_log_read_unlock(fs_info);
1344 btrfs_set_header_nritems(eb, n);
1348 * Called with eb read locked. If the buffer cannot be rewound, the same buffer
1349 * is returned. If rewind operations happen, a fresh buffer is returned. The
1350 * returned buffer is always read-locked. If the returned buffer is not the
1351 * input buffer, the lock on the input buffer is released and the input buffer
1352 * is freed (its refcount is decremented).
1354 static struct extent_buffer *
1355 tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
1356 struct extent_buffer *eb, u64 time_seq)
1358 struct extent_buffer *eb_rewin;
1359 struct tree_mod_elem *tm;
1364 if (btrfs_header_level(eb) == 0)
1367 tm = tree_mod_log_search(fs_info, eb->start, time_seq);
1371 btrfs_set_path_blocking(path);
1372 btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
1374 if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1375 BUG_ON(tm->slot != 0);
1376 eb_rewin = alloc_dummy_extent_buffer(eb->start,
1377 fs_info->tree_root->nodesize);
1379 btrfs_tree_read_unlock_blocking(eb);
1380 free_extent_buffer(eb);
1383 btrfs_set_header_bytenr(eb_rewin, eb->start);
1384 btrfs_set_header_backref_rev(eb_rewin,
1385 btrfs_header_backref_rev(eb));
1386 btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
1387 btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
1389 eb_rewin = btrfs_clone_extent_buffer(eb);
1391 btrfs_tree_read_unlock_blocking(eb);
1392 free_extent_buffer(eb);
1397 btrfs_clear_path_blocking(path, NULL, BTRFS_READ_LOCK);
1398 btrfs_tree_read_unlock_blocking(eb);
1399 free_extent_buffer(eb);
1401 extent_buffer_get(eb_rewin);
1402 btrfs_tree_read_lock(eb_rewin);
1403 __tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
1404 WARN_ON(btrfs_header_nritems(eb_rewin) >
1405 BTRFS_NODEPTRS_PER_BLOCK(fs_info->tree_root));
1411 * get_old_root() rewinds the state of @root's root node to the given @time_seq
1412 * value. If there are no changes, the current root->root_node is returned. If
1413 * anything changed in between, there's a fresh buffer allocated on which the
1414 * rewind operations are done. In any case, the returned buffer is read locked.
1415 * Returns NULL on error (with no locks held).
1417 static inline struct extent_buffer *
1418 get_old_root(struct btrfs_root *root, u64 time_seq)
1420 struct tree_mod_elem *tm;
1421 struct extent_buffer *eb = NULL;
1422 struct extent_buffer *eb_root;
1423 struct extent_buffer *old;
1424 struct tree_mod_root *old_root = NULL;
1425 u64 old_generation = 0;
1428 eb_root = btrfs_read_lock_root_node(root);
1429 tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
1433 if (tm->op == MOD_LOG_ROOT_REPLACE) {
1434 old_root = &tm->old_root;
1435 old_generation = tm->generation;
1436 logical = old_root->logical;
1438 logical = eb_root->start;
1441 tm = tree_mod_log_search(root->fs_info, logical, time_seq);
1442 if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1443 btrfs_tree_read_unlock(eb_root);
1444 free_extent_buffer(eb_root);
1445 old = read_tree_block(root, logical, 0);
1446 if (WARN_ON(!old || !extent_buffer_uptodate(old))) {
1447 free_extent_buffer(old);
1448 btrfs_warn(root->fs_info,
1449 "failed to read tree block %llu from get_old_root", logical);
1451 eb = btrfs_clone_extent_buffer(old);
1452 free_extent_buffer(old);
1454 } else if (old_root) {
1455 btrfs_tree_read_unlock(eb_root);
1456 free_extent_buffer(eb_root);
1457 eb = alloc_dummy_extent_buffer(logical, root->nodesize);
1459 btrfs_set_lock_blocking_rw(eb_root, BTRFS_READ_LOCK);
1460 eb = btrfs_clone_extent_buffer(eb_root);
1461 btrfs_tree_read_unlock_blocking(eb_root);
1462 free_extent_buffer(eb_root);
1467 extent_buffer_get(eb);
1468 btrfs_tree_read_lock(eb);
1470 btrfs_set_header_bytenr(eb, eb->start);
1471 btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
1472 btrfs_set_header_owner(eb, btrfs_header_owner(eb_root));
1473 btrfs_set_header_level(eb, old_root->level);
1474 btrfs_set_header_generation(eb, old_generation);
1477 __tree_mod_log_rewind(root->fs_info, eb, time_seq, tm);
1479 WARN_ON(btrfs_header_level(eb) != 0);
1480 WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(root));
1485 int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq)
1487 struct tree_mod_elem *tm;
1489 struct extent_buffer *eb_root = btrfs_root_node(root);
1491 tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
1492 if (tm && tm->op == MOD_LOG_ROOT_REPLACE) {
1493 level = tm->old_root.level;
1495 level = btrfs_header_level(eb_root);
1497 free_extent_buffer(eb_root);
1502 static inline int should_cow_block(struct btrfs_trans_handle *trans,
1503 struct btrfs_root *root,
1504 struct extent_buffer *buf)
1506 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
1507 if (unlikely(test_bit(BTRFS_ROOT_DUMMY_ROOT, &root->state)))
1510 /* ensure we can see the force_cow */
1514 * We do not need to cow a block if
1515 * 1) this block is not created or changed in this transaction;
1516 * 2) this block does not belong to the TREE_RELOC tree;
1517 * 3) the root is not forced COW.
1519 * What is forced COW:
1520 * when we create a snapshot while committing the transaction,
1521 * after we've finished copying the src root, we must COW the shared
1522 * block to ensure metadata consistency.
1524 if (btrfs_header_generation(buf) == trans->transid &&
1525 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
1526 !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
1527 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
1528 !test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
1534 * cows a single block, see __btrfs_cow_block for the real work.
1535 * This version of it has extra checks so that a block isn't cow'd more than
1536 * once per transaction, as long as it hasn't been written yet
1538 noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
1539 struct btrfs_root *root, struct extent_buffer *buf,
1540 struct extent_buffer *parent, int parent_slot,
1541 struct extent_buffer **cow_ret)
1546 if (trans->transaction != root->fs_info->running_transaction)
1547 WARN(1, KERN_CRIT "trans %llu running %llu\n",
1549 root->fs_info->running_transaction->transid);
1551 if (trans->transid != root->fs_info->generation)
1552 WARN(1, KERN_CRIT "trans %llu running %llu\n",
1553 trans->transid, root->fs_info->generation);
1555 if (!should_cow_block(trans, root, buf)) {
1560 search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);
1563 btrfs_set_lock_blocking(parent);
1564 btrfs_set_lock_blocking(buf);
1566 ret = __btrfs_cow_block(trans, root, buf, parent,
1567 parent_slot, cow_ret, search_start, 0);
1569 trace_btrfs_cow_block(root, buf, *cow_ret);
1575 * helper function for defrag to decide if two blocks pointed to by a
1576 * node are actually close by
1578 static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
1580 if (blocknr < other && other - (blocknr + blocksize) < 32768)
1582 if (blocknr > other && blocknr - (other + blocksize) < 32768)
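/*
 * Illustrative sketch, not part of the original file: two blocks count
 * as "close" when the gap between them, excluding the first block
 * itself, is under 32KiB. example_blocks_are_close() is a hypothetical
 * check with made-up numbers: a 16KiB block at 1MiB and another block
 * 40KiB later leave a 24KiB gap, so close_blocks() returns 1.
 */
static int example_blocks_are_close(void)
{
	u64 blocknr = 1024 * 1024;		/* first block at 1MiB */
	u64 other = blocknr + 40 * 1024;	/* second block 40KiB later */
	u32 blocksize = 16 * 1024;

	return close_blocks(blocknr, other, blocksize);	/* 24KiB gap -> 1 */
}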
1588 * compare two keys in a memcmp fashion
1590 static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
1592 struct btrfs_key k1;
1594 btrfs_disk_key_to_cpu(&k1, disk);
1596 return btrfs_comp_cpu_keys(&k1, k2);
1600 * same as comp_keys only with two btrfs_key's
1602 int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
1604 if (k1->objectid > k2->objectid)
1606 if (k1->objectid < k2->objectid)
1608 if (k1->type > k2->type)
1610 if (k1->type < k2->type)
1612 if (k1->offset > k2->offset)
1614 if (k1->offset < k2->offset)
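/*
 * Illustrative sketch, not part of the original file: keys sort by
 * objectid first, then type, then offset, matching a memcmp of the
 * big-endian on-disk key. example_key_ordering() is a hypothetical
 * comparison using made-up type values: with equal objectids the
 * smaller type wins, so the call returns -1.
 */
static int example_key_ordering(void)
{
	struct btrfs_key a = { .objectid = 256, .type = 1, .offset = 0 };
	struct btrfs_key b = { .objectid = 256, .type = 2, .offset = 0 };

	return btrfs_comp_cpu_keys(&a, &b);	/* a sorts before b: -1 */
}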
1620 * this is used by the defrag code to go through all the
1621 * leaves pointed to by a node and reallocate them so that
1622 * disk order is close to key order
1624 int btrfs_realloc_node(struct btrfs_trans_handle *trans,
1625 struct btrfs_root *root, struct extent_buffer *parent,
1626 int start_slot, u64 *last_ret,
1627 struct btrfs_key *progress)
1629 struct extent_buffer *cur;
1632 u64 search_start = *last_ret;
1642 int progress_passed = 0;
1643 struct btrfs_disk_key disk_key;
1645 parent_level = btrfs_header_level(parent);
1647 WARN_ON(trans->transaction != root->fs_info->running_transaction);
1648 WARN_ON(trans->transid != root->fs_info->generation);
1650 parent_nritems = btrfs_header_nritems(parent);
1651 blocksize = root->nodesize;
1652 end_slot = parent_nritems;
1654 if (parent_nritems == 1)
1657 btrfs_set_lock_blocking(parent);
1659 for (i = start_slot; i < end_slot; i++) {
1662 btrfs_node_key(parent, &disk_key, i);
1663 if (!progress_passed && comp_keys(&disk_key, progress) < 0)
1666 progress_passed = 1;
1667 blocknr = btrfs_node_blockptr(parent, i);
1668 gen = btrfs_node_ptr_generation(parent, i);
1669 if (last_block == 0)
1670 last_block = blocknr;
1673 other = btrfs_node_blockptr(parent, i - 1);
1674 close = close_blocks(blocknr, other, blocksize);
1676 if (!close && i < end_slot - 2) {
1677 other = btrfs_node_blockptr(parent, i + 1);
1678 close = close_blocks(blocknr, other, blocksize);
1681 last_block = blocknr;
1685 cur = btrfs_find_tree_block(root, blocknr);
1687 uptodate = btrfs_buffer_uptodate(cur, gen, 0);
1690 if (!cur || !uptodate) {
1692 cur = read_tree_block(root, blocknr, gen);
1693 if (!cur || !extent_buffer_uptodate(cur)) {
1694 free_extent_buffer(cur);
1697 } else if (!uptodate) {
1698 err = btrfs_read_buffer(cur, gen);
1700 free_extent_buffer(cur);
1705 if (search_start == 0)
1706 search_start = last_block;
1708 btrfs_tree_lock(cur);
1709 btrfs_set_lock_blocking(cur);
1710 err = __btrfs_cow_block(trans, root, cur, parent, i,
1713 (end_slot - i) * blocksize));
1715 btrfs_tree_unlock(cur);
1716 free_extent_buffer(cur);
1719 search_start = cur->start;
1720 last_block = cur->start;
1721 *last_ret = search_start;
1722 btrfs_tree_unlock(cur);
1723 free_extent_buffer(cur);
1729 * The leaf data grows from end-to-front in the node.
1730 * this returns the address of the start of the last item,
1731 * which is the stop of the leaf data stack
1733 static inline unsigned int leaf_data_end(struct btrfs_root *root,
1734 struct extent_buffer *leaf)
1736 u32 nr = btrfs_header_nritems(leaf);
1738 return BTRFS_LEAF_DATA_SIZE(root);
1739 return btrfs_item_offset_nr(leaf, nr - 1);
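/*
 * Illustrative sketch, not part of the original file: item data is
 * packed against the end of the leaf's data area and grows toward the
 * front, so every insertion moves the data end down by the item size.
 * example_next_data_end() is a hypothetical helper: with a data end of
 * 16000 bytes, adding a 100-byte item moves the data end to 15900,
 * which is what leaf_data_end() would then report.
 */
static u32 example_next_data_end(u32 data_end, u32 item_size)
{
	/* the new item's data sits immediately below the current end */
	return data_end - item_size;
}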
1744 * search for key in the extent_buffer. The items start at offset p,
1745 * and they are item_size apart. There are 'max' items in p.
1747 * the slot in the array is returned via slot, and it points to
1748 * the place where you would insert key if it is not found in
1751 * slot may point to max if the key is bigger than all of the keys
1753 static noinline int generic_bin_search(struct extent_buffer *eb,
1755 int item_size, struct btrfs_key *key,
1762 struct btrfs_disk_key *tmp = NULL;
1763 struct btrfs_disk_key unaligned;
1764 unsigned long offset;
1766 unsigned long map_start = 0;
1767 unsigned long map_len = 0;
1770 while (low < high) {
1771 mid = (low + high) / 2;
1772 offset = p + mid * item_size;
1774 if (!kaddr || offset < map_start ||
1775 (offset + sizeof(struct btrfs_disk_key)) >
1776 map_start + map_len) {
1778 err = map_private_extent_buffer(eb, offset,
1779 sizeof(struct btrfs_disk_key),
1780 &kaddr, &map_start, &map_len);
1783 tmp = (struct btrfs_disk_key *)(kaddr + offset -
1786 read_extent_buffer(eb, &unaligned,
1787 offset, sizeof(unaligned));
1792 tmp = (struct btrfs_disk_key *)(kaddr + offset -
1795 ret = comp_keys(tmp, key);
1811 * simple bin_search frontend that does the right thing for
1814 static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
1815 int level, int *slot)
1818 return generic_bin_search(eb,
1819 offsetof(struct btrfs_leaf, items),
1820 sizeof(struct btrfs_item),
1821 key, btrfs_header_nritems(eb),
1824 return generic_bin_search(eb,
1825 offsetof(struct btrfs_node, ptrs),
1826 sizeof(struct btrfs_key_ptr),
1827 key, btrfs_header_nritems(eb),
1831 int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
1832 int level, int *slot)
1834 return bin_search(eb, key, level, slot);
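/*
 * Illustrative sketch, not part of the original file: bin_search()
 * returns 0 with *slot set to the matching position, or 1 with *slot
 * set to where the key would be inserted. example_slot_for_descent()
 * is a hypothetical caller mirroring what btrfs_search_slot() does for
 * interior nodes: on a miss it steps back one slot so the descent
 * follows the last key that is smaller than the search key.
 */
static int example_slot_for_descent(struct extent_buffer *eb,
				    struct btrfs_key *key, int level)
{
	int slot;
	int ret = btrfs_bin_search(eb, key, level, &slot);

	if (ret && slot > 0)
		slot--;
	return slot;
}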
1837 static void root_add_used(struct btrfs_root *root, u32 size)
1839 spin_lock(&root->accounting_lock);
1840 btrfs_set_root_used(&root->root_item,
1841 btrfs_root_used(&root->root_item) + size);
1842 spin_unlock(&root->accounting_lock);
1845 static void root_sub_used(struct btrfs_root *root, u32 size)
1847 spin_lock(&root->accounting_lock);
1848 btrfs_set_root_used(&root->root_item,
1849 btrfs_root_used(&root->root_item) - size);
1850 spin_unlock(&root->accounting_lock);
1853 /* given a node and slot number, this reads the block it points to. The
1854 * extent buffer is returned with a reference taken (but unlocked).
1855 * NULL is returned on error.
1857 static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
1858 struct extent_buffer *parent, int slot)
1860 int level = btrfs_header_level(parent);
1861 struct extent_buffer *eb;
1865 if (slot >= btrfs_header_nritems(parent))
1870 eb = read_tree_block(root, btrfs_node_blockptr(parent, slot),
1871 btrfs_node_ptr_generation(parent, slot));
1872 if (eb && !extent_buffer_uptodate(eb)) {
1873 free_extent_buffer(eb);
1881 * node level balancing, used to make sure nodes are in proper order for
1882 * item deletion. We balance from the top down, so we have to make sure
1883 * that a deletion won't leave a node completely empty later on.
1885 static noinline int balance_level(struct btrfs_trans_handle *trans,
1886 struct btrfs_root *root,
1887 struct btrfs_path *path, int level)
1889 struct extent_buffer *right = NULL;
1890 struct extent_buffer *mid;
1891 struct extent_buffer *left = NULL;
1892 struct extent_buffer *parent = NULL;
1896 int orig_slot = path->slots[level];
1902 mid = path->nodes[level];
1904 WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
1905 path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
1906 WARN_ON(btrfs_header_generation(mid) != trans->transid);
1908 orig_ptr = btrfs_node_blockptr(mid, orig_slot);
1910 if (level < BTRFS_MAX_LEVEL - 1) {
1911 parent = path->nodes[level + 1];
1912 pslot = path->slots[level + 1];
1916 * deal with the case where there is only one pointer in the root
1917 * by promoting the node below to a root
1920 struct extent_buffer *child;
1922 if (btrfs_header_nritems(mid) != 1)
1925 /* promote the child to a root */
1926 child = read_node_slot(root, mid, 0);
1929 btrfs_std_error(root->fs_info, ret);
1933 btrfs_tree_lock(child);
1934 btrfs_set_lock_blocking(child);
1935 ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
1937 btrfs_tree_unlock(child);
1938 free_extent_buffer(child);
1942 tree_mod_log_set_root_pointer(root, child, 1);
1943 rcu_assign_pointer(root->node, child);
1945 add_root_to_dirty_list(root);
1946 btrfs_tree_unlock(child);
1948 path->locks[level] = 0;
1949 path->nodes[level] = NULL;
1950 clean_tree_block(trans, root, mid);
1951 btrfs_tree_unlock(mid);
1952 /* once for the path */
1953 free_extent_buffer(mid);
1955 root_sub_used(root, mid->len);
1956 btrfs_free_tree_block(trans, root, mid, 0, 1);
1957 /* once for the root ptr */
1958 free_extent_buffer_stale(mid);
1961 if (btrfs_header_nritems(mid) >
1962 BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
1965 left = read_node_slot(root, parent, pslot - 1);
1967 btrfs_tree_lock(left);
1968 btrfs_set_lock_blocking(left);
1969 wret = btrfs_cow_block(trans, root, left,
1970 parent, pslot - 1, &left);
1976 right = read_node_slot(root, parent, pslot + 1);
1978 btrfs_tree_lock(right);
1979 btrfs_set_lock_blocking(right);
1980 wret = btrfs_cow_block(trans, root, right,
1981 parent, pslot + 1, &right);
1988 /* first, try to make some room in the middle buffer */
1990 orig_slot += btrfs_header_nritems(left);
1991 wret = push_node_left(trans, root, left, mid, 1);
1997 * then try to empty the right most buffer into the middle
2000 wret = push_node_left(trans, root, mid, right, 1);
2001 if (wret < 0 && wret != -ENOSPC)
2003 if (btrfs_header_nritems(right) == 0) {
2004 clean_tree_block(trans, root, right);
2005 btrfs_tree_unlock(right);
2006 del_ptr(root, path, level + 1, pslot + 1);
2007 root_sub_used(root, right->len);
2008 btrfs_free_tree_block(trans, root, right, 0, 1);
2009 free_extent_buffer_stale(right);
2012 struct btrfs_disk_key right_key;
2013 btrfs_node_key(right, &right_key, 0);
2014 tree_mod_log_set_node_key(root->fs_info, parent,
2016 btrfs_set_node_key(parent, &right_key, pslot + 1);
2017 btrfs_mark_buffer_dirty(parent);
2020 if (btrfs_header_nritems(mid) == 1) {
2022 * we're not allowed to leave a node with one item in the
2023 * tree during a delete. A deletion from lower in the tree
2024 * could try to delete the only pointer in this node.
2025 * So, pull some keys from the left.
2026 * There has to be a left pointer at this point because
2027 * otherwise we would have pulled some pointers from the
2032 btrfs_std_error(root->fs_info, ret);
2035 wret = balance_node_right(trans, root, mid, left);
2041 wret = push_node_left(trans, root, left, mid, 1);
2047 if (btrfs_header_nritems(mid) == 0) {
2048 clean_tree_block(trans, root, mid);
2049 btrfs_tree_unlock(mid);
2050 del_ptr(root, path, level + 1, pslot);
2051 root_sub_used(root, mid->len);
2052 btrfs_free_tree_block(trans, root, mid, 0, 1);
2053 free_extent_buffer_stale(mid);
2056 /* update the parent key to reflect our changes */
2057 struct btrfs_disk_key mid_key;
2058 btrfs_node_key(mid, &mid_key, 0);
2059 tree_mod_log_set_node_key(root->fs_info, parent,
2061 btrfs_set_node_key(parent, &mid_key, pslot);
2062 btrfs_mark_buffer_dirty(parent);
2065 /* update the path */
2067 if (btrfs_header_nritems(left) > orig_slot) {
2068 extent_buffer_get(left);
2069 /* left was locked after cow */
2070 path->nodes[level] = left;
2071 path->slots[level + 1] -= 1;
2072 path->slots[level] = orig_slot;
2074 btrfs_tree_unlock(mid);
2075 free_extent_buffer(mid);
2078 orig_slot -= btrfs_header_nritems(left);
2079 path->slots[level] = orig_slot;
2082 /* double check we haven't messed things up */
2084 btrfs_node_blockptr(path->nodes[level], path->slots[level]))
2088 btrfs_tree_unlock(right);
2089 free_extent_buffer(right);
2092 if (path->nodes[level] != left)
2093 btrfs_tree_unlock(left);
2094 free_extent_buffer(left);
2099 /* Node balancing for insertion. Here we only split or push nodes around
2100 * when they are completely full. This is also done top down, so we
2101 * have to be pessimistic.
2103 static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
2104 struct btrfs_root *root,
2105 struct btrfs_path *path, int level)
2107 struct extent_buffer *right = NULL;
2108 struct extent_buffer *mid;
2109 struct extent_buffer *left = NULL;
2110 struct extent_buffer *parent = NULL;
2114 int orig_slot = path->slots[level];
2119 mid = path->nodes[level];
2120 WARN_ON(btrfs_header_generation(mid) != trans->transid);
2122 if (level < BTRFS_MAX_LEVEL - 1) {
2123 parent = path->nodes[level + 1];
2124 pslot = path->slots[level + 1];
2130 left = read_node_slot(root, parent, pslot - 1);
2132 /* first, try to make some room in the middle buffer */
2136 btrfs_tree_lock(left);
2137 btrfs_set_lock_blocking(left);
2139 left_nr = btrfs_header_nritems(left);
2140 if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
2143 ret = btrfs_cow_block(trans, root, left, parent,
2148 wret = push_node_left(trans, root,
2155 struct btrfs_disk_key disk_key;
2156 orig_slot += left_nr;
2157 btrfs_node_key(mid, &disk_key, 0);
2158 tree_mod_log_set_node_key(root->fs_info, parent,
2160 btrfs_set_node_key(parent, &disk_key, pslot);
2161 btrfs_mark_buffer_dirty(parent);
2162 if (btrfs_header_nritems(left) > orig_slot) {
2163 path->nodes[level] = left;
2164 path->slots[level + 1] -= 1;
2165 path->slots[level] = orig_slot;
2166 btrfs_tree_unlock(mid);
2167 free_extent_buffer(mid);
2170 btrfs_header_nritems(left);
2171 path->slots[level] = orig_slot;
2172 btrfs_tree_unlock(left);
2173 free_extent_buffer(left);
2177 btrfs_tree_unlock(left);
2178 free_extent_buffer(left);
2180 right = read_node_slot(root, parent, pslot + 1);
2183 * then try to empty the right most buffer into the middle
2188 btrfs_tree_lock(right);
2189 btrfs_set_lock_blocking(right);
2191 right_nr = btrfs_header_nritems(right);
2192 if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
2195 ret = btrfs_cow_block(trans, root, right,
2201 wret = balance_node_right(trans, root,
2208 struct btrfs_disk_key disk_key;
2210 btrfs_node_key(right, &disk_key, 0);
2211 tree_mod_log_set_node_key(root->fs_info, parent,
2213 btrfs_set_node_key(parent, &disk_key, pslot + 1);
2214 btrfs_mark_buffer_dirty(parent);
2216 if (btrfs_header_nritems(mid) <= orig_slot) {
2217 path->nodes[level] = right;
2218 path->slots[level + 1] += 1;
2219 path->slots[level] = orig_slot -
2220 btrfs_header_nritems(mid);
2221 btrfs_tree_unlock(mid);
2222 free_extent_buffer(mid);
2224 btrfs_tree_unlock(right);
2225 free_extent_buffer(right);
2229 btrfs_tree_unlock(right);
2230 free_extent_buffer(right);
2236 * readahead one full node of leaves, finding things that are close
2237 * to the block in 'slot', and triggering ra on them.
2239 static void reada_for_search(struct btrfs_root *root,
2240 struct btrfs_path *path,
2241 int level, int slot, u64 objectid)
2243 struct extent_buffer *node;
2244 struct btrfs_disk_key disk_key;
2250 int direction = path->reada;
2251 struct extent_buffer *eb;
2259 if (!path->nodes[level])
2262 node = path->nodes[level];
2264 search = btrfs_node_blockptr(node, slot);
2265 blocksize = root->nodesize;
2266 eb = btrfs_find_tree_block(root, search);
2268 free_extent_buffer(eb);
2274 nritems = btrfs_header_nritems(node);
2278 if (direction < 0) {
2282 } else if (direction > 0) {
2287 if (path->reada < 0 && objectid) {
2288 btrfs_node_key(node, &disk_key, nr);
2289 if (btrfs_disk_key_objectid(&disk_key) != objectid)
2292 search = btrfs_node_blockptr(node, nr);
2293 if ((search <= target && target - search <= 65536) ||
2294 (search > target && search - target <= 65536)) {
2295 gen = btrfs_node_ptr_generation(node, nr);
2296 readahead_tree_block(root, search, blocksize);
2300 if ((nread > 65536 || nscan > 32))
2305 static noinline void reada_for_balance(struct btrfs_root *root,
2306 struct btrfs_path *path, int level)
2310 struct extent_buffer *parent;
2311 struct extent_buffer *eb;
2317 parent = path->nodes[level + 1];
2321 nritems = btrfs_header_nritems(parent);
2322 slot = path->slots[level + 1];
2323 blocksize = root->nodesize;
2326 block1 = btrfs_node_blockptr(parent, slot - 1);
2327 gen = btrfs_node_ptr_generation(parent, slot - 1);
2328 eb = btrfs_find_tree_block(root, block1);
2330 * if we get -eagain from btrfs_buffer_uptodate, we
2331 * don't want to return eagain here. That will loop
2334 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2336 free_extent_buffer(eb);
2338 if (slot + 1 < nritems) {
2339 block2 = btrfs_node_blockptr(parent, slot + 1);
2340 gen = btrfs_node_ptr_generation(parent, slot + 1);
2341 eb = btrfs_find_tree_block(root, block2);
2342 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2344 free_extent_buffer(eb);
2348 readahead_tree_block(root, block1, blocksize);
2350 readahead_tree_block(root, block2, blocksize);
2355 * when we walk down the tree, it is usually safe to unlock the higher layers
2356 * in the tree. The exceptions are when our path goes through slot 0, because
2357 * operations on the tree might require changing key pointers higher up in the
2360 * callers might also have set path->keep_locks, which tells this code to keep
2361 * the lock if the path points to the last slot in the block. This is part of
2362 * walking through the tree, and selecting the next slot in the higher block.
2364 * lowest_unlock sets the lowest level in the tree we're allowed to unlock. so
2365 * if lowest_unlock is 1, level 0 won't be unlocked
2367 static noinline void unlock_up(struct btrfs_path *path, int level,
2368 int lowest_unlock, int min_write_lock_level,
2369 int *write_lock_level)
2372 int skip_level = level;
2374 struct extent_buffer *t;
2376 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2377 if (!path->nodes[i])
2379 if (!path->locks[i])
2381 if (!no_skips && path->slots[i] == 0) {
2385 if (!no_skips && path->keep_locks) {
2388 nritems = btrfs_header_nritems(t);
2389 if (nritems < 1 || path->slots[i] >= nritems - 1) {
2394 if (skip_level < i && i >= lowest_unlock)
2398 if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
2399 btrfs_tree_unlock_rw(t, path->locks[i]);
2401 if (write_lock_level &&
2402 i > min_write_lock_level &&
2403 i <= *write_lock_level) {
2404 *write_lock_level = i - 1;
2411 * This releases any locks held in the path starting at level and
2412 * going all the way up to the root.
2414 * btrfs_search_slot will keep the lock held on higher nodes in a few
2415 * corner cases, such as COW of the block at slot zero in the node. This
2416 * ignores those rules, and it should only be called when there are no
2417 * more updates to be done higher up in the tree.
2419 noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
2423 if (path->keep_locks)
2426 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2427 if (!path->nodes[i])
2429 if (!path->locks[i])
2431 btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
2437 * helper function for btrfs_search_slot. The goal is to find a block
2438 * in cache without setting the path to blocking. If we find the block
2439 * we return zero and the path is unchanged.
2441 * If we can't find the block, we set the path blocking and do some
2442 * reada. -EAGAIN is returned and the search must be repeated.
2445 read_block_for_search(struct btrfs_trans_handle *trans,
2446 struct btrfs_root *root, struct btrfs_path *p,
2447 struct extent_buffer **eb_ret, int level, int slot,
2448 struct btrfs_key *key, u64 time_seq)
2452 struct extent_buffer *b = *eb_ret;
2453 struct extent_buffer *tmp;
2456 blocknr = btrfs_node_blockptr(b, slot);
2457 gen = btrfs_node_ptr_generation(b, slot);
2459 tmp = btrfs_find_tree_block(root, blocknr);
2461 /* first we do an atomic uptodate check */
2462 if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
2467 /* the pages were up to date, but we failed
2468 * the generation number check. Do a full
2469 * read for the generation number that is correct.
2470 * We must do this without dropping locks so
2471 * we can trust our generation number
2473 btrfs_set_path_blocking(p);
2475 /* now we're allowed to do a blocking uptodate check */
2476 ret = btrfs_read_buffer(tmp, gen);
2481 free_extent_buffer(tmp);
2482 btrfs_release_path(p);
2487 * reduce lock contention at high levels
2488 * of the btree by dropping locks before
2489 * we read. Don't release the lock on the current
2490 * level because we need to walk this node to figure
2491 * out which blocks to read.
2493 btrfs_unlock_up_safe(p, level + 1);
2494 btrfs_set_path_blocking(p);
2496 free_extent_buffer(tmp);
2498 reada_for_search(root, p, level, slot, key->objectid);
2500 btrfs_release_path(p);
2503 tmp = read_tree_block(root, blocknr, 0);
2506 * If the read above didn't mark this buffer up to date,
2507 * it will never end up being up to date. Set ret to EIO now
2508 * and give up so that our caller doesn't loop forever
2511 if (!btrfs_buffer_uptodate(tmp, 0, 0))
2513 free_extent_buffer(tmp);
2519 * helper function for btrfs_search_slot. This does all of the checks
2520 * for node-level blocks and does any balancing required based on the ins_len.
2523 * If no extra work was required, zero is returned. If we had to
2524 * drop the path, -EAGAIN is returned and btrfs_search_slot must retry the search.
2528 setup_nodes_for_search(struct btrfs_trans_handle *trans,
2529 struct btrfs_root *root, struct btrfs_path *p,
2530 struct extent_buffer *b, int level, int ins_len,
2531 int *write_lock_level)
2534 if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
2535 BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
2538 if (*write_lock_level < level + 1) {
2539 *write_lock_level = level + 1;
2540 btrfs_release_path(p);
2544 btrfs_set_path_blocking(p);
2545 reada_for_balance(root, p, level);
2546 sret = split_node(trans, root, p, level);
2547 btrfs_clear_path_blocking(p, NULL, 0);
2554 b = p->nodes[level];
2555 } else if (ins_len < 0 && btrfs_header_nritems(b) <
2556 BTRFS_NODEPTRS_PER_BLOCK(root) / 2) {
2559 if (*write_lock_level < level + 1) {
2560 *write_lock_level = level + 1;
2561 btrfs_release_path(p);
2565 btrfs_set_path_blocking(p);
2566 reada_for_balance(root, p, level);
2567 sret = balance_level(trans, root, p, level);
2568 btrfs_clear_path_blocking(p, NULL, 0);
2574 b = p->nodes[level];
2576 btrfs_release_path(p);
2579 BUG_ON(btrfs_header_nritems(b) == 1);
2589 static void key_search_validate(struct extent_buffer *b,
2590 struct btrfs_key *key,
2593 #ifdef CONFIG_BTRFS_ASSERT
2594 struct btrfs_disk_key disk_key;
2596 btrfs_cpu_key_to_disk(&disk_key, key);
2599 ASSERT(!memcmp_extent_buffer(b, &disk_key,
2600 offsetof(struct btrfs_leaf, items[0].key),
2603 ASSERT(!memcmp_extent_buffer(b, &disk_key,
2604 offsetof(struct btrfs_node, ptrs[0].key),
2609 static int key_search(struct extent_buffer *b, struct btrfs_key *key,
2610 int level, int *prev_cmp, int *slot)
2612 if (*prev_cmp != 0) {
2613 *prev_cmp = bin_search(b, key, level, slot);
2617 key_search_validate(b, key, level);
2623 int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *found_path,
2624 u64 iobjectid, u64 ioff, u8 key_type,
2625 struct btrfs_key *found_key)
2628 struct btrfs_key key;
2629 struct extent_buffer *eb;
2630 struct btrfs_path *path;
2632 key.type = key_type;
2633 key.objectid = iobjectid;
2636 if (found_path == NULL) {
2637 path = btrfs_alloc_path();
2643 ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
2644 if ((ret < 0) || (found_key == NULL)) {
2645 if (path != found_path)
2646 btrfs_free_path(path);
2650 eb = path->nodes[0];
2651 if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
2652 ret = btrfs_next_leaf(fs_root, path);
2655 eb = path->nodes[0];
2658 btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
2659 if (found_key->type != key.type ||
2660 found_key->objectid != key.objectid)
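/*
 * Illustrative example (editor's sketch, not part of the original source):
 * a typical lookup through btrfs_find_item.  The names ino and parent_ino
 * are hypothetical caller-provided values.
 *
 *	struct btrfs_key found;
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	int ret;
 *
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_find_item(fs_root, path, ino, parent_ino,
 *			      BTRFS_INODE_REF_KEY, &found);
 *	if (ret == 0)
 *		... the item sits at path->nodes[0], path->slots[0] ...
 *	btrfs_free_path(path);
 */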
2667 * look for key in the tree. path is filled in with nodes along the way
2668 * if key is found, we return zero and you can find the item in the leaf
2669 * level of the path (level 0)
2671 * If the key isn't found, the path points to the slot where it should
2672 * be inserted, and 1 is returned. If there are other errors during the
2673 * search, a negative error number is returned.
2675 * if ins_len > 0, nodes and leaves will be split as we walk down the
2676 * tree. if ins_len < 0, nodes will be merged as we walk down the tree (if possible).
2679 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
2680 *root, struct btrfs_key *key, struct btrfs_path *p, int
2683 struct extent_buffer *b;
2688 int lowest_unlock = 1;
2690 /* everything at write_lock_level or lower must be write locked */
2691 int write_lock_level = 0;
2692 u8 lowest_level = 0;
2693 int min_write_lock_level;
2696 lowest_level = p->lowest_level;
2697 WARN_ON(lowest_level && ins_len > 0);
2698 WARN_ON(p->nodes[0] != NULL);
2699 BUG_ON(!cow && ins_len);
2704 /* when we are removing items, we might have to go up to level
2705 * two as we update tree pointers. Make sure we keep write locks
2706 * for those levels as well
2708 write_lock_level = 2;
2709 } else if (ins_len > 0) {
2711 * for inserting items, make sure we have a write lock on
2712 * level 1 so we can update keys
2714 write_lock_level = 1;
2718 write_lock_level = -1;
2720 if (cow && (p->keep_locks || p->lowest_level))
2721 write_lock_level = BTRFS_MAX_LEVEL;
2723 min_write_lock_level = write_lock_level;
2728 * we try very hard to do read locks on the root
2730 root_lock = BTRFS_READ_LOCK;
2732 if (p->search_commit_root) {
2734 * the commit roots are read only
2735 * so we always do read locks
2737 if (p->need_commit_sem)
2738 down_read(&root->fs_info->commit_root_sem);
2739 b = root->commit_root;
2740 extent_buffer_get(b);
2741 level = btrfs_header_level(b);
2742 if (p->need_commit_sem)
2743 up_read(&root->fs_info->commit_root_sem);
2744 if (!p->skip_locking)
2745 btrfs_tree_read_lock(b);
2747 if (p->skip_locking) {
2748 b = btrfs_root_node(root);
2749 level = btrfs_header_level(b);
2751 /* we don't know the level of the root node
2752 * until we actually have it read locked
2754 b = btrfs_read_lock_root_node(root);
2755 level = btrfs_header_level(b);
2756 if (level <= write_lock_level) {
2757 /* whoops, must trade for write lock */
2758 btrfs_tree_read_unlock(b);
2759 free_extent_buffer(b);
2760 b = btrfs_lock_root_node(root);
2761 root_lock = BTRFS_WRITE_LOCK;
2763 /* the level might have changed, check again */
2764 level = btrfs_header_level(b);
2768 p->nodes[level] = b;
2769 if (!p->skip_locking)
2770 p->locks[level] = root_lock;
2773 level = btrfs_header_level(b);
2776 * setup the path here so we can release it under lock
2777 * contention with the cow code
2781 * if we don't really need to cow this block
2782 * then we don't want to set the path blocking,
2783 * so we test it here
2785 if (!should_cow_block(trans, root, b))
2789 * must have write locks on this node and the
2792 if (level > write_lock_level ||
2793 (level + 1 > write_lock_level &&
2794 level + 1 < BTRFS_MAX_LEVEL &&
2795 p->nodes[level + 1])) {
2796 write_lock_level = level + 1;
2797 btrfs_release_path(p);
2801 btrfs_set_path_blocking(p);
2802 err = btrfs_cow_block(trans, root, b,
2803 p->nodes[level + 1],
2804 p->slots[level + 1], &b);
2811 p->nodes[level] = b;
2812 btrfs_clear_path_blocking(p, NULL, 0);
2815 * we have a lock on b and as long as we aren't changing
2816 * the tree, there is no way for the items in b to change.
2817 * It is safe to drop the lock on our parent before we
2818 * go through the expensive btree search on b.
2820 * If we're inserting or deleting (ins_len != 0), then we might
2821 * be changing slot zero, which may require changing the parent.
2822 * So, we can't drop the lock until after we know which slot
2823 * we're operating on.
2825 if (!ins_len && !p->keep_locks) {
2828 if (u < BTRFS_MAX_LEVEL && p->locks[u]) {
2829 btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]);
2834 ret = key_search(b, key, level, &prev_cmp, &slot);
2838 if (ret && slot > 0) {
2842 p->slots[level] = slot;
2843 err = setup_nodes_for_search(trans, root, p, b, level,
2844 ins_len, &write_lock_level);
2851 b = p->nodes[level];
2852 slot = p->slots[level];
2855 * slot 0 is special, if we change the key
2856 * we have to update the parent pointer
2857 * which means we must have a write lock
2860 if (slot == 0 && ins_len &&
2861 write_lock_level < level + 1) {
2862 write_lock_level = level + 1;
2863 btrfs_release_path(p);
2867 unlock_up(p, level, lowest_unlock,
2868 min_write_lock_level, &write_lock_level);
2870 if (level == lowest_level) {
2876 err = read_block_for_search(trans, root, p,
2877 &b, level, slot, key, 0);
2885 if (!p->skip_locking) {
2886 level = btrfs_header_level(b);
2887 if (level <= write_lock_level) {
2888 err = btrfs_try_tree_write_lock(b);
2890 btrfs_set_path_blocking(p);
2892 btrfs_clear_path_blocking(p, b,
2895 p->locks[level] = BTRFS_WRITE_LOCK;
2897 err = btrfs_try_tree_read_lock(b);
2899 btrfs_set_path_blocking(p);
2900 btrfs_tree_read_lock(b);
2901 btrfs_clear_path_blocking(p, b,
2904 p->locks[level] = BTRFS_READ_LOCK;
2906 p->nodes[level] = b;
2909 p->slots[level] = slot;
2911 btrfs_leaf_free_space(root, b) < ins_len) {
2912 if (write_lock_level < 1) {
2913 write_lock_level = 1;
2914 btrfs_release_path(p);
2918 btrfs_set_path_blocking(p);
2919 err = split_leaf(trans, root, key,
2920 p, ins_len, ret == 0);
2921 btrfs_clear_path_blocking(p, NULL, 0);
2929 if (!p->search_for_split)
2930 unlock_up(p, level, lowest_unlock,
2931 min_write_lock_level, &write_lock_level);
2938 * we don't really know what they plan on doing with the path
2939 * from here on, so for now just mark it as blocking
2941 if (!p->leave_spinning)
2942 btrfs_set_path_blocking(p);
2944 btrfs_release_path(p);
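/*
 * Illustrative example (editor's sketch, not part of the original source):
 * a plain read-only lookup with btrfs_search_slot.  The objectid value
 * 'ino' is hypothetical.
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	struct btrfs_key key;
 *	int ret;
 *
 *	if (!path)
 *		return -ENOMEM;
 *	key.objectid = ino;
 *	key.type = BTRFS_INODE_ITEM_KEY;
 *	key.offset = 0;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret == 0)
 *		... exact match at path->nodes[0], path->slots[0] ...
 *	else if (ret == 1)
 *		... not found, path points at the insertion position ...
 *	btrfs_free_path(path);
 *
 * Passing a transaction handle together with ins_len > 0 and cow == 1
 * instead prepares the path (splits, COW) for an insert of ins_len bytes.
 */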
2949 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
2950 * current state of the tree together with the operations recorded in the tree
2951 * modification log to search for the key in a previous version of this tree, as
2952 * denoted by the time_seq parameter.
2954 * Naturally, there is no support for insert, delete or cow operations.
2956 * The resulting path and return value will be set up as if we called
2957 * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
2959 int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
2960 struct btrfs_path *p, u64 time_seq)
2962 struct extent_buffer *b;
2967 int lowest_unlock = 1;
2968 u8 lowest_level = 0;
2971 lowest_level = p->lowest_level;
2972 WARN_ON(p->nodes[0] != NULL);
2974 if (p->search_commit_root) {
2976 return btrfs_search_slot(NULL, root, key, p, 0, 0);
2980 b = get_old_root(root, time_seq);
2981 level = btrfs_header_level(b);
2982 p->locks[level] = BTRFS_READ_LOCK;
2985 level = btrfs_header_level(b);
2986 p->nodes[level] = b;
2987 btrfs_clear_path_blocking(p, NULL, 0);
2990 * we have a lock on b and as long as we aren't changing
2991 * the tree, there is no way for the items in b to change.
2992 * It is safe to drop the lock on our parent before we
2993 * go through the expensive btree search on b.
2995 btrfs_unlock_up_safe(p, level + 1);
2998 * Since we can unwind eb's we want to do a real search every
3002 ret = key_search(b, key, level, &prev_cmp, &slot);
3006 if (ret && slot > 0) {
3010 p->slots[level] = slot;
3011 unlock_up(p, level, lowest_unlock, 0, NULL);
3013 if (level == lowest_level) {
3019 err = read_block_for_search(NULL, root, p, &b, level,
3020 slot, key, time_seq);
3028 level = btrfs_header_level(b);
3029 err = btrfs_try_tree_read_lock(b);
3031 btrfs_set_path_blocking(p);
3032 btrfs_tree_read_lock(b);
3033 btrfs_clear_path_blocking(p, b,
3036 b = tree_mod_log_rewind(root->fs_info, p, b, time_seq);
3041 p->locks[level] = BTRFS_READ_LOCK;
3042 p->nodes[level] = b;
3044 p->slots[level] = slot;
3045 unlock_up(p, level, lowest_unlock, 0, NULL);
3051 if (!p->leave_spinning)
3052 btrfs_set_path_blocking(p);
3054 btrfs_release_path(p);
3060 * helper to use instead of search slot if no exact match is needed but
3061 * instead the next or previous item should be returned.
3062 * When find_higher is true, the next higher item is returned, the next lower otherwise.
3064 * When return_any and find_higher are both true, and no higher item is found,
3065 * return the next lower instead.
3066 * When return_any is true and find_higher is false, and no lower item is found,
3067 * return the next higher instead.
3068 * It returns 0 if any item is found, 1 if none is found (tree empty), and < 0 on error.
3071 int btrfs_search_slot_for_read(struct btrfs_root *root,
3072 struct btrfs_key *key, struct btrfs_path *p,
3073 int find_higher, int return_any)
3076 struct extent_buffer *leaf;
3079 ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
3083 * a return value of 1 means the path is at the position where the
3084 * item should be inserted. Normally this is the next bigger item,
3085 * but in case the previous item is the last in a leaf, path points
3086 * to the first free slot in the previous leaf, i.e. at an invalid slot.
3092 if (p->slots[0] >= btrfs_header_nritems(leaf)) {
3093 ret = btrfs_next_leaf(root, p);
3099 * no higher item found, return the next lower instead.
3104 btrfs_release_path(p);
3108 if (p->slots[0] == 0) {
3109 ret = btrfs_prev_leaf(root, p);
3114 if (p->slots[0] == btrfs_header_nritems(leaf))
3121 * no lower item found, return the next higher instead.
3126 btrfs_release_path(p);
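/*
 * Illustrative example (editor's sketch, not part of the original source):
 * finding the first item at or after a given key.  'objectid' and 'type'
 * stand in for caller-supplied values.
 *
 *	struct btrfs_key key;
 *	int ret;
 *
 *	key.objectid = objectid;
 *	key.type = type;
 *	key.offset = 0;
 *	ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
 *	if (ret == 0)
 *		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
 *
 * Here find_higher == 1 asks for the next higher item when there is no
 * exact match; setting return_any == 1 as well would fall back to the
 * next lower item if nothing higher exists.
 */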
3136 * adjust the pointers going up the tree, starting at level
3137 * making sure the right key of each node points to 'key'.
3138 * This is used after shifting pointers to the left, so it stops
3139 * fixing up pointers when a given leaf/node is not in slot 0 of the higher levels.
3143 static void fixup_low_keys(struct btrfs_root *root, struct btrfs_path *path,
3144 struct btrfs_disk_key *key, int level)
3147 struct extent_buffer *t;
3149 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
3150 int tslot = path->slots[i];
3151 if (!path->nodes[i])
3154 tree_mod_log_set_node_key(root->fs_info, t, tslot, 1);
3155 btrfs_set_node_key(t, key, tslot);
3156 btrfs_mark_buffer_dirty(path->nodes[i]);
3165 * This function isn't completely safe. It's the caller's responsibility
3166 * to ensure that the new key won't break the sort order.
3168 void btrfs_set_item_key_safe(struct btrfs_root *root, struct btrfs_path *path,
3169 struct btrfs_key *new_key)
3171 struct btrfs_disk_key disk_key;
3172 struct extent_buffer *eb;
3175 eb = path->nodes[0];
3176 slot = path->slots[0];
3178 btrfs_item_key(eb, &disk_key, slot - 1);
3179 BUG_ON(comp_keys(&disk_key, new_key) >= 0);
3181 if (slot < btrfs_header_nritems(eb) - 1) {
3182 btrfs_item_key(eb, &disk_key, slot + 1);
3183 BUG_ON(comp_keys(&disk_key, new_key) <= 0);
3186 btrfs_cpu_key_to_disk(&disk_key, new_key);
3187 btrfs_set_item_key(eb, &disk_key, slot);
3188 btrfs_mark_buffer_dirty(eb);
3190 fixup_low_keys(root, path, &disk_key, 1);
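/*
 * Illustrative example (editor's sketch, not part of the original source):
 * moving an item's key offset forward without re-sorting the leaf.  The
 * new offset is a hypothetical caller value and must still fall between
 * the neighbouring keys, which is what the BUG_ONs above verify.
 *
 *	struct btrfs_key new_key;
 *
 *	btrfs_item_key_to_cpu(path->nodes[0], &new_key, path->slots[0]);
 *	new_key.offset = new_offset;
 *	btrfs_set_item_key_safe(root, path, &new_key);
 */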
3194 * try to push data from one node into the next node left in the tree.
3197 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
3198 * error, and > 0 if there was no room in the left hand block.
3200 static int push_node_left(struct btrfs_trans_handle *trans,
3201 struct btrfs_root *root, struct extent_buffer *dst,
3202 struct extent_buffer *src, int empty)
3209 src_nritems = btrfs_header_nritems(src);
3210 dst_nritems = btrfs_header_nritems(dst);
3211 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
3212 WARN_ON(btrfs_header_generation(src) != trans->transid);
3213 WARN_ON(btrfs_header_generation(dst) != trans->transid);
3215 if (!empty && src_nritems <= 8)
3218 if (push_items <= 0)
3222 push_items = min(src_nritems, push_items);
3223 if (push_items < src_nritems) {
3224 /* leave at least 8 pointers in the node if
3225 * we aren't going to empty it
3227 if (src_nritems - push_items < 8) {
3228 if (push_items <= 8)
3234 push_items = min(src_nritems - 8, push_items);
3236 ret = tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
3239 btrfs_abort_transaction(trans, root, ret);
3242 copy_extent_buffer(dst, src,
3243 btrfs_node_key_ptr_offset(dst_nritems),
3244 btrfs_node_key_ptr_offset(0),
3245 push_items * sizeof(struct btrfs_key_ptr));
3247 if (push_items < src_nritems) {
3249 * don't call tree_mod_log_eb_move here, key removal was already
3250 * fully logged by tree_mod_log_eb_copy above.
3252 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
3253 btrfs_node_key_ptr_offset(push_items),
3254 (src_nritems - push_items) *
3255 sizeof(struct btrfs_key_ptr));
3257 btrfs_set_header_nritems(src, src_nritems - push_items);
3258 btrfs_set_header_nritems(dst, dst_nritems + push_items);
3259 btrfs_mark_buffer_dirty(src);
3260 btrfs_mark_buffer_dirty(dst);
3266 * try to push data from one node into the next node right in the tree.
3269 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
3270 * error, and > 0 if there was no room in the right hand block.
3272 * this will only push up to 1/2 the contents of the left node over
3274 static int balance_node_right(struct btrfs_trans_handle *trans,
3275 struct btrfs_root *root,
3276 struct extent_buffer *dst,
3277 struct extent_buffer *src)
3285 WARN_ON(btrfs_header_generation(src) != trans->transid);
3286 WARN_ON(btrfs_header_generation(dst) != trans->transid);
3288 src_nritems = btrfs_header_nritems(src);
3289 dst_nritems = btrfs_header_nritems(dst);
3290 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
3291 if (push_items <= 0)
3294 if (src_nritems < 4)
3297 max_push = src_nritems / 2 + 1;
3298 /* don't try to empty the node */
3299 if (max_push >= src_nritems)
3302 if (max_push < push_items)
3303 push_items = max_push;
3305 tree_mod_log_eb_move(root->fs_info, dst, push_items, 0, dst_nritems);
3306 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
3307 btrfs_node_key_ptr_offset(0),
3309 sizeof(struct btrfs_key_ptr));
3311 ret = tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
3312 src_nritems - push_items, push_items);
3314 btrfs_abort_transaction(trans, root, ret);
3317 copy_extent_buffer(dst, src,
3318 btrfs_node_key_ptr_offset(0),
3319 btrfs_node_key_ptr_offset(src_nritems - push_items),
3320 push_items * sizeof(struct btrfs_key_ptr));
3322 btrfs_set_header_nritems(src, src_nritems - push_items);
3323 btrfs_set_header_nritems(dst, dst_nritems + push_items);
3325 btrfs_mark_buffer_dirty(src);
3326 btrfs_mark_buffer_dirty(dst);
3332 * helper function to insert a new root level in the tree.
3333 * A new node is allocated, and a single item is inserted to
3334 * point to the existing root
3336 * returns zero on success or < 0 on failure.
3338 static noinline int insert_new_root(struct btrfs_trans_handle *trans,
3339 struct btrfs_root *root,
3340 struct btrfs_path *path, int level)
3343 struct extent_buffer *lower;
3344 struct extent_buffer *c;
3345 struct extent_buffer *old;
3346 struct btrfs_disk_key lower_key;
3348 BUG_ON(path->nodes[level]);
3349 BUG_ON(path->nodes[level-1] != root->node);
3351 lower = path->nodes[level-1];
3353 btrfs_item_key(lower, &lower_key, 0);
3355 btrfs_node_key(lower, &lower_key, 0);
3357 c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
3358 &lower_key, level, root->node->start, 0);
3362 root_add_used(root, root->nodesize);
3364 memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
3365 btrfs_set_header_nritems(c, 1);
3366 btrfs_set_header_level(c, level);
3367 btrfs_set_header_bytenr(c, c->start);
3368 btrfs_set_header_generation(c, trans->transid);
3369 btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
3370 btrfs_set_header_owner(c, root->root_key.objectid);
3372 write_extent_buffer(c, root->fs_info->fsid, btrfs_header_fsid(),
3375 write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
3376 btrfs_header_chunk_tree_uuid(c), BTRFS_UUID_SIZE);
3378 btrfs_set_node_key(c, &lower_key, 0);
3379 btrfs_set_node_blockptr(c, 0, lower->start);
3380 lower_gen = btrfs_header_generation(lower);
3381 WARN_ON(lower_gen != trans->transid);
3383 btrfs_set_node_ptr_generation(c, 0, lower_gen);
3385 btrfs_mark_buffer_dirty(c);
3388 tree_mod_log_set_root_pointer(root, c, 0);
3389 rcu_assign_pointer(root->node, c);
3391 /* the super has an extra ref to root->node */
3392 free_extent_buffer(old);
3394 add_root_to_dirty_list(root);
3395 extent_buffer_get(c);
3396 path->nodes[level] = c;
3397 path->locks[level] = BTRFS_WRITE_LOCK;
3398 path->slots[level] = 0;
3403 * worker function to insert a single pointer in a node.
3404 * the node should have enough room for the pointer already
3406 * slot and level indicate where you want the key to go, and
3407 * blocknr is the block the key points to.
3409 static void insert_ptr(struct btrfs_trans_handle *trans,
3410 struct btrfs_root *root, struct btrfs_path *path,
3411 struct btrfs_disk_key *key, u64 bytenr,
3412 int slot, int level)
3414 struct extent_buffer *lower;
3418 BUG_ON(!path->nodes[level]);
3419 btrfs_assert_tree_locked(path->nodes[level]);
3420 lower = path->nodes[level];
3421 nritems = btrfs_header_nritems(lower);
3422 BUG_ON(slot > nritems);
3423 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root));
3424 if (slot != nritems) {
3426 tree_mod_log_eb_move(root->fs_info, lower, slot + 1,
3427 slot, nritems - slot);
3428 memmove_extent_buffer(lower,
3429 btrfs_node_key_ptr_offset(slot + 1),
3430 btrfs_node_key_ptr_offset(slot),
3431 (nritems - slot) * sizeof(struct btrfs_key_ptr));
3434 ret = tree_mod_log_insert_key(root->fs_info, lower, slot,
3435 MOD_LOG_KEY_ADD, GFP_NOFS);
3438 btrfs_set_node_key(lower, key, slot);
3439 btrfs_set_node_blockptr(lower, slot, bytenr);
3440 WARN_ON(trans->transid == 0);
3441 btrfs_set_node_ptr_generation(lower, slot, trans->transid);
3442 btrfs_set_header_nritems(lower, nritems + 1);
3443 btrfs_mark_buffer_dirty(lower);
3447 * split the node at the specified level in path in two.
3448 * The path is corrected to point to the appropriate node after the split
3450 * Before splitting this tries to make some room in the node by pushing
3451 * left and right, if either one works, it returns right away.
3453 * returns 0 on success and < 0 on failure
3455 static noinline int split_node(struct btrfs_trans_handle *trans,
3456 struct btrfs_root *root,
3457 struct btrfs_path *path, int level)
3459 struct extent_buffer *c;
3460 struct extent_buffer *split;
3461 struct btrfs_disk_key disk_key;
3466 c = path->nodes[level];
3467 WARN_ON(btrfs_header_generation(c) != trans->transid);
3468 if (c == root->node) {
3470 * trying to split the root, let's make a new one
3472 * tree mod log: We don't log the removal of the old root in
3473 * insert_new_root, because that root buffer will be kept as a
3474 * normal node. We are going to log removal of half of the
3475 * elements below with tree_mod_log_eb_copy. We're holding a
3476 * tree lock on the buffer, which is why we cannot race with
3477 * other tree_mod_log users.
3479 ret = insert_new_root(trans, root, path, level + 1);
3483 ret = push_nodes_for_insert(trans, root, path, level);
3484 c = path->nodes[level];
3485 if (!ret && btrfs_header_nritems(c) <
3486 BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
3492 c_nritems = btrfs_header_nritems(c);
3493 mid = (c_nritems + 1) / 2;
3494 btrfs_node_key(c, &disk_key, mid);
3496 split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
3497 &disk_key, level, c->start, 0);
3499 return PTR_ERR(split);
3501 root_add_used(root, root->nodesize);
3503 memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header));
3504 btrfs_set_header_level(split, btrfs_header_level(c));
3505 btrfs_set_header_bytenr(split, split->start);
3506 btrfs_set_header_generation(split, trans->transid);
3507 btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
3508 btrfs_set_header_owner(split, root->root_key.objectid);
3509 write_extent_buffer(split, root->fs_info->fsid,
3510 btrfs_header_fsid(), BTRFS_FSID_SIZE);
3511 write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
3512 btrfs_header_chunk_tree_uuid(split),
3515 ret = tree_mod_log_eb_copy(root->fs_info, split, c, 0,
3516 mid, c_nritems - mid);
3518 btrfs_abort_transaction(trans, root, ret);
3521 copy_extent_buffer(split, c,
3522 btrfs_node_key_ptr_offset(0),
3523 btrfs_node_key_ptr_offset(mid),
3524 (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
3525 btrfs_set_header_nritems(split, c_nritems - mid);
3526 btrfs_set_header_nritems(c, mid);
3529 btrfs_mark_buffer_dirty(c);
3530 btrfs_mark_buffer_dirty(split);
3532 insert_ptr(trans, root, path, &disk_key, split->start,
3533 path->slots[level + 1] + 1, level + 1);
3535 if (path->slots[level] >= mid) {
3536 path->slots[level] -= mid;
3537 btrfs_tree_unlock(c);
3538 free_extent_buffer(c);
3539 path->nodes[level] = split;
3540 path->slots[level + 1] += 1;
3542 btrfs_tree_unlock(split);
3543 free_extent_buffer(split);
3549 * how many bytes are required to store the items in a leaf. start
3550 * and nr indicate which items in the leaf to check. This totals up the
3551 * space used both by the item structs and the item data
3553 static int leaf_space_used(struct extent_buffer *l, int start, int nr)
3555 struct btrfs_item *start_item;
3556 struct btrfs_item *end_item;
3557 struct btrfs_map_token token;
3559 int nritems = btrfs_header_nritems(l);
3560 int end = min(nritems, start + nr) - 1;
3564 btrfs_init_map_token(&token);
3565 start_item = btrfs_item_nr(start);
3566 end_item = btrfs_item_nr(end);
3567 data_len = btrfs_token_item_offset(l, start_item, &token) +
3568 btrfs_token_item_size(l, start_item, &token);
3569 data_len = data_len - btrfs_token_item_offset(l, end_item, &token);
3570 data_len += sizeof(struct btrfs_item) * nr;
3571 WARN_ON(data_len < 0);
3576 * The space between the end of the leaf items and
3577 * the start of the leaf data. IOW, how much room
3578 * the leaf has left for both items and data
3580 noinline int btrfs_leaf_free_space(struct btrfs_root *root,
3581 struct extent_buffer *leaf)
3583 int nritems = btrfs_header_nritems(leaf);
3585 ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
3587 btrfs_crit(root->fs_info,
3588 "leaf free space ret %d, leaf data size %lu, used %d nritems %d",
3589 ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
3590 leaf_space_used(leaf, 0, nritems), nritems);
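/*
 * Illustrative example (editor's sketch, not part of the original source):
 * a caller that wants to add an item of data_size bytes typically checks
 * the leaf first and only then pushes or splits:
 *
 *	if (btrfs_leaf_free_space(root, leaf) <
 *	    data_size + sizeof(struct btrfs_item))
 *		... push into a neighbour or split the leaf ...
 *
 * The item header costs sizeof(struct btrfs_item) on top of the item
 * data, which is why leaf_space_used() above counts both.
 */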
3596 * min slot controls the lowest index we're willing to push to the
3597 * right. We'll push up to and including min_slot, but no lower
3599 static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
3600 struct btrfs_root *root,
3601 struct btrfs_path *path,
3602 int data_size, int empty,
3603 struct extent_buffer *right,
3604 int free_space, u32 left_nritems,
3607 struct extent_buffer *left = path->nodes[0];
3608 struct extent_buffer *upper = path->nodes[1];
3609 struct btrfs_map_token token;
3610 struct btrfs_disk_key disk_key;
3615 struct btrfs_item *item;
3621 btrfs_init_map_token(&token);
3626 nr = max_t(u32, 1, min_slot);
3628 if (path->slots[0] >= left_nritems)
3629 push_space += data_size;
3631 slot = path->slots[1];
3632 i = left_nritems - 1;
3634 item = btrfs_item_nr(i);
3636 if (!empty && push_items > 0) {
3637 if (path->slots[0] > i)
3639 if (path->slots[0] == i) {
3640 int space = btrfs_leaf_free_space(root, left);
3641 if (space + push_space * 2 > free_space)
3646 if (path->slots[0] == i)
3647 push_space += data_size;
3649 this_item_size = btrfs_item_size(left, item);
3650 if (this_item_size + sizeof(*item) + push_space > free_space)
3654 push_space += this_item_size + sizeof(*item);
3660 if (push_items == 0)
3663 WARN_ON(!empty && push_items == left_nritems);
3665 /* push left to right */
3666 right_nritems = btrfs_header_nritems(right);
3668 push_space = btrfs_item_end_nr(left, left_nritems - push_items);
3669 push_space -= leaf_data_end(root, left);
3671 /* make room in the right data area */
3672 data_end = leaf_data_end(root, right);
3673 memmove_extent_buffer(right,
3674 btrfs_leaf_data(right) + data_end - push_space,
3675 btrfs_leaf_data(right) + data_end,
3676 BTRFS_LEAF_DATA_SIZE(root) - data_end);
3678 /* copy from the left data area */
3679 copy_extent_buffer(right, left, btrfs_leaf_data(right) +
3680 BTRFS_LEAF_DATA_SIZE(root) - push_space,
3681 btrfs_leaf_data(left) + leaf_data_end(root, left),
3684 memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
3685 btrfs_item_nr_offset(0),
3686 right_nritems * sizeof(struct btrfs_item));
3688 /* copy the items from left to right */
3689 copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
3690 btrfs_item_nr_offset(left_nritems - push_items),
3691 push_items * sizeof(struct btrfs_item));
3693 /* update the item pointers */
3694 right_nritems += push_items;
3695 btrfs_set_header_nritems(right, right_nritems);
3696 push_space = BTRFS_LEAF_DATA_SIZE(root);
3697 for (i = 0; i < right_nritems; i++) {
3698 item = btrfs_item_nr(i);
3699 push_space -= btrfs_token_item_size(right, item, &token);
3700 btrfs_set_token_item_offset(right, item, push_space, &token);
3703 left_nritems -= push_items;
3704 btrfs_set_header_nritems(left, left_nritems);
3707 btrfs_mark_buffer_dirty(left);
3709 clean_tree_block(trans, root, left);
3711 btrfs_mark_buffer_dirty(right);
3713 btrfs_item_key(right, &disk_key, 0);
3714 btrfs_set_node_key(upper, &disk_key, slot + 1);
3715 btrfs_mark_buffer_dirty(upper);
3717 /* then fixup the leaf pointer in the path */
3718 if (path->slots[0] >= left_nritems) {
3719 path->slots[0] -= left_nritems;
3720 if (btrfs_header_nritems(path->nodes[0]) == 0)
3721 clean_tree_block(trans, root, path->nodes[0]);
3722 btrfs_tree_unlock(path->nodes[0]);
3723 free_extent_buffer(path->nodes[0]);
3724 path->nodes[0] = right;
3725 path->slots[1] += 1;
3727 btrfs_tree_unlock(right);
3728 free_extent_buffer(right);
3733 btrfs_tree_unlock(right);
3734 free_extent_buffer(right);
3739 * push some data in the path leaf to the right, trying to free up at
3740 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3742 * returns 1 if the push failed because the other node didn't have enough
3743 * room, 0 if everything worked out and < 0 if there were major errors.
3745 * this will push starting from min_slot to the end of the leaf. It won't
3746 * push any slot lower than min_slot
3748 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
3749 *root, struct btrfs_path *path,
3750 int min_data_size, int data_size,
3751 int empty, u32 min_slot)
3753 struct extent_buffer *left = path->nodes[0];
3754 struct extent_buffer *right;
3755 struct extent_buffer *upper;
3761 if (!path->nodes[1])
3764 slot = path->slots[1];
3765 upper = path->nodes[1];
3766 if (slot >= btrfs_header_nritems(upper) - 1)
3769 btrfs_assert_tree_locked(path->nodes[1]);
3771 right = read_node_slot(root, upper, slot + 1);
3775 btrfs_tree_lock(right);
3776 btrfs_set_lock_blocking(right);
3778 free_space = btrfs_leaf_free_space(root, right);
3779 if (free_space < data_size)
3782 /* cow and double check */
3783 ret = btrfs_cow_block(trans, root, right, upper,
3788 free_space = btrfs_leaf_free_space(root, right);
3789 if (free_space < data_size)
3792 left_nritems = btrfs_header_nritems(left);
3793 if (left_nritems == 0)
3796 if (path->slots[0] == left_nritems && !empty) {
3797 /* Key greater than all keys in the leaf, right neighbor has
3798 * enough room for it and we're not emptying our leaf to delete
3799 * it, therefore use right neighbor to insert the new item and
3800 * no need to touch/dirty our left leaf. */
3801 btrfs_tree_unlock(left);
3802 free_extent_buffer(left);
3803 path->nodes[0] = right;
3809 return __push_leaf_right(trans, root, path, min_data_size, empty,
3810 right, free_space, left_nritems, min_slot);
3812 btrfs_tree_unlock(right);
3813 free_extent_buffer(right);
3818 * push some data in the path leaf to the left, trying to free up at
3819 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3821 * max_slot can put a limit on how far into the leaf we'll push items. The
3822 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the items.
3825 static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
3826 struct btrfs_root *root,
3827 struct btrfs_path *path, int data_size,
3828 int empty, struct extent_buffer *left,
3829 int free_space, u32 right_nritems,
3832 struct btrfs_disk_key disk_key;
3833 struct extent_buffer *right = path->nodes[0];
3837 struct btrfs_item *item;
3838 u32 old_left_nritems;
3842 u32 old_left_item_size;
3843 struct btrfs_map_token token;
3845 btrfs_init_map_token(&token);
3848 nr = min(right_nritems, max_slot);
3850 nr = min(right_nritems - 1, max_slot);
3852 for (i = 0; i < nr; i++) {
3853 item = btrfs_item_nr(i);
3855 if (!empty && push_items > 0) {
3856 if (path->slots[0] < i)
3858 if (path->slots[0] == i) {
3859 int space = btrfs_leaf_free_space(root, right);
3860 if (space + push_space * 2 > free_space)
3865 if (path->slots[0] == i)
3866 push_space += data_size;
3868 this_item_size = btrfs_item_size(right, item);
3869 if (this_item_size + sizeof(*item) + push_space > free_space)
3873 push_space += this_item_size + sizeof(*item);
3876 if (push_items == 0) {
3880 WARN_ON(!empty && push_items == btrfs_header_nritems(right));
3882 /* push data from right to left */
3883 copy_extent_buffer(left, right,
3884 btrfs_item_nr_offset(btrfs_header_nritems(left)),
3885 btrfs_item_nr_offset(0),
3886 push_items * sizeof(struct btrfs_item));
3888 push_space = BTRFS_LEAF_DATA_SIZE(root) -
3889 btrfs_item_offset_nr(right, push_items - 1);
3891 copy_extent_buffer(left, right, btrfs_leaf_data(left) +
3892 leaf_data_end(root, left) - push_space,
3893 btrfs_leaf_data(right) +
3894 btrfs_item_offset_nr(right, push_items - 1),
3896 old_left_nritems = btrfs_header_nritems(left);
3897 BUG_ON(old_left_nritems <= 0);
3899 old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
3900 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
3903 item = btrfs_item_nr(i);
3905 ioff = btrfs_token_item_offset(left, item, &token);
3906 btrfs_set_token_item_offset(left, item,
3907 ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size),
3910 btrfs_set_header_nritems(left, old_left_nritems + push_items);
3912 /* fixup right node */
3913 if (push_items > right_nritems)
3914 WARN(1, KERN_CRIT "push items %d nr %u\n", push_items,
3917 if (push_items < right_nritems) {
3918 push_space = btrfs_item_offset_nr(right, push_items - 1) -
3919 leaf_data_end(root, right);
3920 memmove_extent_buffer(right, btrfs_leaf_data(right) +
3921 BTRFS_LEAF_DATA_SIZE(root) - push_space,
3922 btrfs_leaf_data(right) +
3923 leaf_data_end(root, right), push_space);
3925 memmove_extent_buffer(right, btrfs_item_nr_offset(0),
3926 btrfs_item_nr_offset(push_items),
3927 (btrfs_header_nritems(right) - push_items) *
3928 sizeof(struct btrfs_item));
3930 right_nritems -= push_items;
3931 btrfs_set_header_nritems(right, right_nritems);
3932 push_space = BTRFS_LEAF_DATA_SIZE(root);
3933 for (i = 0; i < right_nritems; i++) {
3934 item = btrfs_item_nr(i);
3936 push_space = push_space - btrfs_token_item_size(right,
3938 btrfs_set_token_item_offset(right, item, push_space, &token);
3941 btrfs_mark_buffer_dirty(left);
3943 btrfs_mark_buffer_dirty(right);
3945 clean_tree_block(trans, root, right);
3947 btrfs_item_key(right, &disk_key, 0);
3948 fixup_low_keys(root, path, &disk_key, 1);
3950 /* then fixup the leaf pointer in the path */
3951 if (path->slots[0] < push_items) {
3952 path->slots[0] += old_left_nritems;
3953 btrfs_tree_unlock(path->nodes[0]);
3954 free_extent_buffer(path->nodes[0]);
3955 path->nodes[0] = left;
3956 path->slots[1] -= 1;
3958 btrfs_tree_unlock(left);
3959 free_extent_buffer(left);
3960 path->slots[0] -= push_items;
3962 BUG_ON(path->slots[0] < 0);
3965 btrfs_tree_unlock(left);
3966 free_extent_buffer(left);
3971 * push some data in the path leaf to the left, trying to free up at
3972 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3974 * max_slot can put a limit on how far into the leaf we'll push items. The
3975 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the items.
3978 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
3979 *root, struct btrfs_path *path, int min_data_size,
3980 int data_size, int empty, u32 max_slot)
3982 struct extent_buffer *right = path->nodes[0];
3983 struct extent_buffer *left;
3989 slot = path->slots[1];
3992 if (!path->nodes[1])
3995 right_nritems = btrfs_header_nritems(right);
3996 if (right_nritems == 0)
3999 btrfs_assert_tree_locked(path->nodes[1]);
4001 left = read_node_slot(root, path->nodes[1], slot - 1);
4005 btrfs_tree_lock(left);
4006 btrfs_set_lock_blocking(left);
4008 free_space = btrfs_leaf_free_space(root, left);
4009 if (free_space < data_size) {
4014 /* cow and double check */
4015 ret = btrfs_cow_block(trans, root, left,
4016 path->nodes[1], slot - 1, &left);
4018 /* we hit -ENOSPC, but it isn't fatal here */
4024 free_space = btrfs_leaf_free_space(root, left);
4025 if (free_space < data_size) {
4030 return __push_leaf_left(trans, root, path, min_data_size,
4031 empty, left, free_space, right_nritems,
4034 btrfs_tree_unlock(left);
4035 free_extent_buffer(left);
4040 * split the path's leaf in two, making sure there is at least data_size
4041 * available for the resulting leaf level of the path.
4043 static noinline void copy_for_split(struct btrfs_trans_handle *trans,
4044 struct btrfs_root *root,
4045 struct btrfs_path *path,
4046 struct extent_buffer *l,
4047 struct extent_buffer *right,
4048 int slot, int mid, int nritems)
4053 struct btrfs_disk_key disk_key;
4054 struct btrfs_map_token token;
4056 btrfs_init_map_token(&token);
4058 nritems = nritems - mid;
4059 btrfs_set_header_nritems(right, nritems);
4060 data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);
4062 copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
4063 btrfs_item_nr_offset(mid),
4064 nritems * sizeof(struct btrfs_item));
4066 copy_extent_buffer(right, l,
4067 btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
4068 data_copy_size, btrfs_leaf_data(l) +
4069 leaf_data_end(root, l), data_copy_size);
4071 rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
4072 btrfs_item_end_nr(l, mid);
4074 for (i = 0; i < nritems; i++) {
4075 struct btrfs_item *item = btrfs_item_nr(i);
4078 ioff = btrfs_token_item_offset(right, item, &token);
4079 btrfs_set_token_item_offset(right, item,
4080 ioff + rt_data_off, &token);
4083 btrfs_set_header_nritems(l, mid);
4084 btrfs_item_key(right, &disk_key, 0);
4085 insert_ptr(trans, root, path, &disk_key, right->start,
4086 path->slots[1] + 1, 1);
4088 btrfs_mark_buffer_dirty(right);
4089 btrfs_mark_buffer_dirty(l);
4090 BUG_ON(path->slots[0] != slot);
4093 btrfs_tree_unlock(path->nodes[0]);
4094 free_extent_buffer(path->nodes[0]);
4095 path->nodes[0] = right;
4096 path->slots[0] -= mid;
4097 path->slots[1] += 1;
4099 btrfs_tree_unlock(right);
4100 free_extent_buffer(right);
4103 BUG_ON(path->slots[0] < 0);
4107 * double splits happen when we need to insert a big item in the middle
4108 * of a leaf. A double split can leave us with 3 mostly empty leaves:
4109 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
4112 * We avoid this by trying to push the items on either side of our target
4113 * into the adjacent leaves. If all goes well we can avoid the double split completely.
4116 static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
4117 struct btrfs_root *root,
4118 struct btrfs_path *path,
4125 int space_needed = data_size;
4127 slot = path->slots[0];
4128 if (slot < btrfs_header_nritems(path->nodes[0]))
4129 space_needed -= btrfs_leaf_free_space(root, path->nodes[0]);
4132 * try to push all the items after our slot into the
4135 ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot);
4142 nritems = btrfs_header_nritems(path->nodes[0]);
4144 * our goal is to get our slot at the start or end of a leaf. If
4145 * we've done so we're done
4147 if (path->slots[0] == 0 || path->slots[0] == nritems)
4150 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
4153 /* try to push all the items before our slot into the next leaf */
4154 slot = path->slots[0];
4155 ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot);
4168 * split the path's leaf in two, making sure there is at least data_size
4169 * available for the resulting leaf level of the path.
4171 * returns 0 if all went well and < 0 on failure.
4173 static noinline int split_leaf(struct btrfs_trans_handle *trans,
4174 struct btrfs_root *root,
4175 struct btrfs_key *ins_key,
4176 struct btrfs_path *path, int data_size,
4179 struct btrfs_disk_key disk_key;
4180 struct extent_buffer *l;
4184 struct extent_buffer *right;
4188 int num_doubles = 0;
4189 int tried_avoid_double = 0;
4192 slot = path->slots[0];
4193 if (extend && data_size + btrfs_item_size_nr(l, slot) +
4194 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root))
4197 /* first try to make some room by pushing left and right */
4198 if (data_size && path->nodes[1]) {
4199 int space_needed = data_size;
4201 if (slot < btrfs_header_nritems(l))
4202 space_needed -= btrfs_leaf_free_space(root, l);
4204 wret = push_leaf_right(trans, root, path, space_needed,
4205 space_needed, 0, 0);
4209 wret = push_leaf_left(trans, root, path, space_needed,
4210 space_needed, 0, (u32)-1);
4216 /* did the pushes work? */
4217 if (btrfs_leaf_free_space(root, l) >= data_size)
4221 if (!path->nodes[1]) {
4222 ret = insert_new_root(trans, root, path, 1);
4229 slot = path->slots[0];
4230 nritems = btrfs_header_nritems(l);
4231 mid = (nritems + 1) / 2;
4235 leaf_space_used(l, mid, nritems - mid) + data_size >
4236 BTRFS_LEAF_DATA_SIZE(root)) {
4237 if (slot >= nritems) {
4241 if (mid != nritems &&
4242 leaf_space_used(l, mid, nritems - mid) +
4243 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
4244 if (data_size && !tried_avoid_double)
4245 goto push_for_double;
4251 if (leaf_space_used(l, 0, mid) + data_size >
4252 BTRFS_LEAF_DATA_SIZE(root)) {
4253 if (!extend && data_size && slot == 0) {
4255 } else if ((extend || !data_size) && slot == 0) {
4259 if (mid != nritems &&
4260 leaf_space_used(l, mid, nritems - mid) +
4261 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
4262 if (data_size && !tried_avoid_double)
4263 goto push_for_double;
4271 btrfs_cpu_key_to_disk(&disk_key, ins_key);
4273 btrfs_item_key(l, &disk_key, mid);
4275 right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
4276 &disk_key, 0, l->start, 0);
4278 return PTR_ERR(right);
4280 root_add_used(root, root->nodesize);
4282 memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
4283 btrfs_set_header_bytenr(right, right->start);
4284 btrfs_set_header_generation(right, trans->transid);
4285 btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
4286 btrfs_set_header_owner(right, root->root_key.objectid);
4287 btrfs_set_header_level(right, 0);
4288 write_extent_buffer(right, root->fs_info->fsid,
4289 btrfs_header_fsid(), BTRFS_FSID_SIZE);
4291 write_extent_buffer(right, root->fs_info->chunk_tree_uuid,
4292 btrfs_header_chunk_tree_uuid(right),
4297 btrfs_set_header_nritems(right, 0);
4298 insert_ptr(trans, root, path, &disk_key, right->start,
4299 path->slots[1] + 1, 1);
4300 btrfs_tree_unlock(path->nodes[0]);
4301 free_extent_buffer(path->nodes[0]);
4302 path->nodes[0] = right;
4304 path->slots[1] += 1;
4306 btrfs_set_header_nritems(right, 0);
4307 insert_ptr(trans, root, path, &disk_key, right->start,
4309 btrfs_tree_unlock(path->nodes[0]);
4310 free_extent_buffer(path->nodes[0]);
4311 path->nodes[0] = right;
4313 if (path->slots[1] == 0)
4314 fixup_low_keys(root, path, &disk_key, 1);
4316 btrfs_mark_buffer_dirty(right);
4320 copy_for_split(trans, root, path, l, right, slot, mid, nritems);
4323 BUG_ON(num_doubles != 0);
4331 push_for_double_split(trans, root, path, data_size);
4332 tried_avoid_double = 1;
4333 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
4338 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
4339 struct btrfs_root *root,
4340 struct btrfs_path *path, int ins_len)
4342 struct btrfs_key key;
4343 struct extent_buffer *leaf;
4344 struct btrfs_file_extent_item *fi;
4349 leaf = path->nodes[0];
4350 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4352 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
4353 key.type != BTRFS_EXTENT_CSUM_KEY);
4355 if (btrfs_leaf_free_space(root, leaf) >= ins_len)
4358 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4359 if (key.type == BTRFS_EXTENT_DATA_KEY) {
4360 fi = btrfs_item_ptr(leaf, path->slots[0],
4361 struct btrfs_file_extent_item);
4362 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
4364 btrfs_release_path(path);
4366 path->keep_locks = 1;
4367 path->search_for_split = 1;
4368 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
4369 path->search_for_split = 0;
4374 leaf = path->nodes[0];
4375 /* if our item isn't there or got smaller, return now */
4376 if (ret > 0 || item_size != btrfs_item_size_nr(leaf, path->slots[0]))
4379 /* the leaf has changed, it now has room. return now */
4380 if (btrfs_leaf_free_space(root, path->nodes[0]) >= ins_len)
4383 if (key.type == BTRFS_EXTENT_DATA_KEY) {
4384 fi = btrfs_item_ptr(leaf, path->slots[0],
4385 struct btrfs_file_extent_item);
4386 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
4390 btrfs_set_path_blocking(path);
4391 ret = split_leaf(trans, root, &key, path, ins_len, 1);
4395 path->keep_locks = 0;
4396 btrfs_unlock_up_safe(path, 1);
4399 path->keep_locks = 0;
4403 static noinline int split_item(struct btrfs_trans_handle *trans,
4404 struct btrfs_root *root,
4405 struct btrfs_path *path,
4406 struct btrfs_key *new_key,
4407 unsigned long split_offset)
4409 struct extent_buffer *leaf;
4410 struct btrfs_item *item;
4411 struct btrfs_item *new_item;
4417 struct btrfs_disk_key disk_key;
4419 leaf = path->nodes[0];
4420 BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
4422 btrfs_set_path_blocking(path);
4424 item = btrfs_item_nr(path->slots[0]);
4425 orig_offset = btrfs_item_offset(leaf, item);
4426 item_size = btrfs_item_size(leaf, item);
4428 buf = kmalloc(item_size, GFP_NOFS);
4432 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
4433 path->slots[0]), item_size);
4435 slot = path->slots[0] + 1;
4436 nritems = btrfs_header_nritems(leaf);
4437 if (slot != nritems) {
4438 /* shift the items */
4439 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
4440 btrfs_item_nr_offset(slot),
4441 (nritems - slot) * sizeof(struct btrfs_item));
4444 btrfs_cpu_key_to_disk(&disk_key, new_key);
4445 btrfs_set_item_key(leaf, &disk_key, slot);
4447 new_item = btrfs_item_nr(slot);
4449 btrfs_set_item_offset(leaf, new_item, orig_offset);
4450 btrfs_set_item_size(leaf, new_item, item_size - split_offset);
4452 btrfs_set_item_offset(leaf, item,
4453 orig_offset + item_size - split_offset);
4454 btrfs_set_item_size(leaf, item, split_offset);
4456 btrfs_set_header_nritems(leaf, nritems + 1);
4458 /* write the data for the start of the original item */
4459 write_extent_buffer(leaf, buf,
4460 btrfs_item_ptr_offset(leaf, path->slots[0]),
4463 /* write the data for the new item */
4464 write_extent_buffer(leaf, buf + split_offset,
4465 btrfs_item_ptr_offset(leaf, slot),
4466 item_size - split_offset);
4467 btrfs_mark_buffer_dirty(leaf);
4469 BUG_ON(btrfs_leaf_free_space(root, leaf) < 0);
4475 * This function splits a single item into two items,
4476 * giving 'new_key' to the new item and splitting the
4477 * old one at split_offset (from the start of the item).
4479 * The path may be released by this operation. After
4480 * the split, the path is pointing to the old item. The
4481 * new item is going to be in the same node as the old one.
4483 * Note, the item being split must be small enough to live alone on
4484 * a tree block with room for one extra struct btrfs_item
4486 * This allows us to split the item in place, keeping a lock on the
4487 * leaf the entire time.
4489 int btrfs_split_item(struct btrfs_trans_handle *trans,
4490 struct btrfs_root *root,
4491 struct btrfs_path *path,
4492 struct btrfs_key *new_key,
4493 unsigned long split_offset)
4496 ret = setup_leaf_for_split(trans, root, path,
4497 sizeof(struct btrfs_item));
4501 ret = split_item(trans, root, path, new_key, split_offset);
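/*
 * Illustrative example (editor's sketch, not part of the original source):
 * splitting an item so that the bytes from 'split_bytes' onwards move into
 * a new item.  'key' and 'split_bytes' are hypothetical caller values; the
 * caller must pick new_key so the leaf stays sorted.
 *
 *	struct btrfs_key new_key = key;
 *	new_key.offset = key.offset + split_bytes;
 *	ret = btrfs_split_item(trans, root, path, &new_key, split_bytes);
 */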
4506 * This function duplicates an item, giving 'new_key' to the new item.
4507 * It guarantees both items live in the same tree leaf and the new item
4508 * is contiguous with the original item.
4510 * This allows us to split a file extent in place, keeping a lock on the
4511 * leaf the entire time.
4513 int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
4514 struct btrfs_root *root,
4515 struct btrfs_path *path,
4516 struct btrfs_key *new_key)
4518 struct extent_buffer *leaf;
4522 leaf = path->nodes[0];
4523 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4524 ret = setup_leaf_for_split(trans, root, path,
4525 item_size + sizeof(struct btrfs_item));
4530 setup_items_for_insert(root, path, new_key, &item_size,
4531 item_size, item_size +
4532 sizeof(struct btrfs_item), 1);
4533 leaf = path->nodes[0];
4534 memcpy_extent_buffer(leaf,
4535 btrfs_item_ptr_offset(leaf, path->slots[0]),
4536 btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
4542 * make the item pointed to by the path smaller. new_size indicates
4543 * how small to make it, and from_end tells us if we just chop bytes
4544 * off the end of the item or if we shift the item to chop bytes off the front.
4547 void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path,
4548 u32 new_size, int from_end)
4551 struct extent_buffer *leaf;
4552 struct btrfs_item *item;
4554 unsigned int data_end;
4555 unsigned int old_data_start;
4556 unsigned int old_size;
4557 unsigned int size_diff;
4559 struct btrfs_map_token token;
4561 btrfs_init_map_token(&token);
4563 leaf = path->nodes[0];
4564 slot = path->slots[0];
4566 old_size = btrfs_item_size_nr(leaf, slot);
4567 if (old_size == new_size)
4570 nritems = btrfs_header_nritems(leaf);
4571 data_end = leaf_data_end(root, leaf);
4573 old_data_start = btrfs_item_offset_nr(leaf, slot);
4575 size_diff = old_size - new_size;
4578 BUG_ON(slot >= nritems);
4581 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4583 /* first correct the data pointers */
4584 for (i = slot; i < nritems; i++) {
4586 item = btrfs_item_nr(i);
4588 ioff = btrfs_token_item_offset(leaf, item, &token);
4589 btrfs_set_token_item_offset(leaf, item,
4590 ioff + size_diff, &token);
4593 /* shift the data */
4595 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4596 data_end + size_diff, btrfs_leaf_data(leaf) +
4597 data_end, old_data_start + new_size - data_end);
4599 struct btrfs_disk_key disk_key;
4602 btrfs_item_key(leaf, &disk_key, slot);
4604 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
4606 struct btrfs_file_extent_item *fi;
4608 fi = btrfs_item_ptr(leaf, slot,
4609 struct btrfs_file_extent_item);
4610 fi = (struct btrfs_file_extent_item *)(
4611 (unsigned long)fi - size_diff);
4613 if (btrfs_file_extent_type(leaf, fi) ==
4614 BTRFS_FILE_EXTENT_INLINE) {
4615 ptr = btrfs_item_ptr_offset(leaf, slot);
4616 memmove_extent_buffer(leaf, ptr,
4618 offsetof(struct btrfs_file_extent_item,
4623 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4624 data_end + size_diff, btrfs_leaf_data(leaf) +
4625 data_end, old_data_start - data_end);
4627 offset = btrfs_disk_key_offset(&disk_key);
4628 btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
4629 btrfs_set_item_key(leaf, &disk_key, slot);
4631 fixup_low_keys(root, path, &disk_key, 1);
4634 item = btrfs_item_nr(slot);
4635 btrfs_set_item_size(leaf, item, new_size);
4636 btrfs_mark_buffer_dirty(leaf);
4638 if (btrfs_leaf_free_space(root, leaf) < 0) {
4639 btrfs_print_leaf(root, leaf);
4645 * make the item pointed to by the path bigger; data_size is the added size.
4647 void btrfs_extend_item(struct btrfs_root *root, struct btrfs_path *path,
4651 struct extent_buffer *leaf;
4652 struct btrfs_item *item;
4654 unsigned int data_end;
4655 unsigned int old_data;
4656 unsigned int old_size;
4658 struct btrfs_map_token token;
4660 btrfs_init_map_token(&token);
4662 leaf = path->nodes[0];
4664 nritems = btrfs_header_nritems(leaf);
4665 data_end = leaf_data_end(root, leaf);
4667 if (btrfs_leaf_free_space(root, leaf) < data_size) {
4668 btrfs_print_leaf(root, leaf);
4671 slot = path->slots[0];
4672 old_data = btrfs_item_end_nr(leaf, slot);
4675 if (slot >= nritems) {
4676 btrfs_print_leaf(root, leaf);
4677 btrfs_crit(root->fs_info, "slot %d too large, nritems %d",
4683 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4685 /* first correct the data pointers */
4686 for (i = slot; i < nritems; i++) {
4688 item = btrfs_item_nr(i);
4690 ioff = btrfs_token_item_offset(leaf, item, &token);
4691 btrfs_set_token_item_offset(leaf, item,
4692 ioff - data_size, &token);
4695 /* shift the data */
4696 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4697 data_end - data_size, btrfs_leaf_data(leaf) +
4698 data_end, old_data - data_end);
4700 data_end = old_data;
4701 old_size = btrfs_item_size_nr(leaf, slot);
4702 item = btrfs_item_nr(slot);
4703 btrfs_set_item_size(leaf, item, old_size + data_size);
4704 btrfs_mark_buffer_dirty(leaf);
4706 if (btrfs_leaf_free_space(root, leaf) < 0) {
4707 btrfs_print_leaf(root, leaf);
4713 * this is a helper for btrfs_insert_empty_items, the main goal here is
4714 * to save stack depth by doing the bulk of the work in a function
4715 * that doesn't call btrfs_search_slot
4717 void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
4718 struct btrfs_key *cpu_key, u32 *data_size,
4719 u32 total_data, u32 total_size, int nr)
4721 struct btrfs_item *item;
4724 unsigned int data_end;
4725 struct btrfs_disk_key disk_key;
4726 struct extent_buffer *leaf;
4728 struct btrfs_map_token token;
4730 if (path->slots[0] == 0) {
4731 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
4732 fixup_low_keys(root, path, &disk_key, 1);
4734 btrfs_unlock_up_safe(path, 1);
4736 btrfs_init_map_token(&token);
4738 leaf = path->nodes[0];
4739 slot = path->slots[0];
4741 nritems = btrfs_header_nritems(leaf);
4742 data_end = leaf_data_end(root, leaf);
4744 if (btrfs_leaf_free_space(root, leaf) < total_size) {
4745 btrfs_print_leaf(root, leaf);
4746 btrfs_crit(root->fs_info, "not enough freespace need %u have %d",
4747 total_size, btrfs_leaf_free_space(root, leaf));
4751 if (slot != nritems) {
4752 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
4754 if (old_data < data_end) {
4755 btrfs_print_leaf(root, leaf);
4756 btrfs_crit(root->fs_info, "slot %d old_data %d data_end %d",
4757 slot, old_data, data_end);
4761 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4763 /* first correct the data pointers */
4764 for (i = slot; i < nritems; i++) {
4767 item = btrfs_item_nr(i);
4768 ioff = btrfs_token_item_offset(leaf, item, &token);
4769 btrfs_set_token_item_offset(leaf, item,
4770 ioff - total_data, &token);
4772 /* shift the items */
4773 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
4774 btrfs_item_nr_offset(slot),
4775 (nritems - slot) * sizeof(struct btrfs_item));
4777 /* shift the data */
4778 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4779 data_end - total_data, btrfs_leaf_data(leaf) +
4780 data_end, old_data - data_end);
4781 data_end = old_data;
4784 /* setup the item for the new data */
4785 for (i = 0; i < nr; i++) {
4786 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
4787 btrfs_set_item_key(leaf, &disk_key, slot + i);
4788 item = btrfs_item_nr(slot + i);
4789 btrfs_set_token_item_offset(leaf, item,
4790 data_end - data_size[i], &token);
4791 data_end -= data_size[i];
4792 btrfs_set_token_item_size(leaf, item, data_size[i], &token);
4795 btrfs_set_header_nritems(leaf, nritems + nr);
4796 btrfs_mark_buffer_dirty(leaf);
4798 if (btrfs_leaf_free_space(root, leaf) < 0) {
4799 btrfs_print_leaf(root, leaf);
4805 * Given a key and some data, insert items into the tree.
4806 * This does all the path init required, making room in the tree if needed.
4808 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
4809 struct btrfs_root *root,
4810 struct btrfs_path *path,
4811 struct btrfs_key *cpu_key, u32 *data_size,
4820 for (i = 0; i < nr; i++)
4821 total_data += data_size[i];
4823 total_size = total_data + (nr * sizeof(struct btrfs_item));
4824 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
4830 slot = path->slots[0];
4833 setup_items_for_insert(root, path, cpu_key, data_size,
4834 total_data, total_size, nr);
4839 * Given a key and some data, insert an item into the tree.
4840 * This does all the path init required, making room in the tree if needed.
4842 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
4843 *root, struct btrfs_key *cpu_key, void *data, u32
4847 struct btrfs_path *path;
4848 struct extent_buffer *leaf;
4851 path = btrfs_alloc_path();
4854 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
4856 leaf = path->nodes[0];
4857 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
4858 write_extent_buffer(leaf, data, ptr, data_size);
4859 btrfs_mark_buffer_dirty(leaf);
4861 btrfs_free_path(path);
4866 * delete the pointer from a given node.
4868 * the tree should have been previously balanced so the deletion does not empty a node.
4871 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
4872 int level, int slot)
4874 struct extent_buffer *parent = path->nodes[level];
4878 nritems = btrfs_header_nritems(parent);
4879 if (slot != nritems - 1) {
4881 tree_mod_log_eb_move(root->fs_info, parent, slot,
4882 slot + 1, nritems - slot - 1);
4883 memmove_extent_buffer(parent,
4884 btrfs_node_key_ptr_offset(slot),
4885 btrfs_node_key_ptr_offset(slot + 1),
4886 sizeof(struct btrfs_key_ptr) *
4887 (nritems - slot - 1));
4889 ret = tree_mod_log_insert_key(root->fs_info, parent, slot,
4890 MOD_LOG_KEY_REMOVE, GFP_NOFS);
4895 btrfs_set_header_nritems(parent, nritems);
4896 if (nritems == 0 && parent == root->node) {
4897 BUG_ON(btrfs_header_level(root->node) != 1);
4898 /* just turn the root into a leaf and break */
4899 btrfs_set_header_level(root->node, 0);
4900 } else if (slot == 0) {
4901 struct btrfs_disk_key disk_key;
4903 btrfs_node_key(parent, &disk_key, 0);
4904 fixup_low_keys(root, path, &disk_key, level + 1);
4906 btrfs_mark_buffer_dirty(parent);
4910 * a helper function to delete the leaf pointed to by path->slots[1] and path->nodes[1].
4913 * This deletes the pointer in path->nodes[1] and frees the leaf
4914 * block extent.
4916 * The path must have already been set up for deleting the leaf, including
4917 * all the proper balancing. path->nodes[1] must be locked.
4919 static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
4920 struct btrfs_root *root,
4921 struct btrfs_path *path,
4922 struct extent_buffer *leaf)
4924 WARN_ON(btrfs_header_generation(leaf) != trans->transid);
4925 del_ptr(root, path, 1, path->slots[1]);
4928 * btrfs_free_extent is expensive, we want to make sure we
4929 * aren't holding any locks when we call it
4931 btrfs_unlock_up_safe(path, 0);
4933 root_sub_used(root, leaf->len);
4935 extent_buffer_get(leaf);
4936 btrfs_free_tree_block(trans, root, leaf, 0, 1);
4937 free_extent_buffer_stale(leaf);
4940 * delete the item at the leaf level in path. If that empties
4941 * the leaf, remove it from the tree
4943 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4944 struct btrfs_path *path, int slot, int nr)
4946 struct extent_buffer *leaf;
4947 struct btrfs_item *item;
4954 struct btrfs_map_token token;
4956 btrfs_init_map_token(&token);
4958 leaf = path->nodes[0];
4959 last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
4961 for (i = 0; i < nr; i++)
4962 dsize += btrfs_item_size_nr(leaf, slot + i);
4964 nritems = btrfs_header_nritems(leaf);
4966 if (slot + nr != nritems) {
4967 int data_end = leaf_data_end(root, leaf);
4969 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4971 btrfs_leaf_data(leaf) + data_end,
4972 last_off - data_end);
4974 for (i = slot + nr; i < nritems; i++) {
4977 item = btrfs_item_nr(i);
4978 ioff = btrfs_token_item_offset(leaf, item, &token);
4979 btrfs_set_token_item_offset(leaf, item,
4980 ioff + dsize, &token);
4983 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
4984 btrfs_item_nr_offset(slot + nr),
4985 sizeof(struct btrfs_item) *
4986 (nritems - slot - nr));
4988 btrfs_set_header_nritems(leaf, nritems - nr);
4991 /* delete the leaf if we've emptied it */
4993 if (leaf == root->node) {
4994 btrfs_set_header_level(leaf, 0);
4996 btrfs_set_path_blocking(path);
4997 clean_tree_block(trans, root, leaf);
4998 btrfs_del_leaf(trans, root, path, leaf);
5001 int used = leaf_space_used(leaf, 0, nritems);
5003 struct btrfs_disk_key disk_key;
5005 btrfs_item_key(leaf, &disk_key, 0);
5006 fixup_low_keys(root, path, &disk_key, 1);
5009 /* delete the leaf if it is mostly empty */
5010 if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) {
5011 /* push_leaf_left fixes the path.
5012 * make sure the path still points to our leaf
5013 * for a possible call to del_ptr below
5015 slot = path->slots[1];
5016 extent_buffer_get(leaf);
5018 btrfs_set_path_blocking(path);
5019 wret = push_leaf_left(trans, root, path, 1, 1,
5021 if (wret < 0 && wret != -ENOSPC)
5024 if (path->nodes[0] == leaf &&
5025 btrfs_header_nritems(leaf)) {
5026 wret = push_leaf_right(trans, root, path, 1,
5028 if (wret < 0 && wret != -ENOSPC)
5032 if (btrfs_header_nritems(leaf) == 0) {
5033 path->slots[1] = slot;
5034 btrfs_del_leaf(trans, root, path, leaf);
5035 free_extent_buffer(leaf);
5038 /* if we're still in the path, make sure
5039 * we're dirty. Otherwise, one of the
5040 * push_leaf functions must have already
5041 * dirtied this buffer
5043 if (path->nodes[0] == leaf)
5044 btrfs_mark_buffer_dirty(leaf);
5045 free_extent_buffer(leaf);
5048 btrfs_mark_buffer_dirty(leaf);
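/*
 * Illustrative sketch, not from the original file: find a key and remove
 * its item through btrfs_del_item(), the nr == 1 wrapper around
 * btrfs_del_items() above.  The helper name is hypothetical.
 */
static int __maybe_unused example_delete_one_item(struct btrfs_trans_handle *trans,
						  struct btrfs_root *root,
						  struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* ins_len=-1 asks the search to prepare the leaf for a deletion, cow=1 */
	ret = btrfs_search_slot(trans, root, key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;	/* exact key not present */
	if (ret == 0)
		ret = btrfs_del_item(trans, root, path);

	btrfs_free_path(path);
	return ret;
}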
5055 * search the tree again to find a leaf with lesser keys
5056 * returns 0 if it found something or 1 if there are no lesser leaves.
5057 * returns < 0 on io errors.
5059 * This may release the path, and so you may lose any locks held at the time you call it.
5062 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
5064 struct btrfs_key key;
5065 struct btrfs_disk_key found_key;
5068 btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
5070 if (key.offset > 0) {
5072 } else if (key.type > 0) {
5074 key.offset = (u64)-1;
5075 } else if (key.objectid > 0) {
5078 key.offset = (u64)-1;
5083 btrfs_release_path(path);
5084 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5087 btrfs_item_key(path->nodes[0], &found_key, 0);
5088 ret = comp_keys(&found_key, &key);
5090 * We might have had an item with the previous key in the tree right
5091 * before we released our path. And after we released our path, that
5092 * item might have been pushed to the first slot (0) of the leaf we
5093 * were holding due to a tree balance. Alternatively, an item with the
5094 * previous key can exist as the only element of a leaf (big fat item).
5095 * Therefore account for these 2 cases, so that our callers (like
5096 * btrfs_previous_item) don't miss an existing item with a key matching
5097 * the previous key we computed above.
5105 * A helper function to walk down the tree starting at min_key, and looking
5106 * for nodes or leaves that have a minimum transaction id.
5107 * This is used by the btree defrag code, and tree logging
5109 * This does not cow, but it does stuff the starting key it finds back
5110 * into min_key, so you can call btrfs_search_slot with cow=1 on the
5111 * key and get a writable path.
5113 * This does lock as it descends, and path->keep_locks should be set
5114 * to 1 by the caller.
5116 * This honors path->lowest_level to prevent descent past a given level
5119 * min_trans indicates the oldest transaction that you are interested
5120 * in walking through. Any nodes or leaves older than min_trans are
5121 * skipped over (without reading them).
5123 * returns zero if something useful was found, < 0 on error and 1 if there
5124 * was nothing in the tree that matched the search criteria.
5126 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
5127 struct btrfs_path *path,
5130 struct extent_buffer *cur;
5131 struct btrfs_key found_key;
5137 int keep_locks = path->keep_locks;
5139 path->keep_locks = 1;
5141 cur = btrfs_read_lock_root_node(root);
5142 level = btrfs_header_level(cur);
5143 WARN_ON(path->nodes[level]);
5144 path->nodes[level] = cur;
5145 path->locks[level] = BTRFS_READ_LOCK;
5147 if (btrfs_header_generation(cur) < min_trans) {
5152 nritems = btrfs_header_nritems(cur);
5153 level = btrfs_header_level(cur);
5154 sret = bin_search(cur, min_key, level, &slot);
5156 /* at the lowest level, we're done; set up the path and exit */
5157 if (level == path->lowest_level) {
5158 if (slot >= nritems)
5161 path->slots[level] = slot;
5162 btrfs_item_key_to_cpu(cur, &found_key, slot);
5165 if (sret && slot > 0)
5168 * check this node pointer against the min_trans parameter.
5169 * If it is too old, skip to the next one.
5171 while (slot < nritems) {
5174 gen = btrfs_node_ptr_generation(cur, slot);
5175 if (gen < min_trans) {
5183 * we didn't find a candidate key in this node, walk forward
5184 * and find another one
5186 if (slot >= nritems) {
5187 path->slots[level] = slot;
5188 btrfs_set_path_blocking(path);
5189 sret = btrfs_find_next_key(root, path, min_key, level,
5192 btrfs_release_path(path);
5198 /* save our key for returning to the caller */
5199 btrfs_node_key_to_cpu(cur, &found_key, slot);
5200 path->slots[level] = slot;
5201 if (level == path->lowest_level) {
5205 btrfs_set_path_blocking(path);
5206 cur = read_node_slot(root, cur, slot);
5207 BUG_ON(!cur); /* -ENOMEM */
5209 btrfs_tree_read_lock(cur);
5211 path->locks[level - 1] = BTRFS_READ_LOCK;
5212 path->nodes[level - 1] = cur;
5213 unlock_up(path, level, 1, 0, NULL);
5214 btrfs_clear_path_blocking(path, NULL, 0);
5217 path->keep_locks = keep_locks;
5219 btrfs_unlock_up_safe(path, path->lowest_level + 1);
5220 btrfs_set_path_blocking(path);
5221 memcpy(min_key, &found_key, sizeof(found_key));
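/*
 * Illustrative sketch, not from the original file: walk every item that
 * sits in a block newer than min_trans, in the spirit of the defrag and
 * tree-log users of btrfs_search_forward().  Bumping min_key by only
 * incrementing the offset is a simplification; a real walker must handle
 * offset/type/objectid wrap-around.  The helper name is hypothetical.
 */
static int __maybe_unused example_walk_newer_items(struct btrfs_root *root,
						   u64 min_trans)
{
	struct btrfs_path *path;
	struct btrfs_key min_key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	min_key.objectid = 0;
	min_key.type = 0;
	min_key.offset = 0;

	while (1) {
		ret = btrfs_search_forward(root, &min_key, path, min_trans);
		if (ret != 0)
			break;	/* < 0 is an error, 1 means nothing newer left */

		/*
		 * min_key now holds the key of the item found at
		 * path->nodes[0], path->slots[0]; process it here.
		 */

		btrfs_release_path(path);
		if (min_key.offset == (u64)-1)
			break;	/* simplification, see comment above */
		min_key.offset++;
	}
	if (ret == 1)
		ret = 0;

	btrfs_free_path(path);
	return ret;
}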
5226 static void tree_move_down(struct btrfs_root *root,
5227 struct btrfs_path *path,
5228 int *level, int root_level)
5230 BUG_ON(*level == 0);
5231 path->nodes[*level - 1] = read_node_slot(root, path->nodes[*level],
5232 path->slots[*level]);
5233 path->slots[*level - 1] = 0;
5237 static int tree_move_next_or_upnext(struct btrfs_root *root,
5238 struct btrfs_path *path,
5239 int *level, int root_level)
5243 nritems = btrfs_header_nritems(path->nodes[*level]);
5245 path->slots[*level]++;
5247 while (path->slots[*level] >= nritems) {
5248 if (*level == root_level)
5252 path->slots[*level] = 0;
5253 free_extent_buffer(path->nodes[*level]);
5254 path->nodes[*level] = NULL;
5256 path->slots[*level]++;
5258 nritems = btrfs_header_nritems(path->nodes[*level]);
5265 * Returns 1 if it had to move up and next. 0 is returned if it moved only next
5268 static int tree_advance(struct btrfs_root *root,
5269 struct btrfs_path *path,
5270 int *level, int root_level,
5272 struct btrfs_key *key)
5276 if (*level == 0 || !allow_down) {
5277 ret = tree_move_next_or_upnext(root, path, level, root_level);
5279 tree_move_down(root, path, level, root_level);
5284 btrfs_item_key_to_cpu(path->nodes[*level], key,
5285 path->slots[*level]);
5287 btrfs_node_key_to_cpu(path->nodes[*level], key,
5288 path->slots[*level]);
5293 static int tree_compare_item(struct btrfs_root *left_root,
5294 struct btrfs_path *left_path,
5295 struct btrfs_path *right_path,
5300 unsigned long off1, off2;
5302 len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]);
5303 len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]);
5307 off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]);
5308 off2 = btrfs_item_ptr_offset(right_path->nodes[0],
5309 right_path->slots[0]);
5311 read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1);
5313 cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1);
5320 #define ADVANCE_ONLY_NEXT -1
5323 * This function compares two trees and calls the provided callback for
5324 * every changed/new/deleted item it finds.
5325 * If shared tree blocks are encountered, whole subtrees are skipped, making
5326 * the compare pretty fast on snapshotted subvolumes.
5328 * This currently works on commit roots only. As commit roots are read only,
5329 * we don't do any locking. The commit roots are protected with transactions.
5330 * Transactions are ended and rejoined when a commit is tried in between.
5332 * This function checks for modifications done to the trees while comparing.
5333 * If it detects a change, it aborts immediately.
5335 int btrfs_compare_trees(struct btrfs_root *left_root,
5336 struct btrfs_root *right_root,
5337 btrfs_changed_cb_t changed_cb, void *ctx)
5341 struct btrfs_path *left_path = NULL;
5342 struct btrfs_path *right_path = NULL;
5343 struct btrfs_key left_key;
5344 struct btrfs_key right_key;
5345 char *tmp_buf = NULL;
5346 int left_root_level;
5347 int right_root_level;
5350 int left_end_reached;
5351 int right_end_reached;
5359 left_path = btrfs_alloc_path();
5364 right_path = btrfs_alloc_path();
5370 tmp_buf = kmalloc(left_root->nodesize, GFP_NOFS);
5376 left_path->search_commit_root = 1;
5377 left_path->skip_locking = 1;
5378 right_path->search_commit_root = 1;
5379 right_path->skip_locking = 1;
5382 * Strategy: Go to the first items of both trees. Then do
5384 * If both trees are at level 0
5385 * Compare keys of current items
5386 * If left < right treat left item as new, advance left tree
5388 * If left > right treat right item as deleted, advance right tree
5390 * If left == right do deep compare of items, treat as changed if
5391 * needed, advance both trees and repeat
5392 * If both trees are at the same level but not at level 0
5393 * Compare keys of current nodes/leaves
5394 * If left < right advance left tree and repeat
5395 * If left > right advance right tree and repeat
5396 * If left == right compare blockptrs of the next nodes/leaves
5397 * If they match advance both trees but stay at the same level
5399 * If they don't match, advance both trees while allowing them to go deeper
5401 * If tree levels are different
5402 * Advance the tree that needs it and repeat
5404 * Advancing a tree means:
5405 * If we are at level 0, try to go to the next slot. If that's not
5406 * possible, go one level up and repeat. Stop when we found a level
5407 * where we could go to the next slot. We may at this point be on a
5410 * If we are not at level 0 and not on shared tree blocks, go one level up
5413 * If we are not at level 0 and on shared tree blocks, go one slot to
5414 * the right if possible or go up and right.
5417 down_read(&left_root->fs_info->commit_root_sem);
5418 left_level = btrfs_header_level(left_root->commit_root);
5419 left_root_level = left_level;
5420 left_path->nodes[left_level] = left_root->commit_root;
5421 extent_buffer_get(left_path->nodes[left_level]);
5423 right_level = btrfs_header_level(right_root->commit_root);
5424 right_root_level = right_level;
5425 right_path->nodes[right_level] = right_root->commit_root;
5426 extent_buffer_get(right_path->nodes[right_level]);
5427 up_read(&left_root->fs_info->commit_root_sem);
5429 if (left_level == 0)
5430 btrfs_item_key_to_cpu(left_path->nodes[left_level],
5431 &left_key, left_path->slots[left_level]);
5433 btrfs_node_key_to_cpu(left_path->nodes[left_level],
5434 &left_key, left_path->slots[left_level]);
5435 if (right_level == 0)
5436 btrfs_item_key_to_cpu(right_path->nodes[right_level],
5437 &right_key, right_path->slots[right_level]);
5439 btrfs_node_key_to_cpu(right_path->nodes[right_level],
5440 &right_key, right_path->slots[right_level]);
5442 left_end_reached = right_end_reached = 0;
5443 advance_left = advance_right = 0;
5446 if (advance_left && !left_end_reached) {
5447 ret = tree_advance(left_root, left_path, &left_level,
5449 advance_left != ADVANCE_ONLY_NEXT,
5452 left_end_reached = ADVANCE;
5455 if (advance_right && !right_end_reached) {
5456 ret = tree_advance(right_root, right_path, &right_level,
5458 advance_right != ADVANCE_ONLY_NEXT,
5461 right_end_reached = ADVANCE;
5465 if (left_end_reached && right_end_reached) {
5468 } else if (left_end_reached) {
5469 if (right_level == 0) {
5470 ret = changed_cb(left_root, right_root,
5471 left_path, right_path,
5473 BTRFS_COMPARE_TREE_DELETED,
5478 advance_right = ADVANCE;
5480 } else if (right_end_reached) {
5481 if (left_level == 0) {
5482 ret = changed_cb(left_root, right_root,
5483 left_path, right_path,
5485 BTRFS_COMPARE_TREE_NEW,
5490 advance_left = ADVANCE;
5494 if (left_level == 0 && right_level == 0) {
5495 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5497 ret = changed_cb(left_root, right_root,
5498 left_path, right_path,
5500 BTRFS_COMPARE_TREE_NEW,
5504 advance_left = ADVANCE;
5505 } else if (cmp > 0) {
5506 ret = changed_cb(left_root, right_root,
5507 left_path, right_path,
5509 BTRFS_COMPARE_TREE_DELETED,
5513 advance_right = ADVANCE;
5515 enum btrfs_compare_tree_result cmp;
5517 WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
5518 ret = tree_compare_item(left_root, left_path,
5519 right_path, tmp_buf);
5521 cmp = BTRFS_COMPARE_TREE_CHANGED;
5523 cmp = BTRFS_COMPARE_TREE_SAME;
5524 ret = changed_cb(left_root, right_root,
5525 left_path, right_path,
5526 &left_key, cmp, ctx);
5529 advance_left = ADVANCE;
5530 advance_right = ADVANCE;
5532 } else if (left_level == right_level) {
5533 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5535 advance_left = ADVANCE;
5536 } else if (cmp > 0) {
5537 advance_right = ADVANCE;
5539 left_blockptr = btrfs_node_blockptr(
5540 left_path->nodes[left_level],
5541 left_path->slots[left_level]);
5542 right_blockptr = btrfs_node_blockptr(
5543 right_path->nodes[right_level],
5544 right_path->slots[right_level]);
5545 left_gen = btrfs_node_ptr_generation(
5546 left_path->nodes[left_level],
5547 left_path->slots[left_level]);
5548 right_gen = btrfs_node_ptr_generation(
5549 right_path->nodes[right_level],
5550 right_path->slots[right_level]);
5551 if (left_blockptr == right_blockptr &&
5552 left_gen == right_gen) {
5554 * As we're on a shared block, don't
5555 * go any deeper.
5557 advance_left = ADVANCE_ONLY_NEXT;
5558 advance_right = ADVANCE_ONLY_NEXT;
5560 advance_left = ADVANCE;
5561 advance_right = ADVANCE;
5564 } else if (left_level < right_level) {
5565 advance_right = ADVANCE;
5567 advance_left = ADVANCE;
5572 btrfs_free_path(left_path);
5573 btrfs_free_path(right_path);
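/*
 * Illustrative sketch, not from the original file: a minimal changed_cb
 * that only counts differences between two snapshots, plus the call that
 * drives it.  The callback arguments mirror the changed_cb() invocations
 * in btrfs_compare_trees() above; the struct and helper names are
 * hypothetical.
 */
struct example_diff_stats {
	u64 new_items;
	u64 deleted_items;
	u64 changed_items;
};

static int __maybe_unused example_changed_cb(struct btrfs_root *left_root,
					     struct btrfs_root *right_root,
					     struct btrfs_path *left_path,
					     struct btrfs_path *right_path,
					     struct btrfs_key *key,
					     enum btrfs_compare_tree_result result,
					     void *ctx)
{
	struct example_diff_stats *stats = ctx;

	switch (result) {
	case BTRFS_COMPARE_TREE_NEW:
		stats->new_items++;
		break;
	case BTRFS_COMPARE_TREE_DELETED:
		stats->deleted_items++;
		break;
	case BTRFS_COMPARE_TREE_CHANGED:
		stats->changed_items++;
		break;
	case BTRFS_COMPARE_TREE_SAME:
		break;
	}
	return 0;	/* returning non-zero aborts the compare */
}

static int __maybe_unused example_diff_snapshots(struct btrfs_root *parent_root,
						 struct btrfs_root *snap_root,
						 struct example_diff_stats *stats)
{
	/* both trees are walked at their commit roots, so no locking is taken */
	return btrfs_compare_trees(parent_root, snap_root,
				   example_changed_cb, stats);
}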
5579 * this is similar to btrfs_next_leaf, but does not try to preserve
5580 * and fixup the path. It looks for and returns the next key in the
5581 * tree based on the current path and the min_trans parameters.
5583 * 0 is returned if another key is found, < 0 if there are any errors
5584 * and 1 is returned if there are no higher keys in the tree
5586 * path->keep_locks should be set to 1 on the search made before
5587 * calling this function.
5589 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
5590 struct btrfs_key *key, int level, u64 min_trans)
5593 struct extent_buffer *c;
5595 WARN_ON(!path->keep_locks);
5596 while (level < BTRFS_MAX_LEVEL) {
5597 if (!path->nodes[level])
5600 slot = path->slots[level] + 1;
5601 c = path->nodes[level];
5603 if (slot >= btrfs_header_nritems(c)) {
5606 struct btrfs_key cur_key;
5607 if (level + 1 >= BTRFS_MAX_LEVEL ||
5608 !path->nodes[level + 1])
5611 if (path->locks[level + 1]) {
5616 slot = btrfs_header_nritems(c) - 1;
5618 btrfs_item_key_to_cpu(c, &cur_key, slot);
5620 btrfs_node_key_to_cpu(c, &cur_key, slot);
5622 orig_lowest = path->lowest_level;
5623 btrfs_release_path(path);
5624 path->lowest_level = level;
5625 ret = btrfs_search_slot(NULL, root, &cur_key, path,
5627 path->lowest_level = orig_lowest;
5631 c = path->nodes[level];
5632 slot = path->slots[level];
5639 btrfs_item_key_to_cpu(c, key, slot);
5641 u64 gen = btrfs_node_ptr_generation(c, slot);
5643 if (gen < min_trans) {
5647 btrfs_node_key_to_cpu(c, key, slot);
5655 * search the tree again to find a leaf with greater keys
5656 * returns 0 if it found something or 1 if there are no greater leaves.
5657 * returns < 0 on io errors.
5659 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
5661 return btrfs_next_old_leaf(root, path, 0);
5664 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
5669 struct extent_buffer *c;
5670 struct extent_buffer *next;
5671 struct btrfs_key key;
5674 int old_spinning = path->leave_spinning;
5675 int next_rw_lock = 0;
5677 nritems = btrfs_header_nritems(path->nodes[0]);
5681 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
5686 btrfs_release_path(path);
5688 path->keep_locks = 1;
5689 path->leave_spinning = 1;
5692 ret = btrfs_search_old_slot(root, &key, path, time_seq);
5694 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5695 path->keep_locks = 0;
5700 nritems = btrfs_header_nritems(path->nodes[0]);
5702 * by releasing the path above we dropped all our locks. A balance
5703 * could have added more items next to the key that used to be
5704 * at the very end of the block. So, check again here and
5705 * advance the path if there are now more items available.
5707 if (nritems > 0 && path->slots[0] < nritems - 1) {
5714 * So the above check misses one case:
5715 * - after releasing the path above, someone has removed the item that
5716 * used to be at the very end of the block, and balance between leaves
5717 * gets another one with bigger key.offset to replace it.
5719 * This one should be returned as well, or we can get leaf corruption
5720 * later (esp. in __btrfs_drop_extents()).
5722 * And a bit more explanation about this check,
5723 * with ret > 0, the key isn't found, the path points to the slot
5724 * where it should be inserted, so the path->slots[0] item must be the bigger one.
5727 if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) {
5732 while (level < BTRFS_MAX_LEVEL) {
5733 if (!path->nodes[level]) {
5738 slot = path->slots[level] + 1;
5739 c = path->nodes[level];
5740 if (slot >= btrfs_header_nritems(c)) {
5742 if (level == BTRFS_MAX_LEVEL) {
5750 btrfs_tree_unlock_rw(next, next_rw_lock);
5751 free_extent_buffer(next);
5755 next_rw_lock = path->locks[level];
5756 ret = read_block_for_search(NULL, root, path, &next, level,
5762 btrfs_release_path(path);
5766 if (!path->skip_locking) {
5767 ret = btrfs_try_tree_read_lock(next);
5768 if (!ret && time_seq) {
5770 * If we don't get the lock, we may be racing
5771 * with push_leaf_left, holding that lock while
5772 * itself waiting for the leaf we've currently
5773 * locked. To solve this situation, we give up
5774 * on our lock and cycle.
5776 free_extent_buffer(next);
5777 btrfs_release_path(path);
5782 btrfs_set_path_blocking(path);
5783 btrfs_tree_read_lock(next);
5784 btrfs_clear_path_blocking(path, next,
5787 next_rw_lock = BTRFS_READ_LOCK;
5791 path->slots[level] = slot;
5794 c = path->nodes[level];
5795 if (path->locks[level])
5796 btrfs_tree_unlock_rw(c, path->locks[level]);
5798 free_extent_buffer(c);
5799 path->nodes[level] = next;
5800 path->slots[level] = 0;
5801 if (!path->skip_locking)
5802 path->locks[level] = next_rw_lock;
5806 ret = read_block_for_search(NULL, root, path, &next, level,
5812 btrfs_release_path(path);
5816 if (!path->skip_locking) {
5817 ret = btrfs_try_tree_read_lock(next);
5819 btrfs_set_path_blocking(path);
5820 btrfs_tree_read_lock(next);
5821 btrfs_clear_path_blocking(path, next,
5824 next_rw_lock = BTRFS_READ_LOCK;
5829 unlock_up(path, 0, 1, 0, NULL);
5830 path->leave_spinning = old_spinning;
5832 btrfs_set_path_blocking(path);
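/*
 * Illustrative sketch, not from the original file: the usual pattern for
 * visiting every item of a tree, moving to the next leaf with
 * btrfs_next_leaf() whenever the current leaf runs out of slots.  The
 * helper name is hypothetical.
 */
static int __maybe_unused example_iterate_all_items(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* start from the smallest possible key */
	key.objectid = 0;
	key.type = 0;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			if (ret > 0)
				break;	/* no more leaves */
			continue;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		/* process the item described by found_key here */

		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}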
5838 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
5839 * searching until it gets past min_objectid or finds an item of 'type'
5841 * returns 0 if something is found, 1 if nothing was found and < 0 on error
5843 int btrfs_previous_item(struct btrfs_root *root,
5844 struct btrfs_path *path, u64 min_objectid,
5847 struct btrfs_key found_key;
5848 struct extent_buffer *leaf;
5853 if (path->slots[0] == 0) {
5854 btrfs_set_path_blocking(path);
5855 ret = btrfs_prev_leaf(root, path);
5861 leaf = path->nodes[0];
5862 nritems = btrfs_header_nritems(leaf);
5865 if (path->slots[0] == nritems)
5868 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5869 if (found_key.objectid < min_objectid)
5871 if (found_key.type == type)
5873 if (found_key.objectid == min_objectid &&
5874 found_key.type < type)
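/*
 * Illustrative sketch, not from the original file: position a path at (or
 * just past) a search key and step back to the closest earlier item of a
 * wanted type with btrfs_previous_item().  The helper name is
 * hypothetical; type would be one of the BTRFS_*_KEY values.
 */
static int __maybe_unused example_find_previous_of_type(struct btrfs_root *root,
							struct btrfs_key *search_key,
							int type,
							struct btrfs_key *found_key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* read-only search; ret > 0 (exact key not found) is fine here */
	ret = btrfs_search_slot(NULL, root, search_key, path, 0, 0);
	if (ret < 0)
		goto out;

	/* returns 0 if an item of @type is found before objectid drops below the minimum */
	ret = btrfs_previous_item(root, path, search_key->objectid, type);
	if (ret == 0)
		btrfs_item_key_to_cpu(path->nodes[0], found_key,
				      path->slots[0]);
out:
	btrfs_free_path(path);
	return ret;
}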
5881 * search in extent tree to find a previous Metadata/Data extent item with
5884 * returns 0 if something is found, 1 if nothing was found and < 0 on error
5886 int btrfs_previous_extent_item(struct btrfs_root *root,
5887 struct btrfs_path *path, u64 min_objectid)
5889 struct btrfs_key found_key;
5890 struct extent_buffer *leaf;
5895 if (path->slots[0] == 0) {
5896 btrfs_set_path_blocking(path);
5897 ret = btrfs_prev_leaf(root, path);
5903 leaf = path->nodes[0];
5904 nritems = btrfs_header_nritems(leaf);
5907 if (path->slots[0] == nritems)
5910 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5911 if (found_key.objectid < min_objectid)
5913 if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
5914 found_key.type == BTRFS_METADATA_ITEM_KEY)
5916 if (found_key.objectid == min_objectid &&
5917 found_key.type < BTRFS_EXTENT_ITEM_KEY)