2 * Copyright (C) 2007,2008 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
19 #include <linux/sched.h>
20 #include <linux/slab.h>
21 #include <linux/rbtree.h>
24 #include "transaction.h"
25 #include "print-tree.h"
28 static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
29 *root, struct btrfs_path *path, int level);
30 static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
31 *root, struct btrfs_key *ins_key,
32 struct btrfs_path *path, int data_size, int extend);
33 static int push_node_left(struct btrfs_trans_handle *trans,
34 struct btrfs_root *root, struct extent_buffer *dst,
35 struct extent_buffer *src, int empty);
36 static int balance_node_right(struct btrfs_trans_handle *trans,
37 struct btrfs_root *root,
38 struct extent_buffer *dst_buf,
39 struct extent_buffer *src_buf);
40 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
42 static int tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
43 struct extent_buffer *eb);
45 struct btrfs_path *btrfs_alloc_path(void)
47 struct btrfs_path *path;
48 path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
53 * set all locked nodes in the path to blocking locks. This should
54 * be done before scheduling
56 noinline void btrfs_set_path_blocking(struct btrfs_path *p)
59 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
60 if (!p->nodes[i] || !p->locks[i])
62 btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
63 if (p->locks[i] == BTRFS_READ_LOCK)
64 p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
65 else if (p->locks[i] == BTRFS_WRITE_LOCK)
66 p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
71 * reset all the locked nodes in the path to spinning locks.
73 * held is used to keep lockdep happy: when lockdep is enabled,
74 * we set held to a blocking lock before we go around and
75 * retake all the spinlocks in the path. You can safely use NULL
78 noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
79 struct extent_buffer *held, int held_rw)
84 btrfs_set_lock_blocking_rw(held, held_rw);
85 if (held_rw == BTRFS_WRITE_LOCK)
86 held_rw = BTRFS_WRITE_LOCK_BLOCKING;
87 else if (held_rw == BTRFS_READ_LOCK)
88 held_rw = BTRFS_READ_LOCK_BLOCKING;
90 btrfs_set_path_blocking(p);
92 for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
93 if (p->nodes[i] && p->locks[i]) {
94 btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
95 if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
96 p->locks[i] = BTRFS_WRITE_LOCK;
97 else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
98 p->locks[i] = BTRFS_READ_LOCK;
103 btrfs_clear_lock_blocking_rw(held, held_rw);
106 /* this also releases the path */
107 void btrfs_free_path(struct btrfs_path *p)
111 btrfs_release_path(p);
112 kmem_cache_free(btrfs_path_cachep, p);
116 * path release drops references on the extent buffers in the path
117 * and it drops any locks held by this path
119 * It is safe to call this on paths that have no locks or extent buffers held.
121 noinline void btrfs_release_path(struct btrfs_path *p)
125 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
130 btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
133 free_extent_buffer(p->nodes[i]);
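/*
 * Illustrative usage sketch (not part of the original file): the path
 * helpers above are typically paired like this, assuming a valid @root
 * and @key:
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	...examine path->nodes[0] and path->slots[0]...
 *	btrfs_release_path(path);	(drops refs and locks, path is reusable)
 *	btrfs_free_path(path);		(releases and then frees the path)
 */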
139 * safely gets a reference on the root node of a tree. A lock
140 * is not taken, so a concurrent writer may put a different node
141 * at the root of the tree. See btrfs_lock_root_node for the
144 * The extent buffer returned by this has a reference taken, so
145 * it won't disappear. It may stop being the root of the tree
146 * at any time because there are no locks held.
148 struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
150 struct extent_buffer *eb;
154 eb = rcu_dereference(root->node);
157 * RCU really hurts here: we could free up the root node because
158 * it was COWed but we may not get the new root node yet, so do
159 * the inc_not_zero dance, and if it doesn't work then
160 * synchronize_rcu and try again.
162 if (atomic_inc_not_zero(&eb->refs)) {
172 /* loop around taking references on and locking the root node of the
173 * tree until you end up with a lock on the root. A locked buffer
174 * is returned, with a reference held.
176 struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
178 struct extent_buffer *eb;
181 eb = btrfs_root_node(root);
183 if (eb == root->node)
185 btrfs_tree_unlock(eb);
186 free_extent_buffer(eb);
191 /* loop around taking references on and locking the root node of the
192 * tree until you end up with a lock on the root. A locked buffer
193 * is returned, with a reference held.
195 static struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
197 struct extent_buffer *eb;
200 eb = btrfs_root_node(root);
201 btrfs_tree_read_lock(eb);
202 if (eb == root->node)
204 btrfs_tree_read_unlock(eb);
205 free_extent_buffer(eb);
210 /* cowonly roots (everything not a reference counted cow subvolume) just get
211 * put onto a simple dirty list. transaction.c walks this to make sure they
212 * get properly updated on disk.
214 static void add_root_to_dirty_list(struct btrfs_root *root)
216 if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
217 !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
220 spin_lock(&root->fs_info->trans_lock);
221 if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
222 /* Want the extent tree to be the last on the list */
223 if (root->objectid == BTRFS_EXTENT_TREE_OBJECTID)
224 list_move_tail(&root->dirty_list,
225 &root->fs_info->dirty_cowonly_roots);
227 list_move(&root->dirty_list,
228 &root->fs_info->dirty_cowonly_roots);
230 spin_unlock(&root->fs_info->trans_lock);
234 * used by snapshot creation to make a copy of a root for a tree with
235 * a given objectid. The buffer with the new root node is returned in
236 * cow_ret, and this func returns zero on success or a negative error code.
238 int btrfs_copy_root(struct btrfs_trans_handle *trans,
239 struct btrfs_root *root,
240 struct extent_buffer *buf,
241 struct extent_buffer **cow_ret, u64 new_root_objectid)
243 struct extent_buffer *cow;
246 struct btrfs_disk_key disk_key;
248 WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
249 trans->transid != root->fs_info->running_transaction->transid);
250 WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
251 trans->transid != root->last_trans);
253 level = btrfs_header_level(buf);
255 btrfs_item_key(buf, &disk_key, 0);
257 btrfs_node_key(buf, &disk_key, 0);
259 cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
260 &disk_key, level, buf->start, 0);
264 copy_extent_buffer(cow, buf, 0, 0, cow->len);
265 btrfs_set_header_bytenr(cow, cow->start);
266 btrfs_set_header_generation(cow, trans->transid);
267 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
268 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
269 BTRFS_HEADER_FLAG_RELOC);
270 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
271 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
273 btrfs_set_header_owner(cow, new_root_objectid);
275 write_extent_buffer(cow, root->fs_info->fsid, btrfs_header_fsid(),
278 WARN_ON(btrfs_header_generation(buf) > trans->transid);
279 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
280 ret = btrfs_inc_ref(trans, root, cow, 1);
282 ret = btrfs_inc_ref(trans, root, cow, 0);
287 btrfs_mark_buffer_dirty(cow);
296 MOD_LOG_KEY_REMOVE_WHILE_FREEING,
297 MOD_LOG_KEY_REMOVE_WHILE_MOVING,
299 MOD_LOG_ROOT_REPLACE,
302 struct tree_mod_move {
307 struct tree_mod_root {
312 struct tree_mod_elem {
314 u64 index; /* shifted logical */
318 /* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
321 /* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
324 /* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
325 struct btrfs_disk_key key;
328 /* this is used for op == MOD_LOG_MOVE_KEYS */
329 struct tree_mod_move move;
331 /* this is used for op == MOD_LOG_ROOT_REPLACE */
332 struct tree_mod_root old_root;
335 static inline void tree_mod_log_read_lock(struct btrfs_fs_info *fs_info)
337 read_lock(&fs_info->tree_mod_log_lock);
340 static inline void tree_mod_log_read_unlock(struct btrfs_fs_info *fs_info)
342 read_unlock(&fs_info->tree_mod_log_lock);
345 static inline void tree_mod_log_write_lock(struct btrfs_fs_info *fs_info)
347 write_lock(&fs_info->tree_mod_log_lock);
350 static inline void tree_mod_log_write_unlock(struct btrfs_fs_info *fs_info)
352 write_unlock(&fs_info->tree_mod_log_lock);
356 * Pull a new tree mod seq number for our operation.
358 static inline u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)
360 return atomic64_inc_return(&fs_info->tree_mod_seq);
364 * This adds a new blocker to the tree mod log's blocker list if the @elem
365 * passed does not already have a sequence number set. So when a caller expects
366 * to record tree modifications, it should ensure elem->seq is set to zero
367 * before calling btrfs_get_tree_mod_seq.
368 * Returns a fresh, unused tree log modification sequence number, even if no new
371 u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
372 struct seq_list *elem)
374 tree_mod_log_write_lock(fs_info);
375 spin_lock(&fs_info->tree_mod_seq_lock);
377 elem->seq = btrfs_inc_tree_mod_seq(fs_info);
378 list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
380 spin_unlock(&fs_info->tree_mod_seq_lock);
381 tree_mod_log_write_unlock(fs_info);
386 void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
387 struct seq_list *elem)
389 struct rb_root *tm_root;
390 struct rb_node *node;
391 struct rb_node *next;
392 struct seq_list *cur_elem;
393 struct tree_mod_elem *tm;
394 u64 min_seq = (u64)-1;
395 u64 seq_putting = elem->seq;
400 spin_lock(&fs_info->tree_mod_seq_lock);
401 list_del(&elem->list);
404 list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
405 if (cur_elem->seq < min_seq) {
406 if (seq_putting > cur_elem->seq) {
408 * blocker with lower sequence number exists, we
409 * cannot remove anything from the log
411 spin_unlock(&fs_info->tree_mod_seq_lock);
414 min_seq = cur_elem->seq;
417 spin_unlock(&fs_info->tree_mod_seq_lock);
420 * anything that's lower than the lowest existing (read: blocked)
421 * sequence number can be removed from the tree.
423 tree_mod_log_write_lock(fs_info);
424 tm_root = &fs_info->tree_mod_log;
425 for (node = rb_first(tm_root); node; node = next) {
426 next = rb_next(node);
427 tm = container_of(node, struct tree_mod_elem, node);
428 if (tm->seq > min_seq)
430 rb_erase(node, tm_root);
433 tree_mod_log_write_unlock(fs_info);
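/*
 * Illustrative pairing (a sketch, not from this file): a reader that wants
 * a stable view of past tree states blocks log pruning like this:
 *
 *	struct seq_list elem = {};
 *
 *	btrfs_get_tree_mod_seq(fs_info, &elem);
 *	...walk old tree versions using elem.seq as the time_seq value...
 *	btrfs_put_tree_mod_seq(fs_info, &elem);
 */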
437 * key order of the log:
440 * the index is the shifted logical of the *new* root node for root replace
441 * operations, or the shifted logical of the affected block for all other
444 * Note: must be called with write lock (tree_mod_log_write_lock).
447 __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
449 struct rb_root *tm_root;
450 struct rb_node **new;
451 struct rb_node *parent = NULL;
452 struct tree_mod_elem *cur;
456 tm->seq = btrfs_inc_tree_mod_seq(fs_info);
458 tm_root = &fs_info->tree_mod_log;
459 new = &tm_root->rb_node;
461 cur = container_of(*new, struct tree_mod_elem, node);
463 if (cur->index < tm->index)
464 new = &((*new)->rb_left);
465 else if (cur->index > tm->index)
466 new = &((*new)->rb_right);
467 else if (cur->seq < tm->seq)
468 new = &((*new)->rb_left);
469 else if (cur->seq > tm->seq)
470 new = &((*new)->rb_right);
475 rb_link_node(&tm->node, parent, new);
476 rb_insert_color(&tm->node, tm_root);
481 * Determines if logging can be omitted. Returns 1 if it can. Otherwise, it
482 * returns zero with the tree_mod_log_lock acquired. The caller must hold
483 * this until all tree mod log insertions are recorded in the rb tree and then
484 * call tree_mod_log_write_unlock() to release.
486 static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
487 struct extent_buffer *eb) {
489 if (list_empty(&(fs_info)->tree_mod_seq_list))
491 if (eb && btrfs_header_level(eb) == 0)
494 tree_mod_log_write_lock(fs_info);
495 if (list_empty(&(fs_info)->tree_mod_seq_list)) {
496 tree_mod_log_write_unlock(fs_info);
503 /* Similar to tree_mod_dont_log, but doesn't acquire any locks. */
504 static inline int tree_mod_need_log(const struct btrfs_fs_info *fs_info,
505 struct extent_buffer *eb)
508 if (list_empty(&(fs_info)->tree_mod_seq_list))
510 if (eb && btrfs_header_level(eb) == 0)
516 static struct tree_mod_elem *
517 alloc_tree_mod_elem(struct extent_buffer *eb, int slot,
518 enum mod_log_op op, gfp_t flags)
520 struct tree_mod_elem *tm;
522 tm = kzalloc(sizeof(*tm), flags);
526 tm->index = eb->start >> PAGE_CACHE_SHIFT;
527 if (op != MOD_LOG_KEY_ADD) {
528 btrfs_node_key(eb, &tm->key, slot);
529 tm->blockptr = btrfs_node_blockptr(eb, slot);
533 tm->generation = btrfs_node_ptr_generation(eb, slot);
534 RB_CLEAR_NODE(&tm->node);
540 tree_mod_log_insert_key(struct btrfs_fs_info *fs_info,
541 struct extent_buffer *eb, int slot,
542 enum mod_log_op op, gfp_t flags)
544 struct tree_mod_elem *tm;
547 if (!tree_mod_need_log(fs_info, eb))
550 tm = alloc_tree_mod_elem(eb, slot, op, flags);
554 if (tree_mod_dont_log(fs_info, eb)) {
559 ret = __tree_mod_log_insert(fs_info, tm);
560 tree_mod_log_write_unlock(fs_info);
568 tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
569 struct extent_buffer *eb, int dst_slot, int src_slot,
570 int nr_items, gfp_t flags)
572 struct tree_mod_elem *tm = NULL;
573 struct tree_mod_elem **tm_list = NULL;
578 if (!tree_mod_need_log(fs_info, eb))
581 tm_list = kcalloc(nr_items, sizeof(struct tree_mod_elem *), flags);
585 tm = kzalloc(sizeof(*tm), flags);
591 tm->index = eb->start >> PAGE_CACHE_SHIFT;
593 tm->move.dst_slot = dst_slot;
594 tm->move.nr_items = nr_items;
595 tm->op = MOD_LOG_MOVE_KEYS;
597 for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
598 tm_list[i] = alloc_tree_mod_elem(eb, i + dst_slot,
599 MOD_LOG_KEY_REMOVE_WHILE_MOVING, flags);
606 if (tree_mod_dont_log(fs_info, eb))
611 * When we overwrite something during the move, we log these removals.
612 * This can only happen when we move towards the beginning of the
613 * buffer, i.e. dst_slot < src_slot.
615 for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
616 ret = __tree_mod_log_insert(fs_info, tm_list[i]);
621 ret = __tree_mod_log_insert(fs_info, tm);
624 tree_mod_log_write_unlock(fs_info);
629 for (i = 0; i < nr_items; i++) {
630 if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
631 rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
635 tree_mod_log_write_unlock(fs_info);
643 __tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
644 struct tree_mod_elem **tm_list,
650 for (i = nritems - 1; i >= 0; i--) {
651 ret = __tree_mod_log_insert(fs_info, tm_list[i]);
653 for (j = nritems - 1; j > i; j--)
654 rb_erase(&tm_list[j]->node,
655 &fs_info->tree_mod_log);
664 tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
665 struct extent_buffer *old_root,
666 struct extent_buffer *new_root, gfp_t flags,
669 struct tree_mod_elem *tm = NULL;
670 struct tree_mod_elem **tm_list = NULL;
675 if (!tree_mod_need_log(fs_info, NULL))
678 if (log_removal && btrfs_header_level(old_root) > 0) {
679 nritems = btrfs_header_nritems(old_root);
680 tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *),
686 for (i = 0; i < nritems; i++) {
687 tm_list[i] = alloc_tree_mod_elem(old_root, i,
688 MOD_LOG_KEY_REMOVE_WHILE_FREEING, flags);
696 tm = kzalloc(sizeof(*tm), flags);
702 tm->index = new_root->start >> PAGE_CACHE_SHIFT;
703 tm->old_root.logical = old_root->start;
704 tm->old_root.level = btrfs_header_level(old_root);
705 tm->generation = btrfs_header_generation(old_root);
706 tm->op = MOD_LOG_ROOT_REPLACE;
708 if (tree_mod_dont_log(fs_info, NULL))
712 ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
714 ret = __tree_mod_log_insert(fs_info, tm);
716 tree_mod_log_write_unlock(fs_info);
725 for (i = 0; i < nritems; i++)
734 static struct tree_mod_elem *
735 __tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
738 struct rb_root *tm_root;
739 struct rb_node *node;
740 struct tree_mod_elem *cur = NULL;
741 struct tree_mod_elem *found = NULL;
742 u64 index = start >> PAGE_CACHE_SHIFT;
744 tree_mod_log_read_lock(fs_info);
745 tm_root = &fs_info->tree_mod_log;
746 node = tm_root->rb_node;
748 cur = container_of(node, struct tree_mod_elem, node);
749 if (cur->index < index) {
750 node = node->rb_left;
751 } else if (cur->index > index) {
752 node = node->rb_right;
753 } else if (cur->seq < min_seq) {
754 node = node->rb_left;
755 } else if (!smallest) {
756 /* we want the node with the highest seq */
758 BUG_ON(found->seq > cur->seq);
760 node = node->rb_left;
761 } else if (cur->seq > min_seq) {
762 /* we want the node with the smallest seq */
764 BUG_ON(found->seq < cur->seq);
766 node = node->rb_right;
772 tree_mod_log_read_unlock(fs_info);
778 * this returns the element from the log with the smallest time sequence
779 * value that's in the log (the oldest log item). any element with a time
780 * sequence lower than min_seq will be ignored.
782 static struct tree_mod_elem *
783 tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
786 return __tree_mod_log_search(fs_info, start, min_seq, 1);
790 * this returns the element from the log with the largest time sequence
791 * value that's in the log (the most recent log item). any element with
792 * a time sequence lower than min_seq will be ignored.
794 static struct tree_mod_elem *
795 tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
797 return __tree_mod_log_search(fs_info, start, min_seq, 0);
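/*
 * Illustrative example (an assumption, not from the source): if a block has
 * logged seq numbers {5, 9, 12} and min_seq is 7, tree_mod_log_search()
 * returns the seq 12 element (newest), while tree_mod_log_search_oldest()
 * returns the seq 9 element (oldest not below min_seq); the seq 5 element is
 * ignored by both.
 */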
801 tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
802 struct extent_buffer *src, unsigned long dst_offset,
803 unsigned long src_offset, int nr_items)
806 struct tree_mod_elem **tm_list = NULL;
807 struct tree_mod_elem **tm_list_add, **tm_list_rem;
811 if (!tree_mod_need_log(fs_info, NULL))
814 if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
817 tm_list = kcalloc(nr_items * 2, sizeof(struct tree_mod_elem *),
822 tm_list_add = tm_list;
823 tm_list_rem = tm_list + nr_items;
824 for (i = 0; i < nr_items; i++) {
825 tm_list_rem[i] = alloc_tree_mod_elem(src, i + src_offset,
826 MOD_LOG_KEY_REMOVE, GFP_NOFS);
827 if (!tm_list_rem[i]) {
832 tm_list_add[i] = alloc_tree_mod_elem(dst, i + dst_offset,
833 MOD_LOG_KEY_ADD, GFP_NOFS);
834 if (!tm_list_add[i]) {
840 if (tree_mod_dont_log(fs_info, NULL))
844 for (i = 0; i < nr_items; i++) {
845 ret = __tree_mod_log_insert(fs_info, tm_list_rem[i]);
848 ret = __tree_mod_log_insert(fs_info, tm_list_add[i]);
853 tree_mod_log_write_unlock(fs_info);
859 for (i = 0; i < nr_items * 2; i++) {
860 if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
861 rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
865 tree_mod_log_write_unlock(fs_info);
872 tree_mod_log_eb_move(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
873 int dst_offset, int src_offset, int nr_items)
876 ret = tree_mod_log_insert_move(fs_info, dst, dst_offset, src_offset,
882 tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info,
883 struct extent_buffer *eb, int slot, int atomic)
887 ret = tree_mod_log_insert_key(fs_info, eb, slot,
889 atomic ? GFP_ATOMIC : GFP_NOFS);
894 tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
896 struct tree_mod_elem **tm_list = NULL;
901 if (btrfs_header_level(eb) == 0)
904 if (!tree_mod_need_log(fs_info, NULL))
907 nritems = btrfs_header_nritems(eb);
908 tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *), GFP_NOFS);
912 for (i = 0; i < nritems; i++) {
913 tm_list[i] = alloc_tree_mod_elem(eb, i,
914 MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
921 if (tree_mod_dont_log(fs_info, eb))
924 ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
925 tree_mod_log_write_unlock(fs_info);
933 for (i = 0; i < nritems; i++)
941 tree_mod_log_set_root_pointer(struct btrfs_root *root,
942 struct extent_buffer *new_root_node,
946 ret = tree_mod_log_insert_root(root->fs_info, root->node,
947 new_root_node, GFP_NOFS, log_removal);
952 * check if the tree block can be shared by multiple trees
954 int btrfs_block_can_be_shared(struct btrfs_root *root,
955 struct extent_buffer *buf)
958 * Tree blocks not in reference counted trees and tree roots
959 * are never shared. If a block was allocated after the last
960 * snapshot and the block was not allocated by tree relocation,
961 * we know the block is not shared.
963 if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
964 buf != root->node && buf != root->commit_root &&
965 (btrfs_header_generation(buf) <=
966 btrfs_root_last_snapshot(&root->root_item) ||
967 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
969 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
970 if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
971 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
977 static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
978 struct btrfs_root *root,
979 struct extent_buffer *buf,
980 struct extent_buffer *cow,
990 * Backrefs update rules:
992 * Always use full backrefs for extent pointers in tree blocks
993 * allocated by tree relocation.
995 * If a shared tree block is no longer referenced by its owner
996 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
997 * use full backrefs for extent pointers in tree block.
999 * If a tree block is being relocated
1000 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
1001 * use full backrefs for extent pointers in tree block.
1002 * The reason for this is that some operations (such as drop tree)
1003 * are only allowed on blocks that use full backrefs.
1006 if (btrfs_block_can_be_shared(root, buf)) {
1007 ret = btrfs_lookup_extent_info(trans, root, buf->start,
1008 btrfs_header_level(buf), 1,
1014 btrfs_std_error(root->fs_info, ret);
1019 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
1020 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
1021 flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
1026 owner = btrfs_header_owner(buf);
1027 BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
1028 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
1031 if ((owner == root->root_key.objectid ||
1032 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
1033 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
1034 ret = btrfs_inc_ref(trans, root, buf, 1);
1035 BUG_ON(ret); /* -ENOMEM */
1037 if (root->root_key.objectid ==
1038 BTRFS_TREE_RELOC_OBJECTID) {
1039 ret = btrfs_dec_ref(trans, root, buf, 0);
1040 BUG_ON(ret); /* -ENOMEM */
1041 ret = btrfs_inc_ref(trans, root, cow, 1);
1042 BUG_ON(ret); /* -ENOMEM */
1044 new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
1047 if (root->root_key.objectid ==
1048 BTRFS_TREE_RELOC_OBJECTID)
1049 ret = btrfs_inc_ref(trans, root, cow, 1);
1051 ret = btrfs_inc_ref(trans, root, cow, 0);
1052 BUG_ON(ret); /* -ENOMEM */
1054 if (new_flags != 0) {
1055 int level = btrfs_header_level(buf);
1057 ret = btrfs_set_disk_extent_flags(trans, root,
1060 new_flags, level, 0);
1065 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
1066 if (root->root_key.objectid ==
1067 BTRFS_TREE_RELOC_OBJECTID)
1068 ret = btrfs_inc_ref(trans, root, cow, 1);
1070 ret = btrfs_inc_ref(trans, root, cow, 0);
1071 BUG_ON(ret); /* -ENOMEM */
1072 ret = btrfs_dec_ref(trans, root, buf, 1);
1073 BUG_ON(ret); /* -ENOMEM */
1075 clean_tree_block(trans, root->fs_info, buf);
1082 * does the dirty work in cow of a single block. The parent block (if
1083 * supplied) is updated to point to the new cow copy. The new buffer is marked
1084 * dirty and returned locked. If you modify the block it needs to be marked
1087 * search_start -- an allocation hint for the new block
1089 * empty_size -- a hint that you plan on doing more cow. This is the size in
1090 * bytes the allocator should try to find free next to the block it returns.
1091 * This is just a hint and may be ignored by the allocator.
1093 static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
1094 struct btrfs_root *root,
1095 struct extent_buffer *buf,
1096 struct extent_buffer *parent, int parent_slot,
1097 struct extent_buffer **cow_ret,
1098 u64 search_start, u64 empty_size)
1100 struct btrfs_disk_key disk_key;
1101 struct extent_buffer *cow;
1104 int unlock_orig = 0;
1107 if (*cow_ret == buf)
1110 btrfs_assert_tree_locked(buf);
1112 WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
1113 trans->transid != root->fs_info->running_transaction->transid);
1114 WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
1115 trans->transid != root->last_trans);
1117 level = btrfs_header_level(buf);
1120 btrfs_item_key(buf, &disk_key, 0);
1122 btrfs_node_key(buf, &disk_key, 0);
1124 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
1126 parent_start = parent->start;
1132 cow = btrfs_alloc_tree_block(trans, root, parent_start,
1133 root->root_key.objectid, &disk_key, level,
1134 search_start, empty_size);
1136 return PTR_ERR(cow);
1138 /* cow is set to blocking by btrfs_init_new_buffer */
1140 copy_extent_buffer(cow, buf, 0, 0, cow->len);
1141 btrfs_set_header_bytenr(cow, cow->start);
1142 btrfs_set_header_generation(cow, trans->transid);
1143 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
1144 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
1145 BTRFS_HEADER_FLAG_RELOC);
1146 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1147 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
1149 btrfs_set_header_owner(cow, root->root_key.objectid);
1151 write_extent_buffer(cow, root->fs_info->fsid, btrfs_header_fsid(),
1154 ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
1156 btrfs_abort_transaction(trans, root, ret);
1160 if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
1161 ret = btrfs_reloc_cow_block(trans, root, buf, cow);
1166 if (buf == root->node) {
1167 WARN_ON(parent && parent != buf);
1168 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
1169 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
1170 parent_start = buf->start;
1174 extent_buffer_get(cow);
1175 tree_mod_log_set_root_pointer(root, cow, 1);
1176 rcu_assign_pointer(root->node, cow);
1178 btrfs_free_tree_block(trans, root, buf, parent_start,
1180 free_extent_buffer(buf);
1181 add_root_to_dirty_list(root);
1183 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1184 parent_start = parent->start;
1188 WARN_ON(trans->transid != btrfs_header_generation(parent));
1189 tree_mod_log_insert_key(root->fs_info, parent, parent_slot,
1190 MOD_LOG_KEY_REPLACE, GFP_NOFS);
1191 btrfs_set_node_blockptr(parent, parent_slot,
1193 btrfs_set_node_ptr_generation(parent, parent_slot,
1195 btrfs_mark_buffer_dirty(parent);
1197 ret = tree_mod_log_free_eb(root->fs_info, buf);
1199 btrfs_abort_transaction(trans, root, ret);
1203 btrfs_free_tree_block(trans, root, buf, parent_start,
1207 btrfs_tree_unlock(buf);
1208 free_extent_buffer_stale(buf);
1209 btrfs_mark_buffer_dirty(cow);
1215 * returns the logical address of the oldest predecessor of the given root.
1216 * entries older than time_seq are ignored.
1218 static struct tree_mod_elem *
1219 __tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
1220 struct extent_buffer *eb_root, u64 time_seq)
1222 struct tree_mod_elem *tm;
1223 struct tree_mod_elem *found = NULL;
1224 u64 root_logical = eb_root->start;
1231 * the very last operation that's logged for a root is the replacement
1232 * operation (if it is replaced at all). this has the index of the *new*
1233 * root, making it the very first operation that's logged for this root.
1236 tm = tree_mod_log_search_oldest(fs_info, root_logical,
1241 * if there are no tree operations for the oldest root, we simply
1242 * return it. this should only happen if that (old) root is at
1249 * if there's an operation that's not a root replacement, we
1250 * found the oldest version of our root. normally, we'll find a
1251 * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
1253 if (tm->op != MOD_LOG_ROOT_REPLACE)
1257 root_logical = tm->old_root.logical;
1261 /* if there's no old root to return, return what we found instead */
1269 * tm is a pointer to the first operation to rewind within eb. then, all
1270 * previous operations will be rewound (until we reach something older than
1274 __tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
1275 u64 time_seq, struct tree_mod_elem *first_tm)
1278 struct rb_node *next;
1279 struct tree_mod_elem *tm = first_tm;
1280 unsigned long o_dst;
1281 unsigned long o_src;
1282 unsigned long p_size = sizeof(struct btrfs_key_ptr);
1284 n = btrfs_header_nritems(eb);
1285 tree_mod_log_read_lock(fs_info);
1286 while (tm && tm->seq >= time_seq) {
1288 * all the operations are recorded with the operation used for
1289 * the modification. as we're going backwards, we do the
1290 * opposite of each operation here.
1293 case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
1294 BUG_ON(tm->slot < n);
1296 case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
1297 case MOD_LOG_KEY_REMOVE:
1298 btrfs_set_node_key(eb, &tm->key, tm->slot);
1299 btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1300 btrfs_set_node_ptr_generation(eb, tm->slot,
1304 case MOD_LOG_KEY_REPLACE:
1305 BUG_ON(tm->slot >= n);
1306 btrfs_set_node_key(eb, &tm->key, tm->slot);
1307 btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1308 btrfs_set_node_ptr_generation(eb, tm->slot,
1311 case MOD_LOG_KEY_ADD:
1312 /* if a move operation is needed it's in the log */
1315 case MOD_LOG_MOVE_KEYS:
1316 o_dst = btrfs_node_key_ptr_offset(tm->slot);
1317 o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
1318 memmove_extent_buffer(eb, o_dst, o_src,
1319 tm->move.nr_items * p_size);
1321 case MOD_LOG_ROOT_REPLACE:
1323 * this operation is special. for roots, this must be
1324 * handled explicitly before rewinding.
1325 * for non-roots, this operation may exist if the node
1326 * was a root: root A -> child B; then A becomes empty and
1327 * B is promoted to the new root. in the mod log, we'll
1328 * have a root-replace operation for B, a tree block
1329 * that is not a root. we simply ignore that operation.
1333 next = rb_next(&tm->node);
1336 tm = container_of(next, struct tree_mod_elem, node);
1337 if (tm->index != first_tm->index)
1340 tree_mod_log_read_unlock(fs_info);
1341 btrfs_set_header_nritems(eb, n);
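/*
 * Reading of the switch above (descriptive note, not original text): a
 * logged removal is undone by writing the saved key/blockptr/generation
 * back into the slot, a logged MOD_LOG_MOVE_KEYS is undone by memmoving
 * the key pointers back from move.dst_slot to the original slot, and a
 * MOD_LOG_ROOT_REPLACE entry is ignored here because the root switch is
 * handled by the callers (see get_old_root below).
 */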
1345 * Called with eb read locked. If the buffer cannot be rewound, the same buffer
1346 * is returned. If rewind operations happen, a fresh buffer is returned. The
1347 * returned buffer is always read-locked. If the returned buffer is not the
1348 * input buffer, the lock on the input buffer is released and the input buffer
1349 * is freed (its refcount is decremented).
1351 static struct extent_buffer *
1352 tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
1353 struct extent_buffer *eb, u64 time_seq)
1355 struct extent_buffer *eb_rewin;
1356 struct tree_mod_elem *tm;
1361 if (btrfs_header_level(eb) == 0)
1364 tm = tree_mod_log_search(fs_info, eb->start, time_seq);
1368 btrfs_set_path_blocking(path);
1369 btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
1371 if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1372 BUG_ON(tm->slot != 0);
1373 eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start);
1375 btrfs_tree_read_unlock_blocking(eb);
1376 free_extent_buffer(eb);
1379 btrfs_set_header_bytenr(eb_rewin, eb->start);
1380 btrfs_set_header_backref_rev(eb_rewin,
1381 btrfs_header_backref_rev(eb));
1382 btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
1383 btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
1385 eb_rewin = btrfs_clone_extent_buffer(eb);
1387 btrfs_tree_read_unlock_blocking(eb);
1388 free_extent_buffer(eb);
1393 btrfs_clear_path_blocking(path, NULL, BTRFS_READ_LOCK);
1394 btrfs_tree_read_unlock_blocking(eb);
1395 free_extent_buffer(eb);
1397 extent_buffer_get(eb_rewin);
1398 btrfs_tree_read_lock(eb_rewin);
1399 __tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
1400 WARN_ON(btrfs_header_nritems(eb_rewin) >
1401 BTRFS_NODEPTRS_PER_BLOCK(fs_info->tree_root));
1407 * get_old_root() rewinds the state of @root's root node to the given @time_seq
1408 * value. If there are no changes, the current root->node is returned. If
1409 * anything changed in between, there's a fresh buffer allocated on which the
1410 * rewind operations are done. In any case, the returned buffer is read locked.
1411 * Returns NULL on error (with no locks held).
1413 static inline struct extent_buffer *
1414 get_old_root(struct btrfs_root *root, u64 time_seq)
1416 struct tree_mod_elem *tm;
1417 struct extent_buffer *eb = NULL;
1418 struct extent_buffer *eb_root;
1419 struct extent_buffer *old;
1420 struct tree_mod_root *old_root = NULL;
1421 u64 old_generation = 0;
1424 eb_root = btrfs_read_lock_root_node(root);
1425 tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
1429 if (tm->op == MOD_LOG_ROOT_REPLACE) {
1430 old_root = &tm->old_root;
1431 old_generation = tm->generation;
1432 logical = old_root->logical;
1434 logical = eb_root->start;
1437 tm = tree_mod_log_search(root->fs_info, logical, time_seq);
1438 if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1439 btrfs_tree_read_unlock(eb_root);
1440 free_extent_buffer(eb_root);
1441 old = read_tree_block(root, logical, 0);
1442 if (WARN_ON(!old || !extent_buffer_uptodate(old))) {
1443 free_extent_buffer(old);
1444 btrfs_warn(root->fs_info,
1445 "failed to read tree block %llu from get_old_root", logical);
1447 eb = btrfs_clone_extent_buffer(old);
1448 free_extent_buffer(old);
1450 } else if (old_root) {
1451 btrfs_tree_read_unlock(eb_root);
1452 free_extent_buffer(eb_root);
1453 eb = alloc_dummy_extent_buffer(root->fs_info, logical);
1455 btrfs_set_lock_blocking_rw(eb_root, BTRFS_READ_LOCK);
1456 eb = btrfs_clone_extent_buffer(eb_root);
1457 btrfs_tree_read_unlock_blocking(eb_root);
1458 free_extent_buffer(eb_root);
1463 extent_buffer_get(eb);
1464 btrfs_tree_read_lock(eb);
1466 btrfs_set_header_bytenr(eb, eb->start);
1467 btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
1468 btrfs_set_header_owner(eb, btrfs_header_owner(eb_root));
1469 btrfs_set_header_level(eb, old_root->level);
1470 btrfs_set_header_generation(eb, old_generation);
1473 __tree_mod_log_rewind(root->fs_info, eb, time_seq, tm);
1475 WARN_ON(btrfs_header_level(eb) != 0);
1476 WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(root));
1481 int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq)
1483 struct tree_mod_elem *tm;
1485 struct extent_buffer *eb_root = btrfs_root_node(root);
1487 tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
1488 if (tm && tm->op == MOD_LOG_ROOT_REPLACE) {
1489 level = tm->old_root.level;
1491 level = btrfs_header_level(eb_root);
1493 free_extent_buffer(eb_root);
1498 static inline int should_cow_block(struct btrfs_trans_handle *trans,
1499 struct btrfs_root *root,
1500 struct extent_buffer *buf)
1502 if (btrfs_test_is_dummy_root(root))
1505 /* ensure we can see the force_cow */
1509 * We do not need to cow a block if
1510 * 1) the block was not created or changed in this transaction;
1511 * 2) the block does not belong to the TREE_RELOC tree;
1512 * 3) the root is not a forced-COW root.
1514 * What is forced COW:
1515 * when we create a snapshot while committing the transaction,
1516 * after we've finished copying the src root, we must COW the shared
1517 * block to ensure metadata consistency.
1519 if (btrfs_header_generation(buf) == trans->transid &&
1520 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
1521 !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
1522 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
1523 !test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
1529 * cows a single block, see __btrfs_cow_block for the real work.
1530 * This version of it has extra checks so that a block isn't cow'd more than
1531 * once per transaction, as long as it hasn't been written yet
1533 noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
1534 struct btrfs_root *root, struct extent_buffer *buf,
1535 struct extent_buffer *parent, int parent_slot,
1536 struct extent_buffer **cow_ret)
1541 if (trans->transaction != root->fs_info->running_transaction)
1542 WARN(1, KERN_CRIT "trans %llu running %llu\n",
1544 root->fs_info->running_transaction->transid);
1546 if (trans->transid != root->fs_info->generation)
1547 WARN(1, KERN_CRIT "trans %llu running %llu\n",
1548 trans->transid, root->fs_info->generation);
1550 if (!should_cow_block(trans, root, buf)) {
1555 search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);
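/*
 * Descriptive note (added): the hint above rounds buf->start down to the
 * containing 1GiB boundary, so the COW copy tends to be allocated near the
 * original block.
 */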
1558 btrfs_set_lock_blocking(parent);
1559 btrfs_set_lock_blocking(buf);
1561 ret = __btrfs_cow_block(trans, root, buf, parent,
1562 parent_slot, cow_ret, search_start, 0);
1564 trace_btrfs_cow_block(root, buf, *cow_ret);
1570 * helper function for defrag to decide if two blocks pointed to by a
1571 * node are actually close by
1573 static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
1575 if (blocknr < other && other - (blocknr + blocksize) < 32768)
1577 if (blocknr > other && blocknr - (other + blocksize) < 32768)
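/*
 * Worked example (a sketch, assuming a 16KiB nodesize): for blocknr 0 and
 * other 40960, the gap is 40960 - (0 + 16384) = 24576 bytes, which is under
 * the 32768 byte threshold above, so the two blocks count as close.
 */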
1583 * compare two keys in a memcmp fashion
1585 static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
1587 struct btrfs_key k1;
1589 btrfs_disk_key_to_cpu(&k1, disk);
1591 return btrfs_comp_cpu_keys(&k1, k2);
1595 * same as comp_keys only with two btrfs_key's
1597 int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
1599 if (k1->objectid > k2->objectid)
1601 if (k1->objectid < k2->objectid)
1603 if (k1->type > k2->type)
1605 if (k1->type < k2->type)
1607 if (k1->offset > k2->offset)
1609 if (k1->offset < k2->offset)
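/*
 * Descriptive note (added): the comparison above is lexicographic over
 * (objectid, type, offset). For example, a hypothetical key (256, 1, 0)
 * sorts before (256, 1, 4096) and before (257, 0, 0).
 */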
1615 * this is used by the defrag code to go through all the
1616 * leaves pointed to by a node and reallocate them so that
1617 * disk order is close to key order
1619 int btrfs_realloc_node(struct btrfs_trans_handle *trans,
1620 struct btrfs_root *root, struct extent_buffer *parent,
1621 int start_slot, u64 *last_ret,
1622 struct btrfs_key *progress)
1624 struct extent_buffer *cur;
1627 u64 search_start = *last_ret;
1637 int progress_passed = 0;
1638 struct btrfs_disk_key disk_key;
1640 parent_level = btrfs_header_level(parent);
1642 WARN_ON(trans->transaction != root->fs_info->running_transaction);
1643 WARN_ON(trans->transid != root->fs_info->generation);
1645 parent_nritems = btrfs_header_nritems(parent);
1646 blocksize = root->nodesize;
1647 end_slot = parent_nritems - 1;
1649 if (parent_nritems <= 1)
1652 btrfs_set_lock_blocking(parent);
1654 for (i = start_slot; i <= end_slot; i++) {
1657 btrfs_node_key(parent, &disk_key, i);
1658 if (!progress_passed && comp_keys(&disk_key, progress) < 0)
1661 progress_passed = 1;
1662 blocknr = btrfs_node_blockptr(parent, i);
1663 gen = btrfs_node_ptr_generation(parent, i);
1664 if (last_block == 0)
1665 last_block = blocknr;
1668 other = btrfs_node_blockptr(parent, i - 1);
1669 close = close_blocks(blocknr, other, blocksize);
1671 if (!close && i < end_slot) {
1672 other = btrfs_node_blockptr(parent, i + 1);
1673 close = close_blocks(blocknr, other, blocksize);
1676 last_block = blocknr;
1680 cur = btrfs_find_tree_block(root->fs_info, blocknr);
1682 uptodate = btrfs_buffer_uptodate(cur, gen, 0);
1685 if (!cur || !uptodate) {
1687 cur = read_tree_block(root, blocknr, gen);
1688 if (!cur || !extent_buffer_uptodate(cur)) {
1689 free_extent_buffer(cur);
1692 } else if (!uptodate) {
1693 err = btrfs_read_buffer(cur, gen);
1695 free_extent_buffer(cur);
1700 if (search_start == 0)
1701 search_start = last_block;
1703 btrfs_tree_lock(cur);
1704 btrfs_set_lock_blocking(cur);
1705 err = __btrfs_cow_block(trans, root, cur, parent, i,
1708 (end_slot - i) * blocksize));
1710 btrfs_tree_unlock(cur);
1711 free_extent_buffer(cur);
1714 search_start = cur->start;
1715 last_block = cur->start;
1716 *last_ret = search_start;
1717 btrfs_tree_unlock(cur);
1718 free_extent_buffer(cur);
1724 * The leaf data grows from end-to-front in the node.
1725 * this returns the address of the start of the last item,
1726 * which is the stop of the leaf data stack
1728 static inline unsigned int leaf_data_end(struct btrfs_root *root,
1729 struct extent_buffer *leaf)
1731 u32 nr = btrfs_header_nritems(leaf);
1733 return BTRFS_LEAF_DATA_SIZE(root);
1734 return btrfs_item_offset_nr(leaf, nr - 1);
1739 * search for key in the extent_buffer. The items start at offset p,
1740 * and they are item_size apart. There are 'max' items in p.
1742 * the slot in the array is returned via slot, and it points to
1743 * the place where you would insert key if it is not found in
1746 * slot may point to max if the key is bigger than all of the keys
1748 static noinline int generic_bin_search(struct extent_buffer *eb,
1750 int item_size, struct btrfs_key *key,
1757 struct btrfs_disk_key *tmp = NULL;
1758 struct btrfs_disk_key unaligned;
1759 unsigned long offset;
1761 unsigned long map_start = 0;
1762 unsigned long map_len = 0;
1765 while (low < high) {
1766 mid = (low + high) / 2;
1767 offset = p + mid * item_size;
1769 if (!kaddr || offset < map_start ||
1770 (offset + sizeof(struct btrfs_disk_key)) >
1771 map_start + map_len) {
1773 err = map_private_extent_buffer(eb, offset,
1774 sizeof(struct btrfs_disk_key),
1775 &kaddr, &map_start, &map_len);
1778 tmp = (struct btrfs_disk_key *)(kaddr + offset -
1781 read_extent_buffer(eb, &unaligned,
1782 offset, sizeof(unaligned));
1787 tmp = (struct btrfs_disk_key *)(kaddr + offset -
1790 ret = comp_keys(tmp, key);
1806 * simple bin_search frontend that does the right thing for
1809 static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
1810 int level, int *slot)
1813 return generic_bin_search(eb,
1814 offsetof(struct btrfs_leaf, items),
1815 sizeof(struct btrfs_item),
1816 key, btrfs_header_nritems(eb),
1819 return generic_bin_search(eb,
1820 offsetof(struct btrfs_node, ptrs),
1821 sizeof(struct btrfs_key_ptr),
1822 key, btrfs_header_nritems(eb),
1826 int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
1827 int level, int *slot)
1829 return bin_search(eb, key, level, slot);
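/*
 * Usage note (a sketch, not original text): callers receive 0 when the key
 * was found and 1 when it was not, with *slot pointing at the match or at
 * the position where the key would be inserted.
 */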
1832 static void root_add_used(struct btrfs_root *root, u32 size)
1834 spin_lock(&root->accounting_lock);
1835 btrfs_set_root_used(&root->root_item,
1836 btrfs_root_used(&root->root_item) + size);
1837 spin_unlock(&root->accounting_lock);
1840 static void root_sub_used(struct btrfs_root *root, u32 size)
1842 spin_lock(&root->accounting_lock);
1843 btrfs_set_root_used(&root->root_item,
1844 btrfs_root_used(&root->root_item) - size);
1845 spin_unlock(&root->accounting_lock);
1848 /* given a node and slot number, this reads the block it points to. The
1849 * extent buffer is returned with a reference taken (but unlocked).
1850 * NULL is returned on error.
1852 static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
1853 struct extent_buffer *parent, int slot)
1855 int level = btrfs_header_level(parent);
1856 struct extent_buffer *eb;
1860 if (slot >= btrfs_header_nritems(parent))
1865 eb = read_tree_block(root, btrfs_node_blockptr(parent, slot),
1866 btrfs_node_ptr_generation(parent, slot));
1867 if (eb && !extent_buffer_uptodate(eb)) {
1868 free_extent_buffer(eb);
1876 * node level balancing, used to make sure nodes are in proper order for
1877 * item deletion. We balance from the top down, so we have to make sure
1878 * that a deletion won't leave a node completely empty later on.
1880 static noinline int balance_level(struct btrfs_trans_handle *trans,
1881 struct btrfs_root *root,
1882 struct btrfs_path *path, int level)
1884 struct extent_buffer *right = NULL;
1885 struct extent_buffer *mid;
1886 struct extent_buffer *left = NULL;
1887 struct extent_buffer *parent = NULL;
1891 int orig_slot = path->slots[level];
1897 mid = path->nodes[level];
1899 WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
1900 path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
1901 WARN_ON(btrfs_header_generation(mid) != trans->transid);
1903 orig_ptr = btrfs_node_blockptr(mid, orig_slot);
1905 if (level < BTRFS_MAX_LEVEL - 1) {
1906 parent = path->nodes[level + 1];
1907 pslot = path->slots[level + 1];
1911 * deal with the case where there is only one pointer in the root
1912 * by promoting the node below to a root
1915 struct extent_buffer *child;
1917 if (btrfs_header_nritems(mid) != 1)
1920 /* promote the child to a root */
1921 child = read_node_slot(root, mid, 0);
1924 btrfs_std_error(root->fs_info, ret);
1928 btrfs_tree_lock(child);
1929 btrfs_set_lock_blocking(child);
1930 ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
1932 btrfs_tree_unlock(child);
1933 free_extent_buffer(child);
1937 tree_mod_log_set_root_pointer(root, child, 1);
1938 rcu_assign_pointer(root->node, child);
1940 add_root_to_dirty_list(root);
1941 btrfs_tree_unlock(child);
1943 path->locks[level] = 0;
1944 path->nodes[level] = NULL;
1945 clean_tree_block(trans, root->fs_info, mid);
1946 btrfs_tree_unlock(mid);
1947 /* once for the path */
1948 free_extent_buffer(mid);
1950 root_sub_used(root, mid->len);
1951 btrfs_free_tree_block(trans, root, mid, 0, 1);
1952 /* once for the root ptr */
1953 free_extent_buffer_stale(mid);
1956 if (btrfs_header_nritems(mid) >
1957 BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
1960 left = read_node_slot(root, parent, pslot - 1);
1962 btrfs_tree_lock(left);
1963 btrfs_set_lock_blocking(left);
1964 wret = btrfs_cow_block(trans, root, left,
1965 parent, pslot - 1, &left);
1971 right = read_node_slot(root, parent, pslot + 1);
1973 btrfs_tree_lock(right);
1974 btrfs_set_lock_blocking(right);
1975 wret = btrfs_cow_block(trans, root, right,
1976 parent, pslot + 1, &right);
1983 /* first, try to make some room in the middle buffer */
1985 orig_slot += btrfs_header_nritems(left);
1986 wret = push_node_left(trans, root, left, mid, 1);
1992 * then try to empty the right most buffer into the middle
1995 wret = push_node_left(trans, root, mid, right, 1);
1996 if (wret < 0 && wret != -ENOSPC)
1998 if (btrfs_header_nritems(right) == 0) {
1999 clean_tree_block(trans, root->fs_info, right);
2000 btrfs_tree_unlock(right);
2001 del_ptr(root, path, level + 1, pslot + 1);
2002 root_sub_used(root, right->len);
2003 btrfs_free_tree_block(trans, root, right, 0, 1);
2004 free_extent_buffer_stale(right);
2007 struct btrfs_disk_key right_key;
2008 btrfs_node_key(right, &right_key, 0);
2009 tree_mod_log_set_node_key(root->fs_info, parent,
2011 btrfs_set_node_key(parent, &right_key, pslot + 1);
2012 btrfs_mark_buffer_dirty(parent);
2015 if (btrfs_header_nritems(mid) == 1) {
2017 * we're not allowed to leave a node with one item in the
2018 * tree during a delete. A deletion from lower in the tree
2019 * could try to delete the only pointer in this node.
2020 * So, pull some keys from the left.
2021 * There has to be a left pointer at this point because
2022 * otherwise we would have pulled some pointers from the
2027 btrfs_std_error(root->fs_info, ret);
2030 wret = balance_node_right(trans, root, mid, left);
2036 wret = push_node_left(trans, root, left, mid, 1);
2042 if (btrfs_header_nritems(mid) == 0) {
2043 clean_tree_block(trans, root->fs_info, mid);
2044 btrfs_tree_unlock(mid);
2045 del_ptr(root, path, level + 1, pslot);
2046 root_sub_used(root, mid->len);
2047 btrfs_free_tree_block(trans, root, mid, 0, 1);
2048 free_extent_buffer_stale(mid);
2051 /* update the parent key to reflect our changes */
2052 struct btrfs_disk_key mid_key;
2053 btrfs_node_key(mid, &mid_key, 0);
2054 tree_mod_log_set_node_key(root->fs_info, parent,
2056 btrfs_set_node_key(parent, &mid_key, pslot);
2057 btrfs_mark_buffer_dirty(parent);
2060 /* update the path */
2062 if (btrfs_header_nritems(left) > orig_slot) {
2063 extent_buffer_get(left);
2064 /* left was locked after cow */
2065 path->nodes[level] = left;
2066 path->slots[level + 1] -= 1;
2067 path->slots[level] = orig_slot;
2069 btrfs_tree_unlock(mid);
2070 free_extent_buffer(mid);
2073 orig_slot -= btrfs_header_nritems(left);
2074 path->slots[level] = orig_slot;
2077 /* double check we haven't messed things up */
2079 btrfs_node_blockptr(path->nodes[level], path->slots[level]))
2083 btrfs_tree_unlock(right);
2084 free_extent_buffer(right);
2087 if (path->nodes[level] != left)
2088 btrfs_tree_unlock(left);
2089 free_extent_buffer(left);
2094 /* Node balancing for insertion. Here we only split or push nodes around
2095 * when they are completely full. This is also done top down, so we
2096 * have to be pessimistic.
2098 static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
2099 struct btrfs_root *root,
2100 struct btrfs_path *path, int level)
2102 struct extent_buffer *right = NULL;
2103 struct extent_buffer *mid;
2104 struct extent_buffer *left = NULL;
2105 struct extent_buffer *parent = NULL;
2109 int orig_slot = path->slots[level];
2114 mid = path->nodes[level];
2115 WARN_ON(btrfs_header_generation(mid) != trans->transid);
2117 if (level < BTRFS_MAX_LEVEL - 1) {
2118 parent = path->nodes[level + 1];
2119 pslot = path->slots[level + 1];
2125 left = read_node_slot(root, parent, pslot - 1);
2127 /* first, try to make some room in the middle buffer */
2131 btrfs_tree_lock(left);
2132 btrfs_set_lock_blocking(left);
2134 left_nr = btrfs_header_nritems(left);
2135 if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
2138 ret = btrfs_cow_block(trans, root, left, parent,
2143 wret = push_node_left(trans, root,
2150 struct btrfs_disk_key disk_key;
2151 orig_slot += left_nr;
2152 btrfs_node_key(mid, &disk_key, 0);
2153 tree_mod_log_set_node_key(root->fs_info, parent,
2155 btrfs_set_node_key(parent, &disk_key, pslot);
2156 btrfs_mark_buffer_dirty(parent);
2157 if (btrfs_header_nritems(left) > orig_slot) {
2158 path->nodes[level] = left;
2159 path->slots[level + 1] -= 1;
2160 path->slots[level] = orig_slot;
2161 btrfs_tree_unlock(mid);
2162 free_extent_buffer(mid);
2165 btrfs_header_nritems(left);
2166 path->slots[level] = orig_slot;
2167 btrfs_tree_unlock(left);
2168 free_extent_buffer(left);
2172 btrfs_tree_unlock(left);
2173 free_extent_buffer(left);
2175 right = read_node_slot(root, parent, pslot + 1);
2178 * then try to empty the right most buffer into the middle
2183 btrfs_tree_lock(right);
2184 btrfs_set_lock_blocking(right);
2186 right_nr = btrfs_header_nritems(right);
2187 if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
2190 ret = btrfs_cow_block(trans, root, right,
2196 wret = balance_node_right(trans, root,
2203 struct btrfs_disk_key disk_key;
2205 btrfs_node_key(right, &disk_key, 0);
2206 tree_mod_log_set_node_key(root->fs_info, parent,
2208 btrfs_set_node_key(parent, &disk_key, pslot + 1);
2209 btrfs_mark_buffer_dirty(parent);
2211 if (btrfs_header_nritems(mid) <= orig_slot) {
2212 path->nodes[level] = right;
2213 path->slots[level + 1] += 1;
2214 path->slots[level] = orig_slot -
2215 btrfs_header_nritems(mid);
2216 btrfs_tree_unlock(mid);
2217 free_extent_buffer(mid);
2219 btrfs_tree_unlock(right);
2220 free_extent_buffer(right);
2224 btrfs_tree_unlock(right);
2225 free_extent_buffer(right);
2231 * readahead one full node of leaves, finding things that are close
2232 * to the block in 'slot', and triggering ra on them.
2234 static void reada_for_search(struct btrfs_root *root,
2235 struct btrfs_path *path,
2236 int level, int slot, u64 objectid)
2238 struct extent_buffer *node;
2239 struct btrfs_disk_key disk_key;
2245 int direction = path->reada;
2246 struct extent_buffer *eb;
2254 if (!path->nodes[level])
2257 node = path->nodes[level];
2259 search = btrfs_node_blockptr(node, slot);
2260 blocksize = root->nodesize;
2261 eb = btrfs_find_tree_block(root->fs_info, search);
2263 free_extent_buffer(eb);
2269 nritems = btrfs_header_nritems(node);
2273 if (direction < 0) {
2277 } else if (direction > 0) {
2282 if (path->reada < 0 && objectid) {
2283 btrfs_node_key(node, &disk_key, nr);
2284 if (btrfs_disk_key_objectid(&disk_key) != objectid)
2287 search = btrfs_node_blockptr(node, nr);
2288 if ((search <= target && target - search <= 65536) ||
2289 (search > target && search - target <= 65536)) {
2290 gen = btrfs_node_ptr_generation(node, nr);
2291 readahead_tree_block(root, search);
2295 if ((nread > 65536 || nscan > 32))
2300 static noinline void reada_for_balance(struct btrfs_root *root,
2301 struct btrfs_path *path, int level)
2305 struct extent_buffer *parent;
2306 struct extent_buffer *eb;
2311 parent = path->nodes[level + 1];
2315 nritems = btrfs_header_nritems(parent);
2316 slot = path->slots[level + 1];
2319 block1 = btrfs_node_blockptr(parent, slot - 1);
2320 gen = btrfs_node_ptr_generation(parent, slot - 1);
2321 eb = btrfs_find_tree_block(root->fs_info, block1);
2323 * if we get -EAGAIN from btrfs_buffer_uptodate, we
2324 * don't want to return -EAGAIN here. That will loop
2327 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2329 free_extent_buffer(eb);
2331 if (slot + 1 < nritems) {
2332 block2 = btrfs_node_blockptr(parent, slot + 1);
2333 gen = btrfs_node_ptr_generation(parent, slot + 1);
2334 eb = btrfs_find_tree_block(root->fs_info, block2);
2335 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2337 free_extent_buffer(eb);
2341 readahead_tree_block(root, block1);
2343 readahead_tree_block(root, block2);
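/*
 * Descriptive note (added): the function above prefetches the left and
 * right siblings of the node being balanced (slot - 1 and slot + 1 in the
 * parent) so that balance_level() and split_node() are less likely to
 * block on disk reads.
 */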
2348 * when we walk down the tree, it is usually safe to unlock the higher layers
2349 * in the tree. The exceptions are when our path goes through slot 0, because
2350 * operations on the tree might require changing key pointers higher up in the
2353 * callers might also have set path->keep_locks, which tells this code to keep
2354 * the lock if the path points to the last slot in the block. This is part of
2355 * walking through the tree, and selecting the next slot in the higher block.
2357 * lowest_unlock sets the lowest level in the tree we're allowed to unlock. so
2358 * if lowest_unlock is 1, level 0 won't be unlocked
2360 static noinline void unlock_up(struct btrfs_path *path, int level,
2361 int lowest_unlock, int min_write_lock_level,
2362 int *write_lock_level)
2365 int skip_level = level;
2367 struct extent_buffer *t;
2369 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2370 if (!path->nodes[i])
2372 if (!path->locks[i])
2374 if (!no_skips && path->slots[i] == 0) {
2378 if (!no_skips && path->keep_locks) {
2381 nritems = btrfs_header_nritems(t);
2382 if (nritems < 1 || path->slots[i] >= nritems - 1) {
2387 if (skip_level < i && i >= lowest_unlock)
2391 if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
2392 btrfs_tree_unlock_rw(t, path->locks[i]);
2394 if (write_lock_level &&
2395 i > min_write_lock_level &&
2396 i <= *write_lock_level) {
2397 *write_lock_level = i - 1;
2404 * This releases any locks held in the path starting at level and
2405 * going all the way up to the root.
2407 * btrfs_search_slot will keep the lock held on higher nodes in a few
2408 * corner cases, such as COW of the block at slot zero in the node. This
2409 * ignores those rules, and it should only be called when there are no
2410 * more updates to be done higher up in the tree.
2412 noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
2416 if (path->keep_locks)
2419 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2420 if (!path->nodes[i])
2422 if (!path->locks[i])
2424 btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
2430 * helper function for btrfs_search_slot. The goal is to find a block
2431 * in cache without setting the path to blocking. If we find the block
2432 * we return zero and the path is unchanged.
2434 * If we can't find the block, we set the path blocking and do some
2435 * reada. -EAGAIN is returned and the search must be repeated.
2438 read_block_for_search(struct btrfs_trans_handle *trans,
2439 struct btrfs_root *root, struct btrfs_path *p,
2440 struct extent_buffer **eb_ret, int level, int slot,
2441 struct btrfs_key *key, u64 time_seq)
2445 struct extent_buffer *b = *eb_ret;
2446 struct extent_buffer *tmp;
2449 blocknr = btrfs_node_blockptr(b, slot);
2450 gen = btrfs_node_ptr_generation(b, slot);
2452 tmp = btrfs_find_tree_block(root->fs_info, blocknr);
2454 /* first we do an atomic uptodate check */
2455 if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
2460 /* the pages were up to date, but we failed
2461 * the generation number check. Do a full
2462 * read for the generation number that is correct.
2463 * We must do this without dropping locks so
2464 * we can trust our generation number
2466 btrfs_set_path_blocking(p);
2468 /* now we're allowed to do a blocking uptodate check */
2469 ret = btrfs_read_buffer(tmp, gen);
2474 free_extent_buffer(tmp);
2475 btrfs_release_path(p);
2480 * reduce lock contention at high levels
2481 * of the btree by dropping locks before
2482 * we read. Don't release the lock on the current
2483 * level because we need to walk this node to figure
2484 * out which blocks to read.
2486 btrfs_unlock_up_safe(p, level + 1);
2487 btrfs_set_path_blocking(p);
2489 free_extent_buffer(tmp);
2491 reada_for_search(root, p, level, slot, key->objectid);
2493 btrfs_release_path(p);
2496 tmp = read_tree_block(root, blocknr, 0);
2499 * If the read above didn't mark this buffer up to date,
2500 * it will never end up being up to date. Set ret to EIO now
2501 * and give up so that our caller doesn't loop forever
2504 if (!btrfs_buffer_uptodate(tmp, 0, 0))
2506 free_extent_buffer(tmp);
2512 * helper function for btrfs_search_slot. This does all of the checks
2513 * for node-level blocks and does any balancing required based on the ins_len.
2516 * If no extra work was required, zero is returned. If we had to
2517 * drop the path, -EAGAIN is returned and btrfs_search_slot must loop back and retry.
2521 setup_nodes_for_search(struct btrfs_trans_handle *trans,
2522 struct btrfs_root *root, struct btrfs_path *p,
2523 struct extent_buffer *b, int level, int ins_len,
2524 int *write_lock_level)
2527 if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
2528 BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
2531 if (*write_lock_level < level + 1) {
2532 *write_lock_level = level + 1;
2533 btrfs_release_path(p);
2537 btrfs_set_path_blocking(p);
2538 reada_for_balance(root, p, level);
2539 sret = split_node(trans, root, p, level);
2540 btrfs_clear_path_blocking(p, NULL, 0);
2547 b = p->nodes[level];
2548 } else if (ins_len < 0 && btrfs_header_nritems(b) <
2549 BTRFS_NODEPTRS_PER_BLOCK(root) / 2) {
2552 if (*write_lock_level < level + 1) {
2553 *write_lock_level = level + 1;
2554 btrfs_release_path(p);
2558 btrfs_set_path_blocking(p);
2559 reada_for_balance(root, p, level);
2560 sret = balance_level(trans, root, p, level);
2561 btrfs_clear_path_blocking(p, NULL, 0);
2567 b = p->nodes[level];
2569 btrfs_release_path(p);
2572 BUG_ON(btrfs_header_nritems(b) == 1);
2582 static void key_search_validate(struct extent_buffer *b,
2583 struct btrfs_key *key,
2586 #ifdef CONFIG_BTRFS_ASSERT
2587 struct btrfs_disk_key disk_key;
2589 btrfs_cpu_key_to_disk(&disk_key, key);
2592 ASSERT(!memcmp_extent_buffer(b, &disk_key,
2593 offsetof(struct btrfs_leaf, items[0].key),
2596 ASSERT(!memcmp_extent_buffer(b, &disk_key,
2597 offsetof(struct btrfs_node, ptrs[0].key),
2602 static int key_search(struct extent_buffer *b, struct btrfs_key *key,
2603 int level, int *prev_cmp, int *slot)
2605 if (*prev_cmp != 0) {
2606 *prev_cmp = bin_search(b, key, level, slot);
2610 key_search_validate(b, key, level);
2616 int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
2617 u64 iobjectid, u64 ioff, u8 key_type,
2618 struct btrfs_key *found_key)
2621 struct btrfs_key key;
2622 struct extent_buffer *eb;
2627 key.type = key_type;
2628 key.objectid = iobjectid;
2631 ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
2635 eb = path->nodes[0];
2636 if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
2637 ret = btrfs_next_leaf(fs_root, path);
2640 eb = path->nodes[0];
2643 btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
2644 if (found_key->type != key.type ||
2645 found_key->objectid != key.objectid)
2652 * look for key in the tree. path is filled in with nodes along the way
2653 * if key is found, we return zero and you can find the item in the leaf
2654 * level of the path (level 0)
2656 * If the key isn't found, the path points to the slot where it should
2657 * be inserted, and 1 is returned. If there are other errors during the
2658 * search a negative error number is returned.
2660 * if ins_len > 0, nodes and leaves will be split as we walk down the
2661 * tree. if ins_len < 0, nodes will be merged as we walk down the tree (if possible)
2664 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
2665 *root, struct btrfs_key *key, struct btrfs_path *p, int
2668 struct extent_buffer *b;
2673 int lowest_unlock = 1;
2675 /* everything at write_lock_level or lower must be write locked */
2676 int write_lock_level = 0;
2677 u8 lowest_level = 0;
2678 int min_write_lock_level;
2681 lowest_level = p->lowest_level;
2682 WARN_ON(lowest_level && ins_len > 0);
2683 WARN_ON(p->nodes[0] != NULL);
2684 BUG_ON(!cow && ins_len);
2689 /* when we are removing items, we might have to go up to level
2690 * two as we update tree pointers. Make sure we keep write locks
2691 * for those levels as well
2693 write_lock_level = 2;
2694 } else if (ins_len > 0) {
2696 * for inserting items, make sure we have a write lock on
2697 * level 1 so we can update keys
2699 write_lock_level = 1;
2703 write_lock_level = -1;
2705 if (cow && (p->keep_locks || p->lowest_level))
2706 write_lock_level = BTRFS_MAX_LEVEL;
2708 min_write_lock_level = write_lock_level;
2713 * we try very hard to do read locks on the root
2715 root_lock = BTRFS_READ_LOCK;
2717 if (p->search_commit_root) {
2719 * the commit roots are read only
2720 * so we always do read locks
2722 if (p->need_commit_sem)
2723 down_read(&root->fs_info->commit_root_sem);
2724 b = root->commit_root;
2725 extent_buffer_get(b);
2726 level = btrfs_header_level(b);
2727 if (p->need_commit_sem)
2728 up_read(&root->fs_info->commit_root_sem);
2729 if (!p->skip_locking)
2730 btrfs_tree_read_lock(b);
2732 if (p->skip_locking) {
2733 b = btrfs_root_node(root);
2734 level = btrfs_header_level(b);
2736 /* we don't know the level of the root node
2737 * until we actually have it read locked
2739 b = btrfs_read_lock_root_node(root);
2740 level = btrfs_header_level(b);
2741 if (level <= write_lock_level) {
2742 /* whoops, must trade for write lock */
2743 btrfs_tree_read_unlock(b);
2744 free_extent_buffer(b);
2745 b = btrfs_lock_root_node(root);
2746 root_lock = BTRFS_WRITE_LOCK;
2748 /* the level might have changed, check again */
2749 level = btrfs_header_level(b);
2753 p->nodes[level] = b;
2754 if (!p->skip_locking)
2755 p->locks[level] = root_lock;
2758 level = btrfs_header_level(b);
2761 * setup the path here so we can release it under lock
2762 * contention with the cow code
2766 * if we don't really need to cow this block
2767 * then we don't want to set the path blocking,
2768 * so we test it here
2770 if (!should_cow_block(trans, root, b))
2774 * must have write locks on this node and the parent
2777 if (level > write_lock_level ||
2778 (level + 1 > write_lock_level &&
2779 level + 1 < BTRFS_MAX_LEVEL &&
2780 p->nodes[level + 1])) {
2781 write_lock_level = level + 1;
2782 btrfs_release_path(p);
2786 btrfs_set_path_blocking(p);
2787 err = btrfs_cow_block(trans, root, b,
2788 p->nodes[level + 1],
2789 p->slots[level + 1], &b);
2796 p->nodes[level] = b;
2797 btrfs_clear_path_blocking(p, NULL, 0);
2800 * we have a lock on b and as long as we aren't changing
2801 * the tree, there is no way for the items in b to change.
2802 * It is safe to drop the lock on our parent before we
2803 * go through the expensive btree search on b.
2805 * If we're inserting or deleting (ins_len != 0), then we might
2806 * be changing slot zero, which may require changing the parent.
2807 * So, we can't drop the lock until after we know which slot
2808 * we're operating on.
2810 if (!ins_len && !p->keep_locks) {
2813 if (u < BTRFS_MAX_LEVEL && p->locks[u]) {
2814 btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]);
2819 ret = key_search(b, key, level, &prev_cmp, &slot);
2823 if (ret && slot > 0) {
2827 p->slots[level] = slot;
2828 err = setup_nodes_for_search(trans, root, p, b, level,
2829 ins_len, &write_lock_level);
2836 b = p->nodes[level];
2837 slot = p->slots[level];
2840 * slot 0 is special, if we change the key
2841 * we have to update the parent pointer
2842 * which means we must have a write lock on the parent
2845 if (slot == 0 && ins_len &&
2846 write_lock_level < level + 1) {
2847 write_lock_level = level + 1;
2848 btrfs_release_path(p);
2852 unlock_up(p, level, lowest_unlock,
2853 min_write_lock_level, &write_lock_level);
2855 if (level == lowest_level) {
2861 err = read_block_for_search(trans, root, p,
2862 &b, level, slot, key, 0);
2870 if (!p->skip_locking) {
2871 level = btrfs_header_level(b);
2872 if (level <= write_lock_level) {
2873 err = btrfs_try_tree_write_lock(b);
2875 btrfs_set_path_blocking(p);
2877 btrfs_clear_path_blocking(p, b,
2880 p->locks[level] = BTRFS_WRITE_LOCK;
2882 err = btrfs_tree_read_lock_atomic(b);
2884 btrfs_set_path_blocking(p);
2885 btrfs_tree_read_lock(b);
2886 btrfs_clear_path_blocking(p, b,
2889 p->locks[level] = BTRFS_READ_LOCK;
2891 p->nodes[level] = b;
2894 p->slots[level] = slot;
2896 btrfs_leaf_free_space(root, b) < ins_len) {
2897 if (write_lock_level < 1) {
2898 write_lock_level = 1;
2899 btrfs_release_path(p);
2903 btrfs_set_path_blocking(p);
2904 err = split_leaf(trans, root, key,
2905 p, ins_len, ret == 0);
2906 btrfs_clear_path_blocking(p, NULL, 0);
2914 if (!p->search_for_split)
2915 unlock_up(p, level, lowest_unlock,
2916 min_write_lock_level, &write_lock_level);
2923 * we don't really know what they plan on doing with the path
2924 * from here on, so for now just mark it as blocking
2926 if (!p->leave_spinning)
2927 btrfs_set_path_blocking(p);
2928 if (ret < 0 && !p->skip_release_on_error)
2929 btrfs_release_path(p);
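/*
 * A minimal illustrative sketch (not part of the original file): a plain
 * read-only lookup with btrfs_search_slot().  The objectid/type/offset
 * arguments and the example_* name are placeholders; mapping "not found"
 * (ret == 1) to -ENOENT is a common caller convention, not something
 * btrfs_search_slot() itself requires.
 */
static inline int example_lookup_item(struct btrfs_root *root,
				      u64 objectid, u8 type, u64 offset)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	key.objectid = objectid;
	key.type = type;
	key.offset = offset;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* NULL trans and cow == 0: this search only reads the tree */
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret > 0) {
		/* not found: the path points at the insertion slot */
		ret = -ENOENT;
	}
	/* on ret == 0, path->nodes[0] / path->slots[0] point at the item */

	btrfs_free_path(path);
	return ret;
}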
2934 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
2935 * current state of the tree together with the operations recorded in the tree
2936 * modification log to search for the key in a previous version of this tree, as
2937 * denoted by the time_seq parameter.
2939 * Naturally, there is no support for insert, delete or cow operations.
2941 * The resulting path and return value will be set up as if we called
2942 * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
2944 int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
2945 struct btrfs_path *p, u64 time_seq)
2947 struct extent_buffer *b;
2952 int lowest_unlock = 1;
2953 u8 lowest_level = 0;
2956 lowest_level = p->lowest_level;
2957 WARN_ON(p->nodes[0] != NULL);
2959 if (p->search_commit_root) {
2961 return btrfs_search_slot(NULL, root, key, p, 0, 0);
2965 b = get_old_root(root, time_seq);
2966 level = btrfs_header_level(b);
2967 p->locks[level] = BTRFS_READ_LOCK;
2970 level = btrfs_header_level(b);
2971 p->nodes[level] = b;
2972 btrfs_clear_path_blocking(p, NULL, 0);
2975 * we have a lock on b and as long as we aren't changing
2976 * the tree, there is no way for the items in b to change.
2977 * It is safe to drop the lock on our parent before we
2978 * go through the expensive btree search on b.
2980 btrfs_unlock_up_safe(p, level + 1);
2983 * Since we can unwind eb's we want to do a real search every time.
2987 ret = key_search(b, key, level, &prev_cmp, &slot);
2991 if (ret && slot > 0) {
2995 p->slots[level] = slot;
2996 unlock_up(p, level, lowest_unlock, 0, NULL);
2998 if (level == lowest_level) {
3004 err = read_block_for_search(NULL, root, p, &b, level,
3005 slot, key, time_seq);
3013 level = btrfs_header_level(b);
3014 err = btrfs_tree_read_lock_atomic(b);
3016 btrfs_set_path_blocking(p);
3017 btrfs_tree_read_lock(b);
3018 btrfs_clear_path_blocking(p, b,
3021 b = tree_mod_log_rewind(root->fs_info, p, b, time_seq);
3026 p->locks[level] = BTRFS_READ_LOCK;
3027 p->nodes[level] = b;
3029 p->slots[level] = slot;
3030 unlock_up(p, level, lowest_unlock, 0, NULL);
3036 if (!p->leave_spinning)
3037 btrfs_set_path_blocking(p);
3039 btrfs_release_path(p);
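/*
 * A minimal illustrative sketch (not part of the original file): reading
 * an item as it looked at a given tree mod log sequence number.  The
 * time_seq value is assumed to have been obtained from the tree mod log
 * machinery by the caller; the example_* name is a placeholder.
 */
static inline int example_lookup_old_item(struct btrfs_root *root,
					  struct btrfs_key *key,
					  u64 time_seq)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* same return convention as btrfs_search_slot() with cow == 0 */
	ret = btrfs_search_old_slot(root, key, path, time_seq);
	btrfs_free_path(path);
	return ret;
}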
3045 * helper to use instead of search slot if no exact match is needed but
3046 * instead the next or previous item should be returned.
3047 * When find_higher is true, the next higher item is returned, the next lower otherwise.
3049 * When return_any and find_higher are both true, and no higher item is found,
3050 * return the next lower instead.
3051 * When return_any is true and find_higher is false, and no lower item is found,
3052 * return the next higher instead.
3053 * It returns 0 if any item is found, 1 if none is found (tree empty), and < 0 on error.
3056 int btrfs_search_slot_for_read(struct btrfs_root *root,
3057 struct btrfs_key *key, struct btrfs_path *p,
3058 int find_higher, int return_any)
3061 struct extent_buffer *leaf;
3064 ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
3068 * a return value of 1 means the path is at the position where the
3069 * item should be inserted. Normally this is the next bigger item,
3070 * but in case the previous item is the last in a leaf, path points
3071 * to the first free slot in the previous leaf, i.e. at an invalid slot.
3077 if (p->slots[0] >= btrfs_header_nritems(leaf)) {
3078 ret = btrfs_next_leaf(root, p);
3084 * no higher item found, return the next lower instead
3089 btrfs_release_path(p);
3093 if (p->slots[0] == 0) {
3094 ret = btrfs_prev_leaf(root, p);
3099 if (p->slots[0] == btrfs_header_nritems(leaf))
3106 * no lower item found, return the next higher instead
3111 btrfs_release_path(p);
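/*
 * A minimal illustrative sketch (not part of the original file): find the
 * first item at or after @key by calling btrfs_search_slot_for_read()
 * with find_higher set and return_any clear.  Setting return_any as well
 * would fall back to the next lower item when nothing higher exists.  The
 * example_* name and found_key output parameter are placeholders.
 */
static inline int example_find_next_item(struct btrfs_root *root,
					 struct btrfs_key *key,
					 struct btrfs_key *found_key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot_for_read(root, key, path, 1, 0);
	if (ret == 0)
		btrfs_item_key_to_cpu(path->nodes[0], found_key,
				      path->slots[0]);

	btrfs_free_path(path);
	return ret;
}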
3121 * adjust the pointers going up the tree, starting at level
3122 * making sure the right key of each node points to 'key'.
3123 * This is used after shifting pointers to the left, so it stops
3124 * fixing up pointers when a given leaf/node is not in slot 0 of the higher levels.
3128 static void fixup_low_keys(struct btrfs_fs_info *fs_info,
3129 struct btrfs_path *path,
3130 struct btrfs_disk_key *key, int level)
3133 struct extent_buffer *t;
3135 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
3136 int tslot = path->slots[i];
3137 if (!path->nodes[i])
3140 tree_mod_log_set_node_key(fs_info, t, tslot, 1);
3141 btrfs_set_node_key(t, key, tslot);
3142 btrfs_mark_buffer_dirty(path->nodes[i]);
3151 * This function isn't completely safe. It's the caller's responsibility
3152 * that the new key won't break the order
3154 void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
3155 struct btrfs_path *path,
3156 struct btrfs_key *new_key)
3158 struct btrfs_disk_key disk_key;
3159 struct extent_buffer *eb;
3162 eb = path->nodes[0];
3163 slot = path->slots[0];
3165 btrfs_item_key(eb, &disk_key, slot - 1);
3166 BUG_ON(comp_keys(&disk_key, new_key) >= 0);
3168 if (slot < btrfs_header_nritems(eb) - 1) {
3169 btrfs_item_key(eb, &disk_key, slot + 1);
3170 BUG_ON(comp_keys(&disk_key, new_key) <= 0);
3173 btrfs_cpu_key_to_disk(&disk_key, new_key);
3174 btrfs_set_item_key(eb, &disk_key, slot);
3175 btrfs_mark_buffer_dirty(eb);
3177 fixup_low_keys(fs_info, path, &disk_key, 1);
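/*
 * A minimal illustrative sketch (not part of the original file): raise
 * the offset of the item the path points at, e.g. after trimming bytes
 * off the front of a file extent.  The caller must already hold the leaf
 * locked and, as noted above, must guarantee the new key still sorts
 * between its neighbours.  The example_* name is a placeholder.
 */
static inline void example_bump_item_offset(struct btrfs_fs_info *fs_info,
					    struct btrfs_path *path,
					    u64 new_offset)
{
	struct btrfs_key new_key;

	btrfs_item_key_to_cpu(path->nodes[0], &new_key, path->slots[0]);
	new_key.offset = new_offset;
	btrfs_set_item_key_safe(fs_info, path, &new_key);
}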
3181 * try to push data from one node into the next node left in the tree.
3184 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
3185 * error, and > 0 if there was no room in the left hand block.
3187 static int push_node_left(struct btrfs_trans_handle *trans,
3188 struct btrfs_root *root, struct extent_buffer *dst,
3189 struct extent_buffer *src, int empty)
3196 src_nritems = btrfs_header_nritems(src);
3197 dst_nritems = btrfs_header_nritems(dst);
3198 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
3199 WARN_ON(btrfs_header_generation(src) != trans->transid);
3200 WARN_ON(btrfs_header_generation(dst) != trans->transid);
3202 if (!empty && src_nritems <= 8)
3205 if (push_items <= 0)
3209 push_items = min(src_nritems, push_items);
3210 if (push_items < src_nritems) {
3211 /* leave at least 8 pointers in the node if
3212 * we aren't going to empty it
3214 if (src_nritems - push_items < 8) {
3215 if (push_items <= 8)
3221 push_items = min(src_nritems - 8, push_items);
3223 ret = tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
3226 btrfs_abort_transaction(trans, root, ret);
3229 copy_extent_buffer(dst, src,
3230 btrfs_node_key_ptr_offset(dst_nritems),
3231 btrfs_node_key_ptr_offset(0),
3232 push_items * sizeof(struct btrfs_key_ptr));
3234 if (push_items < src_nritems) {
3236 * don't call tree_mod_log_eb_move here, key removal was already
3237 * fully logged by tree_mod_log_eb_copy above.
3239 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
3240 btrfs_node_key_ptr_offset(push_items),
3241 (src_nritems - push_items) *
3242 sizeof(struct btrfs_key_ptr));
3244 btrfs_set_header_nritems(src, src_nritems - push_items);
3245 btrfs_set_header_nritems(dst, dst_nritems + push_items);
3246 btrfs_mark_buffer_dirty(src);
3247 btrfs_mark_buffer_dirty(dst);
3253 * try to push data from one node into the next node right in the tree.
3256 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
3257 * error, and > 0 if there was no room in the right hand block.
3259 * this will only push up to 1/2 the contents of the left node over
3261 static int balance_node_right(struct btrfs_trans_handle *trans,
3262 struct btrfs_root *root,
3263 struct extent_buffer *dst,
3264 struct extent_buffer *src)
3272 WARN_ON(btrfs_header_generation(src) != trans->transid);
3273 WARN_ON(btrfs_header_generation(dst) != trans->transid);
3275 src_nritems = btrfs_header_nritems(src);
3276 dst_nritems = btrfs_header_nritems(dst);
3277 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
3278 if (push_items <= 0)
3281 if (src_nritems < 4)
3284 max_push = src_nritems / 2 + 1;
3285 /* don't try to empty the node */
3286 if (max_push >= src_nritems)
3289 if (max_push < push_items)
3290 push_items = max_push;
3292 tree_mod_log_eb_move(root->fs_info, dst, push_items, 0, dst_nritems);
3293 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
3294 btrfs_node_key_ptr_offset(0),
3296 sizeof(struct btrfs_key_ptr));
3298 ret = tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
3299 src_nritems - push_items, push_items);
3301 btrfs_abort_transaction(trans, root, ret);
3304 copy_extent_buffer(dst, src,
3305 btrfs_node_key_ptr_offset(0),
3306 btrfs_node_key_ptr_offset(src_nritems - push_items),
3307 push_items * sizeof(struct btrfs_key_ptr));
3309 btrfs_set_header_nritems(src, src_nritems - push_items);
3310 btrfs_set_header_nritems(dst, dst_nritems + push_items);
3312 btrfs_mark_buffer_dirty(src);
3313 btrfs_mark_buffer_dirty(dst);
3319 * helper function to insert a new root level in the tree.
3320 * A new node is allocated, and a single item is inserted to
3321 * point to the existing root
3323 * returns zero on success or < 0 on failure.
3325 static noinline int insert_new_root(struct btrfs_trans_handle *trans,
3326 struct btrfs_root *root,
3327 struct btrfs_path *path, int level)
3330 struct extent_buffer *lower;
3331 struct extent_buffer *c;
3332 struct extent_buffer *old;
3333 struct btrfs_disk_key lower_key;
3335 BUG_ON(path->nodes[level]);
3336 BUG_ON(path->nodes[level-1] != root->node);
3338 lower = path->nodes[level-1];
3340 btrfs_item_key(lower, &lower_key, 0);
3342 btrfs_node_key(lower, &lower_key, 0);
3344 c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
3345 &lower_key, level, root->node->start, 0);
3349 root_add_used(root, root->nodesize);
3351 memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
3352 btrfs_set_header_nritems(c, 1);
3353 btrfs_set_header_level(c, level);
3354 btrfs_set_header_bytenr(c, c->start);
3355 btrfs_set_header_generation(c, trans->transid);
3356 btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
3357 btrfs_set_header_owner(c, root->root_key.objectid);
3359 write_extent_buffer(c, root->fs_info->fsid, btrfs_header_fsid(),
3362 write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
3363 btrfs_header_chunk_tree_uuid(c), BTRFS_UUID_SIZE);
3365 btrfs_set_node_key(c, &lower_key, 0);
3366 btrfs_set_node_blockptr(c, 0, lower->start);
3367 lower_gen = btrfs_header_generation(lower);
3368 WARN_ON(lower_gen != trans->transid);
3370 btrfs_set_node_ptr_generation(c, 0, lower_gen);
3372 btrfs_mark_buffer_dirty(c);
3375 tree_mod_log_set_root_pointer(root, c, 0);
3376 rcu_assign_pointer(root->node, c);
3378 /* the super has an extra ref to root->node */
3379 free_extent_buffer(old);
3381 add_root_to_dirty_list(root);
3382 extent_buffer_get(c);
3383 path->nodes[level] = c;
3384 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
3385 path->slots[level] = 0;
3390 * worker function to insert a single pointer in a node.
3391 * the node should have enough room for the pointer already
3393 * slot and level indicate where you want the key to go, and
3394 * blocknr is the block the key points to.
3396 static void insert_ptr(struct btrfs_trans_handle *trans,
3397 struct btrfs_root *root, struct btrfs_path *path,
3398 struct btrfs_disk_key *key, u64 bytenr,
3399 int slot, int level)
3401 struct extent_buffer *lower;
3405 BUG_ON(!path->nodes[level]);
3406 btrfs_assert_tree_locked(path->nodes[level]);
3407 lower = path->nodes[level];
3408 nritems = btrfs_header_nritems(lower);
3409 BUG_ON(slot > nritems);
3410 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root));
3411 if (slot != nritems) {
3413 tree_mod_log_eb_move(root->fs_info, lower, slot + 1,
3414 slot, nritems - slot);
3415 memmove_extent_buffer(lower,
3416 btrfs_node_key_ptr_offset(slot + 1),
3417 btrfs_node_key_ptr_offset(slot),
3418 (nritems - slot) * sizeof(struct btrfs_key_ptr));
3421 ret = tree_mod_log_insert_key(root->fs_info, lower, slot,
3422 MOD_LOG_KEY_ADD, GFP_NOFS);
3425 btrfs_set_node_key(lower, key, slot);
3426 btrfs_set_node_blockptr(lower, slot, bytenr);
3427 WARN_ON(trans->transid == 0);
3428 btrfs_set_node_ptr_generation(lower, slot, trans->transid);
3429 btrfs_set_header_nritems(lower, nritems + 1);
3430 btrfs_mark_buffer_dirty(lower);
3434 * split the node at the specified level in path in two.
3435 * The path is corrected to point to the appropriate node after the split
3437 * Before splitting this tries to make some room in the node by pushing
3438 * left and right, if either one works, it returns right away.
3440 * returns 0 on success and < 0 on failure
3442 static noinline int split_node(struct btrfs_trans_handle *trans,
3443 struct btrfs_root *root,
3444 struct btrfs_path *path, int level)
3446 struct extent_buffer *c;
3447 struct extent_buffer *split;
3448 struct btrfs_disk_key disk_key;
3453 c = path->nodes[level];
3454 WARN_ON(btrfs_header_generation(c) != trans->transid);
3455 if (c == root->node) {
3457 * trying to split the root, let's make a new one
3459 * tree mod log: we don't log removal of the old root in
3460 * insert_new_root, because that root buffer will be kept as a
3461 * normal node. We are going to log removal of half of the
3462 * elements below with tree_mod_log_eb_copy. We're holding a
3463 * tree lock on the buffer, which is why we cannot race with
3464 * other tree_mod_log users.
3466 ret = insert_new_root(trans, root, path, level + 1);
3470 ret = push_nodes_for_insert(trans, root, path, level);
3471 c = path->nodes[level];
3472 if (!ret && btrfs_header_nritems(c) <
3473 BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
3479 c_nritems = btrfs_header_nritems(c);
3480 mid = (c_nritems + 1) / 2;
3481 btrfs_node_key(c, &disk_key, mid);
3483 split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
3484 &disk_key, level, c->start, 0);
3486 return PTR_ERR(split);
3488 root_add_used(root, root->nodesize);
3490 memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header));
3491 btrfs_set_header_level(split, btrfs_header_level(c));
3492 btrfs_set_header_bytenr(split, split->start);
3493 btrfs_set_header_generation(split, trans->transid);
3494 btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
3495 btrfs_set_header_owner(split, root->root_key.objectid);
3496 write_extent_buffer(split, root->fs_info->fsid,
3497 btrfs_header_fsid(), BTRFS_FSID_SIZE);
3498 write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
3499 btrfs_header_chunk_tree_uuid(split),
3502 ret = tree_mod_log_eb_copy(root->fs_info, split, c, 0,
3503 mid, c_nritems - mid);
3505 btrfs_abort_transaction(trans, root, ret);
3508 copy_extent_buffer(split, c,
3509 btrfs_node_key_ptr_offset(0),
3510 btrfs_node_key_ptr_offset(mid),
3511 (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
3512 btrfs_set_header_nritems(split, c_nritems - mid);
3513 btrfs_set_header_nritems(c, mid);
3516 btrfs_mark_buffer_dirty(c);
3517 btrfs_mark_buffer_dirty(split);
3519 insert_ptr(trans, root, path, &disk_key, split->start,
3520 path->slots[level + 1] + 1, level + 1);
3522 if (path->slots[level] >= mid) {
3523 path->slots[level] -= mid;
3524 btrfs_tree_unlock(c);
3525 free_extent_buffer(c);
3526 path->nodes[level] = split;
3527 path->slots[level + 1] += 1;
3529 btrfs_tree_unlock(split);
3530 free_extent_buffer(split);
3536 * how many bytes are required to store the items in a leaf. start
3537 * and nr indicate which items in the leaf to check. This totals up the
3538 * space used both by the item structs and the item data
3540 static int leaf_space_used(struct extent_buffer *l, int start, int nr)
3542 struct btrfs_item *start_item;
3543 struct btrfs_item *end_item;
3544 struct btrfs_map_token token;
3546 int nritems = btrfs_header_nritems(l);
3547 int end = min(nritems, start + nr) - 1;
3551 btrfs_init_map_token(&token);
3552 start_item = btrfs_item_nr(start);
3553 end_item = btrfs_item_nr(end);
3554 data_len = btrfs_token_item_offset(l, start_item, &token) +
3555 btrfs_token_item_size(l, start_item, &token);
3556 data_len = data_len - btrfs_token_item_offset(l, end_item, &token);
3557 data_len += sizeof(struct btrfs_item) * nr;
3558 WARN_ON(data_len < 0);
3563 * The space between the end of the leaf items and
3564 * the start of the leaf data. IOW, how much room
3565 * the leaf has left for both items and data
3567 noinline int btrfs_leaf_free_space(struct btrfs_root *root,
3568 struct extent_buffer *leaf)
3570 int nritems = btrfs_header_nritems(leaf);
3572 ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
3574 btrfs_crit(root->fs_info,
3575 "leaf free space ret %d, leaf data size %lu, used %d nritems %d",
3576 ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
3577 leaf_space_used(leaf, 0, nritems), nritems);
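/*
 * A minimal illustrative sketch (not part of the original file): a leaf
 * has room for one more item when btrfs_leaf_free_space() covers the
 * item's payload plus one more struct btrfs_item header, since headers
 * grow from the front of the leaf and data grows from the back.  The
 * example_* name is a placeholder for this common caller-side check.
 */
static inline int example_leaf_has_room(struct btrfs_root *root,
					struct extent_buffer *leaf,
					u32 data_len)
{
	return btrfs_leaf_free_space(root, leaf) >=
	       (int)(sizeof(struct btrfs_item) + data_len);
}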
3583 * min slot controls the lowest index we're willing to push to the
3584 * right. We'll push up to and including min_slot, but no lower
3586 static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
3587 struct btrfs_root *root,
3588 struct btrfs_path *path,
3589 int data_size, int empty,
3590 struct extent_buffer *right,
3591 int free_space, u32 left_nritems,
3594 struct extent_buffer *left = path->nodes[0];
3595 struct extent_buffer *upper = path->nodes[1];
3596 struct btrfs_map_token token;
3597 struct btrfs_disk_key disk_key;
3602 struct btrfs_item *item;
3608 btrfs_init_map_token(&token);
3613 nr = max_t(u32, 1, min_slot);
3615 if (path->slots[0] >= left_nritems)
3616 push_space += data_size;
3618 slot = path->slots[1];
3619 i = left_nritems - 1;
3621 item = btrfs_item_nr(i);
3623 if (!empty && push_items > 0) {
3624 if (path->slots[0] > i)
3626 if (path->slots[0] == i) {
3627 int space = btrfs_leaf_free_space(root, left);
3628 if (space + push_space * 2 > free_space)
3633 if (path->slots[0] == i)
3634 push_space += data_size;
3636 this_item_size = btrfs_item_size(left, item);
3637 if (this_item_size + sizeof(*item) + push_space > free_space)
3641 push_space += this_item_size + sizeof(*item);
3647 if (push_items == 0)
3650 WARN_ON(!empty && push_items == left_nritems);
3652 /* push left to right */
3653 right_nritems = btrfs_header_nritems(right);
3655 push_space = btrfs_item_end_nr(left, left_nritems - push_items);
3656 push_space -= leaf_data_end(root, left);
3658 /* make room in the right data area */
3659 data_end = leaf_data_end(root, right);
3660 memmove_extent_buffer(right,
3661 btrfs_leaf_data(right) + data_end - push_space,
3662 btrfs_leaf_data(right) + data_end,
3663 BTRFS_LEAF_DATA_SIZE(root) - data_end);
3665 /* copy from the left data area */
3666 copy_extent_buffer(right, left, btrfs_leaf_data(right) +
3667 BTRFS_LEAF_DATA_SIZE(root) - push_space,
3668 btrfs_leaf_data(left) + leaf_data_end(root, left),
3671 memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
3672 btrfs_item_nr_offset(0),
3673 right_nritems * sizeof(struct btrfs_item));
3675 /* copy the items from left to right */
3676 copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
3677 btrfs_item_nr_offset(left_nritems - push_items),
3678 push_items * sizeof(struct btrfs_item));
3680 /* update the item pointers */
3681 right_nritems += push_items;
3682 btrfs_set_header_nritems(right, right_nritems);
3683 push_space = BTRFS_LEAF_DATA_SIZE(root);
3684 for (i = 0; i < right_nritems; i++) {
3685 item = btrfs_item_nr(i);
3686 push_space -= btrfs_token_item_size(right, item, &token);
3687 btrfs_set_token_item_offset(right, item, push_space, &token);
3690 left_nritems -= push_items;
3691 btrfs_set_header_nritems(left, left_nritems);
3694 btrfs_mark_buffer_dirty(left);
3696 clean_tree_block(trans, root->fs_info, left);
3698 btrfs_mark_buffer_dirty(right);
3700 btrfs_item_key(right, &disk_key, 0);
3701 btrfs_set_node_key(upper, &disk_key, slot + 1);
3702 btrfs_mark_buffer_dirty(upper);
3704 /* then fixup the leaf pointer in the path */
3705 if (path->slots[0] >= left_nritems) {
3706 path->slots[0] -= left_nritems;
3707 if (btrfs_header_nritems(path->nodes[0]) == 0)
3708 clean_tree_block(trans, root->fs_info, path->nodes[0]);
3709 btrfs_tree_unlock(path->nodes[0]);
3710 free_extent_buffer(path->nodes[0]);
3711 path->nodes[0] = right;
3712 path->slots[1] += 1;
3714 btrfs_tree_unlock(right);
3715 free_extent_buffer(right);
3720 btrfs_tree_unlock(right);
3721 free_extent_buffer(right);
3726 * push some data in the path leaf to the right, trying to free up at
3727 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3729 * returns 1 if the push failed because the other node didn't have enough
3730 * room, 0 if everything worked out and < 0 if there were major errors.
3732 * this will push starting from min_slot to the end of the leaf. It won't
3733 * push any slot lower than min_slot
3735 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
3736 *root, struct btrfs_path *path,
3737 int min_data_size, int data_size,
3738 int empty, u32 min_slot)
3740 struct extent_buffer *left = path->nodes[0];
3741 struct extent_buffer *right;
3742 struct extent_buffer *upper;
3748 if (!path->nodes[1])
3751 slot = path->slots[1];
3752 upper = path->nodes[1];
3753 if (slot >= btrfs_header_nritems(upper) - 1)
3756 btrfs_assert_tree_locked(path->nodes[1]);
3758 right = read_node_slot(root, upper, slot + 1);
3762 btrfs_tree_lock(right);
3763 btrfs_set_lock_blocking(right);
3765 free_space = btrfs_leaf_free_space(root, right);
3766 if (free_space < data_size)
3769 /* cow and double check */
3770 ret = btrfs_cow_block(trans, root, right, upper,
3775 free_space = btrfs_leaf_free_space(root, right);
3776 if (free_space < data_size)
3779 left_nritems = btrfs_header_nritems(left);
3780 if (left_nritems == 0)
3783 if (path->slots[0] == left_nritems && !empty) {
3784 /* Key greater than all keys in the leaf, right neighbor has
3785 * enough room for it and we're not emptying our leaf to delete
3786 * it, therefore use right neighbor to insert the new item and
3787 * no need to touch/dirty our left leaf. */
3788 btrfs_tree_unlock(left);
3789 free_extent_buffer(left);
3790 path->nodes[0] = right;
3796 return __push_leaf_right(trans, root, path, min_data_size, empty,
3797 right, free_space, left_nritems, min_slot);
3799 btrfs_tree_unlock(right);
3800 free_extent_buffer(right);
3805 * push some data in the path leaf to the left, trying to free up at
3806 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3808 * max_slot can put a limit on how far into the leaf we'll push items. The
3809 * item at 'max_slot' won't be touched. Use (u32)-1 to make us do all the items.
3812 static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
3813 struct btrfs_root *root,
3814 struct btrfs_path *path, int data_size,
3815 int empty, struct extent_buffer *left,
3816 int free_space, u32 right_nritems,
3819 struct btrfs_disk_key disk_key;
3820 struct extent_buffer *right = path->nodes[0];
3824 struct btrfs_item *item;
3825 u32 old_left_nritems;
3829 u32 old_left_item_size;
3830 struct btrfs_map_token token;
3832 btrfs_init_map_token(&token);
3835 nr = min(right_nritems, max_slot);
3837 nr = min(right_nritems - 1, max_slot);
3839 for (i = 0; i < nr; i++) {
3840 item = btrfs_item_nr(i);
3842 if (!empty && push_items > 0) {
3843 if (path->slots[0] < i)
3845 if (path->slots[0] == i) {
3846 int space = btrfs_leaf_free_space(root, right);
3847 if (space + push_space * 2 > free_space)
3852 if (path->slots[0] == i)
3853 push_space += data_size;
3855 this_item_size = btrfs_item_size(right, item);
3856 if (this_item_size + sizeof(*item) + push_space > free_space)
3860 push_space += this_item_size + sizeof(*item);
3863 if (push_items == 0) {
3867 WARN_ON(!empty && push_items == btrfs_header_nritems(right));
3869 /* push data from right to left */
3870 copy_extent_buffer(left, right,
3871 btrfs_item_nr_offset(btrfs_header_nritems(left)),
3872 btrfs_item_nr_offset(0),
3873 push_items * sizeof(struct btrfs_item));
3875 push_space = BTRFS_LEAF_DATA_SIZE(root) -
3876 btrfs_item_offset_nr(right, push_items - 1);
3878 copy_extent_buffer(left, right, btrfs_leaf_data(left) +
3879 leaf_data_end(root, left) - push_space,
3880 btrfs_leaf_data(right) +
3881 btrfs_item_offset_nr(right, push_items - 1),
3883 old_left_nritems = btrfs_header_nritems(left);
3884 BUG_ON(old_left_nritems <= 0);
3886 old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
3887 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
3890 item = btrfs_item_nr(i);
3892 ioff = btrfs_token_item_offset(left, item, &token);
3893 btrfs_set_token_item_offset(left, item,
3894 ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size),
3897 btrfs_set_header_nritems(left, old_left_nritems + push_items);
3899 /* fixup right node */
3900 if (push_items > right_nritems)
3901 WARN(1, KERN_CRIT "push items %d nr %u\n", push_items,
3904 if (push_items < right_nritems) {
3905 push_space = btrfs_item_offset_nr(right, push_items - 1) -
3906 leaf_data_end(root, right);
3907 memmove_extent_buffer(right, btrfs_leaf_data(right) +
3908 BTRFS_LEAF_DATA_SIZE(root) - push_space,
3909 btrfs_leaf_data(right) +
3910 leaf_data_end(root, right), push_space);
3912 memmove_extent_buffer(right, btrfs_item_nr_offset(0),
3913 btrfs_item_nr_offset(push_items),
3914 (btrfs_header_nritems(right) - push_items) *
3915 sizeof(struct btrfs_item));
3917 right_nritems -= push_items;
3918 btrfs_set_header_nritems(right, right_nritems);
3919 push_space = BTRFS_LEAF_DATA_SIZE(root);
3920 for (i = 0; i < right_nritems; i++) {
3921 item = btrfs_item_nr(i);
3923 push_space = push_space - btrfs_token_item_size(right,
3925 btrfs_set_token_item_offset(right, item, push_space, &token);
3928 btrfs_mark_buffer_dirty(left);
3930 btrfs_mark_buffer_dirty(right);
3932 clean_tree_block(trans, root->fs_info, right);
3934 btrfs_item_key(right, &disk_key, 0);
3935 fixup_low_keys(root->fs_info, path, &disk_key, 1);
3937 /* then fixup the leaf pointer in the path */
3938 if (path->slots[0] < push_items) {
3939 path->slots[0] += old_left_nritems;
3940 btrfs_tree_unlock(path->nodes[0]);
3941 free_extent_buffer(path->nodes[0]);
3942 path->nodes[0] = left;
3943 path->slots[1] -= 1;
3945 btrfs_tree_unlock(left);
3946 free_extent_buffer(left);
3947 path->slots[0] -= push_items;
3949 BUG_ON(path->slots[0] < 0);
3952 btrfs_tree_unlock(left);
3953 free_extent_buffer(left);
3958 * push some data in the path leaf to the left, trying to free up at
3959 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3961 * max_slot can put a limit on how far into the leaf we'll push items. The
3962 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the items.
3965 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
3966 *root, struct btrfs_path *path, int min_data_size,
3967 int data_size, int empty, u32 max_slot)
3969 struct extent_buffer *right = path->nodes[0];
3970 struct extent_buffer *left;
3976 slot = path->slots[1];
3979 if (!path->nodes[1])
3982 right_nritems = btrfs_header_nritems(right);
3983 if (right_nritems == 0)
3986 btrfs_assert_tree_locked(path->nodes[1]);
3988 left = read_node_slot(root, path->nodes[1], slot - 1);
3992 btrfs_tree_lock(left);
3993 btrfs_set_lock_blocking(left);
3995 free_space = btrfs_leaf_free_space(root, left);
3996 if (free_space < data_size) {
4001 /* cow and double check */
4002 ret = btrfs_cow_block(trans, root, left,
4003 path->nodes[1], slot - 1, &left);
4005 /* we hit -ENOSPC, but it isn't fatal here */
4011 free_space = btrfs_leaf_free_space(root, left);
4012 if (free_space < data_size) {
4017 return __push_leaf_left(trans, root, path, min_data_size,
4018 empty, left, free_space, right_nritems,
4021 btrfs_tree_unlock(left);
4022 free_extent_buffer(left);
4027 * split the path's leaf in two, making sure there is at least data_size
4028 * available for the resulting leaf level of the path.
4030 static noinline void copy_for_split(struct btrfs_trans_handle *trans,
4031 struct btrfs_root *root,
4032 struct btrfs_path *path,
4033 struct extent_buffer *l,
4034 struct extent_buffer *right,
4035 int slot, int mid, int nritems)
4040 struct btrfs_disk_key disk_key;
4041 struct btrfs_map_token token;
4043 btrfs_init_map_token(&token);
4045 nritems = nritems - mid;
4046 btrfs_set_header_nritems(right, nritems);
4047 data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);
4049 copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
4050 btrfs_item_nr_offset(mid),
4051 nritems * sizeof(struct btrfs_item));
4053 copy_extent_buffer(right, l,
4054 btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
4055 data_copy_size, btrfs_leaf_data(l) +
4056 leaf_data_end(root, l), data_copy_size);
4058 rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
4059 btrfs_item_end_nr(l, mid);
4061 for (i = 0; i < nritems; i++) {
4062 struct btrfs_item *item = btrfs_item_nr(i);
4065 ioff = btrfs_token_item_offset(right, item, &token);
4066 btrfs_set_token_item_offset(right, item,
4067 ioff + rt_data_off, &token);
4070 btrfs_set_header_nritems(l, mid);
4071 btrfs_item_key(right, &disk_key, 0);
4072 insert_ptr(trans, root, path, &disk_key, right->start,
4073 path->slots[1] + 1, 1);
4075 btrfs_mark_buffer_dirty(right);
4076 btrfs_mark_buffer_dirty(l);
4077 BUG_ON(path->slots[0] != slot);
4080 btrfs_tree_unlock(path->nodes[0]);
4081 free_extent_buffer(path->nodes[0]);
4082 path->nodes[0] = right;
4083 path->slots[0] -= mid;
4084 path->slots[1] += 1;
4086 btrfs_tree_unlock(right);
4087 free_extent_buffer(right);
4090 BUG_ON(path->slots[0] < 0);
4094 * double splits happen when we need to insert a big item in the middle
4095 * of a leaf. A double split can leave us with 3 mostly empty leaves:
4096 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
4099 * We avoid this by trying to push the items on either side of our target
4100 * into the adjacent leaves. If all goes well we can avoid the double split completely.
4103 static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
4104 struct btrfs_root *root,
4105 struct btrfs_path *path,
4112 int space_needed = data_size;
4114 slot = path->slots[0];
4115 if (slot < btrfs_header_nritems(path->nodes[0]))
4116 space_needed -= btrfs_leaf_free_space(root, path->nodes[0]);
4119 * try to push all the items after our slot into the next leaf
4122 ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot);
4129 nritems = btrfs_header_nritems(path->nodes[0]);
4131 * our goal is to get our slot at the start or end of a leaf. If
4132 * we've done so we're done
4134 if (path->slots[0] == 0 || path->slots[0] == nritems)
4137 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
4140 /* try to push all the items before our slot into the next leaf */
4141 slot = path->slots[0];
4142 ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot);
4155 * split the path's leaf in two, making sure there is at least data_size
4156 * available for the resulting leaf level of the path.
4158 * returns 0 if all went well and < 0 on failure.
4160 static noinline int split_leaf(struct btrfs_trans_handle *trans,
4161 struct btrfs_root *root,
4162 struct btrfs_key *ins_key,
4163 struct btrfs_path *path, int data_size,
4166 struct btrfs_disk_key disk_key;
4167 struct extent_buffer *l;
4171 struct extent_buffer *right;
4172 struct btrfs_fs_info *fs_info = root->fs_info;
4176 int num_doubles = 0;
4177 int tried_avoid_double = 0;
4180 slot = path->slots[0];
4181 if (extend && data_size + btrfs_item_size_nr(l, slot) +
4182 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root))
4185 /* first try to make some room by pushing left and right */
4186 if (data_size && path->nodes[1]) {
4187 int space_needed = data_size;
4189 if (slot < btrfs_header_nritems(l))
4190 space_needed -= btrfs_leaf_free_space(root, l);
4192 wret = push_leaf_right(trans, root, path, space_needed,
4193 space_needed, 0, 0);
4197 wret = push_leaf_left(trans, root, path, space_needed,
4198 space_needed, 0, (u32)-1);
4204 /* did the pushes work? */
4205 if (btrfs_leaf_free_space(root, l) >= data_size)
4209 if (!path->nodes[1]) {
4210 ret = insert_new_root(trans, root, path, 1);
4217 slot = path->slots[0];
4218 nritems = btrfs_header_nritems(l);
4219 mid = (nritems + 1) / 2;
4223 leaf_space_used(l, mid, nritems - mid) + data_size >
4224 BTRFS_LEAF_DATA_SIZE(root)) {
4225 if (slot >= nritems) {
4229 if (mid != nritems &&
4230 leaf_space_used(l, mid, nritems - mid) +
4231 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
4232 if (data_size && !tried_avoid_double)
4233 goto push_for_double;
4239 if (leaf_space_used(l, 0, mid) + data_size >
4240 BTRFS_LEAF_DATA_SIZE(root)) {
4241 if (!extend && data_size && slot == 0) {
4243 } else if ((extend || !data_size) && slot == 0) {
4247 if (mid != nritems &&
4248 leaf_space_used(l, mid, nritems - mid) +
4249 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
4250 if (data_size && !tried_avoid_double)
4251 goto push_for_double;
4259 btrfs_cpu_key_to_disk(&disk_key, ins_key);
4261 btrfs_item_key(l, &disk_key, mid);
4263 right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
4264 &disk_key, 0, l->start, 0);
4266 return PTR_ERR(right);
4268 root_add_used(root, root->nodesize);
4270 memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
4271 btrfs_set_header_bytenr(right, right->start);
4272 btrfs_set_header_generation(right, trans->transid);
4273 btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
4274 btrfs_set_header_owner(right, root->root_key.objectid);
4275 btrfs_set_header_level(right, 0);
4276 write_extent_buffer(right, fs_info->fsid,
4277 btrfs_header_fsid(), BTRFS_FSID_SIZE);
4279 write_extent_buffer(right, fs_info->chunk_tree_uuid,
4280 btrfs_header_chunk_tree_uuid(right),
4285 btrfs_set_header_nritems(right, 0);
4286 insert_ptr(trans, root, path, &disk_key, right->start,
4287 path->slots[1] + 1, 1);
4288 btrfs_tree_unlock(path->nodes[0]);
4289 free_extent_buffer(path->nodes[0]);
4290 path->nodes[0] = right;
4292 path->slots[1] += 1;
4294 btrfs_set_header_nritems(right, 0);
4295 insert_ptr(trans, root, path, &disk_key, right->start,
4297 btrfs_tree_unlock(path->nodes[0]);
4298 free_extent_buffer(path->nodes[0]);
4299 path->nodes[0] = right;
4301 if (path->slots[1] == 0)
4302 fixup_low_keys(fs_info, path, &disk_key, 1);
4304 btrfs_mark_buffer_dirty(right);
4308 copy_for_split(trans, root, path, l, right, slot, mid, nritems);
4311 BUG_ON(num_doubles != 0);
4319 push_for_double_split(trans, root, path, data_size);
4320 tried_avoid_double = 1;
4321 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
4326 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
4327 struct btrfs_root *root,
4328 struct btrfs_path *path, int ins_len)
4330 struct btrfs_key key;
4331 struct extent_buffer *leaf;
4332 struct btrfs_file_extent_item *fi;
4337 leaf = path->nodes[0];
4338 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4340 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
4341 key.type != BTRFS_EXTENT_CSUM_KEY);
4343 if (btrfs_leaf_free_space(root, leaf) >= ins_len)
4346 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4347 if (key.type == BTRFS_EXTENT_DATA_KEY) {
4348 fi = btrfs_item_ptr(leaf, path->slots[0],
4349 struct btrfs_file_extent_item);
4350 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
4352 btrfs_release_path(path);
4354 path->keep_locks = 1;
4355 path->search_for_split = 1;
4356 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
4357 path->search_for_split = 0;
4364 leaf = path->nodes[0];
4365 /* if our item isn't there, return now */
4366 if (item_size != btrfs_item_size_nr(leaf, path->slots[0]))
4369 /* the leaf has changed, it now has room. return now */
4370 if (btrfs_leaf_free_space(root, path->nodes[0]) >= ins_len)
4373 if (key.type == BTRFS_EXTENT_DATA_KEY) {
4374 fi = btrfs_item_ptr(leaf, path->slots[0],
4375 struct btrfs_file_extent_item);
4376 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
4380 btrfs_set_path_blocking(path);
4381 ret = split_leaf(trans, root, &key, path, ins_len, 1);
4385 path->keep_locks = 0;
4386 btrfs_unlock_up_safe(path, 1);
4389 path->keep_locks = 0;
4393 static noinline int split_item(struct btrfs_trans_handle *trans,
4394 struct btrfs_root *root,
4395 struct btrfs_path *path,
4396 struct btrfs_key *new_key,
4397 unsigned long split_offset)
4399 struct extent_buffer *leaf;
4400 struct btrfs_item *item;
4401 struct btrfs_item *new_item;
4407 struct btrfs_disk_key disk_key;
4409 leaf = path->nodes[0];
4410 BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
4412 btrfs_set_path_blocking(path);
4414 item = btrfs_item_nr(path->slots[0]);
4415 orig_offset = btrfs_item_offset(leaf, item);
4416 item_size = btrfs_item_size(leaf, item);
4418 buf = kmalloc(item_size, GFP_NOFS);
4422 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
4423 path->slots[0]), item_size);
4425 slot = path->slots[0] + 1;
4426 nritems = btrfs_header_nritems(leaf);
4427 if (slot != nritems) {
4428 /* shift the items */
4429 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
4430 btrfs_item_nr_offset(slot),
4431 (nritems - slot) * sizeof(struct btrfs_item));
4434 btrfs_cpu_key_to_disk(&disk_key, new_key);
4435 btrfs_set_item_key(leaf, &disk_key, slot);
4437 new_item = btrfs_item_nr(slot);
4439 btrfs_set_item_offset(leaf, new_item, orig_offset);
4440 btrfs_set_item_size(leaf, new_item, item_size - split_offset);
4442 btrfs_set_item_offset(leaf, item,
4443 orig_offset + item_size - split_offset);
4444 btrfs_set_item_size(leaf, item, split_offset);
4446 btrfs_set_header_nritems(leaf, nritems + 1);
4448 /* write the data for the start of the original item */
4449 write_extent_buffer(leaf, buf,
4450 btrfs_item_ptr_offset(leaf, path->slots[0]),
4453 /* write the data for the new item */
4454 write_extent_buffer(leaf, buf + split_offset,
4455 btrfs_item_ptr_offset(leaf, slot),
4456 item_size - split_offset);
4457 btrfs_mark_buffer_dirty(leaf);
4459 BUG_ON(btrfs_leaf_free_space(root, leaf) < 0);
4465 * This function splits a single item into two items,
4466 * giving 'new_key' to the new item and splitting the
4467 * old one at split_offset (from the start of the item).
4469 * The path may be released by this operation. After
4470 * the split, the path is pointing to the old item. The
4471 * new item is going to be in the same node as the old one.
4473 * Note, the item being split must be small enough to live alone on
4474 * a tree block with room for one extra struct btrfs_item
4476 * This allows us to split the item in place, keeping a lock on the
4477 * leaf the entire time.
4479 int btrfs_split_item(struct btrfs_trans_handle *trans,
4480 struct btrfs_root *root,
4481 struct btrfs_path *path,
4482 struct btrfs_key *new_key,
4483 unsigned long split_offset)
4486 ret = setup_leaf_for_split(trans, root, path,
4487 sizeof(struct btrfs_item));
4491 ret = split_item(trans, root, path, new_key, split_offset);
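/*
 * A minimal illustrative sketch (not part of the original file): split
 * the item the path points at so the bytes from @split_offset onward end
 * up in a second item that keeps the same objectid/type but takes a
 * caller-chosen offset.  The example_* name and new_key_offset parameter
 * are placeholders; the caller still rewrites both halves afterwards if
 * their payload layout changed.
 */
static inline int example_split_current_item(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     struct btrfs_path *path,
					     u64 new_key_offset,
					     unsigned long split_offset)
{
	struct btrfs_key new_key;

	btrfs_item_key_to_cpu(path->nodes[0], &new_key, path->slots[0]);
	new_key.offset = new_key_offset;

	/* on success the path still points at the shortened original item */
	return btrfs_split_item(trans, root, path, &new_key, split_offset);
}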
4496 * This function duplicates an item, giving 'new_key' to the new item.
4497 * It guarantees both items live in the same tree leaf and the new item
4498 * is contiguous with the original item.
4500 * This allows us to split a file extent in place, keeping a lock on the
4501 * leaf the entire time.
4503 int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
4504 struct btrfs_root *root,
4505 struct btrfs_path *path,
4506 struct btrfs_key *new_key)
4508 struct extent_buffer *leaf;
4512 leaf = path->nodes[0];
4513 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4514 ret = setup_leaf_for_split(trans, root, path,
4515 item_size + sizeof(struct btrfs_item));
4520 setup_items_for_insert(root, path, new_key, &item_size,
4521 item_size, item_size +
4522 sizeof(struct btrfs_item), 1);
4523 leaf = path->nodes[0];
4524 memcpy_extent_buffer(leaf,
4525 btrfs_item_ptr_offset(leaf, path->slots[0]),
4526 btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
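/*
 * A minimal illustrative sketch (not part of the original file): copy the
 * item at path->slots[0] under a new key, the usual first step when a
 * file extent has to be split in place.  After the call the path points
 * at the copy and the original sits in the previous slot; the caller is
 * still responsible for fixing up both items' fields.  The example_*
 * name is a placeholder.
 */
static inline int example_duplicate_current_item(
					struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct btrfs_key *new_key)
{
	int ret;

	ret = btrfs_duplicate_item(trans, root, path, new_key);
	if (ret)
		return ret;

	/* copy: path->slots[0], original: path->slots[0] - 1 */
	return 0;
}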
4532 * make the item pointed to by the path smaller. new_size indicates
4533 * how small to make it, and from_end tells us if we just chop bytes
4534 * off the end of the item or if we shift the item to chop bytes off the front.
4537 void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path,
4538 u32 new_size, int from_end)
4541 struct extent_buffer *leaf;
4542 struct btrfs_item *item;
4544 unsigned int data_end;
4545 unsigned int old_data_start;
4546 unsigned int old_size;
4547 unsigned int size_diff;
4549 struct btrfs_map_token token;
4551 btrfs_init_map_token(&token);
4553 leaf = path->nodes[0];
4554 slot = path->slots[0];
4556 old_size = btrfs_item_size_nr(leaf, slot);
4557 if (old_size == new_size)
4560 nritems = btrfs_header_nritems(leaf);
4561 data_end = leaf_data_end(root, leaf);
4563 old_data_start = btrfs_item_offset_nr(leaf, slot);
4565 size_diff = old_size - new_size;
4568 BUG_ON(slot >= nritems);
4571 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4573 /* first correct the data pointers */
4574 for (i = slot; i < nritems; i++) {
4576 item = btrfs_item_nr(i);
4578 ioff = btrfs_token_item_offset(leaf, item, &token);
4579 btrfs_set_token_item_offset(leaf, item,
4580 ioff + size_diff, &token);
4583 /* shift the data */
4585 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4586 data_end + size_diff, btrfs_leaf_data(leaf) +
4587 data_end, old_data_start + new_size - data_end);
4589 struct btrfs_disk_key disk_key;
4592 btrfs_item_key(leaf, &disk_key, slot);
4594 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
4596 struct btrfs_file_extent_item *fi;
4598 fi = btrfs_item_ptr(leaf, slot,
4599 struct btrfs_file_extent_item);
4600 fi = (struct btrfs_file_extent_item *)(
4601 (unsigned long)fi - size_diff);
4603 if (btrfs_file_extent_type(leaf, fi) ==
4604 BTRFS_FILE_EXTENT_INLINE) {
4605 ptr = btrfs_item_ptr_offset(leaf, slot);
4606 memmove_extent_buffer(leaf, ptr,
4608 BTRFS_FILE_EXTENT_INLINE_DATA_START);
4612 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4613 data_end + size_diff, btrfs_leaf_data(leaf) +
4614 data_end, old_data_start - data_end);
4616 offset = btrfs_disk_key_offset(&disk_key);
4617 btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
4618 btrfs_set_item_key(leaf, &disk_key, slot);
4620 fixup_low_keys(root->fs_info, path, &disk_key, 1);
4623 item = btrfs_item_nr(slot);
4624 btrfs_set_item_size(leaf, item, new_size);
4625 btrfs_mark_buffer_dirty(leaf);
4627 if (btrfs_leaf_free_space(root, leaf) < 0) {
4628 btrfs_print_leaf(root, leaf);
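/*
 * A minimal illustrative sketch (not part of the original file): drop the
 * last @bytes_to_drop bytes of the item the path points at by truncating
 * from the end (from_end == 1).  The example_* name is a placeholder;
 * real callers compute new_size from their own item layout.
 */
static inline void example_chop_item_tail(struct btrfs_root *root,
					  struct btrfs_path *path,
					  u32 bytes_to_drop)
{
	u32 new_size;

	new_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]) -
		   bytes_to_drop;
	btrfs_truncate_item(root, path, new_size, 1);
}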
4634 * make the item pointed to by the path bigger, data_size is the added size.
4636 void btrfs_extend_item(struct btrfs_root *root, struct btrfs_path *path,
4640 struct extent_buffer *leaf;
4641 struct btrfs_item *item;
4643 unsigned int data_end;
4644 unsigned int old_data;
4645 unsigned int old_size;
4647 struct btrfs_map_token token;
4649 btrfs_init_map_token(&token);
4651 leaf = path->nodes[0];
4653 nritems = btrfs_header_nritems(leaf);
4654 data_end = leaf_data_end(root, leaf);
4656 if (btrfs_leaf_free_space(root, leaf) < data_size) {
4657 btrfs_print_leaf(root, leaf);
4660 slot = path->slots[0];
4661 old_data = btrfs_item_end_nr(leaf, slot);
4664 if (slot >= nritems) {
4665 btrfs_print_leaf(root, leaf);
4666 btrfs_crit(root->fs_info, "slot %d too large, nritems %d",
4672 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4674 /* first correct the data pointers */
4675 for (i = slot; i < nritems; i++) {
4677 item = btrfs_item_nr(i);
4679 ioff = btrfs_token_item_offset(leaf, item, &token);
4680 btrfs_set_token_item_offset(leaf, item,
4681 ioff - data_size, &token);
4684 /* shift the data */
4685 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4686 data_end - data_size, btrfs_leaf_data(leaf) +
4687 data_end, old_data - data_end);
4689 data_end = old_data;
4690 old_size = btrfs_item_size_nr(leaf, slot);
4691 item = btrfs_item_nr(slot);
4692 btrfs_set_item_size(leaf, item, old_size + data_size);
4693 btrfs_mark_buffer_dirty(leaf);
4695 if (btrfs_leaf_free_space(root, leaf) < 0) {
4696 btrfs_print_leaf(root, leaf);
4702 * this is a helper for btrfs_insert_empty_items, the main goal here is
4703 * to save stack depth by doing the bulk of the work in a function
4704 * that doesn't call btrfs_search_slot
4706 void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
4707 struct btrfs_key *cpu_key, u32 *data_size,
4708 u32 total_data, u32 total_size, int nr)
4710 struct btrfs_item *item;
4713 unsigned int data_end;
4714 struct btrfs_disk_key disk_key;
4715 struct extent_buffer *leaf;
4717 struct btrfs_map_token token;
4719 if (path->slots[0] == 0) {
4720 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
4721 fixup_low_keys(root->fs_info, path, &disk_key, 1);
4723 btrfs_unlock_up_safe(path, 1);
4725 btrfs_init_map_token(&token);
4727 leaf = path->nodes[0];
4728 slot = path->slots[0];
4730 nritems = btrfs_header_nritems(leaf);
4731 data_end = leaf_data_end(root, leaf);
4733 if (btrfs_leaf_free_space(root, leaf) < total_size) {
4734 btrfs_print_leaf(root, leaf);
4735 btrfs_crit(root->fs_info, "not enough freespace need %u have %d",
4736 total_size, btrfs_leaf_free_space(root, leaf));
4740 if (slot != nritems) {
4741 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
4743 if (old_data < data_end) {
4744 btrfs_print_leaf(root, leaf);
4745 btrfs_crit(root->fs_info, "slot %d old_data %d data_end %d",
4746 slot, old_data, data_end);
4750 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4752 /* first correct the data pointers */
4753 for (i = slot; i < nritems; i++) {
4756 item = btrfs_item_nr(i);
4757 ioff = btrfs_token_item_offset(leaf, item, &token);
4758 btrfs_set_token_item_offset(leaf, item,
4759 ioff - total_data, &token);
4761 /* shift the items */
4762 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
4763 btrfs_item_nr_offset(slot),
4764 (nritems - slot) * sizeof(struct btrfs_item));
4766 /* shift the data */
4767 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4768 data_end - total_data, btrfs_leaf_data(leaf) +
4769 data_end, old_data - data_end);
4770 data_end = old_data;
4773 /* setup the item for the new data */
4774 for (i = 0; i < nr; i++) {
4775 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
4776 btrfs_set_item_key(leaf, &disk_key, slot + i);
4777 item = btrfs_item_nr(slot + i);
4778 btrfs_set_token_item_offset(leaf, item,
4779 data_end - data_size[i], &token);
4780 data_end -= data_size[i];
4781 btrfs_set_token_item_size(leaf, item, data_size[i], &token);
4784 btrfs_set_header_nritems(leaf, nritems + nr);
4785 btrfs_mark_buffer_dirty(leaf);
4787 if (btrfs_leaf_free_space(root, leaf) < 0) {
4788 btrfs_print_leaf(root, leaf);
4794 * Given a key and some data, insert items into the tree.
4795 * This does all the path init required, making room in the tree if needed.
4797 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
4798 struct btrfs_root *root,
4799 struct btrfs_path *path,
4800 struct btrfs_key *cpu_key, u32 *data_size,
4809 for (i = 0; i < nr; i++)
4810 total_data += data_size[i];
4812 total_size = total_data + (nr * sizeof(struct btrfs_item));
4813 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
4819 slot = path->slots[0];
4822 setup_items_for_insert(root, path, cpu_key, data_size,
4823 total_data, total_size, nr);
4828 * Given a key and some data, insert an item into the tree.
4829 * This does all the path init required, making room in the tree if needed.
4831 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
4832 *root, struct btrfs_key *cpu_key, void *data, u32
4836 struct btrfs_path *path;
4837 struct extent_buffer *leaf;
4840 path = btrfs_alloc_path();
4843 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
4845 leaf = path->nodes[0];
4846 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
4847 write_extent_buffer(leaf, data, ptr, data_size);
4848 btrfs_mark_buffer_dirty(leaf);
4850 btrfs_free_path(path);
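/*
 * Illustrative sketch: a minimal btrfs_insert_item() caller, assuming a
 * transaction handle is already held.  The key type and data buffer are
 * placeholders, not taken from a real call site:
 *
 *	struct btrfs_key key;
 *
 *	key.objectid = objectid;
 *	key.type = BTRFS_XATTR_ITEM_KEY;
 *	key.offset = 0;
 *	ret = btrfs_insert_item(trans, root, &key, buf, buf_len);
 *	if (ret)
 *		goto out;
 */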
4855 * delete the pointer from a given node.
4857 * the tree should have been previously balanced so the deletion does not empty a node.
4860 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
4861 int level, int slot)
4863 struct extent_buffer *parent = path->nodes[level];
4867 nritems = btrfs_header_nritems(parent);
4868 if (slot != nritems - 1) {
4870 tree_mod_log_eb_move(root->fs_info, parent, slot,
4871 slot + 1, nritems - slot - 1);
4872 memmove_extent_buffer(parent,
4873 btrfs_node_key_ptr_offset(slot),
4874 btrfs_node_key_ptr_offset(slot + 1),
4875 sizeof(struct btrfs_key_ptr) *
4876 (nritems - slot - 1));
4878 ret = tree_mod_log_insert_key(root->fs_info, parent, slot,
4879 MOD_LOG_KEY_REMOVE, GFP_NOFS);
4884 btrfs_set_header_nritems(parent, nritems);
4885 if (nritems == 0 && parent == root->node) {
4886 BUG_ON(btrfs_header_level(root->node) != 1);
4887 /* just turn the root into a leaf and break */
4888 btrfs_set_header_level(root->node, 0);
4889 } else if (slot == 0) {
4890 struct btrfs_disk_key disk_key;
4892 btrfs_node_key(parent, &disk_key, 0);
4893 fixup_low_keys(root->fs_info, path, &disk_key, level + 1);
4895 btrfs_mark_buffer_dirty(parent);
4899 * a helper function to delete the leaf pointed to by path->slots[1] and
4902 * path->nodes[1]: this deletes the pointer in path->nodes[1] and frees the
4903 * leaf block extent.
4905 * The path must have already been set up for deleting the leaf, including
4906 * all the proper balancing. path->nodes[1] must be locked.
4908 static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
4909 struct btrfs_root *root,
4910 struct btrfs_path *path,
4911 struct extent_buffer *leaf)
4913 WARN_ON(btrfs_header_generation(leaf) != trans->transid);
4914 del_ptr(root, path, 1, path->slots[1]);
4917 * btrfs_free_extent is expensive, so we want to make sure we
4918 * aren't holding any locks when we call it
4920 btrfs_unlock_up_safe(path, 0);
4922 root_sub_used(root, leaf->len);
4924 extent_buffer_get(leaf);
4925 btrfs_free_tree_block(trans, root, leaf, 0, 1);
4926 free_extent_buffer_stale(leaf);
4929 * delete the item at the leaf level in path. If that empties
4930 * the leaf, remove it from the tree
4932 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4933 struct btrfs_path *path, int slot, int nr)
4935 struct extent_buffer *leaf;
4936 struct btrfs_item *item;
4943 struct btrfs_map_token token;
4945 btrfs_init_map_token(&token);
4947 leaf = path->nodes[0];
4948 last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
4950 for (i = 0; i < nr; i++)
4951 dsize += btrfs_item_size_nr(leaf, slot + i);
4953 nritems = btrfs_header_nritems(leaf);
4955 if (slot + nr != nritems) {
4956 int data_end = leaf_data_end(root, leaf);
4958 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4960 btrfs_leaf_data(leaf) + data_end,
4961 last_off - data_end);
4963 for (i = slot + nr; i < nritems; i++) {
4966 item = btrfs_item_nr(i);
4967 ioff = btrfs_token_item_offset(leaf, item, &token);
4968 btrfs_set_token_item_offset(leaf, item,
4969 ioff + dsize, &token);
4972 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
4973 btrfs_item_nr_offset(slot + nr),
4974 sizeof(struct btrfs_item) *
4975 (nritems - slot - nr));
4977 btrfs_set_header_nritems(leaf, nritems - nr);
4980 /* delete the leaf if we've emptied it */
4982 if (leaf == root->node) {
4983 btrfs_set_header_level(leaf, 0);
4985 btrfs_set_path_blocking(path);
4986 clean_tree_block(trans, root->fs_info, leaf);
4987 btrfs_del_leaf(trans, root, path, leaf);
4990 int used = leaf_space_used(leaf, 0, nritems);
4992 struct btrfs_disk_key disk_key;
4994 btrfs_item_key(leaf, &disk_key, 0);
4995 fixup_low_keys(root->fs_info, path, &disk_key, 1);
4998 /* delete the leaf if it is mostly empty */
4999 if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) {
5000 /* push_leaf_left fixes the path.
5001 * make sure the path still points to our leaf
5002 * for a possible call to del_ptr below
5004 slot = path->slots[1];
5005 extent_buffer_get(leaf);
5007 btrfs_set_path_blocking(path);
5008 wret = push_leaf_left(trans, root, path, 1, 1,
5010 if (wret < 0 && wret != -ENOSPC)
5013 if (path->nodes[0] == leaf &&
5014 btrfs_header_nritems(leaf)) {
5015 wret = push_leaf_right(trans, root, path, 1,
5017 if (wret < 0 && wret != -ENOSPC)
5021 if (btrfs_header_nritems(leaf) == 0) {
5022 path->slots[1] = slot;
5023 btrfs_del_leaf(trans, root, path, leaf);
5024 free_extent_buffer(leaf);
5027 /* if we're still in the path, make sure
5028 * we're dirty. Otherwise, one of the
5029 * push_leaf functions must have already
5030 * dirtied this buffer
5032 if (path->nodes[0] == leaf)
5033 btrfs_mark_buffer_dirty(leaf);
5034 free_extent_buffer(leaf);
5037 btrfs_mark_buffer_dirty(leaf);
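/*
 * Illustrative sketch: deleting the single item the path currently points
 * at, the most common btrfs_del_items() use.  It assumes the path came from
 * a prior btrfs_search_slot() with cow = 1 (and usually ins_len = -1 so the
 * tree was balanced for a deletion):
 *
 *	ret = btrfs_del_items(trans, root, path, path->slots[0], 1);
 *	if (ret)
 *		goto out;
 *	btrfs_release_path(path);
 */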
5044 * search the tree again to find a leaf with lesser keys.
5045 * Returns 0 if it found something or 1 if there are no lesser leaves.
5046 * Returns < 0 on io errors.
5048 * This may release the path, and so you may lose any locks held at the time you call it.
5051 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
5053 struct btrfs_key key;
5054 struct btrfs_disk_key found_key;
5057 btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
5059 if (key.offset > 0) {
5061 } else if (key.type > 0) {
5063 key.offset = (u64)-1;
5064 } else if (key.objectid > 0) {
5067 key.offset = (u64)-1;
5072 btrfs_release_path(path);
5073 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5076 btrfs_item_key(path->nodes[0], &found_key, 0);
5077 ret = comp_keys(&found_key, &key);
5079 * We might have had an item with the previous key in the tree right
5080 * before we released our path. And after we released our path, that
5081 * item might have been pushed to the first slot (0) of the leaf we
5082 * were holding due to a tree balance. Alternatively, an item with the
5083 * previous key can exist as the only element of a leaf (big fat item).
5084 * Therefore account for these 2 cases, so that our callers (like
5085 * btrfs_previous_item) don't miss an existing item with a key matching
5086 * the previous key we computed above.
5094 * A helper function to walk down the tree starting at min_key, and looking
5095 * for nodes or leaves that have a minimum transaction id.
5096 * This is used by the btree defrag code and by tree logging.
5098 * This does not cow, but it does stuff the starting key it finds back
5099 * into min_key, so you can call btrfs_search_slot with cow=1 on the
5100 * key and get a writable path.
5102 * This does lock as it descends; path->keep_locks is forced to 1 for the
5103 * duration of the search and the caller's value is restored on return.
5105 * This honors path->lowest_level to prevent descent past a given level of the tree.
5108 * min_trans indicates the oldest transaction that you are interested
5109 * in walking through. Any nodes or leaves older than min_trans are
5110 * skipped over (without reading them).
5112 * returns zero if something useful was found, < 0 on error and 1 if there
5113 * was nothing in the tree that matched the search criteria.
5115 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
5116 struct btrfs_path *path,
5119 struct extent_buffer *cur;
5120 struct btrfs_key found_key;
5126 int keep_locks = path->keep_locks;
5128 path->keep_locks = 1;
5130 cur = btrfs_read_lock_root_node(root);
5131 level = btrfs_header_level(cur);
5132 WARN_ON(path->nodes[level]);
5133 path->nodes[level] = cur;
5134 path->locks[level] = BTRFS_READ_LOCK;
5136 if (btrfs_header_generation(cur) < min_trans) {
5141 nritems = btrfs_header_nritems(cur);
5142 level = btrfs_header_level(cur);
5143 sret = bin_search(cur, min_key, level, &slot);
5145 /* at the lowest level, we're done, set up the path and exit */
5146 if (level == path->lowest_level) {
5147 if (slot >= nritems)
5150 path->slots[level] = slot;
5151 btrfs_item_key_to_cpu(cur, &found_key, slot);
5154 if (sret && slot > 0)
5157 * check this node pointer against the min_trans parameter.
5158 * If it is too old, skip to the next one.
5160 while (slot < nritems) {
5163 gen = btrfs_node_ptr_generation(cur, slot);
5164 if (gen < min_trans) {
5172 * we didn't find a candidate key in this node, walk forward
5173 * and find another one
5175 if (slot >= nritems) {
5176 path->slots[level] = slot;
5177 btrfs_set_path_blocking(path);
5178 sret = btrfs_find_next_key(root, path, min_key, level,
5181 btrfs_release_path(path);
5187 /* save our key so it can be returned to the caller */
5188 btrfs_node_key_to_cpu(cur, &found_key, slot);
5189 path->slots[level] = slot;
5190 if (level == path->lowest_level) {
5194 btrfs_set_path_blocking(path);
5195 cur = read_node_slot(root, cur, slot);
5196 BUG_ON(!cur); /* -ENOMEM */
5198 btrfs_tree_read_lock(cur);
5200 path->locks[level - 1] = BTRFS_READ_LOCK;
5201 path->nodes[level - 1] = cur;
5202 unlock_up(path, level, 1, 0, NULL);
5203 btrfs_clear_path_blocking(path, NULL, 0);
5206 path->keep_locks = keep_locks;
5208 btrfs_unlock_up_safe(path, path->lowest_level + 1);
5209 btrfs_set_path_blocking(path);
5210 memcpy(min_key, &found_key, sizeof(found_key));
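/*
 * Illustrative sketch: how defrag/tree-log style callers walk everything
 * newer than a given transaction with btrfs_search_forward().  The starting
 * key, the key advancement and the processing step are placeholders:
 *
 *	struct btrfs_key min_key = { .objectid = 0, .type = 0, .offset = 0 };
 *
 *	while (1) {
 *		ret = btrfs_search_forward(root, &min_key, path, min_trans);
 *		if (ret)	// 1: nothing newer left, < 0: error
 *			break;
 *		// process the node/leaf at path->nodes[path->lowest_level];
 *		// min_key now holds the key that was found
 *		btrfs_release_path(path);
 *		// advance min_key past the returned key before looping
 *	}
 */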
5215 static void tree_move_down(struct btrfs_root *root,
5216 struct btrfs_path *path,
5217 int *level, int root_level)
5219 BUG_ON(*level == 0);
5220 path->nodes[*level - 1] = read_node_slot(root, path->nodes[*level],
5221 path->slots[*level]);
5222 path->slots[*level - 1] = 0;
5226 static int tree_move_next_or_upnext(struct btrfs_root *root,
5227 struct btrfs_path *path,
5228 int *level, int root_level)
5232 nritems = btrfs_header_nritems(path->nodes[*level]);
5234 path->slots[*level]++;
5236 while (path->slots[*level] >= nritems) {
5237 if (*level == root_level)
5241 path->slots[*level] = 0;
5242 free_extent_buffer(path->nodes[*level]);
5243 path->nodes[*level] = NULL;
5245 path->slots[*level]++;
5247 nritems = btrfs_header_nritems(path->nodes[*level]);
5254 * Returns 1 if it had to move up and next. 0 is returned if it moved only next
5257 static int tree_advance(struct btrfs_root *root,
5258 struct btrfs_path *path,
5259 int *level, int root_level,
5261 struct btrfs_key *key)
5265 if (*level == 0 || !allow_down) {
5266 ret = tree_move_next_or_upnext(root, path, level, root_level);
5268 tree_move_down(root, path, level, root_level);
5273 btrfs_item_key_to_cpu(path->nodes[*level], key,
5274 path->slots[*level]);
5276 btrfs_node_key_to_cpu(path->nodes[*level], key,
5277 path->slots[*level]);
5282 static int tree_compare_item(struct btrfs_root *left_root,
5283 struct btrfs_path *left_path,
5284 struct btrfs_path *right_path,
5289 unsigned long off1, off2;
5291 len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]);
5292 len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]);
5296 off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]);
5297 off2 = btrfs_item_ptr_offset(right_path->nodes[0],
5298 right_path->slots[0]);
5300 read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1);
5302 cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1);
5309 #define ADVANCE_ONLY_NEXT -1
5312 * This function compares two trees and calls the provided callback for
5313 * every changed/new/deleted item it finds.
5314 * If shared tree blocks are encountered, whole subtrees are skipped, making
5315 * the compare pretty fast on snapshotted subvolumes.
5317 * This currently works on commit roots only. As commit roots are read only,
5318 * we don't do any locking. The commit roots are protected with transactions.
5319 * Transactions are ended and rejoined when a commit is tried in between.
5321 * This function checks for modifications done to the trees while comparing.
5322 * If it detects a change, it aborts immediately.
5324 int btrfs_compare_trees(struct btrfs_root *left_root,
5325 struct btrfs_root *right_root,
5326 btrfs_changed_cb_t changed_cb, void *ctx)
5330 struct btrfs_path *left_path = NULL;
5331 struct btrfs_path *right_path = NULL;
5332 struct btrfs_key left_key;
5333 struct btrfs_key right_key;
5334 char *tmp_buf = NULL;
5335 int left_root_level;
5336 int right_root_level;
5339 int left_end_reached;
5340 int right_end_reached;
5348 left_path = btrfs_alloc_path();
5353 right_path = btrfs_alloc_path();
5359 tmp_buf = kmalloc(left_root->nodesize, GFP_NOFS);
5365 left_path->search_commit_root = 1;
5366 left_path->skip_locking = 1;
5367 right_path->search_commit_root = 1;
5368 right_path->skip_locking = 1;
5371 * Strategy: Go to the first items of both trees. Then do
5373 * If both trees are at level 0
5374 * Compare keys of current items
5375 * If left < right treat left item as new, advance left tree
5377 * If left > right treat right item as deleted, advance right tree
5379 * If left == right do deep compare of items, treat as changed if
5380 * needed, advance both trees and repeat
5381 * If both trees are at the same level but not at level 0
5382 * Compare keys of current nodes/leaves
5383 * If left < right advance left tree and repeat
5384 * If left > right advance right tree and repeat
5385 * If left == right compare blockptrs of the next nodes/leaves
5386 * If they match advance both trees but stay at the same level
5388 * If they don't match advance both trees while allowing to go
5390 * If tree levels are different
5391 * Advance the tree that needs it and repeat
5393 * Advancing a tree means:
5394 * If we are at level 0, try to go to the next slot. If that's not
5395 * possible, go one level up and repeat. Stop when we find a level
5396 * where we can go to the next slot. We may at this point be on a node or a leaf.
5399 * If we are not at level 0 and not on shared tree blocks, go one level deeper.
5402 * If we are not at level 0 and on shared tree blocks, go one slot to
5403 * the right if possible or go up and right.
5406 down_read(&left_root->fs_info->commit_root_sem);
5407 left_level = btrfs_header_level(left_root->commit_root);
5408 left_root_level = left_level;
5409 left_path->nodes[left_level] = left_root->commit_root;
5410 extent_buffer_get(left_path->nodes[left_level]);
5412 right_level = btrfs_header_level(right_root->commit_root);
5413 right_root_level = right_level;
5414 right_path->nodes[right_level] = right_root->commit_root;
5415 extent_buffer_get(right_path->nodes[right_level]);
5416 up_read(&left_root->fs_info->commit_root_sem);
5418 if (left_level == 0)
5419 btrfs_item_key_to_cpu(left_path->nodes[left_level],
5420 &left_key, left_path->slots[left_level]);
5422 btrfs_node_key_to_cpu(left_path->nodes[left_level],
5423 &left_key, left_path->slots[left_level]);
5424 if (right_level == 0)
5425 btrfs_item_key_to_cpu(right_path->nodes[right_level],
5426 &right_key, right_path->slots[right_level]);
5428 btrfs_node_key_to_cpu(right_path->nodes[right_level],
5429 &right_key, right_path->slots[right_level]);
5431 left_end_reached = right_end_reached = 0;
5432 advance_left = advance_right = 0;
5435 if (advance_left && !left_end_reached) {
5436 ret = tree_advance(left_root, left_path, &left_level,
5438 advance_left != ADVANCE_ONLY_NEXT,
5441 left_end_reached = ADVANCE;
5444 if (advance_right && !right_end_reached) {
5445 ret = tree_advance(right_root, right_path, &right_level,
5447 advance_right != ADVANCE_ONLY_NEXT,
5450 right_end_reached = ADVANCE;
5454 if (left_end_reached && right_end_reached) {
5457 } else if (left_end_reached) {
5458 if (right_level == 0) {
5459 ret = changed_cb(left_root, right_root,
5460 left_path, right_path,
5462 BTRFS_COMPARE_TREE_DELETED,
5467 advance_right = ADVANCE;
5469 } else if (right_end_reached) {
5470 if (left_level == 0) {
5471 ret = changed_cb(left_root, right_root,
5472 left_path, right_path,
5474 BTRFS_COMPARE_TREE_NEW,
5479 advance_left = ADVANCE;
5483 if (left_level == 0 && right_level == 0) {
5484 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5486 ret = changed_cb(left_root, right_root,
5487 left_path, right_path,
5489 BTRFS_COMPARE_TREE_NEW,
5493 advance_left = ADVANCE;
5494 } else if (cmp > 0) {
5495 ret = changed_cb(left_root, right_root,
5496 left_path, right_path,
5498 BTRFS_COMPARE_TREE_DELETED,
5502 advance_right = ADVANCE;
5504 enum btrfs_compare_tree_result result;
5506 WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
5507 ret = tree_compare_item(left_root, left_path,
5508 right_path, tmp_buf);
5510 result = BTRFS_COMPARE_TREE_CHANGED;
5512 result = BTRFS_COMPARE_TREE_SAME;
5513 ret = changed_cb(left_root, right_root,
5514 left_path, right_path,
5515 &left_key, result, ctx);
5518 advance_left = ADVANCE;
5519 advance_right = ADVANCE;
5521 } else if (left_level == right_level) {
5522 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5524 advance_left = ADVANCE;
5525 } else if (cmp > 0) {
5526 advance_right = ADVANCE;
5528 left_blockptr = btrfs_node_blockptr(
5529 left_path->nodes[left_level],
5530 left_path->slots[left_level]);
5531 right_blockptr = btrfs_node_blockptr(
5532 right_path->nodes[right_level],
5533 right_path->slots[right_level]);
5534 left_gen = btrfs_node_ptr_generation(
5535 left_path->nodes[left_level],
5536 left_path->slots[left_level]);
5537 right_gen = btrfs_node_ptr_generation(
5538 right_path->nodes[right_level],
5539 right_path->slots[right_level]);
5540 if (left_blockptr == right_blockptr &&
5541 left_gen == right_gen) {
5543 * As we're on a shared block, don't
5544 * allow going any deeper.
5546 advance_left = ADVANCE_ONLY_NEXT;
5547 advance_right = ADVANCE_ONLY_NEXT;
5549 advance_left = ADVANCE;
5550 advance_right = ADVANCE;
5553 } else if (left_level < right_level) {
5554 advance_right = ADVANCE;
5556 advance_left = ADVANCE;
5561 btrfs_free_path(left_path);
5562 btrfs_free_path(right_path);
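/*
 * Illustrative sketch: the shape of a changed_cb callback and how send-style
 * code would drive the compare.  The function name and the callback body are
 * placeholders:
 *
 *	static int report_change(struct btrfs_root *left_root,
 *				 struct btrfs_root *right_root,
 *				 struct btrfs_path *left_path,
 *				 struct btrfs_path *right_path,
 *				 struct btrfs_key *key,
 *				 enum btrfs_compare_tree_result result,
 *				 void *ctx)
 *	{
 *		// result is one of BTRFS_COMPARE_TREE_{NEW,DELETED,CHANGED,SAME}
 *		return 0;	// a negative return aborts the compare
 *	}
 *
 *	ret = btrfs_compare_trees(send_root, parent_root, report_change, ctx);
 */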
5568 * this is similar to btrfs_next_leaf, but does not try to preserve
5569 * and fix up the path. It looks for and returns the next key in the
5570 * tree based on the current path and the min_trans parameter.
5572 * 0 is returned if another key is found, < 0 if there are any errors
5573 * and 1 is returned if there are no higher keys in the tree
5575 * path->keep_locks should be set to 1 on the search made before
5576 * calling this function.
5578 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
5579 struct btrfs_key *key, int level, u64 min_trans)
5582 struct extent_buffer *c;
5584 WARN_ON(!path->keep_locks);
5585 while (level < BTRFS_MAX_LEVEL) {
5586 if (!path->nodes[level])
5589 slot = path->slots[level] + 1;
5590 c = path->nodes[level];
5592 if (slot >= btrfs_header_nritems(c)) {
5595 struct btrfs_key cur_key;
5596 if (level + 1 >= BTRFS_MAX_LEVEL ||
5597 !path->nodes[level + 1])
5600 if (path->locks[level + 1]) {
5605 slot = btrfs_header_nritems(c) - 1;
5607 btrfs_item_key_to_cpu(c, &cur_key, slot);
5609 btrfs_node_key_to_cpu(c, &cur_key, slot);
5611 orig_lowest = path->lowest_level;
5612 btrfs_release_path(path);
5613 path->lowest_level = level;
5614 ret = btrfs_search_slot(NULL, root, &cur_key, path,
5616 path->lowest_level = orig_lowest;
5620 c = path->nodes[level];
5621 slot = path->slots[level];
5628 btrfs_item_key_to_cpu(c, key, slot);
5630 u64 gen = btrfs_node_ptr_generation(c, slot);
5632 if (gen < min_trans) {
5636 btrfs_node_key_to_cpu(c, key, slot);
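/*
 * Illustrative sketch: peeking at the key that follows the current position
 * without moving the path, after a search done with keep_locks set.  The
 * level and min_trans values are placeholders:
 *
 *	struct btrfs_key next_key;
 *
 *	if (btrfs_find_next_key(root, path, &next_key, 0, min_trans) == 0) {
 *		// next_key is the first key greater than the one the path
 *		// currently points at
 *	}
 */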
5644 * search the tree again to find a leaf with greater keys.
5645 * Returns 0 if it found something or 1 if there are no greater leaves.
5646 * Returns < 0 on io errors.
5648 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
5650 return btrfs_next_old_leaf(root, path, 0);
5653 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
5658 struct extent_buffer *c;
5659 struct extent_buffer *next;
5660 struct btrfs_key key;
5663 int old_spinning = path->leave_spinning;
5664 int next_rw_lock = 0;
5666 nritems = btrfs_header_nritems(path->nodes[0]);
5670 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
5675 btrfs_release_path(path);
5677 path->keep_locks = 1;
5678 path->leave_spinning = 1;
5681 ret = btrfs_search_old_slot(root, &key, path, time_seq);
5683 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5684 path->keep_locks = 0;
5689 nritems = btrfs_header_nritems(path->nodes[0]);
5691 * by releasing the path above we dropped all our locks. A balance
5692 * could have added more items next to the key that used to be
5693 * at the very end of the block. So, check again here and
5694 * advance the path if there are now more items available.
5696 if (nritems > 0 && path->slots[0] < nritems - 1) {
5703 * So the above check misses one case:
5704 * - after releasing the path above, someone has removed the item that
5705 * used to be at the very end of the block, and balance between leaves
5706 * gets another one with bigger key.offset to replace it.
5708 * This one should be returned as well, or we can get leaf corruption
5709 * later (esp. in __btrfs_drop_extents()).
5711 * And a bit more explanation about this check,
5712 * with ret > 0, the key isn't found, the path points to the slot
5713 * where it should be inserted, so the path->slots[0] item must be the bigger one.
5716 if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) {
5721 while (level < BTRFS_MAX_LEVEL) {
5722 if (!path->nodes[level]) {
5727 slot = path->slots[level] + 1;
5728 c = path->nodes[level];
5729 if (slot >= btrfs_header_nritems(c)) {
5731 if (level == BTRFS_MAX_LEVEL) {
5739 btrfs_tree_unlock_rw(next, next_rw_lock);
5740 free_extent_buffer(next);
5744 next_rw_lock = path->locks[level];
5745 ret = read_block_for_search(NULL, root, path, &next, level,
5751 btrfs_release_path(path);
5755 if (!path->skip_locking) {
5756 ret = btrfs_try_tree_read_lock(next);
5757 if (!ret && time_seq) {
5759 * If we don't get the lock, we may be racing
5760 * with push_leaf_left, holding that lock while
5761 * itself waiting for the leaf we've currently
5762 * locked. To solve this situation, we give up
5763 * on our lock and cycle.
5765 free_extent_buffer(next);
5766 btrfs_release_path(path);
5771 btrfs_set_path_blocking(path);
5772 btrfs_tree_read_lock(next);
5773 btrfs_clear_path_blocking(path, next,
5776 next_rw_lock = BTRFS_READ_LOCK;
5780 path->slots[level] = slot;
5783 c = path->nodes[level];
5784 if (path->locks[level])
5785 btrfs_tree_unlock_rw(c, path->locks[level]);
5787 free_extent_buffer(c);
5788 path->nodes[level] = next;
5789 path->slots[level] = 0;
5790 if (!path->skip_locking)
5791 path->locks[level] = next_rw_lock;
5795 ret = read_block_for_search(NULL, root, path, &next, level,
5801 btrfs_release_path(path);
5805 if (!path->skip_locking) {
5806 ret = btrfs_try_tree_read_lock(next);
5808 btrfs_set_path_blocking(path);
5809 btrfs_tree_read_lock(next);
5810 btrfs_clear_path_blocking(path, next,
5813 next_rw_lock = BTRFS_READ_LOCK;
5818 unlock_up(path, 0, 1, 0, NULL);
5819 path->leave_spinning = old_spinning;
5821 btrfs_set_path_blocking(path);
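/*
 * Illustrative sketch: the canonical forward scan built on
 * btrfs_search_slot() + btrfs_next_leaf().  The search key and the per-item
 * processing are placeholders:
 *
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret < 0)
 *		goto out;
 *	while (1) {
 *		leaf = path->nodes[0];
 *		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
 *			ret = btrfs_next_leaf(root, path);
 *			if (ret < 0)
 *				goto out;
 *			if (ret > 0)
 *				break;	// no more leaves
 *			continue;
 *		}
 *		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
 *		// stop or handle the item based on found_key here
 *		path->slots[0]++;
 *	}
 */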
5827 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
5828 * searching until it gets past min_objectid or finds an item of 'type'
5830 * returns 0 if something is found, 1 if nothing was found and < 0 on error
5832 int btrfs_previous_item(struct btrfs_root *root,
5833 struct btrfs_path *path, u64 min_objectid,
5836 struct btrfs_key found_key;
5837 struct extent_buffer *leaf;
5842 if (path->slots[0] == 0) {
5843 btrfs_set_path_blocking(path);
5844 ret = btrfs_prev_leaf(root, path);
5850 leaf = path->nodes[0];
5851 nritems = btrfs_header_nritems(leaf);
5854 if (path->slots[0] == nritems)
5857 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5858 if (found_key.objectid < min_objectid)
5860 if (found_key.type == type)
5862 if (found_key.objectid == min_objectid &&
5863 found_key.type < type)
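/*
 * Illustrative sketch: stepping back to the closest preceding item of a
 * given type after positioning the path just past the range of interest.
 * The objectid and item type are placeholders:
 *
 *	ret = btrfs_previous_item(root, path, objectid, BTRFS_EXTENT_DATA_KEY);
 *	if (ret == 0)
 *		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
 *				      path->slots[0]);
 */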
5870 * search in extent tree to find a previous Metadata/Data extent item with min objectid.
5873 * returns 0 if something is found, 1 if nothing was found and < 0 on error
5875 int btrfs_previous_extent_item(struct btrfs_root *root,
5876 struct btrfs_path *path, u64 min_objectid)
5878 struct btrfs_key found_key;
5879 struct extent_buffer *leaf;
5884 if (path->slots[0] == 0) {
5885 btrfs_set_path_blocking(path);
5886 ret = btrfs_prev_leaf(root, path);
5892 leaf = path->nodes[0];
5893 nritems = btrfs_header_nritems(leaf);
5896 if (path->slots[0] == nritems)
5899 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5900 if (found_key.objectid < min_objectid)
5902 if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
5903 found_key.type == BTRFS_METADATA_ITEM_KEY)
5905 if (found_key.objectid == min_objectid &&
5906 found_key.type < BTRFS_EXTENT_ITEM_KEY)