2 * Copyright (C) 2007,2008 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
19 #include <linux/sched.h>
20 #include <linux/slab.h>
21 #include <linux/rbtree.h>
22 #include "ctree.h"
23 #include "disk-io.h"
24 #include "transaction.h"
25 #include "print-tree.h"
26 #include "locking.h"
28 static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
29 *root, struct btrfs_path *path, int level);
30 static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
31 *root, struct btrfs_key *ins_key,
32 struct btrfs_path *path, int data_size, int extend);
33 static int push_node_left(struct btrfs_trans_handle *trans,
34 struct btrfs_root *root, struct extent_buffer *dst,
35 struct extent_buffer *src, int empty);
36 static int balance_node_right(struct btrfs_trans_handle *trans,
37 struct btrfs_root *root,
38 struct extent_buffer *dst_buf,
39 struct extent_buffer *src_buf);
40 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
41 int level, int slot);
42 static int tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
43 struct extent_buffer *eb);
45 struct btrfs_path *btrfs_alloc_path(void)
47 struct btrfs_path *path;
48 path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
53 * set all locked nodes in the path to blocking locks. This should
54 * be done before scheduling
56 noinline void btrfs_set_path_blocking(struct btrfs_path *p)
59 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
60 if (!p->nodes[i] || !p->locks[i])
61 continue;
62 btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
63 if (p->locks[i] == BTRFS_READ_LOCK)
64 p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
65 else if (p->locks[i] == BTRFS_WRITE_LOCK)
66 p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
71 * reset all the locked nodes in the path to spinning locks.
73 * held is used to keep lockdep happy: when lockdep is enabled
74 * we set held to a blocking lock before we go around and
75 * retake all the spinlocks in the path. You can safely use NULL
78 noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
79 struct extent_buffer *held, int held_rw)
84 btrfs_set_lock_blocking_rw(held, held_rw);
85 if (held_rw == BTRFS_WRITE_LOCK)
86 held_rw = BTRFS_WRITE_LOCK_BLOCKING;
87 else if (held_rw == BTRFS_READ_LOCK)
88 held_rw = BTRFS_READ_LOCK_BLOCKING;
90 btrfs_set_path_blocking(p);
92 for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
93 if (p->nodes[i] && p->locks[i]) {
94 btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
95 if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
96 p->locks[i] = BTRFS_WRITE_LOCK;
97 else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
98 p->locks[i] = BTRFS_READ_LOCK;
103 btrfs_clear_lock_blocking_rw(held, held_rw);
106 /* this also releases the path */
107 void btrfs_free_path(struct btrfs_path *p)
111 btrfs_release_path(p);
112 kmem_cache_free(btrfs_path_cachep, p);
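/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * file): the typical caller-side lifecycle of a path. The key setup and the
 * btrfs_search_slot() call stand in for whatever lookup the caller performs.
 *
 *	struct btrfs_path *path;
 *	struct btrfs_key key = { .objectid = ino,
 *				 .type = BTRFS_INODE_ITEM_KEY,
 *				 .offset = 0 };
 *	int ret;
 *
 *	path = btrfs_alloc_path();
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret < 0)
 *		goto out;
 *	... read items via path->nodes[0] / path->slots[0] ...
 * out:
 *	btrfs_free_path(path);	(this also drops locks via btrfs_release_path)
 *	return ret;
 */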
116 * path release drops references on the extent buffers in the path
117 * and it drops any locks held by this path
119 * It is safe to call this on paths that have no locks or extent buffers held.
121 noinline void btrfs_release_path(struct btrfs_path *p)
125 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
130 btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
133 free_extent_buffer(p->nodes[i]);
139 * safely gets a reference on the root node of a tree. A lock
140 * is not taken, so a concurrent writer may put a different node
141 * at the root of the tree. See btrfs_lock_root_node for the locked variant.
144 * The extent buffer returned by this has a reference taken, so
145 * it won't disappear. It may stop being the root of the tree
146 * at any time because there are no locks held.
148 struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
150 struct extent_buffer *eb;
154 eb = rcu_dereference(root->node);
157 * RCU really hurts here; we could free up the root node because
158 * it was COWed, but we may not get the new root node yet, so do
159 * the inc_not_zero dance and if it doesn't work then
160 * synchronize_rcu and try again.
162 if (atomic_inc_not_zero(&eb->refs)) {
172 /* loop around taking references on and locking the root node of the
173 * tree until you end up with a lock on the root. A locked buffer
174 * is returned, with a reference held.
176 struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
178 struct extent_buffer *eb;
181 eb = btrfs_root_node(root);
183 if (eb == root->node)
185 btrfs_tree_unlock(eb);
186 free_extent_buffer(eb);
191 /* loop around taking references on and locking the root node of the
192 * tree until you end up with a lock on the root. A locked buffer
193 * is returned, with a reference held.
195 static struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
197 struct extent_buffer *eb;
200 eb = btrfs_root_node(root);
201 btrfs_tree_read_lock(eb);
202 if (eb == root->node)
204 btrfs_tree_read_unlock(eb);
205 free_extent_buffer(eb);
210 /* cowonly root (everything not a reference counted cow subvolume), just get
211 * put onto a simple dirty list. transaction.c walks this to make sure they
212 * get properly updated on disk.
214 static void add_root_to_dirty_list(struct btrfs_root *root)
216 if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
217 !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
220 spin_lock(&root->fs_info->trans_lock);
221 if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
222 /* Want the extent tree to be the last on the list */
223 if (root->objectid == BTRFS_EXTENT_TREE_OBJECTID)
224 list_move_tail(&root->dirty_list,
225 &root->fs_info->dirty_cowonly_roots);
227 list_move(&root->dirty_list,
228 &root->fs_info->dirty_cowonly_roots);
230 spin_unlock(&root->fs_info->trans_lock);
234 * used by snapshot creation to make a copy of a root for a tree with
235 * a given objectid. The buffer with the new root node is returned in
236 * cow_ret, and this function returns zero on success or a negative error code.
238 int btrfs_copy_root(struct btrfs_trans_handle *trans,
239 struct btrfs_root *root,
240 struct extent_buffer *buf,
241 struct extent_buffer **cow_ret, u64 new_root_objectid)
243 struct extent_buffer *cow;
246 struct btrfs_disk_key disk_key;
248 WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
249 trans->transid != root->fs_info->running_transaction->transid);
250 WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
251 trans->transid != root->last_trans);
253 level = btrfs_header_level(buf);
255 btrfs_item_key(buf, &disk_key, 0);
257 btrfs_node_key(buf, &disk_key, 0);
259 cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
260 &disk_key, level, buf->start, 0);
264 copy_extent_buffer(cow, buf, 0, 0, cow->len);
265 btrfs_set_header_bytenr(cow, cow->start);
266 btrfs_set_header_generation(cow, trans->transid);
267 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
268 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
269 BTRFS_HEADER_FLAG_RELOC);
270 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
271 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
273 btrfs_set_header_owner(cow, new_root_objectid);
275 write_extent_buffer(cow, root->fs_info->fsid, btrfs_header_fsid(),
278 WARN_ON(btrfs_header_generation(buf) > trans->transid);
279 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
280 ret = btrfs_inc_ref(trans, root, cow, 1);
282 ret = btrfs_inc_ref(trans, root, cow, 0);
287 btrfs_mark_buffer_dirty(cow);
296 MOD_LOG_KEY_REMOVE_WHILE_FREEING,
297 MOD_LOG_KEY_REMOVE_WHILE_MOVING,
299 MOD_LOG_ROOT_REPLACE,
302 struct tree_mod_move {
307 struct tree_mod_root {
312 struct tree_mod_elem {
314 u64 index; /* shifted logical */
318 /* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
321 /* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
324 /* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
325 struct btrfs_disk_key key;
328 /* this is used for op == MOD_LOG_MOVE_KEYS */
329 struct tree_mod_move move;
331 /* this is used for op == MOD_LOG_ROOT_REPLACE */
332 struct tree_mod_root old_root;
335 static inline void tree_mod_log_read_lock(struct btrfs_fs_info *fs_info)
337 read_lock(&fs_info->tree_mod_log_lock);
340 static inline void tree_mod_log_read_unlock(struct btrfs_fs_info *fs_info)
342 read_unlock(&fs_info->tree_mod_log_lock);
345 static inline void tree_mod_log_write_lock(struct btrfs_fs_info *fs_info)
347 write_lock(&fs_info->tree_mod_log_lock);
350 static inline void tree_mod_log_write_unlock(struct btrfs_fs_info *fs_info)
352 write_unlock(&fs_info->tree_mod_log_lock);
356 * Pull a new tree mod seq number for our operation.
358 static inline u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)
360 return atomic64_inc_return(&fs_info->tree_mod_seq);
364 * This adds a new blocker to the tree mod log's blocker list if the @elem
365 * passed does not already have a sequence number set. So when a caller expects
366 * to record tree modifications, it should ensure to set elem->seq to zero
367 * before calling btrfs_get_tree_mod_seq.
368 * Returns a fresh, unused tree log modification sequence number, even if no new blocker was added.
371 u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
372 struct seq_list *elem)
374 tree_mod_log_write_lock(fs_info);
375 spin_lock(&fs_info->tree_mod_seq_lock);
377 elem->seq = btrfs_inc_tree_mod_seq(fs_info);
378 list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
380 spin_unlock(&fs_info->tree_mod_seq_lock);
381 tree_mod_log_write_unlock(fs_info);
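/*
 * Illustrative sketch (editorial addition; the caller shown is an assumption,
 * e.g. backref-walking code): a seq_list element is registered before the
 * walk and dropped afterwards, so that tree mod log entries the walk may
 * still need are not pruned in between.
 *
 *	struct seq_list elem = {};	(elem.seq must start out as zero)
 *
 *	btrfs_get_tree_mod_seq(fs_info, &elem);
 *	... walk the tree, passing elem.seq as the time_seq argument ...
 *	btrfs_put_tree_mod_seq(fs_info, &elem);
 */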
386 void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
387 struct seq_list *elem)
389 struct rb_root *tm_root;
390 struct rb_node *node;
391 struct rb_node *next;
392 struct seq_list *cur_elem;
393 struct tree_mod_elem *tm;
394 u64 min_seq = (u64)-1;
395 u64 seq_putting = elem->seq;
400 spin_lock(&fs_info->tree_mod_seq_lock);
401 list_del(&elem->list);
404 list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
405 if (cur_elem->seq < min_seq) {
406 if (seq_putting > cur_elem->seq) {
408 * blocker with lower sequence number exists, we
409 * cannot remove anything from the log
411 spin_unlock(&fs_info->tree_mod_seq_lock);
414 min_seq = cur_elem->seq;
417 spin_unlock(&fs_info->tree_mod_seq_lock);
420 * anything that's lower than the lowest existing (read: blocked)
421 * sequence number can be removed from the tree.
423 tree_mod_log_write_lock(fs_info);
424 tm_root = &fs_info->tree_mod_log;
425 for (node = rb_first(tm_root); node; node = next) {
426 next = rb_next(node);
427 tm = container_of(node, struct tree_mod_elem, node);
428 if (tm->seq > min_seq)
430 rb_erase(node, tm_root);
433 tree_mod_log_write_unlock(fs_info);
437 * key order of the log:
440 * the index is the shifted logical of the *new* root node for root replace
441 * operations, or the shifted logical of the affected block for all other operations.
444 * Note: must be called with write lock (tree_mod_log_write_lock).
447 __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
449 struct rb_root *tm_root;
450 struct rb_node **new;
451 struct rb_node *parent = NULL;
452 struct tree_mod_elem *cur;
456 tm->seq = btrfs_inc_tree_mod_seq(fs_info);
458 tm_root = &fs_info->tree_mod_log;
459 new = &tm_root->rb_node;
461 cur = container_of(*new, struct tree_mod_elem, node);
463 if (cur->index < tm->index)
464 new = &((*new)->rb_left);
465 else if (cur->index > tm->index)
466 new = &((*new)->rb_right);
467 else if (cur->seq < tm->seq)
468 new = &((*new)->rb_left);
469 else if (cur->seq > tm->seq)
470 new = &((*new)->rb_right);
475 rb_link_node(&tm->node, parent, new);
476 rb_insert_color(&tm->node, tm_root);
481 * Determines if logging can be omitted. Returns 1 if it can. Otherwise, it
482 * returns zero with the tree_mod_log_lock acquired. The caller must hold
483 * this until all tree mod log insertions are recorded in the rb tree and then
484 * call tree_mod_log_write_unlock() to release.
486 static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
487 struct extent_buffer *eb) {
489 if (list_empty(&(fs_info)->tree_mod_seq_list))
491 if (eb && btrfs_header_level(eb) == 0)
494 tree_mod_log_write_lock(fs_info);
495 if (list_empty(&(fs_info)->tree_mod_seq_list)) {
496 tree_mod_log_write_unlock(fs_info);
503 /* Similar to tree_mod_dont_log, but doesn't acquire any locks. */
504 static inline int tree_mod_need_log(const struct btrfs_fs_info *fs_info,
505 struct extent_buffer *eb)
508 if (list_empty(&(fs_info)->tree_mod_seq_list))
510 if (eb && btrfs_header_level(eb) == 0)
516 static struct tree_mod_elem *
517 alloc_tree_mod_elem(struct extent_buffer *eb, int slot,
518 enum mod_log_op op, gfp_t flags)
520 struct tree_mod_elem *tm;
522 tm = kzalloc(sizeof(*tm), flags);
526 tm->index = eb->start >> PAGE_CACHE_SHIFT;
527 if (op != MOD_LOG_KEY_ADD) {
528 btrfs_node_key(eb, &tm->key, slot);
529 tm->blockptr = btrfs_node_blockptr(eb, slot);
533 tm->generation = btrfs_node_ptr_generation(eb, slot);
534 RB_CLEAR_NODE(&tm->node);
540 tree_mod_log_insert_key(struct btrfs_fs_info *fs_info,
541 struct extent_buffer *eb, int slot,
542 enum mod_log_op op, gfp_t flags)
544 struct tree_mod_elem *tm;
547 if (!tree_mod_need_log(fs_info, eb))
550 tm = alloc_tree_mod_elem(eb, slot, op, flags);
554 if (tree_mod_dont_log(fs_info, eb)) {
559 ret = __tree_mod_log_insert(fs_info, tm);
560 tree_mod_log_write_unlock(fs_info);
568 tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
569 struct extent_buffer *eb, int dst_slot, int src_slot,
570 int nr_items, gfp_t flags)
572 struct tree_mod_elem *tm = NULL;
573 struct tree_mod_elem **tm_list = NULL;
578 if (!tree_mod_need_log(fs_info, eb))
581 tm_list = kzalloc(nr_items * sizeof(struct tree_mod_elem *), flags);
585 tm = kzalloc(sizeof(*tm), flags);
591 tm->index = eb->start >> PAGE_CACHE_SHIFT;
593 tm->move.dst_slot = dst_slot;
594 tm->move.nr_items = nr_items;
595 tm->op = MOD_LOG_MOVE_KEYS;
597 for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
598 tm_list[i] = alloc_tree_mod_elem(eb, i + dst_slot,
599 MOD_LOG_KEY_REMOVE_WHILE_MOVING, flags);
606 if (tree_mod_dont_log(fs_info, eb))
611 * When we overwrite something during the move, we log these removals.
612 * This can only happen when we move towards the beginning of the
613 * buffer, i.e. dst_slot < src_slot.
615 for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
616 ret = __tree_mod_log_insert(fs_info, tm_list[i]);
621 ret = __tree_mod_log_insert(fs_info, tm);
624 tree_mod_log_write_unlock(fs_info);
629 for (i = 0; i < nr_items; i++) {
630 if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
631 rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
635 tree_mod_log_write_unlock(fs_info);
643 __tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
644 struct tree_mod_elem **tm_list,
650 for (i = nritems - 1; i >= 0; i--) {
651 ret = __tree_mod_log_insert(fs_info, tm_list[i]);
653 for (j = nritems - 1; j > i; j--)
654 rb_erase(&tm_list[j]->node,
655 &fs_info->tree_mod_log);
664 tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
665 struct extent_buffer *old_root,
666 struct extent_buffer *new_root, gfp_t flags,
669 struct tree_mod_elem *tm = NULL;
670 struct tree_mod_elem **tm_list = NULL;
675 if (!tree_mod_need_log(fs_info, NULL))
678 if (log_removal && btrfs_header_level(old_root) > 0) {
679 nritems = btrfs_header_nritems(old_root);
680 tm_list = kzalloc(nritems * sizeof(struct tree_mod_elem *),
686 for (i = 0; i < nritems; i++) {
687 tm_list[i] = alloc_tree_mod_elem(old_root, i,
688 MOD_LOG_KEY_REMOVE_WHILE_FREEING, flags);
696 tm = kzalloc(sizeof(*tm), flags);
702 tm->index = new_root->start >> PAGE_CACHE_SHIFT;
703 tm->old_root.logical = old_root->start;
704 tm->old_root.level = btrfs_header_level(old_root);
705 tm->generation = btrfs_header_generation(old_root);
706 tm->op = MOD_LOG_ROOT_REPLACE;
708 if (tree_mod_dont_log(fs_info, NULL))
712 ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
714 ret = __tree_mod_log_insert(fs_info, tm);
716 tree_mod_log_write_unlock(fs_info);
725 for (i = 0; i < nritems; i++)
734 static struct tree_mod_elem *
735 __tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
738 struct rb_root *tm_root;
739 struct rb_node *node;
740 struct tree_mod_elem *cur = NULL;
741 struct tree_mod_elem *found = NULL;
742 u64 index = start >> PAGE_CACHE_SHIFT;
744 tree_mod_log_read_lock(fs_info);
745 tm_root = &fs_info->tree_mod_log;
746 node = tm_root->rb_node;
748 cur = container_of(node, struct tree_mod_elem, node);
749 if (cur->index < index) {
750 node = node->rb_left;
751 } else if (cur->index > index) {
752 node = node->rb_right;
753 } else if (cur->seq < min_seq) {
754 node = node->rb_left;
755 } else if (!smallest) {
756 /* we want the node with the highest seq */
758 BUG_ON(found->seq > cur->seq);
760 node = node->rb_left;
761 } else if (cur->seq > min_seq) {
762 /* we want the node with the smallest seq */
764 BUG_ON(found->seq < cur->seq);
766 node = node->rb_right;
772 tree_mod_log_read_unlock(fs_info);
778 * this returns the element from the log with the smallest time sequence
779 * value that's in the log (the oldest log item). any element with a time
780 * sequence lower than min_seq will be ignored.
782 static struct tree_mod_elem *
783 tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
786 return __tree_mod_log_search(fs_info, start, min_seq, 1);
790 * this returns the element from the log with the largest time sequence
791 * value that's in the log (the most recent log item). any element with
792 * a time sequence lower than min_seq will be ignored.
794 static struct tree_mod_elem *
795 tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
797 return __tree_mod_log_search(fs_info, start, min_seq, 0);
801 tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
802 struct extent_buffer *src, unsigned long dst_offset,
803 unsigned long src_offset, int nr_items)
806 struct tree_mod_elem **tm_list = NULL;
807 struct tree_mod_elem **tm_list_add, **tm_list_rem;
811 if (!tree_mod_need_log(fs_info, NULL))
814 if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
817 tm_list = kzalloc(nr_items * 2 * sizeof(struct tree_mod_elem *),
822 tm_list_add = tm_list;
823 tm_list_rem = tm_list + nr_items;
824 for (i = 0; i < nr_items; i++) {
825 tm_list_rem[i] = alloc_tree_mod_elem(src, i + src_offset,
826 MOD_LOG_KEY_REMOVE, GFP_NOFS);
827 if (!tm_list_rem[i]) {
832 tm_list_add[i] = alloc_tree_mod_elem(dst, i + dst_offset,
833 MOD_LOG_KEY_ADD, GFP_NOFS);
834 if (!tm_list_add[i]) {
840 if (tree_mod_dont_log(fs_info, NULL))
844 for (i = 0; i < nr_items; i++) {
845 ret = __tree_mod_log_insert(fs_info, tm_list_rem[i]);
848 ret = __tree_mod_log_insert(fs_info, tm_list_add[i]);
853 tree_mod_log_write_unlock(fs_info);
859 for (i = 0; i < nr_items * 2; i++) {
860 if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
861 rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
865 tree_mod_log_write_unlock(fs_info);
872 tree_mod_log_eb_move(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
873 int dst_offset, int src_offset, int nr_items)
876 ret = tree_mod_log_insert_move(fs_info, dst, dst_offset, src_offset,
882 tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info,
883 struct extent_buffer *eb, int slot, int atomic)
887 ret = tree_mod_log_insert_key(fs_info, eb, slot,
889 atomic ? GFP_ATOMIC : GFP_NOFS);
894 tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
896 struct tree_mod_elem **tm_list = NULL;
901 if (btrfs_header_level(eb) == 0)
904 if (!tree_mod_need_log(fs_info, NULL))
907 nritems = btrfs_header_nritems(eb);
908 tm_list = kzalloc(nritems * sizeof(struct tree_mod_elem *),
913 for (i = 0; i < nritems; i++) {
914 tm_list[i] = alloc_tree_mod_elem(eb, i,
915 MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
922 if (tree_mod_dont_log(fs_info, eb))
925 ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
926 tree_mod_log_write_unlock(fs_info);
934 for (i = 0; i < nritems; i++)
942 tree_mod_log_set_root_pointer(struct btrfs_root *root,
943 struct extent_buffer *new_root_node,
947 ret = tree_mod_log_insert_root(root->fs_info, root->node,
948 new_root_node, GFP_NOFS, log_removal);
953 * check if the tree block can be shared by multiple trees
955 int btrfs_block_can_be_shared(struct btrfs_root *root,
956 struct extent_buffer *buf)
959 * Tree blocks not in reference counted trees and tree roots
960 * are never shared. If a block was allocated after the last
961 * snapshot and the block was not allocated by tree relocation,
962 * we know the block is not shared.
964 if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
965 buf != root->node && buf != root->commit_root &&
966 (btrfs_header_generation(buf) <=
967 btrfs_root_last_snapshot(&root->root_item) ||
968 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
970 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
971 if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
972 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
978 static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
979 struct btrfs_root *root,
980 struct extent_buffer *buf,
981 struct extent_buffer *cow,
991 * Backrefs update rules:
993 * Always use full backrefs for extent pointers in tree block
994 * allocated by tree relocation.
996 * If a shared tree block is no longer referenced by its owner
997 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
998 * use full backrefs for extent pointers in tree block.
1000 * If a tree block is being relocated
1001 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
1002 * use full backrefs for extent pointers in tree block.
1003 * The reason for this is some operations (such as drop tree)
1004 * are only allowed for blocks that use full backrefs.
1007 if (btrfs_block_can_be_shared(root, buf)) {
1008 ret = btrfs_lookup_extent_info(trans, root, buf->start,
1009 btrfs_header_level(buf), 1,
1015 btrfs_std_error(root->fs_info, ret);
1020 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
1021 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
1022 flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
1027 owner = btrfs_header_owner(buf);
1028 BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
1029 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
1032 if ((owner == root->root_key.objectid ||
1033 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
1034 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
1035 ret = btrfs_inc_ref(trans, root, buf, 1);
1036 BUG_ON(ret); /* -ENOMEM */
1038 if (root->root_key.objectid ==
1039 BTRFS_TREE_RELOC_OBJECTID) {
1040 ret = btrfs_dec_ref(trans, root, buf, 0);
1041 BUG_ON(ret); /* -ENOMEM */
1042 ret = btrfs_inc_ref(trans, root, cow, 1);
1043 BUG_ON(ret); /* -ENOMEM */
1045 new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
1048 if (root->root_key.objectid ==
1049 BTRFS_TREE_RELOC_OBJECTID)
1050 ret = btrfs_inc_ref(trans, root, cow, 1);
1052 ret = btrfs_inc_ref(trans, root, cow, 0);
1053 BUG_ON(ret); /* -ENOMEM */
1055 if (new_flags != 0) {
1056 int level = btrfs_header_level(buf);
1058 ret = btrfs_set_disk_extent_flags(trans, root,
1061 new_flags, level, 0);
1066 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
1067 if (root->root_key.objectid ==
1068 BTRFS_TREE_RELOC_OBJECTID)
1069 ret = btrfs_inc_ref(trans, root, cow, 1);
1071 ret = btrfs_inc_ref(trans, root, cow, 0);
1072 BUG_ON(ret); /* -ENOMEM */
1073 ret = btrfs_dec_ref(trans, root, buf, 1);
1074 BUG_ON(ret); /* -ENOMEM */
1076 clean_tree_block(trans, root, buf);
1083 * does the dirty work in cow of a single block. The parent block (if
1084 * supplied) is updated to point to the new cow copy. The new buffer is marked
1085 * dirty and returned locked. If you modify the block it needs to be marked dirty again.
1088 * search_start -- an allocation hint for the new block
1090 * empty_size -- a hint that you plan on doing more cow. This is the size in
1091 * bytes the allocator should try to find free next to the block it returns.
1092 * This is just a hint and may be ignored by the allocator.
1094 static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
1095 struct btrfs_root *root,
1096 struct extent_buffer *buf,
1097 struct extent_buffer *parent, int parent_slot,
1098 struct extent_buffer **cow_ret,
1099 u64 search_start, u64 empty_size)
1101 struct btrfs_disk_key disk_key;
1102 struct extent_buffer *cow;
1105 int unlock_orig = 0;
1108 if (*cow_ret == buf)
1111 btrfs_assert_tree_locked(buf);
1113 WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
1114 trans->transid != root->fs_info->running_transaction->transid);
1115 WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
1116 trans->transid != root->last_trans);
1118 level = btrfs_header_level(buf);
1121 btrfs_item_key(buf, &disk_key, 0);
1123 btrfs_node_key(buf, &disk_key, 0);
1125 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
1127 parent_start = parent->start;
1133 cow = btrfs_alloc_tree_block(trans, root, parent_start,
1134 root->root_key.objectid, &disk_key, level,
1135 search_start, empty_size);
1137 return PTR_ERR(cow);
1139 /* cow is set to blocking by btrfs_init_new_buffer */
1141 copy_extent_buffer(cow, buf, 0, 0, cow->len);
1142 btrfs_set_header_bytenr(cow, cow->start);
1143 btrfs_set_header_generation(cow, trans->transid);
1144 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
1145 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
1146 BTRFS_HEADER_FLAG_RELOC);
1147 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1148 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
1150 btrfs_set_header_owner(cow, root->root_key.objectid);
1152 write_extent_buffer(cow, root->fs_info->fsid, btrfs_header_fsid(),
1155 ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
1157 btrfs_abort_transaction(trans, root, ret);
1161 if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
1162 ret = btrfs_reloc_cow_block(trans, root, buf, cow);
1167 if (buf == root->node) {
1168 WARN_ON(parent && parent != buf);
1169 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
1170 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
1171 parent_start = buf->start;
1175 extent_buffer_get(cow);
1176 tree_mod_log_set_root_pointer(root, cow, 1);
1177 rcu_assign_pointer(root->node, cow);
1179 btrfs_free_tree_block(trans, root, buf, parent_start,
1181 free_extent_buffer(buf);
1182 add_root_to_dirty_list(root);
1184 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1185 parent_start = parent->start;
1189 WARN_ON(trans->transid != btrfs_header_generation(parent));
1190 tree_mod_log_insert_key(root->fs_info, parent, parent_slot,
1191 MOD_LOG_KEY_REPLACE, GFP_NOFS);
1192 btrfs_set_node_blockptr(parent, parent_slot,
1194 btrfs_set_node_ptr_generation(parent, parent_slot,
1196 btrfs_mark_buffer_dirty(parent);
1198 ret = tree_mod_log_free_eb(root->fs_info, buf);
1200 btrfs_abort_transaction(trans, root, ret);
1204 btrfs_free_tree_block(trans, root, buf, parent_start,
1208 btrfs_tree_unlock(buf);
1209 free_extent_buffer_stale(buf);
1210 btrfs_mark_buffer_dirty(cow);
1216 * returns the logical address of the oldest predecessor of the given root.
1217 * entries older than time_seq are ignored.
1219 static struct tree_mod_elem *
1220 __tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
1221 struct extent_buffer *eb_root, u64 time_seq)
1223 struct tree_mod_elem *tm;
1224 struct tree_mod_elem *found = NULL;
1225 u64 root_logical = eb_root->start;
1232 * the very last operation that's logged for a root is the replacement
1233 * operation (if it is replaced at all). this has the index of the *new*
1234 * root, making it the very first operation that's logged for this root.
1237 tm = tree_mod_log_search_oldest(fs_info, root_logical,
1242 * if there are no tree operations for the oldest root, we simply
1243 * return it. this should only happen if that (old) root is at level 0.
1250 * if there's an operation that's not a root replacement, we
1251 * found the oldest version of our root. normally, we'll find a
1252 * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
1254 if (tm->op != MOD_LOG_ROOT_REPLACE)
1258 root_logical = tm->old_root.logical;
1262 /* if there's no old root to return, return what we found instead */
1270 * tm is a pointer to the first operation to rewind within eb. then, all
1271 * previous operations will be rewound (until we reach something older than time_seq).
1275 __tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
1276 u64 time_seq, struct tree_mod_elem *first_tm)
1279 struct rb_node *next;
1280 struct tree_mod_elem *tm = first_tm;
1281 unsigned long o_dst;
1282 unsigned long o_src;
1283 unsigned long p_size = sizeof(struct btrfs_key_ptr);
1285 n = btrfs_header_nritems(eb);
1286 tree_mod_log_read_lock(fs_info);
1287 while (tm && tm->seq >= time_seq) {
1289 * all the operations are recorded with the operator used for
1290 * the modification. as we're going backwards, we do the
1291 * opposite of each operation here.
1294 case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
1295 BUG_ON(tm->slot < n);
1297 case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
1298 case MOD_LOG_KEY_REMOVE:
1299 btrfs_set_node_key(eb, &tm->key, tm->slot);
1300 btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1301 btrfs_set_node_ptr_generation(eb, tm->slot,
1305 case MOD_LOG_KEY_REPLACE:
1306 BUG_ON(tm->slot >= n);
1307 btrfs_set_node_key(eb, &tm->key, tm->slot);
1308 btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1309 btrfs_set_node_ptr_generation(eb, tm->slot,
1312 case MOD_LOG_KEY_ADD:
1313 /* if a move operation is needed it's in the log */
1316 case MOD_LOG_MOVE_KEYS:
1317 o_dst = btrfs_node_key_ptr_offset(tm->slot);
1318 o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
1319 memmove_extent_buffer(eb, o_dst, o_src,
1320 tm->move.nr_items * p_size);
1322 case MOD_LOG_ROOT_REPLACE:
1324 * this operation is special. for roots, this must be
1325 * handled explicitly before rewinding.
1326 * for non-roots, this operation may exist if the node
1327 * was a root: root A -> child B; then A gets empty and
1328 * B is promoted to the new root. in the mod log, we'll
1329 * have a root-replace operation for B, a tree block
1330 * that is not a root. we simply ignore that operation.
1334 next = rb_next(&tm->node);
1337 tm = container_of(next, struct tree_mod_elem, node);
1338 if (tm->index != first_tm->index)
1341 tree_mod_log_read_unlock(fs_info);
1342 btrfs_set_header_nritems(eb, n);
1346 * Called with eb read locked. If the buffer cannot be rewound, the same buffer
1347 * is returned. If rewind operations happen, a fresh buffer is returned. The
1348 * returned buffer is always read-locked. If the returned buffer is not the
1349 * input buffer, the lock on the input buffer is released and the input buffer
1350 * is freed (its refcount is decremented).
1352 static struct extent_buffer *
1353 tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
1354 struct extent_buffer *eb, u64 time_seq)
1356 struct extent_buffer *eb_rewin;
1357 struct tree_mod_elem *tm;
1362 if (btrfs_header_level(eb) == 0)
1365 tm = tree_mod_log_search(fs_info, eb->start, time_seq);
1369 btrfs_set_path_blocking(path);
1370 btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
1372 if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1373 BUG_ON(tm->slot != 0);
1374 eb_rewin = alloc_dummy_extent_buffer(eb->start,
1375 fs_info->tree_root->nodesize);
1377 btrfs_tree_read_unlock_blocking(eb);
1378 free_extent_buffer(eb);
1381 btrfs_set_header_bytenr(eb_rewin, eb->start);
1382 btrfs_set_header_backref_rev(eb_rewin,
1383 btrfs_header_backref_rev(eb));
1384 btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
1385 btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
1387 eb_rewin = btrfs_clone_extent_buffer(eb);
1389 btrfs_tree_read_unlock_blocking(eb);
1390 free_extent_buffer(eb);
1395 btrfs_clear_path_blocking(path, NULL, BTRFS_READ_LOCK);
1396 btrfs_tree_read_unlock_blocking(eb);
1397 free_extent_buffer(eb);
1399 extent_buffer_get(eb_rewin);
1400 btrfs_tree_read_lock(eb_rewin);
1401 __tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
1402 WARN_ON(btrfs_header_nritems(eb_rewin) >
1403 BTRFS_NODEPTRS_PER_BLOCK(fs_info->tree_root));
1409 * get_old_root() rewinds the state of @root's root node to the given @time_seq
1410 * value. If there are no changes, the current root->root_node is returned. If
1411 * anything changed in between, there's a fresh buffer allocated on which the
1412 * rewind operations are done. In any case, the returned buffer is read locked.
1413 * Returns NULL on error (with no locks held).
1415 static inline struct extent_buffer *
1416 get_old_root(struct btrfs_root *root, u64 time_seq)
1418 struct tree_mod_elem *tm;
1419 struct extent_buffer *eb = NULL;
1420 struct extent_buffer *eb_root;
1421 struct extent_buffer *old;
1422 struct tree_mod_root *old_root = NULL;
1423 u64 old_generation = 0;
1426 eb_root = btrfs_read_lock_root_node(root);
1427 tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
1431 if (tm->op == MOD_LOG_ROOT_REPLACE) {
1432 old_root = &tm->old_root;
1433 old_generation = tm->generation;
1434 logical = old_root->logical;
1436 logical = eb_root->start;
1439 tm = tree_mod_log_search(root->fs_info, logical, time_seq);
1440 if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1441 btrfs_tree_read_unlock(eb_root);
1442 free_extent_buffer(eb_root);
1443 old = read_tree_block(root, logical, 0);
1444 if (WARN_ON(!old || !extent_buffer_uptodate(old))) {
1445 free_extent_buffer(old);
1446 btrfs_warn(root->fs_info,
1447 "failed to read tree block %llu from get_old_root", logical);
1449 eb = btrfs_clone_extent_buffer(old);
1450 free_extent_buffer(old);
1452 } else if (old_root) {
1453 btrfs_tree_read_unlock(eb_root);
1454 free_extent_buffer(eb_root);
1455 eb = alloc_dummy_extent_buffer(logical, root->nodesize);
1457 btrfs_set_lock_blocking_rw(eb_root, BTRFS_READ_LOCK);
1458 eb = btrfs_clone_extent_buffer(eb_root);
1459 btrfs_tree_read_unlock_blocking(eb_root);
1460 free_extent_buffer(eb_root);
1465 extent_buffer_get(eb);
1466 btrfs_tree_read_lock(eb);
1468 btrfs_set_header_bytenr(eb, eb->start);
1469 btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
1470 btrfs_set_header_owner(eb, btrfs_header_owner(eb_root));
1471 btrfs_set_header_level(eb, old_root->level);
1472 btrfs_set_header_generation(eb, old_generation);
1475 __tree_mod_log_rewind(root->fs_info, eb, time_seq, tm);
1477 WARN_ON(btrfs_header_level(eb) != 0);
1478 WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(root));
1483 int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq)
1485 struct tree_mod_elem *tm;
1487 struct extent_buffer *eb_root = btrfs_root_node(root);
1489 tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
1490 if (tm && tm->op == MOD_LOG_ROOT_REPLACE) {
1491 level = tm->old_root.level;
1493 level = btrfs_header_level(eb_root);
1495 free_extent_buffer(eb_root);
1500 static inline int should_cow_block(struct btrfs_trans_handle *trans,
1501 struct btrfs_root *root,
1502 struct extent_buffer *buf)
1504 if (btrfs_test_is_dummy_root(root))
1507 /* ensure we can see the force_cow */
1511 * We do not need to cow a block if
1512 * 1) this block is not created or changed in this transaction;
1513 * 2) this block does not belong to TREE_RELOC tree;
1514 * 3) the root is not forced COW.
1516 * What is forced COW:
1517 * when we create a snapshot during the transaction commit,
1518 * after we've finished copying the src root, we must COW the shared
1519 * block to ensure metadata consistency.
1521 if (btrfs_header_generation(buf) == trans->transid &&
1522 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
1523 !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
1524 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
1525 !test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
1531 * cows a single block, see __btrfs_cow_block for the real work.
1532 * This version of it has extra checks so that a block isn't cow'd more than
1533 * once per transaction, as long as it hasn't been written yet
1535 noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
1536 struct btrfs_root *root, struct extent_buffer *buf,
1537 struct extent_buffer *parent, int parent_slot,
1538 struct extent_buffer **cow_ret)
1543 if (trans->transaction != root->fs_info->running_transaction)
1544 WARN(1, KERN_CRIT "trans %llu running %llu\n",
1546 root->fs_info->running_transaction->transid);
1548 if (trans->transid != root->fs_info->generation)
1549 WARN(1, KERN_CRIT "trans %llu running %llu\n",
1550 trans->transid, root->fs_info->generation);
1552 if (!should_cow_block(trans, root, buf)) {
1557 search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);
1560 btrfs_set_lock_blocking(parent);
1561 btrfs_set_lock_blocking(buf);
1563 ret = __btrfs_cow_block(trans, root, buf, parent,
1564 parent_slot, cow_ret, search_start, 0);
1566 trace_btrfs_cow_block(root, buf, *cow_ret);
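/*
 * Illustrative sketch (editorial addition; 'trans', 'root', 'parent' and
 * 'parent_slot' are placeholders for the caller's context): a block must be
 * COWed in the current transaction before it is modified.
 *
 *	struct extent_buffer *cow;
 *	int ret;
 *
 *	ret = btrfs_cow_block(trans, root, buf, parent, parent_slot, &cow);
 *	if (ret)
 *		return ret;
 *	... modify 'cow', which comes back locked and dirty; never modify
 *	    the old 'buf' ...
 */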
1572 * helper function for defrag to decide if two blocks pointed to by a
1573 * node are actually close by
1575 static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
1577 if (blocknr < other && other - (blocknr + blocksize) < 32768)
1579 if (blocknr > other && blocknr - (other + blocksize) < 32768)
1585 * compare two keys in a memcmp fashion
1587 static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
1589 struct btrfs_key k1;
1591 btrfs_disk_key_to_cpu(&k1, disk);
1593 return btrfs_comp_cpu_keys(&k1, k2);
1597 * same as comp_keys only with two btrfs_key's
1599 int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
1601 if (k1->objectid > k2->objectid)
1603 if (k1->objectid < k2->objectid)
1605 if (k1->type > k2->type)
1607 if (k1->type < k2->type)
1609 if (k1->offset > k2->offset)
1611 if (k1->offset < k2->offset)
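/*
 * Worked example (editorial addition): keys are ordered by
 * (objectid, type, offset), compared in that order. For a single inode,
 * (256, BTRFS_INODE_ITEM_KEY, 0) sorts before (256, BTRFS_DIR_ITEM_KEY, hash)
 * because BTRFS_INODE_ITEM_KEY (1) is smaller than BTRFS_DIR_ITEM_KEY (84);
 * offset only breaks ties when objectid and type are equal.
 */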
1617 * this is used by the defrag code to go through all the
1618 * leaves pointed to by a node and reallocate them so that
1619 * disk order is close to key order
1621 int btrfs_realloc_node(struct btrfs_trans_handle *trans,
1622 struct btrfs_root *root, struct extent_buffer *parent,
1623 int start_slot, u64 *last_ret,
1624 struct btrfs_key *progress)
1626 struct extent_buffer *cur;
1629 u64 search_start = *last_ret;
1639 int progress_passed = 0;
1640 struct btrfs_disk_key disk_key;
1642 parent_level = btrfs_header_level(parent);
1644 WARN_ON(trans->transaction != root->fs_info->running_transaction);
1645 WARN_ON(trans->transid != root->fs_info->generation);
1647 parent_nritems = btrfs_header_nritems(parent);
1648 blocksize = root->nodesize;
1649 end_slot = parent_nritems;
1651 if (parent_nritems == 1)
1654 btrfs_set_lock_blocking(parent);
1656 for (i = start_slot; i < end_slot; i++) {
1659 btrfs_node_key(parent, &disk_key, i);
1660 if (!progress_passed && comp_keys(&disk_key, progress) < 0)
1663 progress_passed = 1;
1664 blocknr = btrfs_node_blockptr(parent, i);
1665 gen = btrfs_node_ptr_generation(parent, i);
1666 if (last_block == 0)
1667 last_block = blocknr;
1670 other = btrfs_node_blockptr(parent, i - 1);
1671 close = close_blocks(blocknr, other, blocksize);
1673 if (!close && i < end_slot - 2) {
1674 other = btrfs_node_blockptr(parent, i + 1);
1675 close = close_blocks(blocknr, other, blocksize);
1678 last_block = blocknr;
1682 cur = btrfs_find_tree_block(root, blocknr);
1684 uptodate = btrfs_buffer_uptodate(cur, gen, 0);
1687 if (!cur || !uptodate) {
1689 cur = read_tree_block(root, blocknr, gen);
1690 if (!cur || !extent_buffer_uptodate(cur)) {
1691 free_extent_buffer(cur);
1694 } else if (!uptodate) {
1695 err = btrfs_read_buffer(cur, gen);
1697 free_extent_buffer(cur);
1702 if (search_start == 0)
1703 search_start = last_block;
1705 btrfs_tree_lock(cur);
1706 btrfs_set_lock_blocking(cur);
1707 err = __btrfs_cow_block(trans, root, cur, parent, i,
1710 (end_slot - i) * blocksize));
1712 btrfs_tree_unlock(cur);
1713 free_extent_buffer(cur);
1716 search_start = cur->start;
1717 last_block = cur->start;
1718 *last_ret = search_start;
1719 btrfs_tree_unlock(cur);
1720 free_extent_buffer(cur);
1726 * The leaf data grows from end-to-front in the node.
1727 * this returns the address of the start of the last item,
1728 * which is the end of the leaf data stack
1730 static inline unsigned int leaf_data_end(struct btrfs_root *root,
1731 struct extent_buffer *leaf)
1733 u32 nr = btrfs_header_nritems(leaf);
1735 return BTRFS_LEAF_DATA_SIZE(root);
1736 return btrfs_item_offset_nr(leaf, nr - 1);
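/*
 * Rough layout sketch (editorial addition): item headers grow from the front
 * of the leaf while item data grows backwards from the end, so the data of
 * the last item marks the boundary of the free space:
 *
 *	[leaf header][item 0][item 1] ... free space ... [data N-1] ... [data 0]
 *	                                                 ^
 *	                                                 leaf_data_end()
 */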
1741 * search for key in the extent_buffer. The items start at offset p,
1742 * and they are item_size apart. There are 'max' items in p.
1744 * the slot in the array is returned via slot, and it points to
1745 * the place where you would insert key if it is not found in
1748 * slot may point to max if the key is bigger than all of the keys
1750 static noinline int generic_bin_search(struct extent_buffer *eb,
1752 int item_size, struct btrfs_key *key,
1759 struct btrfs_disk_key *tmp = NULL;
1760 struct btrfs_disk_key unaligned;
1761 unsigned long offset;
1763 unsigned long map_start = 0;
1764 unsigned long map_len = 0;
1767 while (low < high) {
1768 mid = (low + high) / 2;
1769 offset = p + mid * item_size;
1771 if (!kaddr || offset < map_start ||
1772 (offset + sizeof(struct btrfs_disk_key)) >
1773 map_start + map_len) {
1775 err = map_private_extent_buffer(eb, offset,
1776 sizeof(struct btrfs_disk_key),
1777 &kaddr, &map_start, &map_len);
1780 tmp = (struct btrfs_disk_key *)(kaddr + offset -
1783 read_extent_buffer(eb, &unaligned,
1784 offset, sizeof(unaligned));
1789 tmp = (struct btrfs_disk_key *)(kaddr + offset -
1792 ret = comp_keys(tmp, key);
1808 * simple bin_search frontend that does the right thing for
1811 static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
1812 int level, int *slot)
1815 return generic_bin_search(eb,
1816 offsetof(struct btrfs_leaf, items),
1817 sizeof(struct btrfs_item),
1818 key, btrfs_header_nritems(eb),
1821 return generic_bin_search(eb,
1822 offsetof(struct btrfs_node, ptrs),
1823 sizeof(struct btrfs_key_ptr),
1824 key, btrfs_header_nritems(eb),
1828 int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
1829 int level, int *slot)
1831 return bin_search(eb, key, level, slot);
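/*
 * Illustrative sketch (editorial addition): interpreting btrfs_bin_search().
 * A return of 0 means the key was found at *slot; 1 means it was not found
 * and *slot is the position where it would be inserted; a negative value
 * indicates an error.
 *
 *	int slot;
 *	int ret;
 *
 *	ret = btrfs_bin_search(eb, &key, level, &slot);
 *	if (ret == 0)
 *		... exact match at 'slot' ...
 *	else if (ret > 0)
 *		... 'key' would be inserted at 'slot' ...
 */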
1834 static void root_add_used(struct btrfs_root *root, u32 size)
1836 spin_lock(&root->accounting_lock);
1837 btrfs_set_root_used(&root->root_item,
1838 btrfs_root_used(&root->root_item) + size);
1839 spin_unlock(&root->accounting_lock);
1842 static void root_sub_used(struct btrfs_root *root, u32 size)
1844 spin_lock(&root->accounting_lock);
1845 btrfs_set_root_used(&root->root_item,
1846 btrfs_root_used(&root->root_item) - size);
1847 spin_unlock(&root->accounting_lock);
1850 /* given a node and slot number, this reads the blocks it points to. The
1851 * extent buffer is returned with a reference taken (but unlocked).
1852 * NULL is returned on error.
1854 static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
1855 struct extent_buffer *parent, int slot)
1857 int level = btrfs_header_level(parent);
1858 struct extent_buffer *eb;
1862 if (slot >= btrfs_header_nritems(parent))
1867 eb = read_tree_block(root, btrfs_node_blockptr(parent, slot),
1868 btrfs_node_ptr_generation(parent, slot));
1869 if (eb && !extent_buffer_uptodate(eb)) {
1870 free_extent_buffer(eb);
1878 * node level balancing, used to make sure nodes are in proper order for
1879 * item deletion. We balance from the top down, so we have to make sure
1880 * that a deletion won't leave a node completely empty later on.
1882 static noinline int balance_level(struct btrfs_trans_handle *trans,
1883 struct btrfs_root *root,
1884 struct btrfs_path *path, int level)
1886 struct extent_buffer *right = NULL;
1887 struct extent_buffer *mid;
1888 struct extent_buffer *left = NULL;
1889 struct extent_buffer *parent = NULL;
1893 int orig_slot = path->slots[level];
1899 mid = path->nodes[level];
1901 WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
1902 path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
1903 WARN_ON(btrfs_header_generation(mid) != trans->transid);
1905 orig_ptr = btrfs_node_blockptr(mid, orig_slot);
1907 if (level < BTRFS_MAX_LEVEL - 1) {
1908 parent = path->nodes[level + 1];
1909 pslot = path->slots[level + 1];
1913 * deal with the case where there is only one pointer in the root
1914 * by promoting the node below to a root
1917 struct extent_buffer *child;
1919 if (btrfs_header_nritems(mid) != 1)
1922 /* promote the child to a root */
1923 child = read_node_slot(root, mid, 0);
1926 btrfs_std_error(root->fs_info, ret);
1930 btrfs_tree_lock(child);
1931 btrfs_set_lock_blocking(child);
1932 ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
1934 btrfs_tree_unlock(child);
1935 free_extent_buffer(child);
1939 tree_mod_log_set_root_pointer(root, child, 1);
1940 rcu_assign_pointer(root->node, child);
1942 add_root_to_dirty_list(root);
1943 btrfs_tree_unlock(child);
1945 path->locks[level] = 0;
1946 path->nodes[level] = NULL;
1947 clean_tree_block(trans, root, mid);
1948 btrfs_tree_unlock(mid);
1949 /* once for the path */
1950 free_extent_buffer(mid);
1952 root_sub_used(root, mid->len);
1953 btrfs_free_tree_block(trans, root, mid, 0, 1);
1954 /* once for the root ptr */
1955 free_extent_buffer_stale(mid);
1958 if (btrfs_header_nritems(mid) >
1959 BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
1962 left = read_node_slot(root, parent, pslot - 1);
1964 btrfs_tree_lock(left);
1965 btrfs_set_lock_blocking(left);
1966 wret = btrfs_cow_block(trans, root, left,
1967 parent, pslot - 1, &left);
1973 right = read_node_slot(root, parent, pslot + 1);
1975 btrfs_tree_lock(right);
1976 btrfs_set_lock_blocking(right);
1977 wret = btrfs_cow_block(trans, root, right,
1978 parent, pslot + 1, &right);
1985 /* first, try to make some room in the middle buffer */
1987 orig_slot += btrfs_header_nritems(left);
1988 wret = push_node_left(trans, root, left, mid, 1);
1994 * then try to empty the right most buffer into the middle
1997 wret = push_node_left(trans, root, mid, right, 1);
1998 if (wret < 0 && wret != -ENOSPC)
2000 if (btrfs_header_nritems(right) == 0) {
2001 clean_tree_block(trans, root, right);
2002 btrfs_tree_unlock(right);
2003 del_ptr(root, path, level + 1, pslot + 1);
2004 root_sub_used(root, right->len);
2005 btrfs_free_tree_block(trans, root, right, 0, 1);
2006 free_extent_buffer_stale(right);
2009 struct btrfs_disk_key right_key;
2010 btrfs_node_key(right, &right_key, 0);
2011 tree_mod_log_set_node_key(root->fs_info, parent,
2013 btrfs_set_node_key(parent, &right_key, pslot + 1);
2014 btrfs_mark_buffer_dirty(parent);
2017 if (btrfs_header_nritems(mid) == 1) {
2019 * we're not allowed to leave a node with one item in the
2020 * tree during a delete. A deletion from lower in the tree
2021 * could try to delete the only pointer in this node.
2022 * So, pull some keys from the left.
2023 * There has to be a left pointer at this point because
2024 * otherwise we would have pulled some pointers from the
2029 btrfs_std_error(root->fs_info, ret);
2032 wret = balance_node_right(trans, root, mid, left);
2038 wret = push_node_left(trans, root, left, mid, 1);
2044 if (btrfs_header_nritems(mid) == 0) {
2045 clean_tree_block(trans, root, mid);
2046 btrfs_tree_unlock(mid);
2047 del_ptr(root, path, level + 1, pslot);
2048 root_sub_used(root, mid->len);
2049 btrfs_free_tree_block(trans, root, mid, 0, 1);
2050 free_extent_buffer_stale(mid);
2053 /* update the parent key to reflect our changes */
2054 struct btrfs_disk_key mid_key;
2055 btrfs_node_key(mid, &mid_key, 0);
2056 tree_mod_log_set_node_key(root->fs_info, parent,
2058 btrfs_set_node_key(parent, &mid_key, pslot);
2059 btrfs_mark_buffer_dirty(parent);
2062 /* update the path */
2064 if (btrfs_header_nritems(left) > orig_slot) {
2065 extent_buffer_get(left);
2066 /* left was locked after cow */
2067 path->nodes[level] = left;
2068 path->slots[level + 1] -= 1;
2069 path->slots[level] = orig_slot;
2071 btrfs_tree_unlock(mid);
2072 free_extent_buffer(mid);
2075 orig_slot -= btrfs_header_nritems(left);
2076 path->slots[level] = orig_slot;
2079 /* double check we haven't messed things up */
2081 btrfs_node_blockptr(path->nodes[level], path->slots[level]))
2085 btrfs_tree_unlock(right);
2086 free_extent_buffer(right);
2089 if (path->nodes[level] != left)
2090 btrfs_tree_unlock(left);
2091 free_extent_buffer(left);
2096 /* Node balancing for insertion. Here we only split or push nodes around
2097 * when they are completely full. This is also done top down, so we
2098 * have to be pessimistic.
2100 static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
2101 struct btrfs_root *root,
2102 struct btrfs_path *path, int level)
2104 struct extent_buffer *right = NULL;
2105 struct extent_buffer *mid;
2106 struct extent_buffer *left = NULL;
2107 struct extent_buffer *parent = NULL;
2111 int orig_slot = path->slots[level];
2116 mid = path->nodes[level];
2117 WARN_ON(btrfs_header_generation(mid) != trans->transid);
2119 if (level < BTRFS_MAX_LEVEL - 1) {
2120 parent = path->nodes[level + 1];
2121 pslot = path->slots[level + 1];
2127 left = read_node_slot(root, parent, pslot - 1);
2129 /* first, try to make some room in the middle buffer */
2133 btrfs_tree_lock(left);
2134 btrfs_set_lock_blocking(left);
2136 left_nr = btrfs_header_nritems(left);
2137 if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
2140 ret = btrfs_cow_block(trans, root, left, parent,
2145 wret = push_node_left(trans, root,
2152 struct btrfs_disk_key disk_key;
2153 orig_slot += left_nr;
2154 btrfs_node_key(mid, &disk_key, 0);
2155 tree_mod_log_set_node_key(root->fs_info, parent,
2157 btrfs_set_node_key(parent, &disk_key, pslot);
2158 btrfs_mark_buffer_dirty(parent);
2159 if (btrfs_header_nritems(left) > orig_slot) {
2160 path->nodes[level] = left;
2161 path->slots[level + 1] -= 1;
2162 path->slots[level] = orig_slot;
2163 btrfs_tree_unlock(mid);
2164 free_extent_buffer(mid);
2167 btrfs_header_nritems(left);
2168 path->slots[level] = orig_slot;
2169 btrfs_tree_unlock(left);
2170 free_extent_buffer(left);
2174 btrfs_tree_unlock(left);
2175 free_extent_buffer(left);
2177 right = read_node_slot(root, parent, pslot + 1);
2180 * then try to empty the right most buffer into the middle
2185 btrfs_tree_lock(right);
2186 btrfs_set_lock_blocking(right);
2188 right_nr = btrfs_header_nritems(right);
2189 if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
2192 ret = btrfs_cow_block(trans, root, right,
2198 wret = balance_node_right(trans, root,
2205 struct btrfs_disk_key disk_key;
2207 btrfs_node_key(right, &disk_key, 0);
2208 tree_mod_log_set_node_key(root->fs_info, parent,
2210 btrfs_set_node_key(parent, &disk_key, pslot + 1);
2211 btrfs_mark_buffer_dirty(parent);
2213 if (btrfs_header_nritems(mid) <= orig_slot) {
2214 path->nodes[level] = right;
2215 path->slots[level + 1] += 1;
2216 path->slots[level] = orig_slot -
2217 btrfs_header_nritems(mid);
2218 btrfs_tree_unlock(mid);
2219 free_extent_buffer(mid);
2221 btrfs_tree_unlock(right);
2222 free_extent_buffer(right);
2226 btrfs_tree_unlock(right);
2227 free_extent_buffer(right);
2233 * readahead one full node of leaves, finding things that are close
2234 * to the block in 'slot', and triggering ra on them.
2236 static void reada_for_search(struct btrfs_root *root,
2237 struct btrfs_path *path,
2238 int level, int slot, u64 objectid)
2240 struct extent_buffer *node;
2241 struct btrfs_disk_key disk_key;
2247 int direction = path->reada;
2248 struct extent_buffer *eb;
2256 if (!path->nodes[level])
2259 node = path->nodes[level];
2261 search = btrfs_node_blockptr(node, slot);
2262 blocksize = root->nodesize;
2263 eb = btrfs_find_tree_block(root, search);
2265 free_extent_buffer(eb);
2271 nritems = btrfs_header_nritems(node);
2275 if (direction < 0) {
2279 } else if (direction > 0) {
2284 if (path->reada < 0 && objectid) {
2285 btrfs_node_key(node, &disk_key, nr);
2286 if (btrfs_disk_key_objectid(&disk_key) != objectid)
2289 search = btrfs_node_blockptr(node, nr);
2290 if ((search <= target && target - search <= 65536) ||
2291 (search > target && search - target <= 65536)) {
2292 gen = btrfs_node_ptr_generation(node, nr);
2293 readahead_tree_block(root, search, blocksize);
2297 if ((nread > 65536 || nscan > 32))
2302 static noinline void reada_for_balance(struct btrfs_root *root,
2303 struct btrfs_path *path, int level)
2307 struct extent_buffer *parent;
2308 struct extent_buffer *eb;
2314 parent = path->nodes[level + 1];
2318 nritems = btrfs_header_nritems(parent);
2319 slot = path->slots[level + 1];
2320 blocksize = root->nodesize;
2323 block1 = btrfs_node_blockptr(parent, slot - 1);
2324 gen = btrfs_node_ptr_generation(parent, slot - 1);
2325 eb = btrfs_find_tree_block(root, block1);
2327 * if we get -eagain from btrfs_buffer_uptodate, we
2328 * don't want to return eagain here. That will loop forever.
2331 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2333 free_extent_buffer(eb);
2335 if (slot + 1 < nritems) {
2336 block2 = btrfs_node_blockptr(parent, slot + 1);
2337 gen = btrfs_node_ptr_generation(parent, slot + 1);
2338 eb = btrfs_find_tree_block(root, block2);
2339 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2341 free_extent_buffer(eb);
2345 readahead_tree_block(root, block1, blocksize);
2347 readahead_tree_block(root, block2, blocksize);
2352 * when we walk down the tree, it is usually safe to unlock the higher layers
2353 * in the tree. The exceptions are when our path goes through slot 0, because
2354 * operations on the tree might require changing key pointers higher up in the tree.
2357 * callers might also have set path->keep_locks, which tells this code to keep
2358 * the lock if the path points to the last slot in the block. This is part of
2359 * walking through the tree, and selecting the next slot in the higher block.
2361 * lowest_unlock sets the lowest level in the tree we're allowed to unlock. so
2362 * if lowest_unlock is 1, level 0 won't be unlocked
2364 static noinline void unlock_up(struct btrfs_path *path, int level,
2365 int lowest_unlock, int min_write_lock_level,
2366 int *write_lock_level)
2369 int skip_level = level;
2371 struct extent_buffer *t;
2373 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2374 if (!path->nodes[i])
2376 if (!path->locks[i])
2378 if (!no_skips && path->slots[i] == 0) {
2382 if (!no_skips && path->keep_locks) {
2385 nritems = btrfs_header_nritems(t);
2386 if (nritems < 1 || path->slots[i] >= nritems - 1) {
2391 if (skip_level < i && i >= lowest_unlock)
2395 if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
2396 btrfs_tree_unlock_rw(t, path->locks[i]);
2398 if (write_lock_level &&
2399 i > min_write_lock_level &&
2400 i <= *write_lock_level) {
2401 *write_lock_level = i - 1;
2408 * This releases any locks held in the path starting at level and
2409 * going all the way up to the root.
2411 * btrfs_search_slot will keep the lock held on higher nodes in a few
2412 * corner cases, such as COW of the block at slot zero in the node. This
2413 * ignores those rules, and it should only be called when there are no
2414 * more updates to be done higher up in the tree.
2416 noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
2420 if (path->keep_locks)
2423 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2424 if (!path->nodes[i])
2426 if (!path->locks[i])
2428 btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
2434 * helper function for btrfs_search_slot. The goal is to find a block
2435 * in cache without setting the path to blocking. If we find the block
2436 * we return zero and the path is unchanged.
2438 * If we can't find the block, we set the path blocking and do some
2439 * reada. -EAGAIN is returned and the search must be repeated.
2442 read_block_for_search(struct btrfs_trans_handle *trans,
2443 struct btrfs_root *root, struct btrfs_path *p,
2444 struct extent_buffer **eb_ret, int level, int slot,
2445 struct btrfs_key *key, u64 time_seq)
2449 struct extent_buffer *b = *eb_ret;
2450 struct extent_buffer *tmp;
2453 blocknr = btrfs_node_blockptr(b, slot);
2454 gen = btrfs_node_ptr_generation(b, slot);
2456 tmp = btrfs_find_tree_block(root, blocknr);
2458 /* first we do an atomic uptodate check */
2459 if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
2464 /* the pages were up to date, but we failed
2465 * the generation number check. Do a full
2466 * read for the generation number that is correct.
2467 * We must do this without dropping locks so
2468 * we can trust our generation number
2470 btrfs_set_path_blocking(p);
2472 /* now we're allowed to do a blocking uptodate check */
2473 ret = btrfs_read_buffer(tmp, gen);
2478 free_extent_buffer(tmp);
2479 btrfs_release_path(p);
2484 * reduce lock contention at high levels
2485 * of the btree by dropping locks before
2486 * we read. Don't release the lock on the current
2487 * level because we need to walk this node to figure
2488 * out which blocks to read.
2490 btrfs_unlock_up_safe(p, level + 1);
2491 btrfs_set_path_blocking(p);
2493 free_extent_buffer(tmp);
2495 reada_for_search(root, p, level, slot, key->objectid);
2497 btrfs_release_path(p);
2500 tmp = read_tree_block(root, blocknr, 0);
2503 * If the read above didn't mark this buffer up to date,
2504 * it will never end up being up to date. Set ret to EIO now
2505 * and give up so that our caller doesn't loop forever
2508 if (!btrfs_buffer_uptodate(tmp, 0, 0))
2510 free_extent_buffer(tmp);
2516 * helper function for btrfs_search_slot. This does all of the checks
2517 * for node-level blocks and does any balancing required based on ins_len.
2520 * If no extra work was required, zero is returned. If we had to
2521 * drop the path, -EAGAIN is returned and btrfs_search_slot must loop back and try again.
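 *
 * "Balancing" here means split_node() when an insert finds the node within
 * three pointers of full, and balance_level() when a removal has left the
 * node less than half full (see the nritems checks below).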
2525 setup_nodes_for_search(struct btrfs_trans_handle *trans,
2526 struct btrfs_root *root, struct btrfs_path *p,
2527 struct extent_buffer *b, int level, int ins_len,
2528 int *write_lock_level)
2531 if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
2532 BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
2535 if (*write_lock_level < level + 1) {
2536 *write_lock_level = level + 1;
2537 btrfs_release_path(p);
2541 btrfs_set_path_blocking(p);
2542 reada_for_balance(root, p, level);
2543 sret = split_node(trans, root, p, level);
2544 btrfs_clear_path_blocking(p, NULL, 0);
2551 b = p->nodes[level];
2552 } else if (ins_len < 0 && btrfs_header_nritems(b) <
2553 BTRFS_NODEPTRS_PER_BLOCK(root) / 2) {
2556 if (*write_lock_level < level + 1) {
2557 *write_lock_level = level + 1;
2558 btrfs_release_path(p);
2562 btrfs_set_path_blocking(p);
2563 reada_for_balance(root, p, level);
2564 sret = balance_level(trans, root, p, level);
2565 btrfs_clear_path_blocking(p, NULL, 0);
2571 b = p->nodes[level];
2573 btrfs_release_path(p);
2576 BUG_ON(btrfs_header_nritems(b) == 1);
2586 static void key_search_validate(struct extent_buffer *b,
2587 struct btrfs_key *key,
2590 #ifdef CONFIG_BTRFS_ASSERT
2591 struct btrfs_disk_key disk_key;
2593 btrfs_cpu_key_to_disk(&disk_key, key);
2596 ASSERT(!memcmp_extent_buffer(b, &disk_key,
2597 offsetof(struct btrfs_leaf, items[0].key),
2600 ASSERT(!memcmp_extent_buffer(b, &disk_key,
2601 offsetof(struct btrfs_node, ptrs[0].key),
2606 static int key_search(struct extent_buffer *b, struct btrfs_key *key,
2607 int level, int *prev_cmp, int *slot)
2609 if (*prev_cmp != 0) {
2610 *prev_cmp = bin_search(b, key, level, slot);
2614 key_search_validate(b, key, level);
2620 int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *found_path,
2621 u64 iobjectid, u64 ioff, u8 key_type,
2622 struct btrfs_key *found_key)
2625 struct btrfs_key key;
2626 struct extent_buffer *eb;
2627 struct btrfs_path *path;
2629 key.type = key_type;
2630 key.objectid = iobjectid;
2633 if (found_path == NULL) {
2634 path = btrfs_alloc_path();
2640 ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
2641 if ((ret < 0) || (found_key == NULL)) {
2642 if (path != found_path)
2643 btrfs_free_path(path);
2647 eb = path->nodes[0];
2648 if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
2649 ret = btrfs_next_leaf(fs_root, path);
2652 eb = path->nodes[0];
2655 btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
2656 if (found_key->type != key.type ||
2657 found_key->objectid != key.objectid)
2664 * look for key in the tree. path is filled in with nodes along the way
2665 * if key is found, we return zero and you can find the item in the leaf
2666 * level of the path (level 0)
2668 * If the key isn't found, the path points to the slot where it should
2669 * be inserted, and 1 is returned. If there are other errors during the
2670 * search a negative error number is returned.
2672 * if ins_len > 0, nodes and leaves will be split as we walk down the
2673 * tree. If ins_len < 0, nodes will be merged as we walk down the tree (if possible).
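 *
 * A minimal read-only lookup, the same pattern btrfs_find_item() above uses
 * ('found' is just a local struct btrfs_key in this sketch):
 *
 *	path = btrfs_alloc_path();
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret == 0)
 *		btrfs_item_key_to_cpu(path->nodes[0], &found, path->slots[0]);
 *	btrfs_free_path(path);
 *
 * (NULL trans and cow == 0 because nothing is modified.)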
2676 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
2677 *root, struct btrfs_key *key, struct btrfs_path *p, int
2680 struct extent_buffer *b;
2685 int lowest_unlock = 1;
2687 /* everything at write_lock_level or lower must be write locked */
2688 int write_lock_level = 0;
2689 u8 lowest_level = 0;
2690 int min_write_lock_level;
2693 lowest_level = p->lowest_level;
2694 WARN_ON(lowest_level && ins_len > 0);
2695 WARN_ON(p->nodes[0] != NULL);
2696 BUG_ON(!cow && ins_len);
2701 /* when we are removing items, we might have to go up to level
2702 * two as we update tree pointers. Make sure we keep write
2703 * locks on those levels as well
2705 write_lock_level = 2;
2706 } else if (ins_len > 0) {
2708 * for inserting items, make sure we have a write lock on
2709 * level 1 so we can update keys
2711 write_lock_level = 1;
2715 write_lock_level = -1;
2717 if (cow && (p->keep_locks || p->lowest_level))
2718 write_lock_level = BTRFS_MAX_LEVEL;
2720 min_write_lock_level = write_lock_level;
2725 * we try very hard to do read locks on the root
2727 root_lock = BTRFS_READ_LOCK;
2729 if (p->search_commit_root) {
2731 * the commit roots are read only
2732 * so we always do read locks
2734 if (p->need_commit_sem)
2735 down_read(&root->fs_info->commit_root_sem);
2736 b = root->commit_root;
2737 extent_buffer_get(b);
2738 level = btrfs_header_level(b);
2739 if (p->need_commit_sem)
2740 up_read(&root->fs_info->commit_root_sem);
2741 if (!p->skip_locking)
2742 btrfs_tree_read_lock(b);
2744 if (p->skip_locking) {
2745 b = btrfs_root_node(root);
2746 level = btrfs_header_level(b);
2748 /* we don't know the level of the root node
2749 * until we actually have it read locked
2751 b = btrfs_read_lock_root_node(root);
2752 level = btrfs_header_level(b);
2753 if (level <= write_lock_level) {
2754 /* whoops, must trade for write lock */
2755 btrfs_tree_read_unlock(b);
2756 free_extent_buffer(b);
2757 b = btrfs_lock_root_node(root);
2758 root_lock = BTRFS_WRITE_LOCK;
2760 /* the level might have changed, check again */
2761 level = btrfs_header_level(b);
2765 p->nodes[level] = b;
2766 if (!p->skip_locking)
2767 p->locks[level] = root_lock;
2770 level = btrfs_header_level(b);
2773 * setup the path here so we can release it under lock
2774 * contention with the cow code
2778 * if we don't really need to cow this block
2779 * then we don't want to set the path blocking,
2780 * so we test it here
2782 if (!should_cow_block(trans, root, b))
2786 * must have write locks on this node and the
2789 if (level > write_lock_level ||
2790 (level + 1 > write_lock_level &&
2791 level + 1 < BTRFS_MAX_LEVEL &&
2792 p->nodes[level + 1])) {
2793 write_lock_level = level + 1;
2794 btrfs_release_path(p);
2798 btrfs_set_path_blocking(p);
2799 err = btrfs_cow_block(trans, root, b,
2800 p->nodes[level + 1],
2801 p->slots[level + 1], &b);
2808 p->nodes[level] = b;
2809 btrfs_clear_path_blocking(p, NULL, 0);
2812 * we have a lock on b and as long as we aren't changing
2813 * the tree, there is no way for the items in b to change.
2814 * It is safe to drop the lock on our parent before we
2815 * go through the expensive btree search on b.
2817 * If we're inserting or deleting (ins_len != 0), then we might
2818 * be changing slot zero, which may require changing the parent.
2819 * So, we can't drop the lock until after we know which slot
2820 * we're operating on.
2822 if (!ins_len && !p->keep_locks) {
2825 if (u < BTRFS_MAX_LEVEL && p->locks[u]) {
2826 btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]);
2831 ret = key_search(b, key, level, &prev_cmp, &slot);
2835 if (ret && slot > 0) {
2839 p->slots[level] = slot;
2840 err = setup_nodes_for_search(trans, root, p, b, level,
2841 ins_len, &write_lock_level);
2848 b = p->nodes[level];
2849 slot = p->slots[level];
2852 * slot 0 is special, if we change the key
2853 * we have to update the parent pointer
2854 * which means we must have a write lock on the parent
2857 if (slot == 0 && ins_len &&
2858 write_lock_level < level + 1) {
2859 write_lock_level = level + 1;
2860 btrfs_release_path(p);
2864 unlock_up(p, level, lowest_unlock,
2865 min_write_lock_level, &write_lock_level);
2867 if (level == lowest_level) {
2873 err = read_block_for_search(trans, root, p,
2874 &b, level, slot, key, 0);
2882 if (!p->skip_locking) {
2883 level = btrfs_header_level(b);
2884 if (level <= write_lock_level) {
2885 err = btrfs_try_tree_write_lock(b);
2887 btrfs_set_path_blocking(p);
2889 btrfs_clear_path_blocking(p, b,
2892 p->locks[level] = BTRFS_WRITE_LOCK;
2894 err = btrfs_tree_read_lock_atomic(b);
2896 btrfs_set_path_blocking(p);
2897 btrfs_tree_read_lock(b);
2898 btrfs_clear_path_blocking(p, b,
2901 p->locks[level] = BTRFS_READ_LOCK;
2903 p->nodes[level] = b;
2906 p->slots[level] = slot;
2908 btrfs_leaf_free_space(root, b) < ins_len) {
2909 if (write_lock_level < 1) {
2910 write_lock_level = 1;
2911 btrfs_release_path(p);
2915 btrfs_set_path_blocking(p);
2916 err = split_leaf(trans, root, key,
2917 p, ins_len, ret == 0);
2918 btrfs_clear_path_blocking(p, NULL, 0);
2926 if (!p->search_for_split)
2927 unlock_up(p, level, lowest_unlock,
2928 min_write_lock_level, &write_lock_level);
2935 * we don't really know what they plan on doing with the path
2936 * from here on, so for now just mark it as blocking
2938 if (!p->leave_spinning)
2939 btrfs_set_path_blocking(p);
2940 if (ret < 0 && !p->skip_release_on_error)
2941 btrfs_release_path(p);
2946 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
2947 * current state of the tree together with the operations recorded in the tree
2948 * modification log to search for the key in a previous version of this tree, as
2949 * denoted by the time_seq parameter.
2951 * Naturally, there is no support for insert, delete or cow operations.
2953 * The resulting path and return value will be set up as if we called
2954 * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
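 *
 * The rewinding itself is done by get_old_root() and tree_mod_log_rewind()
 * below, which undo the logged modifications (working on buffer copies where
 * needed), so this function only ever takes read locks.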
2956 int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
2957 struct btrfs_path *p, u64 time_seq)
2959 struct extent_buffer *b;
2964 int lowest_unlock = 1;
2965 u8 lowest_level = 0;
2968 lowest_level = p->lowest_level;
2969 WARN_ON(p->nodes[0] != NULL);
2971 if (p->search_commit_root) {
2973 return btrfs_search_slot(NULL, root, key, p, 0, 0);
2977 b = get_old_root(root, time_seq);
2978 level = btrfs_header_level(b);
2979 p->locks[level] = BTRFS_READ_LOCK;
2982 level = btrfs_header_level(b);
2983 p->nodes[level] = b;
2984 btrfs_clear_path_blocking(p, NULL, 0);
2987 * we have a lock on b and as long as we aren't changing
2988 * the tree, there is no way for the items in b to change.
2989 * It is safe to drop the lock on our parent before we
2990 * go through the expensive btree search on b.
2992 btrfs_unlock_up_safe(p, level + 1);
2995 * Since we can unwind ebs we want to do a real search every time.
2999 ret = key_search(b, key, level, &prev_cmp, &slot);
3003 if (ret && slot > 0) {
3007 p->slots[level] = slot;
3008 unlock_up(p, level, lowest_unlock, 0, NULL);
3010 if (level == lowest_level) {
3016 err = read_block_for_search(NULL, root, p, &b, level,
3017 slot, key, time_seq);
3025 level = btrfs_header_level(b);
3026 err = btrfs_tree_read_lock_atomic(b);
3028 btrfs_set_path_blocking(p);
3029 btrfs_tree_read_lock(b);
3030 btrfs_clear_path_blocking(p, b,
3033 b = tree_mod_log_rewind(root->fs_info, p, b, time_seq);
3038 p->locks[level] = BTRFS_READ_LOCK;
3039 p->nodes[level] = b;
3041 p->slots[level] = slot;
3042 unlock_up(p, level, lowest_unlock, 0, NULL);
3048 if (!p->leave_spinning)
3049 btrfs_set_path_blocking(p);
3051 btrfs_release_path(p);
3057 * helper to use instead of search slot if no exact match is needed but
3058 * instead the next or previous item should be returned.
3059 * When find_higher is true, the next higher item is returned, the next lower otherwise.
3061 * When return_any and find_higher are both true, and no higher item is found,
3062 * return the next lower instead.
3063 * When return_any is true and find_higher is false, and no lower item is found,
3064 * return the next higher instead.
3065 * It returns 0 if any item is found, 1 if none is found (tree empty), and < 0 in case of error.
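 *
 * For example, with find_higher and return_any both set, searching for a key
 * beyond anything stored in the tree returns the last (highest) item instead
 * of failing with 1.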
3068 int btrfs_search_slot_for_read(struct btrfs_root *root,
3069 struct btrfs_key *key, struct btrfs_path *p,
3070 int find_higher, int return_any)
3073 struct extent_buffer *leaf;
3076 ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
3080 * a return value of 1 means the path is at the position where the
3081 * item should be inserted. Normally this is the next bigger item,
3082 * but in case the previous item is the last in a leaf, path points
3083 * to the first free slot in the previous leaf, i.e. at an invalid slot.
3089 if (p->slots[0] >= btrfs_header_nritems(leaf)) {
3090 ret = btrfs_next_leaf(root, p);
3096 * no higher item found, return the next lower instead
3101 btrfs_release_path(p);
3105 if (p->slots[0] == 0) {
3106 ret = btrfs_prev_leaf(root, p);
3111 if (p->slots[0] == btrfs_header_nritems(leaf))
3118 * no lower item found, return the next higher instead
3123 btrfs_release_path(p);
3133 * adjust the pointers going up the tree, starting at level
3134 * making sure the right key of each node points to 'key'.
3135 * This is used after shifting pointers to the left, so it stops
3136 * fixing up pointers when a given leaf/node is not in slot 0 of the higher levels.
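 *
 * btrfs_set_item_key_safe() below, for instance, uses this with level 1 when
 * the key being rewritten sits in slot 0 of its leaf, so the pointer to that
 * leaf in the node above is updated to match.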
3140 static void fixup_low_keys(struct btrfs_root *root, struct btrfs_path *path,
3141 struct btrfs_disk_key *key, int level)
3144 struct extent_buffer *t;
3146 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
3147 int tslot = path->slots[i];
3148 if (!path->nodes[i])
3151 tree_mod_log_set_node_key(root->fs_info, t, tslot, 1);
3152 btrfs_set_node_key(t, key, tslot);
3153 btrfs_mark_buffer_dirty(path->nodes[i]);
3162 * This function isn't completely safe. It's the caller's responsibility
3163 * to ensure that the new key won't break the key ordering.
3165 void btrfs_set_item_key_safe(struct btrfs_root *root, struct btrfs_path *path,
3166 struct btrfs_key *new_key)
3168 struct btrfs_disk_key disk_key;
3169 struct extent_buffer *eb;
3172 eb = path->nodes[0];
3173 slot = path->slots[0];
3175 btrfs_item_key(eb, &disk_key, slot - 1);
3176 BUG_ON(comp_keys(&disk_key, new_key) >= 0);
3178 if (slot < btrfs_header_nritems(eb) - 1) {
3179 btrfs_item_key(eb, &disk_key, slot + 1);
3180 BUG_ON(comp_keys(&disk_key, new_key) <= 0);
3183 btrfs_cpu_key_to_disk(&disk_key, new_key);
3184 btrfs_set_item_key(eb, &disk_key, slot);
3185 btrfs_mark_buffer_dirty(eb);
3187 fixup_low_keys(root, path, &disk_key, 1);
3191 * try to push data from one node into the next node left in the tree.
3194 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
3195 * error, and > 0 if there was no room in the left hand block.
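 *
 * Unless 'empty' is set, at least eight pointers are left behind in the
 * source node (see the src_nritems checks below), so a lightly used node is
 * not drained just to fill its left neighbour.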
3197 static int push_node_left(struct btrfs_trans_handle *trans,
3198 struct btrfs_root *root, struct extent_buffer *dst,
3199 struct extent_buffer *src, int empty)
3206 src_nritems = btrfs_header_nritems(src);
3207 dst_nritems = btrfs_header_nritems(dst);
3208 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
3209 WARN_ON(btrfs_header_generation(src) != trans->transid);
3210 WARN_ON(btrfs_header_generation(dst) != trans->transid);
3212 if (!empty && src_nritems <= 8)
3215 if (push_items <= 0)
3219 push_items = min(src_nritems, push_items);
3220 if (push_items < src_nritems) {
3221 /* leave at least 8 pointers in the node if
3222 * we aren't going to empty it
3224 if (src_nritems - push_items < 8) {
3225 if (push_items <= 8)
3231 push_items = min(src_nritems - 8, push_items);
3233 ret = tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
3236 btrfs_abort_transaction(trans, root, ret);
3239 copy_extent_buffer(dst, src,
3240 btrfs_node_key_ptr_offset(dst_nritems),
3241 btrfs_node_key_ptr_offset(0),
3242 push_items * sizeof(struct btrfs_key_ptr));
3244 if (push_items < src_nritems) {
3246 * don't call tree_mod_log_eb_move here, key removal was already
3247 * fully logged by tree_mod_log_eb_copy above.
3249 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
3250 btrfs_node_key_ptr_offset(push_items),
3251 (src_nritems - push_items) *
3252 sizeof(struct btrfs_key_ptr));
3254 btrfs_set_header_nritems(src, src_nritems - push_items);
3255 btrfs_set_header_nritems(dst, dst_nritems + push_items);
3256 btrfs_mark_buffer_dirty(src);
3257 btrfs_mark_buffer_dirty(dst);
3263 * try to push data from one node into the next node right in the tree.
3266 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
3267 * error, and > 0 if there was no room in the right hand block.
3271 * this will only push up to 1/2 the contents of the left node over to the right.
3271 static int balance_node_right(struct btrfs_trans_handle *trans,
3272 struct btrfs_root *root,
3273 struct extent_buffer *dst,
3274 struct extent_buffer *src)
3282 WARN_ON(btrfs_header_generation(src) != trans->transid);
3283 WARN_ON(btrfs_header_generation(dst) != trans->transid);
3285 src_nritems = btrfs_header_nritems(src);
3286 dst_nritems = btrfs_header_nritems(dst);
3287 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
3288 if (push_items <= 0)
3291 if (src_nritems < 4)
3294 max_push = src_nritems / 2 + 1;
3295 /* don't try to empty the node */
3296 if (max_push >= src_nritems)
3299 if (max_push < push_items)
3300 push_items = max_push;
3302 tree_mod_log_eb_move(root->fs_info, dst, push_items, 0, dst_nritems);
3303 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
3304 btrfs_node_key_ptr_offset(0),
3306 sizeof(struct btrfs_key_ptr));
3308 ret = tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
3309 src_nritems - push_items, push_items);
3311 btrfs_abort_transaction(trans, root, ret);
3314 copy_extent_buffer(dst, src,
3315 btrfs_node_key_ptr_offset(0),
3316 btrfs_node_key_ptr_offset(src_nritems - push_items),
3317 push_items * sizeof(struct btrfs_key_ptr));
3319 btrfs_set_header_nritems(src, src_nritems - push_items);
3320 btrfs_set_header_nritems(dst, dst_nritems + push_items);
3322 btrfs_mark_buffer_dirty(src);
3323 btrfs_mark_buffer_dirty(dst);
3329 * helper function to insert a new root level in the tree.
3330 * A new node is allocated, and a single item is inserted to
3331 * point to the existing root
3333 * returns zero on success or < 0 on failure.
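 *
 * The new node gets a single key pointer that refers to the old root, so the
 * tree grows by exactly one level and every existing block keeps its place.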
3335 static noinline int insert_new_root(struct btrfs_trans_handle *trans,
3336 struct btrfs_root *root,
3337 struct btrfs_path *path, int level)
3340 struct extent_buffer *lower;
3341 struct extent_buffer *c;
3342 struct extent_buffer *old;
3343 struct btrfs_disk_key lower_key;
3345 BUG_ON(path->nodes[level]);
3346 BUG_ON(path->nodes[level-1] != root->node);
3348 lower = path->nodes[level-1];
3350 btrfs_item_key(lower, &lower_key, 0);
3352 btrfs_node_key(lower, &lower_key, 0);
3354 c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
3355 &lower_key, level, root->node->start, 0);
3359 root_add_used(root, root->nodesize);
3361 memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
3362 btrfs_set_header_nritems(c, 1);
3363 btrfs_set_header_level(c, level);
3364 btrfs_set_header_bytenr(c, c->start);
3365 btrfs_set_header_generation(c, trans->transid);
3366 btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
3367 btrfs_set_header_owner(c, root->root_key.objectid);
3369 write_extent_buffer(c, root->fs_info->fsid, btrfs_header_fsid(),
3372 write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
3373 btrfs_header_chunk_tree_uuid(c), BTRFS_UUID_SIZE);
3375 btrfs_set_node_key(c, &lower_key, 0);
3376 btrfs_set_node_blockptr(c, 0, lower->start);
3377 lower_gen = btrfs_header_generation(lower);
3378 WARN_ON(lower_gen != trans->transid);
3380 btrfs_set_node_ptr_generation(c, 0, lower_gen);
3382 btrfs_mark_buffer_dirty(c);
3385 tree_mod_log_set_root_pointer(root, c, 0);
3386 rcu_assign_pointer(root->node, c);
3388 /* the super has an extra ref to root->node */
3389 free_extent_buffer(old);
3391 add_root_to_dirty_list(root);
3392 extent_buffer_get(c);
3393 path->nodes[level] = c;
3394 path->locks[level] = BTRFS_WRITE_LOCK;
3395 path->slots[level] = 0;
3400 * worker function to insert a single pointer in a node.
3401 * the node should have enough room for the pointer already
3403 * slot and level indicate where you want the key to go, and
3404 * blocknr is the block the key points to.
3406 static void insert_ptr(struct btrfs_trans_handle *trans,
3407 struct btrfs_root *root, struct btrfs_path *path,
3408 struct btrfs_disk_key *key, u64 bytenr,
3409 int slot, int level)
3411 struct extent_buffer *lower;
3415 BUG_ON(!path->nodes[level]);
3416 btrfs_assert_tree_locked(path->nodes[level]);
3417 lower = path->nodes[level];
3418 nritems = btrfs_header_nritems(lower);
3419 BUG_ON(slot > nritems);
3420 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root));
3421 if (slot != nritems) {
3423 tree_mod_log_eb_move(root->fs_info, lower, slot + 1,
3424 slot, nritems - slot);
3425 memmove_extent_buffer(lower,
3426 btrfs_node_key_ptr_offset(slot + 1),
3427 btrfs_node_key_ptr_offset(slot),
3428 (nritems - slot) * sizeof(struct btrfs_key_ptr));
3431 ret = tree_mod_log_insert_key(root->fs_info, lower, slot,
3432 MOD_LOG_KEY_ADD, GFP_NOFS);
3435 btrfs_set_node_key(lower, key, slot);
3436 btrfs_set_node_blockptr(lower, slot, bytenr);
3437 WARN_ON(trans->transid == 0);
3438 btrfs_set_node_ptr_generation(lower, slot, trans->transid);
3439 btrfs_set_header_nritems(lower, nritems + 1);
3440 btrfs_mark_buffer_dirty(lower);
3444 * split the node at the specified level in path in two.
3445 * The path is corrected to point to the appropriate node after the split
3447 * Before splitting this tries to make some room in the node by pushing
3448 * left and right, if either one works, it returns right away.
3450 * returns 0 on success and < 0 on failure
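 *
 * The split point is the middle pointer, (nritems + 1) / 2: the upper half is
 * copied into the new 'split' buffer and a pointer to it is inserted right
 * after the original node in the parent.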
3452 static noinline int split_node(struct btrfs_trans_handle *trans,
3453 struct btrfs_root *root,
3454 struct btrfs_path *path, int level)
3456 struct extent_buffer *c;
3457 struct extent_buffer *split;
3458 struct btrfs_disk_key disk_key;
3463 c = path->nodes[level];
3464 WARN_ON(btrfs_header_generation(c) != trans->transid);
3465 if (c == root->node) {
3467 * trying to split the root, let's make a new one
3469 * tree mod log: We don't log removal of the old root in
3470 * insert_new_root, because that root buffer will be kept as a
3471 * normal node. We are going to log removal of half of the
3472 * elements below with tree_mod_log_eb_copy. We're holding a
3473 * tree lock on the buffer, which is why we cannot race with
3474 * other tree_mod_log users.
3476 ret = insert_new_root(trans, root, path, level + 1);
3480 ret = push_nodes_for_insert(trans, root, path, level);
3481 c = path->nodes[level];
3482 if (!ret && btrfs_header_nritems(c) <
3483 BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
3489 c_nritems = btrfs_header_nritems(c);
3490 mid = (c_nritems + 1) / 2;
3491 btrfs_node_key(c, &disk_key, mid);
3493 split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
3494 &disk_key, level, c->start, 0);
3496 return PTR_ERR(split);
3498 root_add_used(root, root->nodesize);
3500 memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header));
3501 btrfs_set_header_level(split, btrfs_header_level(c));
3502 btrfs_set_header_bytenr(split, split->start);
3503 btrfs_set_header_generation(split, trans->transid);
3504 btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
3505 btrfs_set_header_owner(split, root->root_key.objectid);
3506 write_extent_buffer(split, root->fs_info->fsid,
3507 btrfs_header_fsid(), BTRFS_FSID_SIZE);
3508 write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
3509 btrfs_header_chunk_tree_uuid(split),
3512 ret = tree_mod_log_eb_copy(root->fs_info, split, c, 0,
3513 mid, c_nritems - mid);
3515 btrfs_abort_transaction(trans, root, ret);
3518 copy_extent_buffer(split, c,
3519 btrfs_node_key_ptr_offset(0),
3520 btrfs_node_key_ptr_offset(mid),
3521 (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
3522 btrfs_set_header_nritems(split, c_nritems - mid);
3523 btrfs_set_header_nritems(c, mid);
3526 btrfs_mark_buffer_dirty(c);
3527 btrfs_mark_buffer_dirty(split);
3529 insert_ptr(trans, root, path, &disk_key, split->start,
3530 path->slots[level + 1] + 1, level + 1);
3532 if (path->slots[level] >= mid) {
3533 path->slots[level] -= mid;
3534 btrfs_tree_unlock(c);
3535 free_extent_buffer(c);
3536 path->nodes[level] = split;
3537 path->slots[level + 1] += 1;
3539 btrfs_tree_unlock(split);
3540 free_extent_buffer(split);
3546 * how many bytes are required to store the items in a leaf. start
3547 * and nr indicate which items in the leaf to check. This totals up the
3548 * space used both by the item structs and the item data
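 *
 * Item headers grow from the front of the leaf while item data grows back
 * from the end, so the total here is the data span between the first and
 * last item checked plus one struct btrfs_item header per item.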
3550 static int leaf_space_used(struct extent_buffer *l, int start, int nr)
3552 struct btrfs_item *start_item;
3553 struct btrfs_item *end_item;
3554 struct btrfs_map_token token;
3556 int nritems = btrfs_header_nritems(l);
3557 int end = min(nritems, start + nr) - 1;
3561 btrfs_init_map_token(&token);
3562 start_item = btrfs_item_nr(start);
3563 end_item = btrfs_item_nr(end);
3564 data_len = btrfs_token_item_offset(l, start_item, &token) +
3565 btrfs_token_item_size(l, start_item, &token);
3566 data_len = data_len - btrfs_token_item_offset(l, end_item, &token);
3567 data_len += sizeof(struct btrfs_item) * nr;
3568 WARN_ON(data_len < 0);
3573 * The space between the end of the leaf items and
3574 * the start of the leaf data. IOW, how much room
3575 * the leaf has left for both items and data
3577 noinline int btrfs_leaf_free_space(struct btrfs_root *root,
3578 struct extent_buffer *leaf)
3580 int nritems = btrfs_header_nritems(leaf);
3582 ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
3584 btrfs_crit(root->fs_info,
3585 "leaf free space ret %d, leaf data size %lu, used %d nritems %d",
3586 ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
3587 leaf_space_used(leaf, 0, nritems), nritems);
3593 * min slot controls the lowest index we're willing to push to the
3594 * right. We'll push up to and including min_slot, but no lower
3596 static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
3597 struct btrfs_root *root,
3598 struct btrfs_path *path,
3599 int data_size, int empty,
3600 struct extent_buffer *right,
3601 int free_space, u32 left_nritems,
3604 struct extent_buffer *left = path->nodes[0];
3605 struct extent_buffer *upper = path->nodes[1];
3606 struct btrfs_map_token token;
3607 struct btrfs_disk_key disk_key;
3612 struct btrfs_item *item;
3618 btrfs_init_map_token(&token);
3623 nr = max_t(u32, 1, min_slot);
3625 if (path->slots[0] >= left_nritems)
3626 push_space += data_size;
3628 slot = path->slots[1];
3629 i = left_nritems - 1;
3631 item = btrfs_item_nr(i);
3633 if (!empty && push_items > 0) {
3634 if (path->slots[0] > i)
3636 if (path->slots[0] == i) {
3637 int space = btrfs_leaf_free_space(root, left);
3638 if (space + push_space * 2 > free_space)
3643 if (path->slots[0] == i)
3644 push_space += data_size;
3646 this_item_size = btrfs_item_size(left, item);
3647 if (this_item_size + sizeof(*item) + push_space > free_space)
3651 push_space += this_item_size + sizeof(*item);
3657 if (push_items == 0)
3660 WARN_ON(!empty && push_items == left_nritems);
3662 /* push left to right */
3663 right_nritems = btrfs_header_nritems(right);
3665 push_space = btrfs_item_end_nr(left, left_nritems - push_items);
3666 push_space -= leaf_data_end(root, left);
3668 /* make room in the right data area */
3669 data_end = leaf_data_end(root, right);
3670 memmove_extent_buffer(right,
3671 btrfs_leaf_data(right) + data_end - push_space,
3672 btrfs_leaf_data(right) + data_end,
3673 BTRFS_LEAF_DATA_SIZE(root) - data_end);
3675 /* copy from the left data area */
3676 copy_extent_buffer(right, left, btrfs_leaf_data(right) +
3677 BTRFS_LEAF_DATA_SIZE(root) - push_space,
3678 btrfs_leaf_data(left) + leaf_data_end(root, left),
3681 memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
3682 btrfs_item_nr_offset(0),
3683 right_nritems * sizeof(struct btrfs_item));
3685 /* copy the items from left to right */
3686 copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
3687 btrfs_item_nr_offset(left_nritems - push_items),
3688 push_items * sizeof(struct btrfs_item));
3690 /* update the item pointers */
3691 right_nritems += push_items;
3692 btrfs_set_header_nritems(right, right_nritems);
3693 push_space = BTRFS_LEAF_DATA_SIZE(root);
3694 for (i = 0; i < right_nritems; i++) {
3695 item = btrfs_item_nr(i);
3696 push_space -= btrfs_token_item_size(right, item, &token);
3697 btrfs_set_token_item_offset(right, item, push_space, &token);
3700 left_nritems -= push_items;
3701 btrfs_set_header_nritems(left, left_nritems);
3704 btrfs_mark_buffer_dirty(left);
3706 clean_tree_block(trans, root, left);
3708 btrfs_mark_buffer_dirty(right);
3710 btrfs_item_key(right, &disk_key, 0);
3711 btrfs_set_node_key(upper, &disk_key, slot + 1);
3712 btrfs_mark_buffer_dirty(upper);
3714 /* then fixup the leaf pointer in the path */
3715 if (path->slots[0] >= left_nritems) {
3716 path->slots[0] -= left_nritems;
3717 if (btrfs_header_nritems(path->nodes[0]) == 0)
3718 clean_tree_block(trans, root, path->nodes[0]);
3719 btrfs_tree_unlock(path->nodes[0]);
3720 free_extent_buffer(path->nodes[0]);
3721 path->nodes[0] = right;
3722 path->slots[1] += 1;
3724 btrfs_tree_unlock(right);
3725 free_extent_buffer(right);
3730 btrfs_tree_unlock(right);
3731 free_extent_buffer(right);
3736 * push some data in the path leaf to the right, trying to free up at
3737 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3739 * returns 1 if the push failed because the other node didn't have enough
3740 * room, 0 if everything worked out and < 0 if there were major errors.
3742 * this will push starting from min_slot to the end of the leaf. It won't
3743 * push any slot lower than min_slot
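 *
 * The right sibling is read and write-locked first; its free space is checked
 * once before and once again after COWing it (the "cow and double check"
 * below) before any items are actually moved.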
3745 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
3746 *root, struct btrfs_path *path,
3747 int min_data_size, int data_size,
3748 int empty, u32 min_slot)
3750 struct extent_buffer *left = path->nodes[0];
3751 struct extent_buffer *right;
3752 struct extent_buffer *upper;
3758 if (!path->nodes[1])
3761 slot = path->slots[1];
3762 upper = path->nodes[1];
3763 if (slot >= btrfs_header_nritems(upper) - 1)
3766 btrfs_assert_tree_locked(path->nodes[1]);
3768 right = read_node_slot(root, upper, slot + 1);
3772 btrfs_tree_lock(right);
3773 btrfs_set_lock_blocking(right);
3775 free_space = btrfs_leaf_free_space(root, right);
3776 if (free_space < data_size)
3779 /* cow and double check */
3780 ret = btrfs_cow_block(trans, root, right, upper,
3785 free_space = btrfs_leaf_free_space(root, right);
3786 if (free_space < data_size)
3789 left_nritems = btrfs_header_nritems(left);
3790 if (left_nritems == 0)
3793 if (path->slots[0] == left_nritems && !empty) {
3794 /* Key greater than all keys in the leaf, right neighbor has
3795 * enough room for it and we're not emptying our leaf to delete
3796 * it, therefore use right neighbor to insert the new item and
3797 * no need to touch/dirty our left leaf. */
3798 btrfs_tree_unlock(left);
3799 free_extent_buffer(left);
3800 path->nodes[0] = right;
3806 return __push_leaf_right(trans, root, path, min_data_size, empty,
3807 right, free_space, left_nritems, min_slot);
3809 btrfs_tree_unlock(right);
3810 free_extent_buffer(right);
3815 * push some data in the path leaf to the left, trying to free up at
3816 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3818 * max_slot can put a limit on how far into the leaf we'll push items. The
3819 * item at 'max_slot' won't be touched. Use (u32)-1 to make us do all the items.
3822 static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
3823 struct btrfs_root *root,
3824 struct btrfs_path *path, int data_size,
3825 int empty, struct extent_buffer *left,
3826 int free_space, u32 right_nritems,
3829 struct btrfs_disk_key disk_key;
3830 struct extent_buffer *right = path->nodes[0];
3834 struct btrfs_item *item;
3835 u32 old_left_nritems;
3839 u32 old_left_item_size;
3840 struct btrfs_map_token token;
3842 btrfs_init_map_token(&token);
3845 nr = min(right_nritems, max_slot);
3847 nr = min(right_nritems - 1, max_slot);
3849 for (i = 0; i < nr; i++) {
3850 item = btrfs_item_nr(i);
3852 if (!empty && push_items > 0) {
3853 if (path->slots[0] < i)
3855 if (path->slots[0] == i) {
3856 int space = btrfs_leaf_free_space(root, right);
3857 if (space + push_space * 2 > free_space)
3862 if (path->slots[0] == i)
3863 push_space += data_size;
3865 this_item_size = btrfs_item_size(right, item);
3866 if (this_item_size + sizeof(*item) + push_space > free_space)
3870 push_space += this_item_size + sizeof(*item);
3873 if (push_items == 0) {
3877 WARN_ON(!empty && push_items == btrfs_header_nritems(right));
3879 /* push data from right to left */
3880 copy_extent_buffer(left, right,
3881 btrfs_item_nr_offset(btrfs_header_nritems(left)),
3882 btrfs_item_nr_offset(0),
3883 push_items * sizeof(struct btrfs_item));
3885 push_space = BTRFS_LEAF_DATA_SIZE(root) -
3886 btrfs_item_offset_nr(right, push_items - 1);
3888 copy_extent_buffer(left, right, btrfs_leaf_data(left) +
3889 leaf_data_end(root, left) - push_space,
3890 btrfs_leaf_data(right) +
3891 btrfs_item_offset_nr(right, push_items - 1),
3893 old_left_nritems = btrfs_header_nritems(left);
3894 BUG_ON(old_left_nritems <= 0);
3896 old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
3897 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
3900 item = btrfs_item_nr(i);
3902 ioff = btrfs_token_item_offset(left, item, &token);
3903 btrfs_set_token_item_offset(left, item,
3904 ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size),
3907 btrfs_set_header_nritems(left, old_left_nritems + push_items);
3909 /* fixup right node */
3910 if (push_items > right_nritems)
3911 WARN(1, KERN_CRIT "push items %d nr %u\n", push_items,
3914 if (push_items < right_nritems) {
3915 push_space = btrfs_item_offset_nr(right, push_items - 1) -
3916 leaf_data_end(root, right);
3917 memmove_extent_buffer(right, btrfs_leaf_data(right) +
3918 BTRFS_LEAF_DATA_SIZE(root) - push_space,
3919 btrfs_leaf_data(right) +
3920 leaf_data_end(root, right), push_space);
3922 memmove_extent_buffer(right, btrfs_item_nr_offset(0),
3923 btrfs_item_nr_offset(push_items),
3924 (btrfs_header_nritems(right) - push_items) *
3925 sizeof(struct btrfs_item));
3927 right_nritems -= push_items;
3928 btrfs_set_header_nritems(right, right_nritems);
3929 push_space = BTRFS_LEAF_DATA_SIZE(root);
3930 for (i = 0; i < right_nritems; i++) {
3931 item = btrfs_item_nr(i);
3933 push_space = push_space - btrfs_token_item_size(right,
3935 btrfs_set_token_item_offset(right, item, push_space, &token);
3938 btrfs_mark_buffer_dirty(left);
3940 btrfs_mark_buffer_dirty(right);
3942 clean_tree_block(trans, root, right);
3944 btrfs_item_key(right, &disk_key, 0);
3945 fixup_low_keys(root, path, &disk_key, 1);
3947 /* then fixup the leaf pointer in the path */
3948 if (path->slots[0] < push_items) {
3949 path->slots[0] += old_left_nritems;
3950 btrfs_tree_unlock(path->nodes[0]);
3951 free_extent_buffer(path->nodes[0]);
3952 path->nodes[0] = left;
3953 path->slots[1] -= 1;
3955 btrfs_tree_unlock(left);
3956 free_extent_buffer(left);
3957 path->slots[0] -= push_items;
3959 BUG_ON(path->slots[0] < 0);
3962 btrfs_tree_unlock(left);
3963 free_extent_buffer(left);
3968 * push some data in the path leaf to the left, trying to free up at
3969 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3971 * max_slot can put a limit on how far into the leaf we'll push items. The
3972 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the items.
3975 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
3976 *root, struct btrfs_path *path, int min_data_size,
3977 int data_size, int empty, u32 max_slot)
3979 struct extent_buffer *right = path->nodes[0];
3980 struct extent_buffer *left;
3986 slot = path->slots[1];
3989 if (!path->nodes[1])
3992 right_nritems = btrfs_header_nritems(right);
3993 if (right_nritems == 0)
3996 btrfs_assert_tree_locked(path->nodes[1]);
3998 left = read_node_slot(root, path->nodes[1], slot - 1);
4002 btrfs_tree_lock(left);
4003 btrfs_set_lock_blocking(left);
4005 free_space = btrfs_leaf_free_space(root, left);
4006 if (free_space < data_size) {
4011 /* cow and double check */
4012 ret = btrfs_cow_block(trans, root, left,
4013 path->nodes[1], slot - 1, &left);
4015 /* we hit -ENOSPC, but it isn't fatal here */
4021 free_space = btrfs_leaf_free_space(root, left);
4022 if (free_space < data_size) {
4027 return __push_leaf_left(trans, root, path, min_data_size,
4028 empty, left, free_space, right_nritems,
4031 btrfs_tree_unlock(left);
4032 free_extent_buffer(left);
4037 * split the path's leaf in two, making sure there is at least data_size
4038 * available for the resulting leaf level of the path.
4040 static noinline void copy_for_split(struct btrfs_trans_handle *trans,
4041 struct btrfs_root *root,
4042 struct btrfs_path *path,
4043 struct extent_buffer *l,
4044 struct extent_buffer *right,
4045 int slot, int mid, int nritems)
4050 struct btrfs_disk_key disk_key;
4051 struct btrfs_map_token token;
4053 btrfs_init_map_token(&token);
4055 nritems = nritems - mid;
4056 btrfs_set_header_nritems(right, nritems);
4057 data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);
4059 copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
4060 btrfs_item_nr_offset(mid),
4061 nritems * sizeof(struct btrfs_item));
4063 copy_extent_buffer(right, l,
4064 btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
4065 data_copy_size, btrfs_leaf_data(l) +
4066 leaf_data_end(root, l), data_copy_size);
4068 rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
4069 btrfs_item_end_nr(l, mid);
4071 for (i = 0; i < nritems; i++) {
4072 struct btrfs_item *item = btrfs_item_nr(i);
4075 ioff = btrfs_token_item_offset(right, item, &token);
4076 btrfs_set_token_item_offset(right, item,
4077 ioff + rt_data_off, &token);
4080 btrfs_set_header_nritems(l, mid);
4081 btrfs_item_key(right, &disk_key, 0);
4082 insert_ptr(trans, root, path, &disk_key, right->start,
4083 path->slots[1] + 1, 1);
4085 btrfs_mark_buffer_dirty(right);
4086 btrfs_mark_buffer_dirty(l);
4087 BUG_ON(path->slots[0] != slot);
4090 btrfs_tree_unlock(path->nodes[0]);
4091 free_extent_buffer(path->nodes[0]);
4092 path->nodes[0] = right;
4093 path->slots[0] -= mid;
4094 path->slots[1] += 1;
4096 btrfs_tree_unlock(right);
4097 free_extent_buffer(right);
4100 BUG_ON(path->slots[0] < 0);
4104 * double splits happen when we need to insert a big item in the middle
4105 * of a leaf. A double split can leave us with 3 mostly empty leaves:
4106 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
4109 * We avoid this by trying to push the items on either side of our target
4110 * into the adjacent leaves. If all goes well we can avoid the double split completely.
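 *
 * The push succeeded if our slot ends up at the very start or the very end of
 * its leaf, or if enough free space appeared for the new item; otherwise the
 * caller falls back to the double split.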
4113 static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
4114 struct btrfs_root *root,
4115 struct btrfs_path *path,
4122 int space_needed = data_size;
4124 slot = path->slots[0];
4125 if (slot < btrfs_header_nritems(path->nodes[0]))
4126 space_needed -= btrfs_leaf_free_space(root, path->nodes[0]);
4129 * try to push all the items after our slot into the right leaf
4132 ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot);
4139 nritems = btrfs_header_nritems(path->nodes[0]);
4141 * our goal is to get our slot at the start or end of a leaf. If
4142 * we've done so we're done
4144 if (path->slots[0] == 0 || path->slots[0] == nritems)
4147 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
4150 /* try to push all the items before our slot into the next leaf */
4151 slot = path->slots[0];
4152 ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot);
4165 * split the path's leaf in two, making sure there is at least data_size
4166 * available for the resulting leaf level of the path.
4168 * returns 0 if all went well and < 0 on failure.
4170 static noinline int split_leaf(struct btrfs_trans_handle *trans,
4171 struct btrfs_root *root,
4172 struct btrfs_key *ins_key,
4173 struct btrfs_path *path, int data_size,
4176 struct btrfs_disk_key disk_key;
4177 struct extent_buffer *l;
4181 struct extent_buffer *right;
4185 int num_doubles = 0;
4186 int tried_avoid_double = 0;
4189 slot = path->slots[0];
4190 if (extend && data_size + btrfs_item_size_nr(l, slot) +
4191 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root))
4194 /* first try to make some room by pushing left and right */
4195 if (data_size && path->nodes[1]) {
4196 int space_needed = data_size;
4198 if (slot < btrfs_header_nritems(l))
4199 space_needed -= btrfs_leaf_free_space(root, l);
4201 wret = push_leaf_right(trans, root, path, space_needed,
4202 space_needed, 0, 0);
4206 wret = push_leaf_left(trans, root, path, space_needed,
4207 space_needed, 0, (u32)-1);
4213 /* did the pushes work? */
4214 if (btrfs_leaf_free_space(root, l) >= data_size)
4218 if (!path->nodes[1]) {
4219 ret = insert_new_root(trans, root, path, 1);
4226 slot = path->slots[0];
4227 nritems = btrfs_header_nritems(l);
4228 mid = (nritems + 1) / 2;
4232 leaf_space_used(l, mid, nritems - mid) + data_size >
4233 BTRFS_LEAF_DATA_SIZE(root)) {
4234 if (slot >= nritems) {
4238 if (mid != nritems &&
4239 leaf_space_used(l, mid, nritems - mid) +
4240 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
4241 if (data_size && !tried_avoid_double)
4242 goto push_for_double;
4248 if (leaf_space_used(l, 0, mid) + data_size >
4249 BTRFS_LEAF_DATA_SIZE(root)) {
4250 if (!extend && data_size && slot == 0) {
4252 } else if ((extend || !data_size) && slot == 0) {
4256 if (mid != nritems &&
4257 leaf_space_used(l, mid, nritems - mid) +
4258 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
4259 if (data_size && !tried_avoid_double)
4260 goto push_for_double;
4268 btrfs_cpu_key_to_disk(&disk_key, ins_key);
4270 btrfs_item_key(l, &disk_key, mid);
4272 right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
4273 &disk_key, 0, l->start, 0);
4275 return PTR_ERR(right);
4277 root_add_used(root, root->nodesize);
4279 memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
4280 btrfs_set_header_bytenr(right, right->start);
4281 btrfs_set_header_generation(right, trans->transid);
4282 btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
4283 btrfs_set_header_owner(right, root->root_key.objectid);
4284 btrfs_set_header_level(right, 0);
4285 write_extent_buffer(right, root->fs_info->fsid,
4286 btrfs_header_fsid(), BTRFS_FSID_SIZE);
4288 write_extent_buffer(right, root->fs_info->chunk_tree_uuid,
4289 btrfs_header_chunk_tree_uuid(right),
4294 btrfs_set_header_nritems(right, 0);
4295 insert_ptr(trans, root, path, &disk_key, right->start,
4296 path->slots[1] + 1, 1);
4297 btrfs_tree_unlock(path->nodes[0]);
4298 free_extent_buffer(path->nodes[0]);
4299 path->nodes[0] = right;
4301 path->slots[1] += 1;
4303 btrfs_set_header_nritems(right, 0);
4304 insert_ptr(trans, root, path, &disk_key, right->start,
4306 btrfs_tree_unlock(path->nodes[0]);
4307 free_extent_buffer(path->nodes[0]);
4308 path->nodes[0] = right;
4310 if (path->slots[1] == 0)
4311 fixup_low_keys(root, path, &disk_key, 1);
4313 btrfs_mark_buffer_dirty(right);
4317 copy_for_split(trans, root, path, l, right, slot, mid, nritems);
4320 BUG_ON(num_doubles != 0);
4328 push_for_double_split(trans, root, path, data_size);
4329 tried_avoid_double = 1;
4330 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
4335 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
4336 struct btrfs_root *root,
4337 struct btrfs_path *path, int ins_len)
4339 struct btrfs_key key;
4340 struct extent_buffer *leaf;
4341 struct btrfs_file_extent_item *fi;
4346 leaf = path->nodes[0];
4347 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4349 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
4350 key.type != BTRFS_EXTENT_CSUM_KEY);
4352 if (btrfs_leaf_free_space(root, leaf) >= ins_len)
4355 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4356 if (key.type == BTRFS_EXTENT_DATA_KEY) {
4357 fi = btrfs_item_ptr(leaf, path->slots[0],
4358 struct btrfs_file_extent_item);
4359 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
4361 btrfs_release_path(path);
4363 path->keep_locks = 1;
4364 path->search_for_split = 1;
4365 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
4366 path->search_for_split = 0;
4371 leaf = path->nodes[0];
4372 /* if our item isn't there or got smaller, return now */
4373 if (ret > 0 || item_size != btrfs_item_size_nr(leaf, path->slots[0]))
4376 /* the leaf has changed, it now has room. return now */
4377 if (btrfs_leaf_free_space(root, path->nodes[0]) >= ins_len)
4380 if (key.type == BTRFS_EXTENT_DATA_KEY) {
4381 fi = btrfs_item_ptr(leaf, path->slots[0],
4382 struct btrfs_file_extent_item);
4383 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
4387 btrfs_set_path_blocking(path);
4388 ret = split_leaf(trans, root, &key, path, ins_len, 1);
4392 path->keep_locks = 0;
4393 btrfs_unlock_up_safe(path, 1);
4396 path->keep_locks = 0;
4400 static noinline int split_item(struct btrfs_trans_handle *trans,
4401 struct btrfs_root *root,
4402 struct btrfs_path *path,
4403 struct btrfs_key *new_key,
4404 unsigned long split_offset)
4406 struct extent_buffer *leaf;
4407 struct btrfs_item *item;
4408 struct btrfs_item *new_item;
4414 struct btrfs_disk_key disk_key;
4416 leaf = path->nodes[0];
4417 BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
4419 btrfs_set_path_blocking(path);
4421 item = btrfs_item_nr(path->slots[0]);
4422 orig_offset = btrfs_item_offset(leaf, item);
4423 item_size = btrfs_item_size(leaf, item);
4425 buf = kmalloc(item_size, GFP_NOFS);
4429 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
4430 path->slots[0]), item_size);
4432 slot = path->slots[0] + 1;
4433 nritems = btrfs_header_nritems(leaf);
4434 if (slot != nritems) {
4435 /* shift the items */
4436 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
4437 btrfs_item_nr_offset(slot),
4438 (nritems - slot) * sizeof(struct btrfs_item));
4441 btrfs_cpu_key_to_disk(&disk_key, new_key);
4442 btrfs_set_item_key(leaf, &disk_key, slot);
4444 new_item = btrfs_item_nr(slot);
4446 btrfs_set_item_offset(leaf, new_item, orig_offset);
4447 btrfs_set_item_size(leaf, new_item, item_size - split_offset);
4449 btrfs_set_item_offset(leaf, item,
4450 orig_offset + item_size - split_offset);
4451 btrfs_set_item_size(leaf, item, split_offset);
4453 btrfs_set_header_nritems(leaf, nritems + 1);
4455 /* write the data for the start of the original item */
4456 write_extent_buffer(leaf, buf,
4457 btrfs_item_ptr_offset(leaf, path->slots[0]),
4460 /* write the data for the new item */
4461 write_extent_buffer(leaf, buf + split_offset,
4462 btrfs_item_ptr_offset(leaf, slot),
4463 item_size - split_offset);
4464 btrfs_mark_buffer_dirty(leaf);
4466 BUG_ON(btrfs_leaf_free_space(root, leaf) < 0);
4472 * This function splits a single item into two items,
4473 * giving 'new_key' to the new item and splitting the
4474 * old one at split_offset (from the start of the item).
4476 * The path may be released by this operation. After
4477 * the split, the path is pointing to the old item. The
4478 * new item is going to be in the same node as the old one.
4480 * Note, the item being split must be small enough to live alone on
4481 * a tree block with room for one extra struct btrfs_item
4483 * This allows us to split the item in place, keeping a lock on the
4484 * leaf the entire time.
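 *
 * setup_leaf_for_split() above is what guarantees the leaf has room for the
 * extra struct btrfs_item header before split_item() rewrites the two halves
 * in place.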
4486 int btrfs_split_item(struct btrfs_trans_handle *trans,
4487 struct btrfs_root *root,
4488 struct btrfs_path *path,
4489 struct btrfs_key *new_key,
4490 unsigned long split_offset)
4493 ret = setup_leaf_for_split(trans, root, path,
4494 sizeof(struct btrfs_item));
4498 ret = split_item(trans, root, path, new_key, split_offset);
4503 * This function duplicates an item, giving 'new_key' to the new item.
4504 * It guarantees both items live in the same tree leaf and the new item
4505 * is contiguous with the original item.
4507 * This allows us to split a file extent in place, keeping a lock on the
4508 * leaf the entire time.
4510 int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
4511 struct btrfs_root *root,
4512 struct btrfs_path *path,
4513 struct btrfs_key *new_key)
4515 struct extent_buffer *leaf;
4519 leaf = path->nodes[0];
4520 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4521 ret = setup_leaf_for_split(trans, root, path,
4522 item_size + sizeof(struct btrfs_item));
4527 setup_items_for_insert(root, path, new_key, &item_size,
4528 item_size, item_size +
4529 sizeof(struct btrfs_item), 1);
4530 leaf = path->nodes[0];
4531 memcpy_extent_buffer(leaf,
4532 btrfs_item_ptr_offset(leaf, path->slots[0]),
4533 btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
4539 * make the item pointed to by the path smaller. new_size indicates
4540 * how small to make it, and from_end tells us if we just chop bytes
4541 * off the end of the item or if we shift the item to chop bytes off the front.
4544 void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path,
4545 u32 new_size, int from_end)
4548 struct extent_buffer *leaf;
4549 struct btrfs_item *item;
4551 unsigned int data_end;
4552 unsigned int old_data_start;
4553 unsigned int old_size;
4554 unsigned int size_diff;
4556 struct btrfs_map_token token;
4558 btrfs_init_map_token(&token);
4560 leaf = path->nodes[0];
4561 slot = path->slots[0];
4563 old_size = btrfs_item_size_nr(leaf, slot);
4564 if (old_size == new_size)
4567 nritems = btrfs_header_nritems(leaf);
4568 data_end = leaf_data_end(root, leaf);
4570 old_data_start = btrfs_item_offset_nr(leaf, slot);
4572 size_diff = old_size - new_size;
4575 BUG_ON(slot >= nritems);
4578 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4580 /* first correct the data pointers */
4581 for (i = slot; i < nritems; i++) {
4583 item = btrfs_item_nr(i);
4585 ioff = btrfs_token_item_offset(leaf, item, &token);
4586 btrfs_set_token_item_offset(leaf, item,
4587 ioff + size_diff, &token);
4590 /* shift the data */
4592 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4593 data_end + size_diff, btrfs_leaf_data(leaf) +
4594 data_end, old_data_start + new_size - data_end);
4596 struct btrfs_disk_key disk_key;
4599 btrfs_item_key(leaf, &disk_key, slot);
4601 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
4603 struct btrfs_file_extent_item *fi;
4605 fi = btrfs_item_ptr(leaf, slot,
4606 struct btrfs_file_extent_item);
4607 fi = (struct btrfs_file_extent_item *)(
4608 (unsigned long)fi - size_diff);
4610 if (btrfs_file_extent_type(leaf, fi) ==
4611 BTRFS_FILE_EXTENT_INLINE) {
4612 ptr = btrfs_item_ptr_offset(leaf, slot);
4613 memmove_extent_buffer(leaf, ptr,
4615 BTRFS_FILE_EXTENT_INLINE_DATA_START);
4619 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4620 data_end + size_diff, btrfs_leaf_data(leaf) +
4621 data_end, old_data_start - data_end);
4623 offset = btrfs_disk_key_offset(&disk_key);
4624 btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
4625 btrfs_set_item_key(leaf, &disk_key, slot);
4627 fixup_low_keys(root, path, &disk_key, 1);
4630 item = btrfs_item_nr(slot);
4631 btrfs_set_item_size(leaf, item, new_size);
4632 btrfs_mark_buffer_dirty(leaf);
4634 if (btrfs_leaf_free_space(root, leaf) < 0) {
4635 btrfs_print_leaf(root, leaf);
4641 * make the item pointed to by the path bigger, data_size is the added size.
4643 void btrfs_extend_item(struct btrfs_root *root, struct btrfs_path *path,
4647 struct extent_buffer *leaf;
4648 struct btrfs_item *item;
4650 unsigned int data_end;
4651 unsigned int old_data;
4652 unsigned int old_size;
4654 struct btrfs_map_token token;
4656 btrfs_init_map_token(&token);
4658 leaf = path->nodes[0];
4660 nritems = btrfs_header_nritems(leaf);
4661 data_end = leaf_data_end(root, leaf);
4663 if (btrfs_leaf_free_space(root, leaf) < data_size) {
4664 btrfs_print_leaf(root, leaf);
4667 slot = path->slots[0];
4668 old_data = btrfs_item_end_nr(leaf, slot);
4671 if (slot >= nritems) {
4672 btrfs_print_leaf(root, leaf);
4673 btrfs_crit(root->fs_info, "slot %d too large, nritems %d",
4679 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4681 /* first correct the data pointers */
4682 for (i = slot; i < nritems; i++) {
4684 item = btrfs_item_nr(i);
4686 ioff = btrfs_token_item_offset(leaf, item, &token);
4687 btrfs_set_token_item_offset(leaf, item,
4688 ioff - data_size, &token);
4691 /* shift the data */
4692 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4693 data_end - data_size, btrfs_leaf_data(leaf) +
4694 data_end, old_data - data_end);
4696 data_end = old_data;
4697 old_size = btrfs_item_size_nr(leaf, slot);
4698 item = btrfs_item_nr(slot);
4699 btrfs_set_item_size(leaf, item, old_size + data_size);
4700 btrfs_mark_buffer_dirty(leaf);
4702 if (btrfs_leaf_free_space(root, leaf) < 0) {
4703 btrfs_print_leaf(root, leaf);
4709 * this is a helper for btrfs_insert_empty_items, the main goal here is
4710 * to save stack depth by doing the bulk of the work in a function
4711 * that doesn't call btrfs_search_slot
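 *
 * The caller must already have found the insertion slot (path->slots[0]) and
 * made sure the leaf has total_size bytes free; btrfs_insert_empty_items()
 * below does both via btrfs_search_slot() with ins_len == total_size.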
4713 void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
4714 struct btrfs_key *cpu_key, u32 *data_size,
4715 u32 total_data, u32 total_size, int nr)
4717 struct btrfs_item *item;
4720 unsigned int data_end;
4721 struct btrfs_disk_key disk_key;
4722 struct extent_buffer *leaf;
4724 struct btrfs_map_token token;
4726 if (path->slots[0] == 0) {
4727 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
4728 fixup_low_keys(root, path, &disk_key, 1);
4730 btrfs_unlock_up_safe(path, 1);
4732 btrfs_init_map_token(&token);
4734 leaf = path->nodes[0];
4735 slot = path->slots[0];
4737 nritems = btrfs_header_nritems(leaf);
4738 data_end = leaf_data_end(root, leaf);
4740 if (btrfs_leaf_free_space(root, leaf) < total_size) {
4741 btrfs_print_leaf(root, leaf);
4742 btrfs_crit(root->fs_info, "not enough freespace need %u have %d",
4743 total_size, btrfs_leaf_free_space(root, leaf));
4747 if (slot != nritems) {
4748 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
4750 if (old_data < data_end) {
4751 btrfs_print_leaf(root, leaf);
4752 btrfs_crit(root->fs_info, "slot %d old_data %d data_end %d",
4753 slot, old_data, data_end);
4757 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4759 /* first correct the data pointers */
4760 for (i = slot; i < nritems; i++) {
4763 item = btrfs_item_nr(i);
4764 ioff = btrfs_token_item_offset(leaf, item, &token);
4765 btrfs_set_token_item_offset(leaf, item,
4766 ioff - total_data, &token);
4768 /* shift the items */
4769 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
4770 btrfs_item_nr_offset(slot),
4771 (nritems - slot) * sizeof(struct btrfs_item));
4773 /* shift the data */
4774 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4775 data_end - total_data, btrfs_leaf_data(leaf) +
4776 data_end, old_data - data_end);
4777 data_end = old_data;
4780 /* setup the item for the new data */
4781 for (i = 0; i < nr; i++) {
4782 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
4783 btrfs_set_item_key(leaf, &disk_key, slot + i);
4784 item = btrfs_item_nr(slot + i);
4785 btrfs_set_token_item_offset(leaf, item,
4786 data_end - data_size[i], &token);
4787 data_end -= data_size[i];
4788 btrfs_set_token_item_size(leaf, item, data_size[i], &token);
4791 btrfs_set_header_nritems(leaf, nritems + nr);
4792 btrfs_mark_buffer_dirty(leaf);
4794 if (btrfs_leaf_free_space(root, leaf) < 0) {
4795 btrfs_print_leaf(root, leaf);
4801 * Given a key and some data, insert items into the tree.
4802 * This does all the path init required, making room in the tree if needed.
4804 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
4805 struct btrfs_root *root,
4806 struct btrfs_path *path,
4807 struct btrfs_key *cpu_key, u32 *data_size,
4816 for (i = 0; i < nr; i++)
4817 total_data += data_size[i];
4819 total_size = total_data + (nr * sizeof(struct btrfs_item));
4820 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
4826 slot = path->slots[0];
4829 setup_items_for_insert(root, path, cpu_key, data_size,
4830 total_data, total_size, nr);
4835 * Given a key and some data, insert an item into the tree.
4836 * This does all the path init required, making room in the tree if needed.
4838 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
4839 *root, struct btrfs_key *cpu_key, void *data, u32
4843 struct btrfs_path *path;
4844 struct extent_buffer *leaf;
4847 path = btrfs_alloc_path();
4850 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
4852 leaf = path->nodes[0];
4853 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
4854 write_extent_buffer(leaf, data, ptr, data_size);
4855 btrfs_mark_buffer_dirty(leaf);
4857 btrfs_free_path(path);
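/*
 * Illustrative sketch, not part of the original file: the single-item
 * convenience pattern built on btrfs_insert_item().  The function name
 * and key layout are assumptions for the example only.
 */
static int example_insert_one_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, u64 objectid,
				   void *payload, u32 payload_len)
{
	struct btrfs_key key;

	key.objectid = objectid;
	key.type = BTRFS_XATTR_ITEM_KEY;	/* arbitrary example type */
	key.offset = 0;

	/* allocates a path, reserves leaf space and copies the payload in */
	return btrfs_insert_item(trans, root, &key, payload, payload_len);
}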
4862 * delete the pointer from a given node.
4864 * the tree should have been previously balanced so the deletion does not empty a node.
4867 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
4868 int level, int slot)
4870 struct extent_buffer *parent = path->nodes[level];
4874 nritems = btrfs_header_nritems(parent);
4875 if (slot != nritems - 1) {
4877 tree_mod_log_eb_move(root->fs_info, parent, slot,
4878 slot + 1, nritems - slot - 1);
4879 memmove_extent_buffer(parent,
4880 btrfs_node_key_ptr_offset(slot),
4881 btrfs_node_key_ptr_offset(slot + 1),
4882 sizeof(struct btrfs_key_ptr) *
4883 (nritems - slot - 1));
4885 ret = tree_mod_log_insert_key(root->fs_info, parent, slot,
4886 MOD_LOG_KEY_REMOVE, GFP_NOFS);
4891 btrfs_set_header_nritems(parent, nritems);
4892 if (nritems == 0 && parent == root->node) {
4893 BUG_ON(btrfs_header_level(root->node) != 1);
4894 /* just turn the root into a leaf and break */
4895 btrfs_set_header_level(root->node, 0);
4896 } else if (slot == 0) {
4897 struct btrfs_disk_key disk_key;
4899 btrfs_node_key(parent, &disk_key, 0);
4900 fixup_low_keys(root, path, &disk_key, level + 1);
4902 btrfs_mark_buffer_dirty(parent);
4906 * a helper function to delete the leaf pointed to by path->slots[1] and path->nodes[1].
4909 * This deletes the pointer in path->nodes[1] and frees the leaf
4910 * block extent.
4912 * The path must have already been setup for deleting the leaf, including
4913 * all the proper balancing. path->nodes[1] must be locked.
4915 static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
4916 struct btrfs_root *root,
4917 struct btrfs_path *path,
4918 struct extent_buffer *leaf)
4920 WARN_ON(btrfs_header_generation(leaf) != trans->transid);
4921 del_ptr(root, path, 1, path->slots[1]);
4924 * btrfs_free_extent is expensive, we want to make sure we
4925 * aren't holding any locks when we call it
4927 btrfs_unlock_up_safe(path, 0);
4929 root_sub_used(root, leaf->len);
4931 extent_buffer_get(leaf);
4932 btrfs_free_tree_block(trans, root, leaf, 0, 1);
4933 free_extent_buffer_stale(leaf);
4936 * delete the item at the leaf level in path. If that empties
4937 * the leaf, remove it from the tree
4939 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4940 struct btrfs_path *path, int slot, int nr)
4942 struct extent_buffer *leaf;
4943 struct btrfs_item *item;
4950 struct btrfs_map_token token;
4952 btrfs_init_map_token(&token);
4954 leaf = path->nodes[0];
4955 last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
4957 for (i = 0; i < nr; i++)
4958 dsize += btrfs_item_size_nr(leaf, slot + i);
4960 nritems = btrfs_header_nritems(leaf);
4962 if (slot + nr != nritems) {
4963 int data_end = leaf_data_end(root, leaf);
4965 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4967 btrfs_leaf_data(leaf) + data_end,
4968 last_off - data_end);
4970 for (i = slot + nr; i < nritems; i++) {
4973 item = btrfs_item_nr(i);
4974 ioff = btrfs_token_item_offset(leaf, item, &token);
4975 btrfs_set_token_item_offset(leaf, item,
4976 ioff + dsize, &token);
4979 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
4980 btrfs_item_nr_offset(slot + nr),
4981 sizeof(struct btrfs_item) *
4982 (nritems - slot - nr));
4984 btrfs_set_header_nritems(leaf, nritems - nr);
4987 /* delete the leaf if we've emptied it */
4989 if (leaf == root->node) {
4990 btrfs_set_header_level(leaf, 0);
4992 btrfs_set_path_blocking(path);
4993 clean_tree_block(trans, root, leaf);
4994 btrfs_del_leaf(trans, root, path, leaf);
4997 int used = leaf_space_used(leaf, 0, nritems);
4999 struct btrfs_disk_key disk_key;
5001 btrfs_item_key(leaf, &disk_key, 0);
5002 fixup_low_keys(root, path, &disk_key, 1);
5005 /* delete the leaf if it is mostly empty */
5006 if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) {
5007 /* push_leaf_left fixes the path.
5008 * make sure the path still points to our leaf
5009 * for possible call to del_ptr below
5011 slot = path->slots[1];
5012 extent_buffer_get(leaf);
5014 btrfs_set_path_blocking(path);
5015 wret = push_leaf_left(trans, root, path, 1, 1,
5017 if (wret < 0 && wret != -ENOSPC)
5020 if (path->nodes[0] == leaf &&
5021 btrfs_header_nritems(leaf)) {
5022 wret = push_leaf_right(trans, root, path, 1,
5024 if (wret < 0 && wret != -ENOSPC)
5028 if (btrfs_header_nritems(leaf) == 0) {
5029 path->slots[1] = slot;
5030 btrfs_del_leaf(trans, root, path, leaf);
5031 free_extent_buffer(leaf);
5034 /* if we're still in the path, make sure
5035 * we're dirty. Otherwise, one of the
5036 * push_leaf functions must have already
5037 * dirtied this buffer
5039 if (path->nodes[0] == leaf)
5040 btrfs_mark_buffer_dirty(leaf);
5041 free_extent_buffer(leaf);
5044 btrfs_mark_buffer_dirty(leaf);
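/*
 * Illustrative sketch, not part of the original file: deleting a single
 * item by first locating it with a COW search and then calling
 * btrfs_del_items() on its slot.  The function name is an assumption;
 * real callers usually already hold a path to the item.
 */
static int example_delete_one_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* cow=1 makes the leaf writable; ins_len=-1 marks this as a deletion */
	ret = btrfs_search_slot(trans, root, key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
	if (ret == 0)
		ret = btrfs_del_items(trans, root, path, path->slots[0], 1);

	btrfs_free_path(path);
	return ret;
}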
5051 * search the tree again to find a leaf with lesser keys
5052 * returns 0 if it found something or 1 if there are no lesser leaves.
5053 * returns < 0 on io errors.
5055 * This may release the path, and so you may lose any locks held at the time you call it.
5058 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
5060 struct btrfs_key key;
5061 struct btrfs_disk_key found_key;
5064 btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
5066 if (key.offset > 0) {
5068 } else if (key.type > 0) {
5070 key.offset = (u64)-1;
5071 } else if (key.objectid > 0) {
5074 key.offset = (u64)-1;
5079 btrfs_release_path(path);
5080 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5083 btrfs_item_key(path->nodes[0], &found_key, 0);
5084 ret = comp_keys(&found_key, &key);
5086 * We might have had an item with the previous key in the tree right
5087 * before we released our path. And after we released our path, that
5088 * item might have been pushed to the first slot (0) of the leaf we
5089 * were holding due to a tree balance. Alternatively, an item with the
5090 * previous key can exist as the only element of a leaf (big fat item).
5091 * Therefore account for these 2 cases, so that our callers (like
5092 * btrfs_previous_item) don't miss an existing item with a key matching
5093 * the previous key we computed above.
5101 * A helper function to walk down the tree starting at min_key, and looking
5102 * for nodes or leaves that have a minimum transaction id.
5103 * This is used by the btree defrag code, and tree logging
5105 * This does not cow, but it does stuff the starting key it finds back
5106 * into min_key, so you can call btrfs_search_slot with cow=1 on the
5107 * key and get a writable path.
5109 * This does lock as it descends, and path->keep_locks should be set
5110 * to 1 by the caller.
5112 * This honors path->lowest_level to prevent descent past a given level
5115 * min_trans indicates the oldest transaction that you are interested
5116 * in walking through. Any nodes or leaves older than min_trans are
5117 * skipped over (without reading them).
5119 * returns zero if something useful was found, < 0 on error and 1 if there
5120 * was nothing in the tree that matched the search criteria.
5122 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
5123 struct btrfs_path *path,
5126 struct extent_buffer *cur;
5127 struct btrfs_key found_key;
5133 int keep_locks = path->keep_locks;
5135 path->keep_locks = 1;
5137 cur = btrfs_read_lock_root_node(root);
5138 level = btrfs_header_level(cur);
5139 WARN_ON(path->nodes[level]);
5140 path->nodes[level] = cur;
5141 path->locks[level] = BTRFS_READ_LOCK;
5143 if (btrfs_header_generation(cur) < min_trans) {
5148 nritems = btrfs_header_nritems(cur);
5149 level = btrfs_header_level(cur);
5150 sret = bin_search(cur, min_key, level, &slot);
5152 /* at the lowest level, we're done, setup the path and exit */
5153 if (level == path->lowest_level) {
5154 if (slot >= nritems)
5157 path->slots[level] = slot;
5158 btrfs_item_key_to_cpu(cur, &found_key, slot);
5161 if (sret && slot > 0)
5164 * check this node pointer against the min_trans parameter.
5165 * If it is too old, skip to the next one.
5167 while (slot < nritems) {
5170 gen = btrfs_node_ptr_generation(cur, slot);
5171 if (gen < min_trans) {
5179 * we didn't find a candidate key in this node, walk forward
5180 * and find another one
5182 if (slot >= nritems) {
5183 path->slots[level] = slot;
5184 btrfs_set_path_blocking(path);
5185 sret = btrfs_find_next_key(root, path, min_key, level,
5188 btrfs_release_path(path);
5194 /* save our key for returning back */
5195 btrfs_node_key_to_cpu(cur, &found_key, slot);
5196 path->slots[level] = slot;
5197 if (level == path->lowest_level) {
5201 btrfs_set_path_blocking(path);
5202 cur = read_node_slot(root, cur, slot);
5203 BUG_ON(!cur); /* -ENOMEM */
5205 btrfs_tree_read_lock(cur);
5207 path->locks[level - 1] = BTRFS_READ_LOCK;
5208 path->nodes[level - 1] = cur;
5209 unlock_up(path, level, 1, 0, NULL);
5210 btrfs_clear_path_blocking(path, NULL, 0);
5213 path->keep_locks = keep_locks;
5215 btrfs_unlock_up_safe(path, path->lowest_level + 1);
5216 btrfs_set_path_blocking(path);
5217 memcpy(min_key, &found_key, sizeof(found_key));
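/*
 * Illustrative sketch, not part of the original file: how defrag or
 * tree-log style callers can drive btrfs_search_forward() to visit only
 * keys that live in blocks newer than min_trans.  The function name and
 * the simple key stepping are assumptions for the example.
 */
static int example_walk_newer_than(struct btrfs_root *root, u64 min_trans)
{
	struct btrfs_path *path;
	struct btrfs_key min_key;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	min_key.objectid = 0;
	min_key.type = 0;
	min_key.offset = 0;

	while (1) {
		ret = btrfs_search_forward(root, &min_key, path, min_trans);
		if (ret) {
			/* 1 means nothing newer is left, < 0 is an error */
			if (ret > 0)
				ret = 0;
			break;
		}
		/* min_key now holds the key that was found; process it here */
		btrfs_release_path(path);

		/* step just past the key we saw before searching again */
		if (min_key.offset == (u64)-1)
			break;
		min_key.offset++;
	}

	btrfs_free_path(path);
	return ret;
}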
5222 static void tree_move_down(struct btrfs_root *root,
5223 struct btrfs_path *path,
5224 int *level, int root_level)
5226 BUG_ON(*level == 0);
5227 path->nodes[*level - 1] = read_node_slot(root, path->nodes[*level],
5228 path->slots[*level]);
5229 path->slots[*level - 1] = 0;
5233 static int tree_move_next_or_upnext(struct btrfs_root *root,
5234 struct btrfs_path *path,
5235 int *level, int root_level)
5239 nritems = btrfs_header_nritems(path->nodes[*level]);
5241 path->slots[*level]++;
5243 while (path->slots[*level] >= nritems) {
5244 if (*level == root_level)
5248 path->slots[*level] = 0;
5249 free_extent_buffer(path->nodes[*level]);
5250 path->nodes[*level] = NULL;
5252 path->slots[*level]++;
5254 nritems = btrfs_header_nritems(path->nodes[*level]);
5261 * Returns 1 if it had to move up and next. 0 is returned if it moved only next
5264 static int tree_advance(struct btrfs_root *root,
5265 struct btrfs_path *path,
5266 int *level, int root_level,
5268 struct btrfs_key *key)
5272 if (*level == 0 || !allow_down) {
5273 ret = tree_move_next_or_upnext(root, path, level, root_level);
5275 tree_move_down(root, path, level, root_level);
5280 btrfs_item_key_to_cpu(path->nodes[*level], key,
5281 path->slots[*level]);
5283 btrfs_node_key_to_cpu(path->nodes[*level], key,
5284 path->slots[*level]);
5289 static int tree_compare_item(struct btrfs_root *left_root,
5290 struct btrfs_path *left_path,
5291 struct btrfs_path *right_path,
5296 unsigned long off1, off2;
5298 len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]);
5299 len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]);
5303 off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]);
5304 off2 = btrfs_item_ptr_offset(right_path->nodes[0],
5305 right_path->slots[0]);
5307 read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1);
5309 cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1);
5316 #define ADVANCE_ONLY_NEXT -1
5319 * This function compares two trees and calls the provided callback for
5320 * every changed/new/deleted item it finds.
5321 * If shared tree blocks are encountered, whole subtrees are skipped, making
5322 * the compare pretty fast on snapshotted subvolumes.
5324 * This currently works on commit roots only. As commit roots are read only,
5325 * we don't do any locking. The commit roots are protected with transactions.
5326 * Transactions are ended and rejoined when a commit is tried in between.
5328 * This function checks for modifications done to the trees while comparing.
5329 * If it detects a change, it aborts immediately.
5331 int btrfs_compare_trees(struct btrfs_root *left_root,
5332 struct btrfs_root *right_root,
5333 btrfs_changed_cb_t changed_cb, void *ctx)
5337 struct btrfs_path *left_path = NULL;
5338 struct btrfs_path *right_path = NULL;
5339 struct btrfs_key left_key;
5340 struct btrfs_key right_key;
5341 char *tmp_buf = NULL;
5342 int left_root_level;
5343 int right_root_level;
5346 int left_end_reached;
5347 int right_end_reached;
5355 left_path = btrfs_alloc_path();
5360 right_path = btrfs_alloc_path();
5366 tmp_buf = kmalloc(left_root->nodesize, GFP_NOFS);
5372 left_path->search_commit_root = 1;
5373 left_path->skip_locking = 1;
5374 right_path->search_commit_root = 1;
5375 right_path->skip_locking = 1;
5378 * Strategy: Go to the first items of both trees. Then do
5380 * If both trees are at level 0
5381 * Compare keys of current items
5382 * If left < right treat left item as new, advance left tree
5384 * If left > right treat right item as deleted, advance right tree
5386 * If left == right do deep compare of items, treat as changed if
5387 * needed, advance both trees and repeat
5388 * If both trees are at the same level but not at level 0
5389 * Compare keys of current nodes/leaves
5390 * If left < right advance left tree and repeat
5391 * If left > right advance right tree and repeat
5392 * If left == right compare blockptrs of the next nodes/leaves
5393 * If they match advance both trees but stay at the same level
5395 * If they don't match advance both trees while allowing to go
5397 * If tree levels are different
5398 * Advance the tree that needs it and repeat
5400 * Advancing a tree means:
5401 * If we are at level 0, try to go to the next slot. If that's not
5402 * possible, go one level up and repeat. Stop when we find a level
5403 * where we could go to the next slot. We may at this point be on a node or a leaf.
5406 * If we are not at level 0 and not on shared tree blocks, go one level deeper.
5409 * If we are not at level 0 and on shared tree blocks, go one slot to
5410 * the right if possible or go up and right.
5413 down_read(&left_root->fs_info->commit_root_sem);
5414 left_level = btrfs_header_level(left_root->commit_root);
5415 left_root_level = left_level;
5416 left_path->nodes[left_level] = left_root->commit_root;
5417 extent_buffer_get(left_path->nodes[left_level]);
5419 right_level = btrfs_header_level(right_root->commit_root);
5420 right_root_level = right_level;
5421 right_path->nodes[right_level] = right_root->commit_root;
5422 extent_buffer_get(right_path->nodes[right_level]);
5423 up_read(&left_root->fs_info->commit_root_sem);
5425 if (left_level == 0)
5426 btrfs_item_key_to_cpu(left_path->nodes[left_level],
5427 &left_key, left_path->slots[left_level]);
5429 btrfs_node_key_to_cpu(left_path->nodes[left_level],
5430 &left_key, left_path->slots[left_level]);
5431 if (right_level == 0)
5432 btrfs_item_key_to_cpu(right_path->nodes[right_level],
5433 &right_key, right_path->slots[right_level]);
5435 btrfs_node_key_to_cpu(right_path->nodes[right_level],
5436 &right_key, right_path->slots[right_level]);
5438 left_end_reached = right_end_reached = 0;
5439 advance_left = advance_right = 0;
5442 if (advance_left && !left_end_reached) {
5443 ret = tree_advance(left_root, left_path, &left_level,
5445 advance_left != ADVANCE_ONLY_NEXT,
5448 left_end_reached = ADVANCE;
5451 if (advance_right && !right_end_reached) {
5452 ret = tree_advance(right_root, right_path, &right_level,
5454 advance_right != ADVANCE_ONLY_NEXT,
5457 right_end_reached = ADVANCE;
5461 if (left_end_reached && right_end_reached) {
5464 } else if (left_end_reached) {
5465 if (right_level == 0) {
5466 ret = changed_cb(left_root, right_root,
5467 left_path, right_path,
5469 BTRFS_COMPARE_TREE_DELETED,
5474 advance_right = ADVANCE;
5476 } else if (right_end_reached) {
5477 if (left_level == 0) {
5478 ret = changed_cb(left_root, right_root,
5479 left_path, right_path,
5481 BTRFS_COMPARE_TREE_NEW,
5486 advance_left = ADVANCE;
5490 if (left_level == 0 && right_level == 0) {
5491 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5493 ret = changed_cb(left_root, right_root,
5494 left_path, right_path,
5496 BTRFS_COMPARE_TREE_NEW,
5500 advance_left = ADVANCE;
5501 } else if (cmp > 0) {
5502 ret = changed_cb(left_root, right_root,
5503 left_path, right_path,
5505 BTRFS_COMPARE_TREE_DELETED,
5509 advance_right = ADVANCE;
5511 enum btrfs_compare_tree_result result;
5513 WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
5514 ret = tree_compare_item(left_root, left_path,
5515 right_path, tmp_buf);
5517 result = BTRFS_COMPARE_TREE_CHANGED;
5519 result = BTRFS_COMPARE_TREE_SAME;
5520 ret = changed_cb(left_root, right_root,
5521 left_path, right_path,
5522 &left_key, result, ctx);
5525 advance_left = ADVANCE;
5526 advance_right = ADVANCE;
5528 } else if (left_level == right_level) {
5529 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5531 advance_left = ADVANCE;
5532 } else if (cmp > 0) {
5533 advance_right = ADVANCE;
5535 left_blockptr = btrfs_node_blockptr(
5536 left_path->nodes[left_level],
5537 left_path->slots[left_level]);
5538 right_blockptr = btrfs_node_blockptr(
5539 right_path->nodes[right_level],
5540 right_path->slots[right_level]);
5541 left_gen = btrfs_node_ptr_generation(
5542 left_path->nodes[left_level],
5543 left_path->slots[left_level]);
5544 right_gen = btrfs_node_ptr_generation(
5545 right_path->nodes[right_level],
5546 right_path->slots[right_level]);
5547 if (left_blockptr == right_blockptr &&
5548 left_gen == right_gen) {
5550 * As we're on a shared block, don't
5551 * descend any deeper.
5553 advance_left = ADVANCE_ONLY_NEXT;
5554 advance_right = ADVANCE_ONLY_NEXT;
5556 advance_left = ADVANCE;
5557 advance_right = ADVANCE;
5560 } else if (left_level < right_level) {
5561 advance_right = ADVANCE;
5563 advance_left = ADVANCE;
5568 btrfs_free_path(left_path);
5569 btrfs_free_path(right_path);
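/*
 * Illustrative sketch, not part of the original file: a minimal callback
 * of the kind btrfs_compare_trees() invokes for every difference it
 * finds, here just counting them.  The example_diff_stats structure and
 * function name are assumptions for the example; the send code provides
 * the real callback in fs/btrfs/send.c.
 */
struct example_diff_stats {
	u64 new_items;
	u64 deleted_items;
	u64 changed_items;
};

static int example_changed_cb(struct btrfs_root *left_root,
			      struct btrfs_root *right_root,
			      struct btrfs_path *left_path,
			      struct btrfs_path *right_path,
			      struct btrfs_key *key,
			      enum btrfs_compare_tree_result result,
			      void *ctx)
{
	struct example_diff_stats *stats = ctx;

	switch (result) {
	case BTRFS_COMPARE_TREE_NEW:
		stats->new_items++;
		break;
	case BTRFS_COMPARE_TREE_DELETED:
		stats->deleted_items++;
		break;
	case BTRFS_COMPARE_TREE_CHANGED:
		stats->changed_items++;
		break;
	default:	/* BTRFS_COMPARE_TREE_SAME needs no action here */
		break;
	}
	/* returning non-zero would make btrfs_compare_trees() bail out */
	return 0;
}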
5575 * this is similar to btrfs_next_leaf, but does not try to preserve
5576 * and fixup the path. It looks for and returns the next key in the
5577 * tree based on the current path and the min_trans parameters.
5579 * 0 is returned if another key is found, < 0 if there are any errors
5580 * and 1 is returned if there are no higher keys in the tree
5582 * path->keep_locks should be set to 1 on the search made before
5583 * calling this function.
5585 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
5586 struct btrfs_key *key, int level, u64 min_trans)
5589 struct extent_buffer *c;
5591 WARN_ON(!path->keep_locks);
5592 while (level < BTRFS_MAX_LEVEL) {
5593 if (!path->nodes[level])
5596 slot = path->slots[level] + 1;
5597 c = path->nodes[level];
5599 if (slot >= btrfs_header_nritems(c)) {
5602 struct btrfs_key cur_key;
5603 if (level + 1 >= BTRFS_MAX_LEVEL ||
5604 !path->nodes[level + 1])
5607 if (path->locks[level + 1]) {
5612 slot = btrfs_header_nritems(c) - 1;
5614 btrfs_item_key_to_cpu(c, &cur_key, slot);
5616 btrfs_node_key_to_cpu(c, &cur_key, slot);
5618 orig_lowest = path->lowest_level;
5619 btrfs_release_path(path);
5620 path->lowest_level = level;
5621 ret = btrfs_search_slot(NULL, root, &cur_key, path,
5623 path->lowest_level = orig_lowest;
5627 c = path->nodes[level];
5628 slot = path->slots[level];
5635 btrfs_item_key_to_cpu(c, key, slot);
5637 u64 gen = btrfs_node_ptr_generation(c, slot);
5639 if (gen < min_trans) {
5643 btrfs_node_key_to_cpu(c, key, slot);
5651 * search the tree again to find a leaf with greater keys
5652 * returns 0 if it found something or 1 if there are no greater leaves.
5653 * returns < 0 on io errors.
5655 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
5657 return btrfs_next_old_leaf(root, path, 0);
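/*
 * Illustrative sketch, not part of the original file: the canonical
 * forward scan over an entire tree, stepping leaf by leaf with
 * btrfs_next_leaf() once the current leaf runs out of items.  The
 * function name is an assumption for the example.
 */
static int example_iterate_whole_tree(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	int slot;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* start at the smallest possible key */
	key.objectid = 0;
	key.type = 0;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret)	/* 1: no more leaves, < 0: error */
				break;
			continue;
		}
		btrfs_item_key_to_cpu(leaf, &key, slot);
		/* look at the item that 'key' describes here */
		path->slots[0]++;
	}
	if (ret > 0)
		ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}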
5660 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
5665 struct extent_buffer *c;
5666 struct extent_buffer *next;
5667 struct btrfs_key key;
5670 int old_spinning = path->leave_spinning;
5671 int next_rw_lock = 0;
5673 nritems = btrfs_header_nritems(path->nodes[0]);
5677 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
5682 btrfs_release_path(path);
5684 path->keep_locks = 1;
5685 path->leave_spinning = 1;
5688 ret = btrfs_search_old_slot(root, &key, path, time_seq);
5690 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5691 path->keep_locks = 0;
5696 nritems = btrfs_header_nritems(path->nodes[0]);
5698 * by releasing the path above we dropped all our locks. A balance
5699 * could have added more items next to the key that used to be
5700 * at the very end of the block. So, check again here and
5701 * advance the path if there are now more items available.
5703 if (nritems > 0 && path->slots[0] < nritems - 1) {
5710 * So the above check misses one case:
5711 * - after releasing the path above, someone has removed the item that
5712 * used to be at the very end of the block, and balance between leafs
5713 * gets another one with bigger key.offset to replace it.
5715 * This one should be returned as well, or we can get leaf corruption
5716 * later (esp. in __btrfs_drop_extents()).
5718 * A bit more explanation about this check:
5719 * with ret > 0, the key isn't found, the path points to the slot
5720 * where it should be inserted, so the path->slots[0] item must be the bigger one.
5723 if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) {
5728 while (level < BTRFS_MAX_LEVEL) {
5729 if (!path->nodes[level]) {
5734 slot = path->slots[level] + 1;
5735 c = path->nodes[level];
5736 if (slot >= btrfs_header_nritems(c)) {
5738 if (level == BTRFS_MAX_LEVEL) {
5746 btrfs_tree_unlock_rw(next, next_rw_lock);
5747 free_extent_buffer(next);
5751 next_rw_lock = path->locks[level];
5752 ret = read_block_for_search(NULL, root, path, &next, level,
5758 btrfs_release_path(path);
5762 if (!path->skip_locking) {
5763 ret = btrfs_try_tree_read_lock(next);
5764 if (!ret && time_seq) {
5766 * If we don't get the lock, we may be racing
5767 * with push_leaf_left, holding that lock while
5768 * itself waiting for the leaf we've currently
5769 * locked. To solve this situation, we give up
5770 * on our lock and cycle.
5772 free_extent_buffer(next);
5773 btrfs_release_path(path);
5778 btrfs_set_path_blocking(path);
5779 btrfs_tree_read_lock(next);
5780 btrfs_clear_path_blocking(path, next,
5783 next_rw_lock = BTRFS_READ_LOCK;
5787 path->slots[level] = slot;
5790 c = path->nodes[level];
5791 if (path->locks[level])
5792 btrfs_tree_unlock_rw(c, path->locks[level]);
5794 free_extent_buffer(c);
5795 path->nodes[level] = next;
5796 path->slots[level] = 0;
5797 if (!path->skip_locking)
5798 path->locks[level] = next_rw_lock;
5802 ret = read_block_for_search(NULL, root, path, &next, level,
5808 btrfs_release_path(path);
5812 if (!path->skip_locking) {
5813 ret = btrfs_try_tree_read_lock(next);
5815 btrfs_set_path_blocking(path);
5816 btrfs_tree_read_lock(next);
5817 btrfs_clear_path_blocking(path, next,
5820 next_rw_lock = BTRFS_READ_LOCK;
5825 unlock_up(path, 0, 1, 0, NULL);
5826 path->leave_spinning = old_spinning;
5828 btrfs_set_path_blocking(path);
5834 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
5835 * searching until it gets past min_objectid or finds an item of 'type'
5837 * returns 0 if something is found, 1 if nothing was found and < 0 on error
5839 int btrfs_previous_item(struct btrfs_root *root,
5840 struct btrfs_path *path, u64 min_objectid,
5843 struct btrfs_key found_key;
5844 struct extent_buffer *leaf;
5849 if (path->slots[0] == 0) {
5850 btrfs_set_path_blocking(path);
5851 ret = btrfs_prev_leaf(root, path);
5857 leaf = path->nodes[0];
5858 nritems = btrfs_header_nritems(leaf);
5861 if (path->slots[0] == nritems)
5864 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5865 if (found_key.objectid < min_objectid)
5867 if (found_key.type == type)
5869 if (found_key.objectid == min_objectid &&
5870 found_key.type < type)
5877 * search in extent tree to find a previous Metadata/Data extent item with a minimum objectid.
5880 * returns 0 if something is found, 1 if nothing was found and < 0 on error
5882 int btrfs_previous_extent_item(struct btrfs_root *root,
5883 struct btrfs_path *path, u64 min_objectid)
5885 struct btrfs_key found_key;
5886 struct extent_buffer *leaf;
5891 if (path->slots[0] == 0) {
5892 btrfs_set_path_blocking(path);
5893 ret = btrfs_prev_leaf(root, path);
5899 leaf = path->nodes[0];
5900 nritems = btrfs_header_nritems(leaf);
5903 if (path->slots[0] == nritems)
5906 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5907 if (found_key.objectid < min_objectid)
5909 if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
5910 found_key.type == BTRFS_METADATA_ITEM_KEY)
5912 if (found_key.objectid == min_objectid &&
5913 found_key.type < BTRFS_EXTENT_ITEM_KEY)