2 * Copyright (C) 2007,2008 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
19 #include <linux/sched.h>
22 #include "transaction.h"
23 #include "print-tree.h"
26 static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
27 *root, struct btrfs_path *path, int level);
28 static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
29 *root, struct btrfs_key *ins_key,
30 struct btrfs_path *path, int data_size, int extend);
31 static int push_node_left(struct btrfs_trans_handle *trans,
32 struct btrfs_root *root, struct extent_buffer *dst,
33 struct extent_buffer *src, int empty);
34 static int balance_node_right(struct btrfs_trans_handle *trans,
35 struct btrfs_root *root,
36 struct extent_buffer *dst_buf,
37 struct extent_buffer *src_buf);
38 static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
39 struct btrfs_path *path, int level, int slot);
40 static int setup_items_for_insert(struct btrfs_trans_handle *trans,
41 struct btrfs_root *root, struct btrfs_path *path,
42 struct btrfs_key *cpu_key, u32 *data_size,
43 u32 total_data, u32 total_size, int nr);
46 struct btrfs_path *btrfs_alloc_path(void)
48 struct btrfs_path *path;
49 path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
56 * set all locked nodes in the path to blocking locks. This should
57 * be done before scheduling
59 noinline void btrfs_set_path_blocking(struct btrfs_path *p)
62 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
63 if (p->nodes[i] && p->locks[i])
64 btrfs_set_lock_blocking(p->nodes[i]);
69 * reset all the locked nodes in the path to spinning locks.
71 * held is used to keep lockdep happy, when lockdep is enabled
72 * we set held to a blocking lock before we go around and
73 * retake all the spinlocks in the path. You can safely use NULL
76 noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
77 struct extent_buffer *held)
81 #ifdef CONFIG_DEBUG_LOCK_ALLOC
82 /* lockdep really cares that we take all of these spinlocks
83 * in the right order. If any of the locks in the path are not
84 * currently blocking, it is going to complain. So, make really
85 * really sure by forcing the path to blocking before we clear
89 btrfs_set_lock_blocking(held);
90 btrfs_set_path_blocking(p);
93 for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
94 if (p->nodes[i] && p->locks[i])
95 btrfs_clear_lock_blocking(p->nodes[i]);
98 #ifdef CONFIG_DEBUG_LOCK_ALLOC
100 btrfs_clear_lock_blocking(held);
104 /* this also releases the path */
105 void btrfs_free_path(struct btrfs_path *p)
107 btrfs_release_path(NULL, p);
108 kmem_cache_free(btrfs_path_cachep, p);
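/*
 * A minimal usage sketch of the path helpers above, for a read-only
 * lookup (the key, leaf and slot locals are assumed; error handling is
 * trimmed):
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret == 0) {
 *		leaf = path->nodes[0];
 *		slot = path->slots[0];
 *	}
 *	btrfs_free_path(path);
 */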
112 * path release drops references on the extent buffers in the path
113 * and it drops any locks held by this path
115 * It is safe to call this on paths that have no locks or extent buffers held.
117 noinline void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p)
121 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
126 btrfs_tree_unlock(p->nodes[i]);
129 free_extent_buffer(p->nodes[i]);
135 * safely gets a reference on the root node of a tree. A lock
136 * is not taken, so a concurrent writer may put a different node
137 * at the root of the tree. See btrfs_lock_root_node for the
140 * The extent buffer returned by this has a reference taken, so
141 * it won't disappear. It may stop being the root of the tree
142 * at any time because there are no locks held.
144 struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
146 struct extent_buffer *eb;
147 spin_lock(&root->node_lock);
149 extent_buffer_get(eb);
150 spin_unlock(&root->node_lock);
154 /* loop around taking references on and locking the root node of the
155 * tree until you end up with a lock on the root. A locked buffer
156 * is returned, with a reference held.
158 struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
160 struct extent_buffer *eb;
163 eb = btrfs_root_node(root);
166 spin_lock(&root->node_lock);
167 if (eb == root->node) {
168 spin_unlock(&root->node_lock);
171 spin_unlock(&root->node_lock);
173 btrfs_tree_unlock(eb);
174 free_extent_buffer(eb);
179 /* cowonly roots (everything not a reference counted cow subvolume) just get
180 * put onto a simple dirty list. transaction.c walks this to make sure they
181 * get properly updated on disk.
183 static void add_root_to_dirty_list(struct btrfs_root *root)
185 if (root->track_dirty && list_empty(&root->dirty_list)) {
186 list_add(&root->dirty_list,
187 &root->fs_info->dirty_cowonly_roots);
192 * used by snapshot creation to make a copy of a root for a tree with
193 * a given objectid. The buffer with the new root node is returned in
194 * cow_ret, and this func returns zero on success or a negative error code.
196 int btrfs_copy_root(struct btrfs_trans_handle *trans,
197 struct btrfs_root *root,
198 struct extent_buffer *buf,
199 struct extent_buffer **cow_ret, u64 new_root_objectid)
201 struct extent_buffer *cow;
205 struct btrfs_disk_key disk_key;
207 WARN_ON(root->ref_cows && trans->transid !=
208 root->fs_info->running_transaction->transid);
209 WARN_ON(root->ref_cows && trans->transid != root->last_trans);
211 level = btrfs_header_level(buf);
212 nritems = btrfs_header_nritems(buf);
214 btrfs_item_key(buf, &disk_key, 0);
216 btrfs_node_key(buf, &disk_key, 0);
218 cow = btrfs_alloc_free_block(trans, root, buf->len, 0,
219 new_root_objectid, &disk_key, level,
224 copy_extent_buffer(cow, buf, 0, 0, cow->len);
225 btrfs_set_header_bytenr(cow, cow->start);
226 btrfs_set_header_generation(cow, trans->transid);
227 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
228 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
229 BTRFS_HEADER_FLAG_RELOC);
230 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
231 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
233 btrfs_set_header_owner(cow, new_root_objectid);
235 write_extent_buffer(cow, root->fs_info->fsid,
236 (unsigned long)btrfs_header_fsid(cow),
239 WARN_ON(btrfs_header_generation(buf) > trans->transid);
240 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
241 ret = btrfs_inc_ref(trans, root, cow, 1);
243 ret = btrfs_inc_ref(trans, root, cow, 0);
248 btrfs_mark_buffer_dirty(cow);
254 * check if the tree block can be shared by multiple trees
256 int btrfs_block_can_be_shared(struct btrfs_root *root,
257 struct extent_buffer *buf)
260 * Tree blocks not in reference counted trees and tree roots
261 * are never shared. If a block was allocated after the last
262 * snapshot and the block was not allocated by tree relocation,
263 * we know the block is not shared.
265 if (root->ref_cows &&
266 buf != root->node && buf != root->commit_root &&
267 (btrfs_header_generation(buf) <=
268 btrfs_root_last_snapshot(&root->root_item) ||
269 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
271 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
272 if (root->ref_cows &&
273 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
279 static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
280 struct btrfs_root *root,
281 struct extent_buffer *buf,
282 struct extent_buffer *cow)
291 * Backrefs update rules:
293 * Always use full backrefs for extent pointers in tree block
294 * allocated by tree relocation.
296 * If a shared tree block is no longer referenced by its owner
297 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
298 * use full backrefs for extent pointers in tree block.
300 * If a tree block is being relocated
301 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
302 * use full backrefs for extent pointers in tree block.
303 * The reason for this is that some operations (such as dropping a tree)
304 * are only allowed on blocks that use full backrefs.
307 if (btrfs_block_can_be_shared(root, buf)) {
308 ret = btrfs_lookup_extent_info(trans, root, buf->start,
309 buf->len, &refs, &flags);
314 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
315 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
316 flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
321 owner = btrfs_header_owner(buf);
322 BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
323 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
326 if ((owner == root->root_key.objectid ||
327 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
328 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
329 ret = btrfs_inc_ref(trans, root, buf, 1);
332 if (root->root_key.objectid ==
333 BTRFS_TREE_RELOC_OBJECTID) {
334 ret = btrfs_dec_ref(trans, root, buf, 0);
336 ret = btrfs_inc_ref(trans, root, cow, 1);
339 new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
342 if (root->root_key.objectid ==
343 BTRFS_TREE_RELOC_OBJECTID)
344 ret = btrfs_inc_ref(trans, root, cow, 1);
346 ret = btrfs_inc_ref(trans, root, cow, 0);
349 if (new_flags != 0) {
350 ret = btrfs_set_disk_extent_flags(trans, root,
357 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
358 if (root->root_key.objectid ==
359 BTRFS_TREE_RELOC_OBJECTID)
360 ret = btrfs_inc_ref(trans, root, cow, 1);
362 ret = btrfs_inc_ref(trans, root, cow, 0);
364 ret = btrfs_dec_ref(trans, root, buf, 1);
367 clean_tree_block(trans, root, buf);
373 * does the dirty work in cow of a single block. The parent block (if
374 * supplied) is updated to point to the new cow copy. The new buffer is marked
375 * dirty and returned locked. If you modify the block it needs to be marked
378 * search_start -- an allocation hint for the new block
380 * empty_size -- a hint that you plan on doing more cow. This is the size in
381 * bytes the allocator should try to find free next to the block it returns.
382 * This is just a hint and may be ignored by the allocator.
384 static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
385 struct btrfs_root *root,
386 struct extent_buffer *buf,
387 struct extent_buffer *parent, int parent_slot,
388 struct extent_buffer **cow_ret,
389 u64 search_start, u64 empty_size)
391 struct btrfs_disk_key disk_key;
392 struct extent_buffer *cow;
400 btrfs_assert_tree_locked(buf);
402 WARN_ON(root->ref_cows && trans->transid !=
403 root->fs_info->running_transaction->transid);
404 WARN_ON(root->ref_cows && trans->transid != root->last_trans);
406 level = btrfs_header_level(buf);
409 btrfs_item_key(buf, &disk_key, 0);
411 btrfs_node_key(buf, &disk_key, 0);
413 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
415 parent_start = parent->start;
421 cow = btrfs_alloc_free_block(trans, root, buf->len, parent_start,
422 root->root_key.objectid, &disk_key,
423 level, search_start, empty_size);
427 /* cow is set to blocking by btrfs_init_new_buffer */
429 copy_extent_buffer(cow, buf, 0, 0, cow->len);
430 btrfs_set_header_bytenr(cow, cow->start);
431 btrfs_set_header_generation(cow, trans->transid);
432 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
433 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
434 BTRFS_HEADER_FLAG_RELOC);
435 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
436 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
438 btrfs_set_header_owner(cow, root->root_key.objectid);
440 write_extent_buffer(cow, root->fs_info->fsid,
441 (unsigned long)btrfs_header_fsid(cow),
444 update_ref_for_cow(trans, root, buf, cow);
446 if (buf == root->node) {
447 WARN_ON(parent && parent != buf);
448 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
449 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
450 parent_start = buf->start;
454 spin_lock(&root->node_lock);
456 extent_buffer_get(cow);
457 spin_unlock(&root->node_lock);
459 btrfs_free_extent(trans, root, buf->start, buf->len,
460 parent_start, root->root_key.objectid,
462 free_extent_buffer(buf);
463 add_root_to_dirty_list(root);
465 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
466 parent_start = parent->start;
470 WARN_ON(trans->transid != btrfs_header_generation(parent));
471 btrfs_set_node_blockptr(parent, parent_slot,
473 btrfs_set_node_ptr_generation(parent, parent_slot,
475 btrfs_mark_buffer_dirty(parent);
476 btrfs_free_extent(trans, root, buf->start, buf->len,
477 parent_start, root->root_key.objectid,
481 btrfs_tree_unlock(buf);
482 free_extent_buffer(buf);
483 btrfs_mark_buffer_dirty(cow);
488 static inline int should_cow_block(struct btrfs_trans_handle *trans,
489 struct btrfs_root *root,
490 struct extent_buffer *buf)
492 if (btrfs_header_generation(buf) == trans->transid &&
493 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
494 !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
495 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
501 * cows a single block, see __btrfs_cow_block for the real work.
502 * This version of it has extra checks so that a block isn't cow'd more than
503 * once per transaction, as long as it hasn't been written yet
505 noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
506 struct btrfs_root *root, struct extent_buffer *buf,
507 struct extent_buffer *parent, int parent_slot,
508 struct extent_buffer **cow_ret)
513 if (trans->transaction != root->fs_info->running_transaction) {
514 printk(KERN_CRIT "trans %llu running %llu\n",
515 (unsigned long long)trans->transid,
517 root->fs_info->running_transaction->transid);
520 if (trans->transid != root->fs_info->generation) {
521 printk(KERN_CRIT "trans %llu running %llu\n",
522 (unsigned long long)trans->transid,
523 (unsigned long long)root->fs_info->generation);
527 if (!should_cow_block(trans, root, buf)) {
532 search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);
535 btrfs_set_lock_blocking(parent);
536 btrfs_set_lock_blocking(buf);
538 ret = __btrfs_cow_block(trans, root, buf, parent,
539 parent_slot, cow_ret, search_start, 0);
544 * helper function for defrag to decide if two blocks pointed to by a
545 * node are actually close by
547 static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
549 if (blocknr < other && other - (blocknr + blocksize) < 32768)
551 if (blocknr > other && blocknr - (other + blocksize) < 32768)
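/*
 * Worked example, assuming a 4k blocksize: blocknr 0 and other 20480
 * leave a gap of 20480 - (0 + 4096) = 16384 bytes, which is under the
 * 32k threshold, so the two blocks count as close.
 */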
557 * compare two keys in a memcmp fashion
559 static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
563 btrfs_disk_key_to_cpu(&k1, disk);
565 return btrfs_comp_cpu_keys(&k1, k2);
569 * same as comp_keys only with two btrfs_key's
571 int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
573 if (k1->objectid > k2->objectid)
575 if (k1->objectid < k2->objectid)
577 if (k1->type > k2->type)
579 if (k1->type < k2->type)
581 if (k1->offset > k2->offset)
583 if (k1->offset < k2->offset)
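/*
 * In other words, keys sort by objectid first, then type, then offset,
 * and the comparison follows the usual memcmp convention of returning a
 * positive, negative or zero result.
 */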
589 * this is used by the defrag code to go through all the
590 * leaves pointed to by a node and reallocate them so that
591 * disk order is close to key order
593 int btrfs_realloc_node(struct btrfs_trans_handle *trans,
594 struct btrfs_root *root, struct extent_buffer *parent,
595 int start_slot, int cache_only, u64 *last_ret,
596 struct btrfs_key *progress)
598 struct extent_buffer *cur;
601 u64 search_start = *last_ret;
611 int progress_passed = 0;
612 struct btrfs_disk_key disk_key;
614 parent_level = btrfs_header_level(parent);
615 if (cache_only && parent_level != 1)
618 if (trans->transaction != root->fs_info->running_transaction)
620 if (trans->transid != root->fs_info->generation)
623 parent_nritems = btrfs_header_nritems(parent);
624 blocksize = btrfs_level_size(root, parent_level - 1);
625 end_slot = parent_nritems;
627 if (parent_nritems == 1)
630 btrfs_set_lock_blocking(parent);
632 for (i = start_slot; i < end_slot; i++) {
635 if (!parent->map_token) {
636 map_extent_buffer(parent,
637 btrfs_node_key_ptr_offset(i),
638 sizeof(struct btrfs_key_ptr),
639 &parent->map_token, &parent->kaddr,
640 &parent->map_start, &parent->map_len,
643 btrfs_node_key(parent, &disk_key, i);
644 if (!progress_passed && comp_keys(&disk_key, progress) < 0)
648 blocknr = btrfs_node_blockptr(parent, i);
649 gen = btrfs_node_ptr_generation(parent, i);
651 last_block = blocknr;
654 other = btrfs_node_blockptr(parent, i - 1);
655 close = close_blocks(blocknr, other, blocksize);
657 if (!close && i < end_slot - 2) {
658 other = btrfs_node_blockptr(parent, i + 1);
659 close = close_blocks(blocknr, other, blocksize);
662 last_block = blocknr;
665 if (parent->map_token) {
666 unmap_extent_buffer(parent, parent->map_token,
668 parent->map_token = NULL;
671 cur = btrfs_find_tree_block(root, blocknr, blocksize);
673 uptodate = btrfs_buffer_uptodate(cur, gen);
676 if (!cur || !uptodate) {
678 free_extent_buffer(cur);
682 cur = read_tree_block(root, blocknr,
684 } else if (!uptodate) {
685 btrfs_read_buffer(cur, gen);
688 if (search_start == 0)
689 search_start = last_block;
691 btrfs_tree_lock(cur);
692 btrfs_set_lock_blocking(cur);
693 err = __btrfs_cow_block(trans, root, cur, parent, i,
696 (end_slot - i) * blocksize));
698 btrfs_tree_unlock(cur);
699 free_extent_buffer(cur);
702 search_start = cur->start;
703 last_block = cur->start;
704 *last_ret = search_start;
705 btrfs_tree_unlock(cur);
706 free_extent_buffer(cur);
708 if (parent->map_token) {
709 unmap_extent_buffer(parent, parent->map_token,
711 parent->map_token = NULL;
717 * The leaf data grows from end-to-front in the node.
718 * This returns the offset of the start of the last item's data,
719 * which is where the leaf data stack ends.
721 static inline unsigned int leaf_data_end(struct btrfs_root *root,
722 struct extent_buffer *leaf)
724 u32 nr = btrfs_header_nritems(leaf);
726 return BTRFS_LEAF_DATA_SIZE(root);
727 return btrfs_item_offset_nr(leaf, nr - 1);
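/*
 * Rough leaf layout, for reference:
 *
 *	[header][item 0][item 1]...<free space>...[data 1][data 0]
 *
 * item structs grow toward the end of the block while their data is
 * packed from the end toward the front, so the data offset of the last
 * item marks where the free space in the middle stops.
 */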
731 * extra debugging checks to make sure all the items in a node are
732 * well formed and in the proper order
734 static int check_node(struct btrfs_root *root, struct btrfs_path *path,
737 struct extent_buffer *parent = NULL;
738 struct extent_buffer *node = path->nodes[level];
739 struct btrfs_disk_key parent_key;
740 struct btrfs_disk_key node_key;
743 struct btrfs_key cpukey;
744 u32 nritems = btrfs_header_nritems(node);
746 if (path->nodes[level + 1])
747 parent = path->nodes[level + 1];
749 slot = path->slots[level];
750 BUG_ON(nritems == 0);
752 parent_slot = path->slots[level + 1];
753 btrfs_node_key(parent, &parent_key, parent_slot);
754 btrfs_node_key(node, &node_key, 0);
755 BUG_ON(memcmp(&parent_key, &node_key,
756 sizeof(struct btrfs_disk_key)));
757 BUG_ON(btrfs_node_blockptr(parent, parent_slot) !=
758 btrfs_header_bytenr(node));
760 BUG_ON(nritems > BTRFS_NODEPTRS_PER_BLOCK(root));
762 btrfs_node_key_to_cpu(node, &cpukey, slot - 1);
763 btrfs_node_key(node, &node_key, slot);
764 BUG_ON(comp_keys(&node_key, &cpukey) <= 0);
766 if (slot < nritems - 1) {
767 btrfs_node_key_to_cpu(node, &cpukey, slot + 1);
768 btrfs_node_key(node, &node_key, slot);
769 BUG_ON(comp_keys(&node_key, &cpukey) >= 0);
775 * extra checking to make sure all the items in a leaf are
776 * well formed and in the proper order
778 static int check_leaf(struct btrfs_root *root, struct btrfs_path *path,
781 struct extent_buffer *leaf = path->nodes[level];
782 struct extent_buffer *parent = NULL;
784 struct btrfs_key cpukey;
785 struct btrfs_disk_key parent_key;
786 struct btrfs_disk_key leaf_key;
787 int slot = path->slots[0];
789 u32 nritems = btrfs_header_nritems(leaf);
791 if (path->nodes[level + 1])
792 parent = path->nodes[level + 1];
798 parent_slot = path->slots[level + 1];
799 btrfs_node_key(parent, &parent_key, parent_slot);
800 btrfs_item_key(leaf, &leaf_key, 0);
802 BUG_ON(memcmp(&parent_key, &leaf_key,
803 sizeof(struct btrfs_disk_key)));
804 BUG_ON(btrfs_node_blockptr(parent, parent_slot) !=
805 btrfs_header_bytenr(leaf));
807 if (slot != 0 && slot < nritems - 1) {
808 btrfs_item_key(leaf, &leaf_key, slot);
809 btrfs_item_key_to_cpu(leaf, &cpukey, slot - 1);
810 if (comp_keys(&leaf_key, &cpukey) <= 0) {
811 btrfs_print_leaf(root, leaf);
812 printk(KERN_CRIT "slot %d offset bad key\n", slot);
815 if (btrfs_item_offset_nr(leaf, slot - 1) !=
816 btrfs_item_end_nr(leaf, slot)) {
817 btrfs_print_leaf(root, leaf);
818 printk(KERN_CRIT "slot %d offset bad\n", slot);
822 if (slot < nritems - 1) {
823 btrfs_item_key(leaf, &leaf_key, slot);
824 btrfs_item_key_to_cpu(leaf, &cpukey, slot + 1);
825 BUG_ON(comp_keys(&leaf_key, &cpukey) >= 0);
826 if (btrfs_item_offset_nr(leaf, slot) !=
827 btrfs_item_end_nr(leaf, slot + 1)) {
828 btrfs_print_leaf(root, leaf);
829 printk(KERN_CRIT "slot %d offset bad\n", slot);
833 BUG_ON(btrfs_item_offset_nr(leaf, 0) +
834 btrfs_item_size_nr(leaf, 0) != BTRFS_LEAF_DATA_SIZE(root));
838 static noinline int check_block(struct btrfs_root *root,
839 struct btrfs_path *path, int level)
843 return check_leaf(root, path, level);
844 return check_node(root, path, level);
848 * search for key in the extent_buffer. The items start at offset p,
849 * and they are item_size apart. There are 'max' items in p.
851 * the slot in the array is returned via slot, and it points to
852 * the place where you would insert key if it is not found in
855 * slot may point to max if the key is bigger than all of the keys
857 static noinline int generic_bin_search(struct extent_buffer *eb,
859 int item_size, struct btrfs_key *key,
866 struct btrfs_disk_key *tmp = NULL;
867 struct btrfs_disk_key unaligned;
868 unsigned long offset;
869 char *map_token = NULL;
871 unsigned long map_start = 0;
872 unsigned long map_len = 0;
876 mid = (low + high) / 2;
877 offset = p + mid * item_size;
879 if (!map_token || offset < map_start ||
880 (offset + sizeof(struct btrfs_disk_key)) >
881 map_start + map_len) {
883 unmap_extent_buffer(eb, map_token, KM_USER0);
887 err = map_private_extent_buffer(eb, offset,
888 sizeof(struct btrfs_disk_key),
890 &map_start, &map_len, KM_USER0);
893 tmp = (struct btrfs_disk_key *)(kaddr + offset -
896 read_extent_buffer(eb, &unaligned,
897 offset, sizeof(unaligned));
902 tmp = (struct btrfs_disk_key *)(kaddr + offset -
905 ret = comp_keys(tmp, key);
914 unmap_extent_buffer(eb, map_token, KM_USER0);
920 unmap_extent_buffer(eb, map_token, KM_USER0);
925 * simple bin_search frontend that does the right thing for
928 static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
929 int level, int *slot)
932 return generic_bin_search(eb,
933 offsetof(struct btrfs_leaf, items),
934 sizeof(struct btrfs_item),
935 key, btrfs_header_nritems(eb),
938 return generic_bin_search(eb,
939 offsetof(struct btrfs_node, ptrs),
940 sizeof(struct btrfs_key_ptr),
941 key, btrfs_header_nritems(eb),
947 int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
948 int level, int *slot)
950 return bin_search(eb, key, level, slot);
953 /* given a node and slot number, this reads the block it points to. The
954 * extent buffer is returned with a reference taken (but unlocked).
955 * NULL is returned on error.
957 static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
958 struct extent_buffer *parent, int slot)
960 int level = btrfs_header_level(parent);
963 if (slot >= btrfs_header_nritems(parent))
968 return read_tree_block(root, btrfs_node_blockptr(parent, slot),
969 btrfs_level_size(root, level - 1),
970 btrfs_node_ptr_generation(parent, slot));
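/*
 * Note that the child lives one level below the parent, which is why the
 * block size passed in is btrfs_level_size(root, level - 1); the stored
 * pointer generation lets the read path verify that the block found on
 * disk is the copy this parent expects.
 */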
974 * node level balancing, used to make sure nodes are in proper order for
975 * item deletion. We balance from the top down, so we have to make sure
976 * that a deletion won't leave a node completely empty later on.
978 static noinline int balance_level(struct btrfs_trans_handle *trans,
979 struct btrfs_root *root,
980 struct btrfs_path *path, int level)
982 struct extent_buffer *right = NULL;
983 struct extent_buffer *mid;
984 struct extent_buffer *left = NULL;
985 struct extent_buffer *parent = NULL;
989 int orig_slot = path->slots[level];
990 int err_on_enospc = 0;
996 mid = path->nodes[level];
998 WARN_ON(!path->locks[level]);
999 WARN_ON(btrfs_header_generation(mid) != trans->transid);
1001 orig_ptr = btrfs_node_blockptr(mid, orig_slot);
1003 if (level < BTRFS_MAX_LEVEL - 1)
1004 parent = path->nodes[level + 1];
1005 pslot = path->slots[level + 1];
1008 * deal with the case where there is only one pointer in the root
1009 * by promoting the node below to a root
1012 struct extent_buffer *child;
1014 if (btrfs_header_nritems(mid) != 1)
1017 /* promote the child to a root */
1018 child = read_node_slot(root, mid, 0);
1020 btrfs_tree_lock(child);
1021 btrfs_set_lock_blocking(child);
1022 ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
1025 spin_lock(&root->node_lock);
1027 spin_unlock(&root->node_lock);
1029 add_root_to_dirty_list(root);
1030 btrfs_tree_unlock(child);
1032 path->locks[level] = 0;
1033 path->nodes[level] = NULL;
1034 clean_tree_block(trans, root, mid);
1035 btrfs_tree_unlock(mid);
1036 /* once for the path */
1037 free_extent_buffer(mid);
1038 ret = btrfs_free_extent(trans, root, mid->start, mid->len,
1039 0, root->root_key.objectid, level, 1);
1040 /* once for the root ptr */
1041 free_extent_buffer(mid);
1044 if (btrfs_header_nritems(mid) >
1045 BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
1048 if (btrfs_header_nritems(mid) < 2)
1051 left = read_node_slot(root, parent, pslot - 1);
1053 btrfs_tree_lock(left);
1054 btrfs_set_lock_blocking(left);
1055 wret = btrfs_cow_block(trans, root, left,
1056 parent, pslot - 1, &left);
1062 right = read_node_slot(root, parent, pslot + 1);
1064 btrfs_tree_lock(right);
1065 btrfs_set_lock_blocking(right);
1066 wret = btrfs_cow_block(trans, root, right,
1067 parent, pslot + 1, &right);
1074 /* first, try to make some room in the middle buffer */
1076 orig_slot += btrfs_header_nritems(left);
1077 wret = push_node_left(trans, root, left, mid, 1);
1080 if (btrfs_header_nritems(mid) < 2)
1085 * then try to empty the right most buffer into the middle
1088 wret = push_node_left(trans, root, mid, right, 1);
1089 if (wret < 0 && wret != -ENOSPC)
1091 if (btrfs_header_nritems(right) == 0) {
1092 u64 bytenr = right->start;
1093 u32 blocksize = right->len;
1095 clean_tree_block(trans, root, right);
1096 btrfs_tree_unlock(right);
1097 free_extent_buffer(right);
1099 wret = del_ptr(trans, root, path, level + 1, pslot +
1103 wret = btrfs_free_extent(trans, root, bytenr,
1105 root->root_key.objectid,
1110 struct btrfs_disk_key right_key;
1111 btrfs_node_key(right, &right_key, 0);
1112 btrfs_set_node_key(parent, &right_key, pslot + 1);
1113 btrfs_mark_buffer_dirty(parent);
1116 if (btrfs_header_nritems(mid) == 1) {
1118 * we're not allowed to leave a node with one item in the
1119 * tree during a delete. A deletion from lower in the tree
1120 * could try to delete the only pointer in this node.
1121 * So, pull some keys from the left.
1122 * There has to be a left pointer at this point because
1123 * otherwise we would have pulled some pointers from the
1127 wret = balance_node_right(trans, root, mid, left);
1133 wret = push_node_left(trans, root, left, mid, 1);
1139 if (btrfs_header_nritems(mid) == 0) {
1140 /* we've managed to empty the middle node, drop it */
1141 u64 bytenr = mid->start;
1142 u32 blocksize = mid->len;
1144 clean_tree_block(trans, root, mid);
1145 btrfs_tree_unlock(mid);
1146 free_extent_buffer(mid);
1148 wret = del_ptr(trans, root, path, level + 1, pslot);
1151 wret = btrfs_free_extent(trans, root, bytenr, blocksize,
1152 0, root->root_key.objectid,
1157 /* update the parent key to reflect our changes */
1158 struct btrfs_disk_key mid_key;
1159 btrfs_node_key(mid, &mid_key, 0);
1160 btrfs_set_node_key(parent, &mid_key, pslot);
1161 btrfs_mark_buffer_dirty(parent);
1164 /* update the path */
1166 if (btrfs_header_nritems(left) > orig_slot) {
1167 extent_buffer_get(left);
1168 /* left was locked after cow */
1169 path->nodes[level] = left;
1170 path->slots[level + 1] -= 1;
1171 path->slots[level] = orig_slot;
1173 btrfs_tree_unlock(mid);
1174 free_extent_buffer(mid);
1177 orig_slot -= btrfs_header_nritems(left);
1178 path->slots[level] = orig_slot;
1181 /* double check we haven't messed things up */
1182 check_block(root, path, level);
1184 btrfs_node_blockptr(path->nodes[level], path->slots[level]))
1188 btrfs_tree_unlock(right);
1189 free_extent_buffer(right);
1192 if (path->nodes[level] != left)
1193 btrfs_tree_unlock(left);
1194 free_extent_buffer(left);
1199 /* Node balancing for insertion. Here we only split or push nodes around
1200 * when they are completely full. This is also done top down, so we
1201 * have to be pessimistic.
1203 static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
1204 struct btrfs_root *root,
1205 struct btrfs_path *path, int level)
1207 struct extent_buffer *right = NULL;
1208 struct extent_buffer *mid;
1209 struct extent_buffer *left = NULL;
1210 struct extent_buffer *parent = NULL;
1214 int orig_slot = path->slots[level];
1220 mid = path->nodes[level];
1221 WARN_ON(btrfs_header_generation(mid) != trans->transid);
1222 orig_ptr = btrfs_node_blockptr(mid, orig_slot);
1224 if (level < BTRFS_MAX_LEVEL - 1)
1225 parent = path->nodes[level + 1];
1226 pslot = path->slots[level + 1];
1231 left = read_node_slot(root, parent, pslot - 1);
1233 /* first, try to make some room in the middle buffer */
1237 btrfs_tree_lock(left);
1238 btrfs_set_lock_blocking(left);
1240 left_nr = btrfs_header_nritems(left);
1241 if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
1244 ret = btrfs_cow_block(trans, root, left, parent,
1249 wret = push_node_left(trans, root,
1256 struct btrfs_disk_key disk_key;
1257 orig_slot += left_nr;
1258 btrfs_node_key(mid, &disk_key, 0);
1259 btrfs_set_node_key(parent, &disk_key, pslot);
1260 btrfs_mark_buffer_dirty(parent);
1261 if (btrfs_header_nritems(left) > orig_slot) {
1262 path->nodes[level] = left;
1263 path->slots[level + 1] -= 1;
1264 path->slots[level] = orig_slot;
1265 btrfs_tree_unlock(mid);
1266 free_extent_buffer(mid);
1269 btrfs_header_nritems(left);
1270 path->slots[level] = orig_slot;
1271 btrfs_tree_unlock(left);
1272 free_extent_buffer(left);
1276 btrfs_tree_unlock(left);
1277 free_extent_buffer(left);
1279 right = read_node_slot(root, parent, pslot + 1);
1282 * then try to empty the right most buffer into the middle
1287 btrfs_tree_lock(right);
1288 btrfs_set_lock_blocking(right);
1290 right_nr = btrfs_header_nritems(right);
1291 if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
1294 ret = btrfs_cow_block(trans, root, right,
1300 wret = balance_node_right(trans, root,
1307 struct btrfs_disk_key disk_key;
1309 btrfs_node_key(right, &disk_key, 0);
1310 btrfs_set_node_key(parent, &disk_key, pslot + 1);
1311 btrfs_mark_buffer_dirty(parent);
1313 if (btrfs_header_nritems(mid) <= orig_slot) {
1314 path->nodes[level] = right;
1315 path->slots[level + 1] += 1;
1316 path->slots[level] = orig_slot -
1317 btrfs_header_nritems(mid);
1318 btrfs_tree_unlock(mid);
1319 free_extent_buffer(mid);
1321 btrfs_tree_unlock(right);
1322 free_extent_buffer(right);
1326 btrfs_tree_unlock(right);
1327 free_extent_buffer(right);
1333 * readahead one full node of leaves, finding things that are close
1334 * to the block in 'slot', and triggering ra on them.
1336 static void reada_for_search(struct btrfs_root *root,
1337 struct btrfs_path *path,
1338 int level, int slot, u64 objectid)
1340 struct extent_buffer *node;
1341 struct btrfs_disk_key disk_key;
1346 int direction = path->reada;
1347 struct extent_buffer *eb;
1355 if (!path->nodes[level])
1358 node = path->nodes[level];
1360 search = btrfs_node_blockptr(node, slot);
1361 blocksize = btrfs_level_size(root, level - 1);
1362 eb = btrfs_find_tree_block(root, search, blocksize);
1364 free_extent_buffer(eb);
1370 nritems = btrfs_header_nritems(node);
1373 if (direction < 0) {
1377 } else if (direction > 0) {
1382 if (path->reada < 0 && objectid) {
1383 btrfs_node_key(node, &disk_key, nr);
1384 if (btrfs_disk_key_objectid(&disk_key) != objectid)
1387 search = btrfs_node_blockptr(node, nr);
1388 if ((search <= target && target - search <= 65536) ||
1389 (search > target && search - target <= 65536)) {
1390 readahead_tree_block(root, search, blocksize,
1391 btrfs_node_ptr_generation(node, nr));
1395 if ((nread > 65536 || nscan > 32))
1401 * returns -EAGAIN if it had to drop the path, or zero if everything was in
1404 static noinline int reada_for_balance(struct btrfs_root *root,
1405 struct btrfs_path *path, int level)
1409 struct extent_buffer *parent;
1410 struct extent_buffer *eb;
1417 parent = path->nodes[level + 1];
1421 nritems = btrfs_header_nritems(parent);
1422 slot = path->slots[level + 1];
1423 blocksize = btrfs_level_size(root, level);
1426 block1 = btrfs_node_blockptr(parent, slot - 1);
1427 gen = btrfs_node_ptr_generation(parent, slot - 1);
1428 eb = btrfs_find_tree_block(root, block1, blocksize);
1429 if (eb && btrfs_buffer_uptodate(eb, gen))
1431 free_extent_buffer(eb);
1433 if (slot + 1 < nritems) {
1434 block2 = btrfs_node_blockptr(parent, slot + 1);
1435 gen = btrfs_node_ptr_generation(parent, slot + 1);
1436 eb = btrfs_find_tree_block(root, block2, blocksize);
1437 if (eb && btrfs_buffer_uptodate(eb, gen))
1439 free_extent_buffer(eb);
1441 if (block1 || block2) {
1444 /* release the whole path */
1445 btrfs_release_path(root, path);
1447 /* read the blocks */
1449 readahead_tree_block(root, block1, blocksize, 0);
1451 readahead_tree_block(root, block2, blocksize, 0);
1454 eb = read_tree_block(root, block1, blocksize, 0);
1455 free_extent_buffer(eb);
1458 eb = read_tree_block(root, block2, blocksize, 0);
1459 free_extent_buffer(eb);
1467 * when we walk down the tree, it is usually safe to unlock the higher layers
1468 * in the tree. The exceptions are when our path goes through slot 0, because
1469 * operations on the tree might require changing key pointers higher up in the
1472 * callers might also have set path->keep_locks, which tells this code to keep
1473 * the lock if the path points to the last slot in the block. This is part of
1474 * walking through the tree, and selecting the next slot in the higher block.
1476 * lowest_unlock sets the lowest level in the tree we're allowed to unlock. so
1477 * if lowest_unlock is 1, level 0 won't be unlocked
1479 static noinline void unlock_up(struct btrfs_path *path, int level,
1483 int skip_level = level;
1485 struct extent_buffer *t;
1487 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
1488 if (!path->nodes[i])
1490 if (!path->locks[i])
1492 if (!no_skips && path->slots[i] == 0) {
1496 if (!no_skips && path->keep_locks) {
1499 nritems = btrfs_header_nritems(t);
1500 if (nritems < 1 || path->slots[i] >= nritems - 1) {
1505 if (skip_level < i && i >= lowest_unlock)
1509 if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
1510 btrfs_tree_unlock(t);
1517 * This releases any locks held in the path starting at level and
1518 * going all the way up to the root.
1520 * btrfs_search_slot will keep the lock held on higher nodes in a few
1521 * corner cases, such as COW of the block at slot zero in the node. This
1522 * ignores those rules, and it should only be called when there are no
1523 * more updates to be done higher up in the tree.
1525 noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
1529 if (path->keep_locks)
1532 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
1533 if (!path->nodes[i])
1535 if (!path->locks[i])
1537 btrfs_tree_unlock(path->nodes[i]);
1543 * helper function for btrfs_search_slot. The goal is to find a block
1544 * in cache without setting the path to blocking. If we find the block
1545 * we return zero and the path is unchanged.
1547 * If we can't find the block, we set the path blocking and do some
1548 * reada. -EAGAIN is returned and the search must be repeated.
1551 read_block_for_search(struct btrfs_trans_handle *trans,
1552 struct btrfs_root *root, struct btrfs_path *p,
1553 struct extent_buffer **eb_ret, int level, int slot,
1554 struct btrfs_key *key)
1559 struct extent_buffer *b = *eb_ret;
1560 struct extent_buffer *tmp;
1563 blocknr = btrfs_node_blockptr(b, slot);
1564 gen = btrfs_node_ptr_generation(b, slot);
1565 blocksize = btrfs_level_size(root, level - 1);
1567 tmp = btrfs_find_tree_block(root, blocknr, blocksize);
1568 if (tmp && btrfs_buffer_uptodate(tmp, gen)) {
1570 * we found an up to date block without sleeping, return
1578 * reduce lock contention at high levels
1579 * of the btree by dropping locks before
1580 * we read. Don't release the lock on the current
1581 * level because we need to walk this node to figure
1582 * out which blocks to read.
1584 btrfs_unlock_up_safe(p, level + 1);
1585 btrfs_set_path_blocking(p);
1588 free_extent_buffer(tmp);
1590 reada_for_search(root, p, level, slot, key->objectid);
1592 btrfs_release_path(NULL, p);
1595 tmp = read_tree_block(root, blocknr, blocksize, gen);
1598 * If the read above didn't mark this buffer up to date,
1599 * it will never end up being up to date. Set ret to EIO now
1600 * and give up so that our caller doesn't loop forever
1603 if (!btrfs_buffer_uptodate(tmp, 0))
1605 free_extent_buffer(tmp);
1611 * helper function for btrfs_search_slot. This does all of the checks
1612 * for node-level blocks and does any balancing required based on
1615 * If no extra work was required, zero is returned. If we had to
1616 * drop the path, -EAGAIN is returned and btrfs_search_slot must
1620 setup_nodes_for_search(struct btrfs_trans_handle *trans,
1621 struct btrfs_root *root, struct btrfs_path *p,
1622 struct extent_buffer *b, int level, int ins_len)
1625 if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
1626 BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
1629 sret = reada_for_balance(root, p, level);
1633 btrfs_set_path_blocking(p);
1634 sret = split_node(trans, root, p, level);
1635 btrfs_clear_path_blocking(p, NULL);
1642 b = p->nodes[level];
1643 } else if (ins_len < 0 && btrfs_header_nritems(b) <
1644 BTRFS_NODEPTRS_PER_BLOCK(root) / 2) {
1647 sret = reada_for_balance(root, p, level);
1651 btrfs_set_path_blocking(p);
1652 sret = balance_level(trans, root, p, level);
1653 btrfs_clear_path_blocking(p, NULL);
1659 b = p->nodes[level];
1661 btrfs_release_path(NULL, p);
1664 BUG_ON(btrfs_header_nritems(b) == 1);
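/*
 * To summarize the thresholds above: on an insert-bound search a node is
 * split once it is within 3 pointers of full, and on a delete-bound
 * search it is rebalanced once it drops below half full.
 */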
1675 * look for key in the tree. path is filled in with nodes along the way
1676 * if key is found, we return zero and you can find the item in the leaf
1677 * level of the path (level 0)
1679 * If the key isn't found, the path points to the slot where it should
1680 * be inserted, and 1 is returned. If there are other errors during the
1681 * search a negative error number is returned.
1683 * if ins_len > 0, nodes and leaves will be split as we walk down the
1684 * tree. if ins_len < 0, nodes will be merged as we walk down the tree (if
1687 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
1688 *root, struct btrfs_key *key, struct btrfs_path *p, int
1691 struct extent_buffer *b;
1696 int lowest_unlock = 1;
1697 u8 lowest_level = 0;
1699 lowest_level = p->lowest_level;
1700 WARN_ON(lowest_level && ins_len > 0);
1701 WARN_ON(p->nodes[0] != NULL);
1707 if (p->search_commit_root) {
1708 b = root->commit_root;
1709 extent_buffer_get(b);
1710 if (!p->skip_locking)
1713 if (p->skip_locking)
1714 b = btrfs_root_node(root);
1716 b = btrfs_lock_root_node(root);
1720 level = btrfs_header_level(b);
1723 * setup the path here so we can release it under lock
1724 * contention with the cow code
1726 p->nodes[level] = b;
1727 if (!p->skip_locking)
1728 p->locks[level] = 1;
1732 * if we don't really need to cow this block
1733 * then we don't want to set the path blocking,
1734 * so we test it here
1736 if (!should_cow_block(trans, root, b))
1739 btrfs_set_path_blocking(p);
1741 err = btrfs_cow_block(trans, root, b,
1742 p->nodes[level + 1],
1743 p->slots[level + 1], &b);
1745 free_extent_buffer(b);
1751 BUG_ON(!cow && ins_len);
1752 if (level != btrfs_header_level(b))
1754 level = btrfs_header_level(b);
1756 p->nodes[level] = b;
1757 if (!p->skip_locking)
1758 p->locks[level] = 1;
1760 btrfs_clear_path_blocking(p, NULL);
1763 * we have a lock on b and as long as we aren't changing
1764 * the tree, there is no way for the items in b to change.
1765 * It is safe to drop the lock on our parent before we
1766 * go through the expensive btree search on b.
1768 * If cow is true, then we might be changing slot zero,
1769 * which may require changing the parent. So, we can't
1770 * drop the lock until after we know which slot we're
1774 btrfs_unlock_up_safe(p, level + 1);
1776 ret = check_block(root, p, level);
1782 ret = bin_search(b, key, level, &slot);
1786 if (ret && slot > 0) {
1790 p->slots[level] = slot;
1791 err = setup_nodes_for_search(trans, root, p, b, level,
1799 b = p->nodes[level];
1800 slot = p->slots[level];
1802 unlock_up(p, level, lowest_unlock);
1804 if (level == lowest_level) {
1810 err = read_block_for_search(trans, root, p,
1811 &b, level, slot, key);
1819 if (!p->skip_locking) {
1820 btrfs_clear_path_blocking(p, NULL);
1821 err = btrfs_try_spin_lock(b);
1824 btrfs_set_path_blocking(p);
1826 btrfs_clear_path_blocking(p, b);
1830 p->slots[level] = slot;
1832 btrfs_leaf_free_space(root, b) < ins_len) {
1833 btrfs_set_path_blocking(p);
1834 err = split_leaf(trans, root, key,
1835 p, ins_len, ret == 0);
1836 btrfs_clear_path_blocking(p, NULL);
1844 if (!p->search_for_split)
1845 unlock_up(p, level, lowest_unlock);
1852 * we don't really know what they plan on doing with the path
1853 * from here on, so for now just mark it as blocking
1855 if (!p->leave_spinning)
1856 btrfs_set_path_blocking(p);
1858 btrfs_release_path(root, p);
1863 * adjust the pointers going up the tree, starting at level
1864 * making sure the right key of each node points to 'key'.
1865 * This is used after shifting pointers to the left, so it stops
1866 * fixing up pointers when a given leaf/node is not in slot 0 of the
1869 * If this fails to write a tree block, it returns -1, but continues
1870 * fixing up the blocks in ram so the tree is consistent.
1872 static int fixup_low_keys(struct btrfs_trans_handle *trans,
1873 struct btrfs_root *root, struct btrfs_path *path,
1874 struct btrfs_disk_key *key, int level)
1878 struct extent_buffer *t;
1880 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
1881 int tslot = path->slots[i];
1882 if (!path->nodes[i])
1885 btrfs_set_node_key(t, key, tslot);
1886 btrfs_mark_buffer_dirty(path->nodes[i]);
1896 * This function isn't completely safe. It's the caller's responsibility
1897 * to make sure the new key won't break the key order
1899 int btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
1900 struct btrfs_root *root, struct btrfs_path *path,
1901 struct btrfs_key *new_key)
1903 struct btrfs_disk_key disk_key;
1904 struct extent_buffer *eb;
1907 eb = path->nodes[0];
1908 slot = path->slots[0];
1910 btrfs_item_key(eb, &disk_key, slot - 1);
1911 if (comp_keys(&disk_key, new_key) >= 0)
1914 if (slot < btrfs_header_nritems(eb) - 1) {
1915 btrfs_item_key(eb, &disk_key, slot + 1);
1916 if (comp_keys(&disk_key, new_key) <= 0)
1920 btrfs_cpu_key_to_disk(&disk_key, new_key);
1921 btrfs_set_item_key(eb, &disk_key, slot);
1922 btrfs_mark_buffer_dirty(eb);
1924 fixup_low_keys(trans, root, path, &disk_key, 1);
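/*
 * The checks above only guarantee the new key still sorts between its
 * immediate neighbours in this leaf; fixup_low_keys() then propagates
 * the change to the parent pointers when the item sits in slot 0.
 */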
1929 * try to push data from one node into the next node left in the
1932 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
1933 * error, and > 0 if there was no room in the left hand block.
1935 static int push_node_left(struct btrfs_trans_handle *trans,
1936 struct btrfs_root *root, struct extent_buffer *dst,
1937 struct extent_buffer *src, int empty)
1944 src_nritems = btrfs_header_nritems(src);
1945 dst_nritems = btrfs_header_nritems(dst);
1946 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
1947 WARN_ON(btrfs_header_generation(src) != trans->transid);
1948 WARN_ON(btrfs_header_generation(dst) != trans->transid);
1950 if (!empty && src_nritems <= 8)
1953 if (push_items <= 0)
1957 push_items = min(src_nritems, push_items);
1958 if (push_items < src_nritems) {
1959 /* leave at least 8 pointers in the node if
1960 * we aren't going to empty it
1962 if (src_nritems - push_items < 8) {
1963 if (push_items <= 8)
1969 push_items = min(src_nritems - 8, push_items);
1971 copy_extent_buffer(dst, src,
1972 btrfs_node_key_ptr_offset(dst_nritems),
1973 btrfs_node_key_ptr_offset(0),
1974 push_items * sizeof(struct btrfs_key_ptr));
1976 if (push_items < src_nritems) {
1977 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
1978 btrfs_node_key_ptr_offset(push_items),
1979 (src_nritems - push_items) *
1980 sizeof(struct btrfs_key_ptr));
1982 btrfs_set_header_nritems(src, src_nritems - push_items);
1983 btrfs_set_header_nritems(dst, dst_nritems + push_items);
1984 btrfs_mark_buffer_dirty(src);
1985 btrfs_mark_buffer_dirty(dst);
1991 * try to push data from one node into the next node right in the
1994 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
1995 * error, and > 0 if there was no room in the right hand block.
1997 * this will only push up to 1/2 the contents of the left node over
1999 static int balance_node_right(struct btrfs_trans_handle *trans,
2000 struct btrfs_root *root,
2001 struct extent_buffer *dst,
2002 struct extent_buffer *src)
2010 WARN_ON(btrfs_header_generation(src) != trans->transid);
2011 WARN_ON(btrfs_header_generation(dst) != trans->transid);
2013 src_nritems = btrfs_header_nritems(src);
2014 dst_nritems = btrfs_header_nritems(dst);
2015 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
2016 if (push_items <= 0)
2019 if (src_nritems < 4)
2022 max_push = src_nritems / 2 + 1;
2023 /* don't try to empty the node */
2024 if (max_push >= src_nritems)
2027 if (max_push < push_items)
2028 push_items = max_push;
2030 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
2031 btrfs_node_key_ptr_offset(0),
2033 sizeof(struct btrfs_key_ptr));
2035 copy_extent_buffer(dst, src,
2036 btrfs_node_key_ptr_offset(0),
2037 btrfs_node_key_ptr_offset(src_nritems - push_items),
2038 push_items * sizeof(struct btrfs_key_ptr));
2040 btrfs_set_header_nritems(src, src_nritems - push_items);
2041 btrfs_set_header_nritems(dst, dst_nritems + push_items);
2043 btrfs_mark_buffer_dirty(src);
2044 btrfs_mark_buffer_dirty(dst);
2050 * helper function to insert a new root level in the tree.
2051 * A new node is allocated, and a single item is inserted to
2052 * point to the existing root
2054 * returns zero on success or < 0 on failure.
2056 static noinline int insert_new_root(struct btrfs_trans_handle *trans,
2057 struct btrfs_root *root,
2058 struct btrfs_path *path, int level)
2061 struct extent_buffer *lower;
2062 struct extent_buffer *c;
2063 struct extent_buffer *old;
2064 struct btrfs_disk_key lower_key;
2066 BUG_ON(path->nodes[level]);
2067 BUG_ON(path->nodes[level-1] != root->node);
2069 lower = path->nodes[level-1];
2071 btrfs_item_key(lower, &lower_key, 0);
2073 btrfs_node_key(lower, &lower_key, 0);
2075 c = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
2076 root->root_key.objectid, &lower_key,
2077 level, root->node->start, 0);
2081 memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
2082 btrfs_set_header_nritems(c, 1);
2083 btrfs_set_header_level(c, level);
2084 btrfs_set_header_bytenr(c, c->start);
2085 btrfs_set_header_generation(c, trans->transid);
2086 btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
2087 btrfs_set_header_owner(c, root->root_key.objectid);
2089 write_extent_buffer(c, root->fs_info->fsid,
2090 (unsigned long)btrfs_header_fsid(c),
2093 write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
2094 (unsigned long)btrfs_header_chunk_tree_uuid(c),
2097 btrfs_set_node_key(c, &lower_key, 0);
2098 btrfs_set_node_blockptr(c, 0, lower->start);
2099 lower_gen = btrfs_header_generation(lower);
2100 WARN_ON(lower_gen != trans->transid);
2102 btrfs_set_node_ptr_generation(c, 0, lower_gen);
2104 btrfs_mark_buffer_dirty(c);
2106 spin_lock(&root->node_lock);
2109 spin_unlock(&root->node_lock);
2111 /* the super has an extra ref to root->node */
2112 free_extent_buffer(old);
2114 add_root_to_dirty_list(root);
2115 extent_buffer_get(c);
2116 path->nodes[level] = c;
2117 path->locks[level] = 1;
2118 path->slots[level] = 0;
2123 * worker function to insert a single pointer in a node.
2124 * the node should have enough room for the pointer already
2126 * slot and level indicate where you want the key to go, and
2127 * bytenr is the block the key points to.
2129 * returns zero on success and < 0 on any error
2131 static int insert_ptr(struct btrfs_trans_handle *trans, struct btrfs_root
2132 *root, struct btrfs_path *path, struct btrfs_disk_key
2133 *key, u64 bytenr, int slot, int level)
2135 struct extent_buffer *lower;
2138 BUG_ON(!path->nodes[level]);
2139 lower = path->nodes[level];
2140 nritems = btrfs_header_nritems(lower);
2141 BUG_ON(slot > nritems);
2142 if (nritems == BTRFS_NODEPTRS_PER_BLOCK(root))
2144 if (slot != nritems) {
2145 memmove_extent_buffer(lower,
2146 btrfs_node_key_ptr_offset(slot + 1),
2147 btrfs_node_key_ptr_offset(slot),
2148 (nritems - slot) * sizeof(struct btrfs_key_ptr));
2150 btrfs_set_node_key(lower, key, slot);
2151 btrfs_set_node_blockptr(lower, slot, bytenr);
2152 WARN_ON(trans->transid == 0);
2153 btrfs_set_node_ptr_generation(lower, slot, trans->transid);
2154 btrfs_set_header_nritems(lower, nritems + 1);
2155 btrfs_mark_buffer_dirty(lower);
2160 * split the node at the specified level in path in two.
2161 * The path is corrected to point to the appropriate node after the split
2163 * Before splitting this tries to make some room in the node by pushing
2164 * left and right, if either one works, it returns right away.
2166 * returns 0 on success and < 0 on failure
2168 static noinline int split_node(struct btrfs_trans_handle *trans,
2169 struct btrfs_root *root,
2170 struct btrfs_path *path, int level)
2172 struct extent_buffer *c;
2173 struct extent_buffer *split;
2174 struct btrfs_disk_key disk_key;
2180 c = path->nodes[level];
2181 WARN_ON(btrfs_header_generation(c) != trans->transid);
2182 if (c == root->node) {
2183 /* trying to split the root, lets make a new one */
2184 ret = insert_new_root(trans, root, path, level + 1);
2188 ret = push_nodes_for_insert(trans, root, path, level);
2189 c = path->nodes[level];
2190 if (!ret && btrfs_header_nritems(c) <
2191 BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
2197 c_nritems = btrfs_header_nritems(c);
2198 mid = (c_nritems + 1) / 2;
2199 btrfs_node_key(c, &disk_key, mid);
2201 split = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
2202 root->root_key.objectid,
2203 &disk_key, level, c->start, 0);
2205 return PTR_ERR(split);
2207 memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header));
2208 btrfs_set_header_level(split, btrfs_header_level(c));
2209 btrfs_set_header_bytenr(split, split->start);
2210 btrfs_set_header_generation(split, trans->transid);
2211 btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
2212 btrfs_set_header_owner(split, root->root_key.objectid);
2213 write_extent_buffer(split, root->fs_info->fsid,
2214 (unsigned long)btrfs_header_fsid(split),
2216 write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
2217 (unsigned long)btrfs_header_chunk_tree_uuid(split),
2221 copy_extent_buffer(split, c,
2222 btrfs_node_key_ptr_offset(0),
2223 btrfs_node_key_ptr_offset(mid),
2224 (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
2225 btrfs_set_header_nritems(split, c_nritems - mid);
2226 btrfs_set_header_nritems(c, mid);
2229 btrfs_mark_buffer_dirty(c);
2230 btrfs_mark_buffer_dirty(split);
2232 wret = insert_ptr(trans, root, path, &disk_key, split->start,
2233 path->slots[level + 1] + 1,
2238 if (path->slots[level] >= mid) {
2239 path->slots[level] -= mid;
2240 btrfs_tree_unlock(c);
2241 free_extent_buffer(c);
2242 path->nodes[level] = split;
2243 path->slots[level + 1] += 1;
2245 btrfs_tree_unlock(split);
2246 free_extent_buffer(split);
2252 * how many bytes are required to store the items in a leaf. start
2253 * and nr indicate which items in the leaf to check. This totals up the
2254 * space used both by the item structs and the item data
2256 static int leaf_space_used(struct extent_buffer *l, int start, int nr)
2259 int nritems = btrfs_header_nritems(l);
2260 int end = min(nritems, start + nr) - 1;
2264 data_len = btrfs_item_end_nr(l, start);
2265 data_len = data_len - btrfs_item_offset_nr(l, end);
2266 data_len += sizeof(struct btrfs_item) * nr;
2267 WARN_ON(data_len < 0);
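/*
 * Example with hypothetical offsets: if item 'start' has its data ending
 * at offset 4096 and item 'end' has its data starting at offset 3900,
 * the range uses 4096 - 3900 = 196 bytes of data plus nr struct
 * btrfs_item headers, since leaf data is packed from the end of the
 * block toward the front.
 */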
2272 * The space between the end of the leaf items and
2273 * the start of the leaf data. IOW, how much room
2274 * the leaf has left for both items and data
2276 noinline int btrfs_leaf_free_space(struct btrfs_root *root,
2277 struct extent_buffer *leaf)
2279 int nritems = btrfs_header_nritems(leaf);
2281 ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
2283 printk(KERN_CRIT "leaf free space ret %d, leaf data size %lu, "
2284 "used %d nritems %d\n",
2285 ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
2286 leaf_space_used(leaf, 0, nritems), nritems);
2291 static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
2292 struct btrfs_root *root,
2293 struct btrfs_path *path,
2294 int data_size, int empty,
2295 struct extent_buffer *right,
2296 int free_space, u32 left_nritems)
2298 struct extent_buffer *left = path->nodes[0];
2299 struct extent_buffer *upper = path->nodes[1];
2300 struct btrfs_disk_key disk_key;
2305 struct btrfs_item *item;
2316 if (path->slots[0] >= left_nritems)
2317 push_space += data_size;
2319 slot = path->slots[1];
2320 i = left_nritems - 1;
2322 item = btrfs_item_nr(left, i);
2324 if (!empty && push_items > 0) {
2325 if (path->slots[0] > i)
2327 if (path->slots[0] == i) {
2328 int space = btrfs_leaf_free_space(root, left);
2329 if (space + push_space * 2 > free_space)
2334 if (path->slots[0] == i)
2335 push_space += data_size;
2337 if (!left->map_token) {
2338 map_extent_buffer(left, (unsigned long)item,
2339 sizeof(struct btrfs_item),
2340 &left->map_token, &left->kaddr,
2341 &left->map_start, &left->map_len,
2345 this_item_size = btrfs_item_size(left, item);
2346 if (this_item_size + sizeof(*item) + push_space > free_space)
2350 push_space += this_item_size + sizeof(*item);
2355 if (left->map_token) {
2356 unmap_extent_buffer(left, left->map_token, KM_USER1);
2357 left->map_token = NULL;
2360 if (push_items == 0)
2363 if (!empty && push_items == left_nritems)
2366 /* push left to right */
2367 right_nritems = btrfs_header_nritems(right);
2369 push_space = btrfs_item_end_nr(left, left_nritems - push_items);
2370 push_space -= leaf_data_end(root, left);
2372 /* make room in the right data area */
2373 data_end = leaf_data_end(root, right);
2374 memmove_extent_buffer(right,
2375 btrfs_leaf_data(right) + data_end - push_space,
2376 btrfs_leaf_data(right) + data_end,
2377 BTRFS_LEAF_DATA_SIZE(root) - data_end);
2379 /* copy from the left data area */
2380 copy_extent_buffer(right, left, btrfs_leaf_data(right) +
2381 BTRFS_LEAF_DATA_SIZE(root) - push_space,
2382 btrfs_leaf_data(left) + leaf_data_end(root, left),
2385 memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
2386 btrfs_item_nr_offset(0),
2387 right_nritems * sizeof(struct btrfs_item));
2389 /* copy the items from left to right */
2390 copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
2391 btrfs_item_nr_offset(left_nritems - push_items),
2392 push_items * sizeof(struct btrfs_item));
2394 /* update the item pointers */
2395 right_nritems += push_items;
2396 btrfs_set_header_nritems(right, right_nritems);
2397 push_space = BTRFS_LEAF_DATA_SIZE(root);
2398 for (i = 0; i < right_nritems; i++) {
2399 item = btrfs_item_nr(right, i);
2400 if (!right->map_token) {
2401 map_extent_buffer(right, (unsigned long)item,
2402 sizeof(struct btrfs_item),
2403 &right->map_token, &right->kaddr,
2404 &right->map_start, &right->map_len,
2407 push_space -= btrfs_item_size(right, item);
2408 btrfs_set_item_offset(right, item, push_space);
2411 if (right->map_token) {
2412 unmap_extent_buffer(right, right->map_token, KM_USER1);
2413 right->map_token = NULL;
2415 left_nritems -= push_items;
2416 btrfs_set_header_nritems(left, left_nritems);
2419 btrfs_mark_buffer_dirty(left);
2420 btrfs_mark_buffer_dirty(right);
2422 btrfs_item_key(right, &disk_key, 0);
2423 btrfs_set_node_key(upper, &disk_key, slot + 1);
2424 btrfs_mark_buffer_dirty(upper);
2426 /* then fixup the leaf pointer in the path */
2427 if (path->slots[0] >= left_nritems) {
2428 path->slots[0] -= left_nritems;
2429 if (btrfs_header_nritems(path->nodes[0]) == 0)
2430 clean_tree_block(trans, root, path->nodes[0]);
2431 btrfs_tree_unlock(path->nodes[0]);
2432 free_extent_buffer(path->nodes[0]);
2433 path->nodes[0] = right;
2434 path->slots[1] += 1;
2436 btrfs_tree_unlock(right);
2437 free_extent_buffer(right);
2442 btrfs_tree_unlock(right);
2443 free_extent_buffer(right);
2448 * push some data in the path leaf to the right, trying to free up at
2449 * least data_size bytes.
2451 * returns 1 if the push failed because the other node didn't have enough
2452 * room, 0 if everything worked out and < 0 if there were major errors.
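 *
 * A rough caller sketch (this mirrors what split_leaf() further down does;
 * declarations and error handling are trimmed):
 *
 *	wret = push_leaf_right(trans, root, path, data_size, 0);
 *	if (wret < 0)
 *		return wret;
 *	if (wret) {
 *		wret = push_leaf_left(trans, root, path, data_size, 0);
 *		if (wret < 0)
 *			return wret;
 *	}
 *	if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
 *		return 0;	(the pushes made enough room)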
2454 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
2455 *root, struct btrfs_path *path, int data_size,
2458 struct extent_buffer *left = path->nodes[0];
2459 struct extent_buffer *right;
2460 struct extent_buffer *upper;
2466 if (!path->nodes[1])
2469 slot = path->slots[1];
2470 upper = path->nodes[1];
2471 if (slot >= btrfs_header_nritems(upper) - 1)
2474 btrfs_assert_tree_locked(path->nodes[1]);
2476 right = read_node_slot(root, upper, slot + 1);
2477 btrfs_tree_lock(right);
2478 btrfs_set_lock_blocking(right);
2480 free_space = btrfs_leaf_free_space(root, right);
2481 if (free_space < data_size)
2484 /* cow and double check */
2485 ret = btrfs_cow_block(trans, root, right, upper,
2490 free_space = btrfs_leaf_free_space(root, right);
2491 if (free_space < data_size)
2494 left_nritems = btrfs_header_nritems(left);
2495 if (left_nritems == 0)
2498 return __push_leaf_right(trans, root, path, data_size, empty,
2499 right, free_space, left_nritems);
2501 btrfs_tree_unlock(right);
2502 free_extent_buffer(right);
2507 * push some data in the path leaf to the left, trying to free up at
2508 * least data_size bytes. returns zero if the push worked, nonzero otherwise
2510 static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
2511 struct btrfs_root *root,
2512 struct btrfs_path *path, int data_size,
2513 int empty, struct extent_buffer *left,
2514 int free_space, int right_nritems)
2516 struct btrfs_disk_key disk_key;
2517 struct extent_buffer *right = path->nodes[0];
2522 struct btrfs_item *item;
2523 u32 old_left_nritems;
2528 u32 old_left_item_size;
2530 slot = path->slots[1];
2535 nr = right_nritems - 1;
2537 for (i = 0; i < nr; i++) {
2538 item = btrfs_item_nr(right, i);
2539 if (!right->map_token) {
2540 map_extent_buffer(right, (unsigned long)item,
2541 sizeof(struct btrfs_item),
2542 &right->map_token, &right->kaddr,
2543 &right->map_start, &right->map_len,
2547 if (!empty && push_items > 0) {
2548 if (path->slots[0] < i)
2550 if (path->slots[0] == i) {
2551 int space = btrfs_leaf_free_space(root, right);
2552 if (space + push_space * 2 > free_space)
2557 if (path->slots[0] == i)
2558 push_space += data_size;
2560 this_item_size = btrfs_item_size(right, item);
2561 if (this_item_size + sizeof(*item) + push_space > free_space)
2565 push_space += this_item_size + sizeof(*item);
2568 if (right->map_token) {
2569 unmap_extent_buffer(right, right->map_token, KM_USER1);
2570 right->map_token = NULL;
2573 if (push_items == 0) {
2577 if (!empty && push_items == btrfs_header_nritems(right))
2580 /* push data from right to left */
2581 copy_extent_buffer(left, right,
2582 btrfs_item_nr_offset(btrfs_header_nritems(left)),
2583 btrfs_item_nr_offset(0),
2584 push_items * sizeof(struct btrfs_item));
2586 push_space = BTRFS_LEAF_DATA_SIZE(root) -
2587 btrfs_item_offset_nr(right, push_items - 1);
2589 copy_extent_buffer(left, right, btrfs_leaf_data(left) +
2590 leaf_data_end(root, left) - push_space,
2591 btrfs_leaf_data(right) +
2592 btrfs_item_offset_nr(right, push_items - 1),
2594 old_left_nritems = btrfs_header_nritems(left);
2595 BUG_ON(old_left_nritems <= 0);
2597 old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
2598 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
2601 item = btrfs_item_nr(left, i);
2602 if (!left->map_token) {
2603 map_extent_buffer(left, (unsigned long)item,
2604 sizeof(struct btrfs_item),
2605 &left->map_token, &left->kaddr,
2606 &left->map_start, &left->map_len,
2610 ioff = btrfs_item_offset(left, item);
2611 btrfs_set_item_offset(left, item,
2612 ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size));
2614 btrfs_set_header_nritems(left, old_left_nritems + push_items);
2615 if (left->map_token) {
2616 unmap_extent_buffer(left, left->map_token, KM_USER1);
2617 left->map_token = NULL;
2620 /* fixup right node */
2621 if (push_items > right_nritems) {
2622 printk(KERN_CRIT "push items %d nr %u\n", push_items,
2627 if (push_items < right_nritems) {
2628 push_space = btrfs_item_offset_nr(right, push_items - 1) -
2629 leaf_data_end(root, right);
2630 memmove_extent_buffer(right, btrfs_leaf_data(right) +
2631 BTRFS_LEAF_DATA_SIZE(root) - push_space,
2632 btrfs_leaf_data(right) +
2633 leaf_data_end(root, right), push_space);
2635 memmove_extent_buffer(right, btrfs_item_nr_offset(0),
2636 btrfs_item_nr_offset(push_items),
2637 (btrfs_header_nritems(right) - push_items) *
2638 sizeof(struct btrfs_item));
2640 right_nritems -= push_items;
2641 btrfs_set_header_nritems(right, right_nritems);
2642 push_space = BTRFS_LEAF_DATA_SIZE(root);
2643 for (i = 0; i < right_nritems; i++) {
2644 item = btrfs_item_nr(right, i);
2646 if (!right->map_token) {
2647 map_extent_buffer(right, (unsigned long)item,
2648 sizeof(struct btrfs_item),
2649 &right->map_token, &right->kaddr,
2650 &right->map_start, &right->map_len,
2654 push_space = push_space - btrfs_item_size(right, item);
2655 btrfs_set_item_offset(right, item, push_space);
2657 if (right->map_token) {
2658 unmap_extent_buffer(right, right->map_token, KM_USER1);
2659 right->map_token = NULL;
2662 btrfs_mark_buffer_dirty(left);
2664 btrfs_mark_buffer_dirty(right);
2666 btrfs_item_key(right, &disk_key, 0);
2667 wret = fixup_low_keys(trans, root, path, &disk_key, 1);
2671 /* then fixup the leaf pointer in the path */
2672 if (path->slots[0] < push_items) {
2673 path->slots[0] += old_left_nritems;
2674 if (btrfs_header_nritems(path->nodes[0]) == 0)
2675 clean_tree_block(trans, root, path->nodes[0]);
2676 btrfs_tree_unlock(path->nodes[0]);
2677 free_extent_buffer(path->nodes[0]);
2678 path->nodes[0] = left;
2679 path->slots[1] -= 1;
2681 btrfs_tree_unlock(left);
2682 free_extent_buffer(left);
2683 path->slots[0] -= push_items;
2685 BUG_ON(path->slots[0] < 0);
2688 btrfs_tree_unlock(left);
2689 free_extent_buffer(left);
2694 * push some data in the path leaf to the left, trying to free up at
2695 * least data_size bytes. returns zero if the push worked, nonzero otherwise
2697 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
2698 *root, struct btrfs_path *path, int data_size,
2701 struct extent_buffer *right = path->nodes[0];
2702 struct extent_buffer *left;
2708 slot = path->slots[1];
2711 if (!path->nodes[1])
2714 right_nritems = btrfs_header_nritems(right);
2715 if (right_nritems == 0)
2718 btrfs_assert_tree_locked(path->nodes[1]);
2720 left = read_node_slot(root, path->nodes[1], slot - 1);
2721 btrfs_tree_lock(left);
2722 btrfs_set_lock_blocking(left);
2724 free_space = btrfs_leaf_free_space(root, left);
2725 if (free_space < data_size) {
2730 /* cow and double check */
2731 ret = btrfs_cow_block(trans, root, left,
2732 path->nodes[1], slot - 1, &left);
2734 /* we hit -ENOSPC, but it isn't fatal here */
2739 free_space = btrfs_leaf_free_space(root, left);
2740 if (free_space < data_size) {
2745 return __push_leaf_left(trans, root, path, data_size,
2746 empty, left, free_space, right_nritems);
2748 btrfs_tree_unlock(left);
2749 free_extent_buffer(left);
2754 * helper for split_leaf: copy the items from 'mid' onwards out of leaf 'l'
2755 * into the new leaf 'right', link 'right' into the parent and fix up the path.
2757 * returns 0 if all went well and < 0 on failure.
2759 static noinline int copy_for_split(struct btrfs_trans_handle *trans,
2760 struct btrfs_root *root,
2761 struct btrfs_path *path,
2762 struct extent_buffer *l,
2763 struct extent_buffer *right,
2764 int slot, int mid, int nritems)
2771 struct btrfs_disk_key disk_key;
2773 nritems = nritems - mid;
2774 btrfs_set_header_nritems(right, nritems);
2775 data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);
2777 copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
2778 btrfs_item_nr_offset(mid),
2779 nritems * sizeof(struct btrfs_item));
2781 copy_extent_buffer(right, l,
2782 btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
2783 data_copy_size, btrfs_leaf_data(l) +
2784 leaf_data_end(root, l), data_copy_size);
2786 rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
2787 btrfs_item_end_nr(l, mid);
2789 for (i = 0; i < nritems; i++) {
2790 struct btrfs_item *item = btrfs_item_nr(right, i);
2793 if (!right->map_token) {
2794 map_extent_buffer(right, (unsigned long)item,
2795 sizeof(struct btrfs_item),
2796 &right->map_token, &right->kaddr,
2797 &right->map_start, &right->map_len,
2801 ioff = btrfs_item_offset(right, item);
2802 btrfs_set_item_offset(right, item, ioff + rt_data_off);
2805 if (right->map_token) {
2806 unmap_extent_buffer(right, right->map_token, KM_USER1);
2807 right->map_token = NULL;
2810 btrfs_set_header_nritems(l, mid);
2812 btrfs_item_key(right, &disk_key, 0);
2813 wret = insert_ptr(trans, root, path, &disk_key, right->start,
2814 path->slots[1] + 1, 1);
2818 btrfs_mark_buffer_dirty(right);
2819 btrfs_mark_buffer_dirty(l);
2820 BUG_ON(path->slots[0] != slot);
2823 btrfs_tree_unlock(path->nodes[0]);
2824 free_extent_buffer(path->nodes[0]);
2825 path->nodes[0] = right;
2826 path->slots[0] -= mid;
2827 path->slots[1] += 1;
2829 btrfs_tree_unlock(right);
2830 free_extent_buffer(right);
2833 BUG_ON(path->slots[0] < 0);
2839 * split the path's leaf in two, making sure there is at least data_size
2840 * available for the resulting leaf level of the path.
2842 * returns 0 if all went well and < 0 on failure.
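 *
 * As a rough illustration of the default split point: with nritems == 25
 * items in the leaf, mid becomes (25 + 1) / 2 == 13, so the original leaf
 * keeps items 0..12 and the new right leaf receives items 13..24.  Whether
 * the path ends up pointing into the old or the new leaf depends on which
 * side of mid the target slot falls; the space checks below may move the
 * split point when a half would not leave data_size bytes free.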
2844 static noinline int split_leaf(struct btrfs_trans_handle *trans,
2845 struct btrfs_root *root,
2846 struct btrfs_key *ins_key,
2847 struct btrfs_path *path, int data_size,
2850 struct btrfs_disk_key disk_key;
2851 struct extent_buffer *l;
2855 struct extent_buffer *right;
2859 int num_doubles = 0;
2862 slot = path->slots[0];
2863 if (extend && data_size + btrfs_item_size_nr(l, slot) +
2864 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root))
2867 /* first try to make some room by pushing left and right */
2868 if (data_size && ins_key->type != BTRFS_DIR_ITEM_KEY) {
2869 wret = push_leaf_right(trans, root, path, data_size, 0);
2873 wret = push_leaf_left(trans, root, path, data_size, 0);
2879 /* did the pushes work? */
2880 if (btrfs_leaf_free_space(root, l) >= data_size)
2884 if (!path->nodes[1]) {
2885 ret = insert_new_root(trans, root, path, 1);
2892 slot = path->slots[0];
2893 nritems = btrfs_header_nritems(l);
2894 mid = (nritems + 1) / 2;
2898 leaf_space_used(l, mid, nritems - mid) + data_size >
2899 BTRFS_LEAF_DATA_SIZE(root)) {
2900 if (slot >= nritems) {
2904 if (mid != nritems &&
2905 leaf_space_used(l, mid, nritems - mid) +
2906 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
2912 if (leaf_space_used(l, 0, mid) + data_size >
2913 BTRFS_LEAF_DATA_SIZE(root)) {
2914 if (!extend && data_size && slot == 0) {
2916 } else if ((extend || !data_size) && slot == 0) {
2920 if (mid != nritems &&
2921 leaf_space_used(l, mid, nritems - mid) +
2922 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
2930 btrfs_cpu_key_to_disk(&disk_key, ins_key);
2932 btrfs_item_key(l, &disk_key, mid);
2934 right = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
2935 root->root_key.objectid,
2936 &disk_key, 0, l->start, 0);
2937 if (IS_ERR(right)) {
2939 return PTR_ERR(right);
2942 memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
2943 btrfs_set_header_bytenr(right, right->start);
2944 btrfs_set_header_generation(right, trans->transid);
2945 btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
2946 btrfs_set_header_owner(right, root->root_key.objectid);
2947 btrfs_set_header_level(right, 0);
2948 write_extent_buffer(right, root->fs_info->fsid,
2949 (unsigned long)btrfs_header_fsid(right),
2952 write_extent_buffer(right, root->fs_info->chunk_tree_uuid,
2953 (unsigned long)btrfs_header_chunk_tree_uuid(right),
2958 btrfs_set_header_nritems(right, 0);
2959 wret = insert_ptr(trans, root, path,
2960 &disk_key, right->start,
2961 path->slots[1] + 1, 1);
2965 btrfs_tree_unlock(path->nodes[0]);
2966 free_extent_buffer(path->nodes[0]);
2967 path->nodes[0] = right;
2969 path->slots[1] += 1;
2971 btrfs_set_header_nritems(right, 0);
2972 wret = insert_ptr(trans, root, path,
2978 btrfs_tree_unlock(path->nodes[0]);
2979 free_extent_buffer(path->nodes[0]);
2980 path->nodes[0] = right;
2982 if (path->slots[1] == 0) {
2983 wret = fixup_low_keys(trans, root,
2984 path, &disk_key, 1);
2989 btrfs_mark_buffer_dirty(right);
2993 ret = copy_for_split(trans, root, path, l, right, slot, mid, nritems);
2997 BUG_ON(num_doubles != 0);
3005 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
3006 struct btrfs_root *root,
3007 struct btrfs_path *path, int ins_len)
3009 struct btrfs_key key;
3010 struct extent_buffer *leaf;
3011 struct btrfs_file_extent_item *fi;
3016 leaf = path->nodes[0];
3017 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3019 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
3020 key.type != BTRFS_EXTENT_CSUM_KEY);
3022 if (btrfs_leaf_free_space(root, leaf) >= ins_len)
3025 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3026 if (key.type == BTRFS_EXTENT_DATA_KEY) {
3027 fi = btrfs_item_ptr(leaf, path->slots[0],
3028 struct btrfs_file_extent_item);
3029 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
3031 btrfs_release_path(root, path);
3033 path->keep_locks = 1;
3034 path->search_for_split = 1;
3035 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
3036 path->search_for_split = 0;
3041 leaf = path->nodes[0];
3042 /* if our item isn't there or got smaller, return now */
3043 if (ret > 0 || item_size != btrfs_item_size_nr(leaf, path->slots[0]))
3046 if (key.type == BTRFS_EXTENT_DATA_KEY) {
3047 fi = btrfs_item_ptr(leaf, path->slots[0],
3048 struct btrfs_file_extent_item);
3049 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
3053 btrfs_set_path_blocking(path);
3054 ret = split_leaf(trans, root, &key, path, ins_len, 1);
3057 path->keep_locks = 0;
3058 btrfs_unlock_up_safe(path, 1);
3061 path->keep_locks = 0;
3065 static noinline int split_item(struct btrfs_trans_handle *trans,
3066 struct btrfs_root *root,
3067 struct btrfs_path *path,
3068 struct btrfs_key *new_key,
3069 unsigned long split_offset)
3071 struct extent_buffer *leaf;
3072 struct btrfs_item *item;
3073 struct btrfs_item *new_item;
3079 struct btrfs_disk_key disk_key;
3081 leaf = path->nodes[0];
3082 BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
3084 btrfs_set_path_blocking(path);
3086 item = btrfs_item_nr(leaf, path->slots[0]);
3087 orig_offset = btrfs_item_offset(leaf, item);
3088 item_size = btrfs_item_size(leaf, item);
3090 buf = kmalloc(item_size, GFP_NOFS);
3094 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
3095 path->slots[0]), item_size);
3097 slot = path->slots[0] + 1;
3098 nritems = btrfs_header_nritems(leaf);
3099 if (slot != nritems) {
3100 /* shift the items */
3101 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
3102 btrfs_item_nr_offset(slot),
3103 (nritems - slot) * sizeof(struct btrfs_item));
3106 btrfs_cpu_key_to_disk(&disk_key, new_key);
3107 btrfs_set_item_key(leaf, &disk_key, slot);
3109 new_item = btrfs_item_nr(leaf, slot);
3111 btrfs_set_item_offset(leaf, new_item, orig_offset);
3112 btrfs_set_item_size(leaf, new_item, item_size - split_offset);
3114 btrfs_set_item_offset(leaf, item,
3115 orig_offset + item_size - split_offset);
3116 btrfs_set_item_size(leaf, item, split_offset);
3118 btrfs_set_header_nritems(leaf, nritems + 1);
3120 /* write the data for the start of the original item */
3121 write_extent_buffer(leaf, buf,
3122 btrfs_item_ptr_offset(leaf, path->slots[0]),
3125 /* write the data for the new item */
3126 write_extent_buffer(leaf, buf + split_offset,
3127 btrfs_item_ptr_offset(leaf, slot),
3128 item_size - split_offset);
3129 btrfs_mark_buffer_dirty(leaf);
3131 BUG_ON(btrfs_leaf_free_space(root, leaf) < 0);
3137 * This function splits a single item into two items,
3138 * giving 'new_key' to the new item and splitting the
3139 * old one at split_offset (from the start of the item).
3141 * The path may be released by this operation. After
3142 * the split, the path is pointing to the old item. The
3143 * new item is going to be in the same node as the old one.
3145 * Note, the item being split must be small enough to live alone on
3146 * a tree block with room for one extra struct btrfs_item
3148 * This allows us to split the item in place, keeping a lock on the
3149 * leaf the entire time.
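 *
 * A rough usage sketch (hypothetical caller; the path already points at
 * the item and new_key/split_offset are assumed, error handling trimmed):
 *
 *	ret = btrfs_split_item(trans, root, path, &new_key, split_offset);
 *
 * on success the original item, still at path->slots[0], keeps its first
 * split_offset bytes, and the item in the next slot carries new_key and
 * the remaining bytes.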
3151 int btrfs_split_item(struct btrfs_trans_handle *trans,
3152 struct btrfs_root *root,
3153 struct btrfs_path *path,
3154 struct btrfs_key *new_key,
3155 unsigned long split_offset)
3158 ret = setup_leaf_for_split(trans, root, path,
3159 sizeof(struct btrfs_item));
3163 ret = split_item(trans, root, path, new_key, split_offset);
3168 * This function duplicates an item, giving 'new_key' to the new item.
3169 * It guarantees both items live in the same tree leaf and the new item
3170 * is contiguous with the original item.
3172 * This allows us to split a file extent in place, keeping a lock on the
3173 * leaf the entire time.
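 *
 * A rough usage sketch (hypothetical caller; the path already points at
 * the item to copy and new_key is assumed):
 *
 *	ret = btrfs_duplicate_item(trans, root, path, &new_key);
 *
 * on success path->slots[0] points at the new copy and the original item
 * sits in the slot just before it.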
3175 int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
3176 struct btrfs_root *root,
3177 struct btrfs_path *path,
3178 struct btrfs_key *new_key)
3180 struct extent_buffer *leaf;
3184 leaf = path->nodes[0];
3185 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3186 ret = setup_leaf_for_split(trans, root, path,
3187 item_size + sizeof(struct btrfs_item));
3192 ret = setup_items_for_insert(trans, root, path, new_key, &item_size,
3193 item_size, item_size +
3194 sizeof(struct btrfs_item), 1);
3197 leaf = path->nodes[0];
3198 memcpy_extent_buffer(leaf,
3199 btrfs_item_ptr_offset(leaf, path->slots[0]),
3200 btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
3206 * make the item pointed to by the path smaller. new_size indicates
3207 * how small to make it, and from_end tells us if we just chop bytes
3208 * off the end of the item or if we shift the item to chop bytes off the front.
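 *
 * For instance (illustrative numbers): a 64 byte item truncated to
 * new_size == 32 with from_end == 1 keeps its first 32 bytes; with
 * from_end == 0 the last 32 bytes are kept and the key's offset is bumped
 * by the 32 bytes chopped off the front.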
3211 int btrfs_truncate_item(struct btrfs_trans_handle *trans,
3212 struct btrfs_root *root,
3213 struct btrfs_path *path,
3214 u32 new_size, int from_end)
3219 struct extent_buffer *leaf;
3220 struct btrfs_item *item;
3222 unsigned int data_end;
3223 unsigned int old_data_start;
3224 unsigned int old_size;
3225 unsigned int size_diff;
3228 slot_orig = path->slots[0];
3229 leaf = path->nodes[0];
3230 slot = path->slots[0];
3232 old_size = btrfs_item_size_nr(leaf, slot);
3233 if (old_size == new_size)
3236 nritems = btrfs_header_nritems(leaf);
3237 data_end = leaf_data_end(root, leaf);
3239 old_data_start = btrfs_item_offset_nr(leaf, slot);
3241 size_diff = old_size - new_size;
3244 BUG_ON(slot >= nritems);
3247 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3249 /* first correct the data pointers */
3250 for (i = slot; i < nritems; i++) {
3252 item = btrfs_item_nr(leaf, i);
3254 if (!leaf->map_token) {
3255 map_extent_buffer(leaf, (unsigned long)item,
3256 sizeof(struct btrfs_item),
3257 &leaf->map_token, &leaf->kaddr,
3258 &leaf->map_start, &leaf->map_len,
3262 ioff = btrfs_item_offset(leaf, item);
3263 btrfs_set_item_offset(leaf, item, ioff + size_diff);
3266 if (leaf->map_token) {
3267 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
3268 leaf->map_token = NULL;
3271 /* shift the data */
3273 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3274 data_end + size_diff, btrfs_leaf_data(leaf) +
3275 data_end, old_data_start + new_size - data_end);
3277 struct btrfs_disk_key disk_key;
3280 btrfs_item_key(leaf, &disk_key, slot);
3282 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
3284 struct btrfs_file_extent_item *fi;
3286 fi = btrfs_item_ptr(leaf, slot,
3287 struct btrfs_file_extent_item);
3288 fi = (struct btrfs_file_extent_item *)(
3289 (unsigned long)fi - size_diff);
3291 if (btrfs_file_extent_type(leaf, fi) ==
3292 BTRFS_FILE_EXTENT_INLINE) {
3293 ptr = btrfs_item_ptr_offset(leaf, slot);
3294 memmove_extent_buffer(leaf, ptr,
3296 offsetof(struct btrfs_file_extent_item,
3301 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3302 data_end + size_diff, btrfs_leaf_data(leaf) +
3303 data_end, old_data_start - data_end);
3305 offset = btrfs_disk_key_offset(&disk_key);
3306 btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
3307 btrfs_set_item_key(leaf, &disk_key, slot);
3309 fixup_low_keys(trans, root, path, &disk_key, 1);
3312 item = btrfs_item_nr(leaf, slot);
3313 btrfs_set_item_size(leaf, item, new_size);
3314 btrfs_mark_buffer_dirty(leaf);
3317 if (btrfs_leaf_free_space(root, leaf) < 0) {
3318 btrfs_print_leaf(root, leaf);
3325 * make the item pointed to by the path bigger; data_size is the number of bytes to add.
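 *
 * As an illustration: an item that is old_size bytes long ends up
 * old_size + data_size bytes long; its existing contents stay at the start
 * of the enlarged item and the data_size new bytes at the end are left for
 * the caller to write.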
3327 int btrfs_extend_item(struct btrfs_trans_handle *trans,
3328 struct btrfs_root *root, struct btrfs_path *path,
3334 struct extent_buffer *leaf;
3335 struct btrfs_item *item;
3337 unsigned int data_end;
3338 unsigned int old_data;
3339 unsigned int old_size;
3342 slot_orig = path->slots[0];
3343 leaf = path->nodes[0];
3345 nritems = btrfs_header_nritems(leaf);
3346 data_end = leaf_data_end(root, leaf);
3348 if (btrfs_leaf_free_space(root, leaf) < data_size) {
3349 btrfs_print_leaf(root, leaf);
3352 slot = path->slots[0];
3353 old_data = btrfs_item_end_nr(leaf, slot);
3356 if (slot >= nritems) {
3357 btrfs_print_leaf(root, leaf);
3358 printk(KERN_CRIT "slot %d too large, nritems %d\n",
3364 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3366 /* first correct the data pointers */
3367 for (i = slot; i < nritems; i++) {
3369 item = btrfs_item_nr(leaf, i);
3371 if (!leaf->map_token) {
3372 map_extent_buffer(leaf, (unsigned long)item,
3373 sizeof(struct btrfs_item),
3374 &leaf->map_token, &leaf->kaddr,
3375 &leaf->map_start, &leaf->map_len,
3378 ioff = btrfs_item_offset(leaf, item);
3379 btrfs_set_item_offset(leaf, item, ioff - data_size);
3382 if (leaf->map_token) {
3383 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
3384 leaf->map_token = NULL;
3387 /* shift the data */
3388 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3389 data_end - data_size, btrfs_leaf_data(leaf) +
3390 data_end, old_data - data_end);
3392 data_end = old_data;
3393 old_size = btrfs_item_size_nr(leaf, slot);
3394 item = btrfs_item_nr(leaf, slot);
3395 btrfs_set_item_size(leaf, item, old_size + data_size);
3396 btrfs_mark_buffer_dirty(leaf);
3399 if (btrfs_leaf_free_space(root, leaf) < 0) {
3400 btrfs_print_leaf(root, leaf);
3407 * Given a key and some data, insert items into the tree.
3408 * This does all the path init required, making room in the tree if needed.
3409 * Returns the number of keys that were inserted.
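 *
 * Roughly: if asked to insert keys A < B < C and the insertion point in
 * the leaf already holds a key D with B < D <= C, only A and B are
 * inserted and 2 is returned; similarly only the first key is inserted
 * when the insertion point is at the very end of the leaf, since the next
 * leaf can't be examined without dropping the current locks.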
3411 int btrfs_insert_some_items(struct btrfs_trans_handle *trans,
3412 struct btrfs_root *root,
3413 struct btrfs_path *path,
3414 struct btrfs_key *cpu_key, u32 *data_size,
3417 struct extent_buffer *leaf;
3418 struct btrfs_item *item;
3425 unsigned int data_end;
3426 struct btrfs_disk_key disk_key;
3427 struct btrfs_key found_key;
3429 for (i = 0; i < nr; i++) {
3430 if (total_size + data_size[i] + sizeof(struct btrfs_item) >
3431 BTRFS_LEAF_DATA_SIZE(root)) {
3435 total_data += data_size[i];
3436 total_size += data_size[i] + sizeof(struct btrfs_item);
3440 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
3446 leaf = path->nodes[0];
3448 nritems = btrfs_header_nritems(leaf);
3449 data_end = leaf_data_end(root, leaf);
3451 if (btrfs_leaf_free_space(root, leaf) < total_size) {
3452 for (i = nr; i >= 0; i--) {
3453 total_data -= data_size[i];
3454 total_size -= data_size[i] + sizeof(struct btrfs_item);
3455 if (total_size < btrfs_leaf_free_space(root, leaf))
3461 slot = path->slots[0];
3464 if (slot != nritems) {
3465 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
3467 item = btrfs_item_nr(leaf, slot);
3468 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3470 /* figure out how many keys we can insert in here */
3471 total_data = data_size[0];
3472 for (i = 1; i < nr; i++) {
3473 if (btrfs_comp_cpu_keys(&found_key, cpu_key + i) <= 0)
3475 total_data += data_size[i];
3479 if (old_data < data_end) {
3480 btrfs_print_leaf(root, leaf);
3481 printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
3482 slot, old_data, data_end);
3486 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3488 /* first correct the data pointers */
3489 WARN_ON(leaf->map_token);
3490 for (i = slot; i < nritems; i++) {
3493 item = btrfs_item_nr(leaf, i);
3494 if (!leaf->map_token) {
3495 map_extent_buffer(leaf, (unsigned long)item,
3496 sizeof(struct btrfs_item),
3497 &leaf->map_token, &leaf->kaddr,
3498 &leaf->map_start, &leaf->map_len,
3502 ioff = btrfs_item_offset(leaf, item);
3503 btrfs_set_item_offset(leaf, item, ioff - total_data);
3505 if (leaf->map_token) {
3506 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
3507 leaf->map_token = NULL;
3510 /* shift the items */
3511 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
3512 btrfs_item_nr_offset(slot),
3513 (nritems - slot) * sizeof(struct btrfs_item));
3515 /* shift the data */
3516 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3517 data_end - total_data, btrfs_leaf_data(leaf) +
3518 data_end, old_data - data_end);
3519 data_end = old_data;
3522 * this sucks but it has to be done: if we are inserting at
3523 * the end of the leaf, only insert 1 of the items, since we
3524 * have no way of knowing what's on the next leaf and we'd have
3525 * to drop our current locks to figure it out
3530 /* setup the item for the new data */
3531 for (i = 0; i < nr; i++) {
3532 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
3533 btrfs_set_item_key(leaf, &disk_key, slot + i);
3534 item = btrfs_item_nr(leaf, slot + i);
3535 btrfs_set_item_offset(leaf, item, data_end - data_size[i]);
3536 data_end -= data_size[i];
3537 btrfs_set_item_size(leaf, item, data_size[i]);
3539 btrfs_set_header_nritems(leaf, nritems + nr);
3540 btrfs_mark_buffer_dirty(leaf);
3544 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
3545 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
3548 if (btrfs_leaf_free_space(root, leaf) < 0) {
3549 btrfs_print_leaf(root, leaf);
3559 * this is a helper for btrfs_insert_empty_items; the main goal here is
3560 * to save stack depth by doing the bulk of the work in a function
3561 * that doesn't call btrfs_search_slot
3563 static noinline_for_stack int
3564 setup_items_for_insert(struct btrfs_trans_handle *trans,
3565 struct btrfs_root *root, struct btrfs_path *path,
3566 struct btrfs_key *cpu_key, u32 *data_size,
3567 u32 total_data, u32 total_size, int nr)
3569 struct btrfs_item *item;
3572 unsigned int data_end;
3573 struct btrfs_disk_key disk_key;
3575 struct extent_buffer *leaf;
3578 leaf = path->nodes[0];
3579 slot = path->slots[0];
3581 nritems = btrfs_header_nritems(leaf);
3582 data_end = leaf_data_end(root, leaf);
3584 if (btrfs_leaf_free_space(root, leaf) < total_size) {
3585 btrfs_print_leaf(root, leaf);
3586 printk(KERN_CRIT "not enough freespace need %u have %d\n",
3587 total_size, btrfs_leaf_free_space(root, leaf));
3591 if (slot != nritems) {
3592 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
3594 if (old_data < data_end) {
3595 btrfs_print_leaf(root, leaf);
3596 printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
3597 slot, old_data, data_end);
3601 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3603 /* first correct the data pointers */
3604 WARN_ON(leaf->map_token);
3605 for (i = slot; i < nritems; i++) {
3608 item = btrfs_item_nr(leaf, i);
3609 if (!leaf->map_token) {
3610 map_extent_buffer(leaf, (unsigned long)item,
3611 sizeof(struct btrfs_item),
3612 &leaf->map_token, &leaf->kaddr,
3613 &leaf->map_start, &leaf->map_len,
3617 ioff = btrfs_item_offset(leaf, item);
3618 btrfs_set_item_offset(leaf, item, ioff - total_data);
3620 if (leaf->map_token) {
3621 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
3622 leaf->map_token = NULL;
3625 /* shift the items */
3626 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
3627 btrfs_item_nr_offset(slot),
3628 (nritems - slot) * sizeof(struct btrfs_item));
3630 /* shift the data */
3631 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3632 data_end - total_data, btrfs_leaf_data(leaf) +
3633 data_end, old_data - data_end);
3634 data_end = old_data;
3637 /* setup the item for the new data */
3638 for (i = 0; i < nr; i++) {
3639 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
3640 btrfs_set_item_key(leaf, &disk_key, slot + i);
3641 item = btrfs_item_nr(leaf, slot + i);
3642 btrfs_set_item_offset(leaf, item, data_end - data_size[i]);
3643 data_end -= data_size[i];
3644 btrfs_set_item_size(leaf, item, data_size[i]);
3647 btrfs_set_header_nritems(leaf, nritems + nr);
3651 struct btrfs_disk_key disk_key;
3652 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
3653 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
3655 btrfs_unlock_up_safe(path, 1);
3656 btrfs_mark_buffer_dirty(leaf);
3658 if (btrfs_leaf_free_space(root, leaf) < 0) {
3659 btrfs_print_leaf(root, leaf);
3666 * Given keys and data sizes, reserve space and insert empty items into the
3667 * tree; the caller writes the item data afterwards. This does all the path
 * init required, making room in the tree if needed.
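 *
 * A rough single-item usage sketch (this is essentially what
 * btrfs_insert_item() below does; error handling trimmed):
 *
 *	path = btrfs_alloc_path();
 *	ret = btrfs_insert_empty_item(trans, root, path, &cpu_key, data_size);
 *	if (!ret) {
 *		leaf = path->nodes[0];
 *		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
 *		write_extent_buffer(leaf, data, ptr, data_size);
 *		btrfs_mark_buffer_dirty(leaf);
 *	}
 *	btrfs_free_path(path);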
3669 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
3670 struct btrfs_root *root,
3671 struct btrfs_path *path,
3672 struct btrfs_key *cpu_key, u32 *data_size,
3675 struct extent_buffer *leaf;
3682 for (i = 0; i < nr; i++)
3683 total_data += data_size[i];
3685 total_size = total_data + (nr * sizeof(struct btrfs_item));
3686 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
3692 leaf = path->nodes[0];
3693 slot = path->slots[0];
3696 ret = setup_items_for_insert(trans, root, path, cpu_key, data_size,
3697 total_data, total_size, nr);
3704 * Given a key and some data, insert an item into the tree.
3705 * This does all the path init required, making room in the tree if needed.
3707 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
3708 *root, struct btrfs_key *cpu_key, void *data, u32
3712 struct btrfs_path *path;
3713 struct extent_buffer *leaf;
3716 path = btrfs_alloc_path();
3718 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
3720 leaf = path->nodes[0];
3721 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
3722 write_extent_buffer(leaf, data, ptr, data_size);
3723 btrfs_mark_buffer_dirty(leaf);
3725 btrfs_free_path(path);
3730 * delete the pointer from a given node.
3732 * the tree should have been previously balanced so the deletion does not empty a node.
3735 static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3736 struct btrfs_path *path, int level, int slot)
3738 struct extent_buffer *parent = path->nodes[level];
3743 nritems = btrfs_header_nritems(parent);
3744 if (slot != nritems - 1) {
3745 memmove_extent_buffer(parent,
3746 btrfs_node_key_ptr_offset(slot),
3747 btrfs_node_key_ptr_offset(slot + 1),
3748 sizeof(struct btrfs_key_ptr) *
3749 (nritems - slot - 1));
3752 btrfs_set_header_nritems(parent, nritems);
3753 if (nritems == 0 && parent == root->node) {
3754 BUG_ON(btrfs_header_level(root->node) != 1);
3755 /* just turn the root into a leaf and break */
3756 btrfs_set_header_level(root->node, 0);
3757 } else if (slot == 0) {
3758 struct btrfs_disk_key disk_key;
3760 btrfs_node_key(parent, &disk_key, 0);
3761 wret = fixup_low_keys(trans, root, path, &disk_key, level + 1);
3765 btrfs_mark_buffer_dirty(parent);
3770 * a helper function to delete the leaf pointed to by path->slots[1] and path->nodes[1].
3773 * This deletes the pointer in path->nodes[1] and frees the leaf
3774 * block extent. zero is returned if it all worked out, < 0 otherwise.
3776 * The path must have already been setup for deleting the leaf, including
3777 * all the proper balancing. path->nodes[1] must be locked.
3779 static noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans,
3780 struct btrfs_root *root,
3781 struct btrfs_path *path,
3782 struct extent_buffer *leaf)
3786 WARN_ON(btrfs_header_generation(leaf) != trans->transid);
3787 ret = del_ptr(trans, root, path, 1, path->slots[1]);
3792 * btrfs_free_extent is expensive, we want to make sure we
3793 * aren't holding any locks when we call it
3795 btrfs_unlock_up_safe(path, 0);
3797 ret = btrfs_free_extent(trans, root, leaf->start, leaf->len,
3798 0, root->root_key.objectid, 0, 0);
3802 * delete the item at the leaf level in path. If that empties
3803 * the leaf, remove it from the tree
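 *
 * A rough caller sketch (hypothetical; the path is assumed to already point
 * at the first of 'nr_to_del' consecutive items to remove, e.g. after
 * btrfs_search_slot(), error handling trimmed):
 *
 *	ret = btrfs_del_items(trans, root, path, path->slots[0], nr_to_del);
 *	btrfs_release_path(root, path);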
3805 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3806 struct btrfs_path *path, int slot, int nr)
3808 struct extent_buffer *leaf;
3809 struct btrfs_item *item;
3817 leaf = path->nodes[0];
3818 last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
3820 for (i = 0; i < nr; i++)
3821 dsize += btrfs_item_size_nr(leaf, slot + i);
3823 nritems = btrfs_header_nritems(leaf);
3825 if (slot + nr != nritems) {
3826 int data_end = leaf_data_end(root, leaf);
3828 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3830 btrfs_leaf_data(leaf) + data_end,
3831 last_off - data_end);
3833 for (i = slot + nr; i < nritems; i++) {
3836 item = btrfs_item_nr(leaf, i);
3837 if (!leaf->map_token) {
3838 map_extent_buffer(leaf, (unsigned long)item,
3839 sizeof(struct btrfs_item),
3840 &leaf->map_token, &leaf->kaddr,
3841 &leaf->map_start, &leaf->map_len,
3844 ioff = btrfs_item_offset(leaf, item);
3845 btrfs_set_item_offset(leaf, item, ioff + dsize);
3848 if (leaf->map_token) {
3849 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
3850 leaf->map_token = NULL;
3853 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
3854 btrfs_item_nr_offset(slot + nr),
3855 sizeof(struct btrfs_item) *
3856 (nritems - slot - nr));
3858 btrfs_set_header_nritems(leaf, nritems - nr);
3861 /* delete the leaf if we've emptied it */
3863 if (leaf == root->node) {
3864 btrfs_set_header_level(leaf, 0);
3866 ret = btrfs_del_leaf(trans, root, path, leaf);
3870 int used = leaf_space_used(leaf, 0, nritems);
3872 struct btrfs_disk_key disk_key;
3874 btrfs_item_key(leaf, &disk_key, 0);
3875 wret = fixup_low_keys(trans, root, path,
3881 /* delete the leaf if it is mostly empty */
3882 if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) {
3883 /* push_leaf_left fixes the path.
3884 * make sure the path still points to our leaf
3885 * for possible call to del_ptr below
3887 slot = path->slots[1];
3888 extent_buffer_get(leaf);
3890 btrfs_set_path_blocking(path);
3891 wret = push_leaf_left(trans, root, path, 1, 1);
3892 if (wret < 0 && wret != -ENOSPC)
3895 if (path->nodes[0] == leaf &&
3896 btrfs_header_nritems(leaf)) {
3897 wret = push_leaf_right(trans, root, path, 1, 1);
3898 if (wret < 0 && wret != -ENOSPC)
3902 if (btrfs_header_nritems(leaf) == 0) {
3903 path->slots[1] = slot;
3904 ret = btrfs_del_leaf(trans, root, path, leaf);
3906 free_extent_buffer(leaf);
3908 /* if we're still in the path, make sure
3909 * we're dirty. Otherwise, one of the
3910 * push_leaf functions must have already
3911 * dirtied this buffer
3913 if (path->nodes[0] == leaf)
3914 btrfs_mark_buffer_dirty(leaf);
3915 free_extent_buffer(leaf);
3918 btrfs_mark_buffer_dirty(leaf);
3925 * search the tree again to find a leaf with lesser keys
3926 * returns 0 if it found something or 1 if there are no lesser leaves.
3927 * returns < 0 on io errors.
3929 * This may release the path, and so you may lose any locks held at the time you call it.
3932 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
3934 struct btrfs_key key;
3935 struct btrfs_disk_key found_key;
3938 btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
3942 else if (key.type > 0)
3944 else if (key.objectid > 0)
3949 btrfs_release_path(root, path);
3950 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3953 btrfs_item_key(path->nodes[0], &found_key, 0);
3954 ret = comp_keys(&found_key, &key);
3961 * A helper function to walk down the tree starting at min_key, and looking
3962 * for nodes or leaves that are either in cache or have a minimum
3963 * transaction id. This is used by the btree defrag code, and tree logging
3965 * This does not cow, but it does stuff the starting key it finds back
3966 * into min_key, so you can call btrfs_search_slot with cow=1 on the
3967 * key and get a writable path.
3969 * This does lock as it descends, and path->keep_locks should be set
3970 * to 1 by the caller.
3972 * This honors path->lowest_level to prevent descent past a given level of the tree.
3975 * min_trans indicates the oldest transaction that you are interested
3976 * in walking through. Any nodes or leaves older than min_trans are
3977 * skipped over (without reading them).
3979 * returns zero if something useful was found, < 0 on error and 1 if there
3980 * was nothing in the tree that matched the search criteria.
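 *
 * A rough scan-loop sketch (hypothetical caller; min_key and max_key are
 * assumed to bracket the range of interest, error handling trimmed):
 *
 *	path->keep_locks = 1;
 *	while (1) {
 *		ret = btrfs_search_forward(root, &min_key, &max_key, path,
 *					   cache_only, min_trans);
 *		if (ret)
 *			break;	(1 means nothing left, < 0 means error)
 *		(process the slot the path points to, then release the path
 *		 and advance min_key past the key that was just returned)
 *		btrfs_release_path(root, path);
 *	}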
3982 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
3983 struct btrfs_key *max_key,
3984 struct btrfs_path *path, int cache_only,
3987 struct extent_buffer *cur;
3988 struct btrfs_key found_key;
3995 WARN_ON(!path->keep_locks);
3997 cur = btrfs_lock_root_node(root);
3998 level = btrfs_header_level(cur);
3999 WARN_ON(path->nodes[level]);
4000 path->nodes[level] = cur;
4001 path->locks[level] = 1;
4003 if (btrfs_header_generation(cur) < min_trans) {
4008 nritems = btrfs_header_nritems(cur);
4009 level = btrfs_header_level(cur);
4010 sret = bin_search(cur, min_key, level, &slot);
4012 /* at the lowest level, we're done, setup the path and exit */
4013 if (level == path->lowest_level) {
4014 if (slot >= nritems)
4017 path->slots[level] = slot;
4018 btrfs_item_key_to_cpu(cur, &found_key, slot);
4021 if (sret && slot > 0)
4024 * check this node pointer against the cache_only and
4025 * min_trans parameters. If it isn't in cache or is too
4026 * old, skip to the next one.
4028 while (slot < nritems) {
4031 struct extent_buffer *tmp;
4032 struct btrfs_disk_key disk_key;
4034 blockptr = btrfs_node_blockptr(cur, slot);
4035 gen = btrfs_node_ptr_generation(cur, slot);
4036 if (gen < min_trans) {
4044 btrfs_node_key(cur, &disk_key, slot);
4045 if (comp_keys(&disk_key, max_key) >= 0) {
4051 tmp = btrfs_find_tree_block(root, blockptr,
4052 btrfs_level_size(root, level - 1));
4054 if (tmp && btrfs_buffer_uptodate(tmp, gen)) {
4055 free_extent_buffer(tmp);
4059 free_extent_buffer(tmp);
4064 * we didn't find a candidate key in this node, walk forward
4065 * and find another one
4067 if (slot >= nritems) {
4068 path->slots[level] = slot;
4069 btrfs_set_path_blocking(path);
4070 sret = btrfs_find_next_key(root, path, min_key, level,
4071 cache_only, min_trans);
4073 btrfs_release_path(root, path);
4079 /* save our key for returning to the caller */
4080 btrfs_node_key_to_cpu(cur, &found_key, slot);
4081 path->slots[level] = slot;
4082 if (level == path->lowest_level) {
4084 unlock_up(path, level, 1);
4087 btrfs_set_path_blocking(path);
4088 cur = read_node_slot(root, cur, slot);
4090 btrfs_tree_lock(cur);
4092 path->locks[level - 1] = 1;
4093 path->nodes[level - 1] = cur;
4094 unlock_up(path, level, 1);
4095 btrfs_clear_path_blocking(path, NULL);
4099 memcpy(min_key, &found_key, sizeof(found_key));
4100 btrfs_set_path_blocking(path);
4105 * this is similar to btrfs_next_leaf, but does not try to preserve
4106 * and fixup the path. It looks for and returns the next key in the
4107 * tree based on the current path and the cache_only and min_trans
4110 * 0 is returned if another key is found, < 0 if there are any errors
4111 * and 1 is returned if there are no higher keys in the tree
4113 * path->keep_locks should be set to 1 on the search made before
4114 * calling this function.
4116 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
4117 struct btrfs_key *key, int level,
4118 int cache_only, u64 min_trans)
4121 struct extent_buffer *c;
4123 WARN_ON(!path->keep_locks);
4124 while (level < BTRFS_MAX_LEVEL) {
4125 if (!path->nodes[level])
4128 slot = path->slots[level] + 1;
4129 c = path->nodes[level];
4131 if (slot >= btrfs_header_nritems(c)) {
4134 struct btrfs_key cur_key;
4135 if (level + 1 >= BTRFS_MAX_LEVEL ||
4136 !path->nodes[level + 1])
4139 if (path->locks[level + 1]) {
4144 slot = btrfs_header_nritems(c) - 1;
4146 btrfs_item_key_to_cpu(c, &cur_key, slot);
4148 btrfs_node_key_to_cpu(c, &cur_key, slot);
4150 orig_lowest = path->lowest_level;
4151 btrfs_release_path(root, path);
4152 path->lowest_level = level;
4153 ret = btrfs_search_slot(NULL, root, &cur_key, path,
4155 path->lowest_level = orig_lowest;
4159 c = path->nodes[level];
4160 slot = path->slots[level];
4167 btrfs_item_key_to_cpu(c, key, slot);
4169 u64 blockptr = btrfs_node_blockptr(c, slot);
4170 u64 gen = btrfs_node_ptr_generation(c, slot);
4173 struct extent_buffer *cur;
4174 cur = btrfs_find_tree_block(root, blockptr,
4175 btrfs_level_size(root, level - 1));
4176 if (!cur || !btrfs_buffer_uptodate(cur, gen)) {
4179 free_extent_buffer(cur);
4182 free_extent_buffer(cur);
4184 if (gen < min_trans) {
4188 btrfs_node_key_to_cpu(c, key, slot);
4196 * search the tree again to find a leaf with greater keys
4197 * returns 0 if it found something or 1 if there are no greater leaves.
4198 * returns < 0 on io errors.
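 *
 * A rough forward-scan sketch (hypothetical caller; 'key' is the start of
 * the range being walked, error handling trimmed):
 *
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	while (ret >= 0) {
 *		leaf = path->nodes[0];
 *		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
 *			ret = btrfs_next_leaf(root, path);
 *			if (ret)
 *				break;	(1 means no more leaves)
 *			continue;
 *		}
 *		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
 *		(process the item, stop once key is past the range)
 *		path->slots[0]++;
 *	}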
4200 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
4204 struct extent_buffer *c;
4205 struct extent_buffer *next;
4206 struct btrfs_key key;
4209 int old_spinning = path->leave_spinning;
4210 int force_blocking = 0;
4212 nritems = btrfs_header_nritems(path->nodes[0]);
4217 * we take the blocks in an order that upsets lockdep. Using
4218 * blocking mode is the only way around it.
4220 #ifdef CONFIG_DEBUG_LOCK_ALLOC
4224 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
4228 btrfs_release_path(root, path);
4230 path->keep_locks = 1;
4232 if (!force_blocking)
4233 path->leave_spinning = 1;
4235 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4236 path->keep_locks = 0;
4241 nritems = btrfs_header_nritems(path->nodes[0]);
4243 * by releasing the path above we dropped all our locks. A balance
4244 * could have added more items next to the key that used to be
4245 * at the very end of the block. So, check again here and
4246 * advance the path if there are now more items available.
4248 if (nritems > 0 && path->slots[0] < nritems - 1) {
4255 while (level < BTRFS_MAX_LEVEL) {
4256 if (!path->nodes[level]) {
4261 slot = path->slots[level] + 1;
4262 c = path->nodes[level];
4263 if (slot >= btrfs_header_nritems(c)) {
4265 if (level == BTRFS_MAX_LEVEL) {
4273 btrfs_tree_unlock(next);
4274 free_extent_buffer(next);
4278 ret = read_block_for_search(NULL, root, path, &next, level,
4284 btrfs_release_path(root, path);
4288 if (!path->skip_locking) {
4289 ret = btrfs_try_spin_lock(next);
4291 btrfs_set_path_blocking(path);
4292 btrfs_tree_lock(next);
4293 if (!force_blocking)
4294 btrfs_clear_path_blocking(path, next);
4297 btrfs_set_lock_blocking(next);
4301 path->slots[level] = slot;
4304 c = path->nodes[level];
4305 if (path->locks[level])
4306 btrfs_tree_unlock(c);
4308 free_extent_buffer(c);
4309 path->nodes[level] = next;
4310 path->slots[level] = 0;
4311 if (!path->skip_locking)
4312 path->locks[level] = 1;
4317 ret = read_block_for_search(NULL, root, path, &next, level,
4323 btrfs_release_path(root, path);
4327 if (!path->skip_locking) {
4328 btrfs_assert_tree_locked(path->nodes[level]);
4329 ret = btrfs_try_spin_lock(next);
4331 btrfs_set_path_blocking(path);
4332 btrfs_tree_lock(next);
4333 if (!force_blocking)
4334 btrfs_clear_path_blocking(path, next);
4337 btrfs_set_lock_blocking(next);
4342 unlock_up(path, 0, 1);
4343 path->leave_spinning = old_spinning;
4345 btrfs_set_path_blocking(path);
4351 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
4352 * searching until it gets past min_objectid or finds an item of 'type'
4354 * returns 0 if something is found, 1 if nothing was found and < 0 on error
4356 int btrfs_previous_item(struct btrfs_root *root,
4357 struct btrfs_path *path, u64 min_objectid,
4360 struct btrfs_key found_key;
4361 struct extent_buffer *leaf;
4366 if (path->slots[0] == 0) {
4367 btrfs_set_path_blocking(path);
4368 ret = btrfs_prev_leaf(root, path);
4374 leaf = path->nodes[0];
4375 nritems = btrfs_header_nritems(leaf);
4378 if (path->slots[0] == nritems)
4381 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4382 if (found_key.objectid < min_objectid)
4384 if (found_key.type == type)
4386 if (found_key.objectid == min_objectid &&
4387 found_key.type < type)