/*
 * Copyright (C) 2007,2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "locking.h"
static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *ins_key,
		      struct btrfs_path *path, int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *dst,
			  struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct extent_buffer *dst_buf,
			      struct extent_buffer *src_buf);
static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		   struct btrfs_path *path, int level, int slot);
static int setup_items_for_insert(struct btrfs_trans_handle *trans,
			struct btrfs_root *root, struct btrfs_path *path,
			struct btrfs_key *cpu_key, u32 *data_size,
			u32 total_data, u32 total_size, int nr);
struct btrfs_path *btrfs_alloc_path(void)
{
	struct btrfs_path *path;
	path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
	if (path)
		path->reada = 1;
	return path;
}
/*
 * set all locked nodes in the path to blocking locks.  This should
 * be done before scheduling
 */
noinline void btrfs_set_path_blocking(struct btrfs_path *p)
{
	int i;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		if (p->nodes[i] && p->locks[i])
			btrfs_set_lock_blocking(p->nodes[i]);
	}
}
/*
 * reset all the locked nodes in the path to spinning locks.
 *
 * held is used to keep lockdep happy, when lockdep is enabled
 * we set held to a blocking lock before we go around and
 * retake all the spinlocks in the path.  You can safely use NULL
 * for held
 */
noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
					struct extent_buffer *held)
{
	int i;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/* lockdep really cares that we take all of these spinlocks
	 * in the right order.  If any of the locks in the path are not
	 * currently blocking, it is going to complain.  So, make really
	 * really sure by forcing the path to blocking before we clear
	 * the path blocking.
	 */
	if (held)
		btrfs_set_lock_blocking(held);
	btrfs_set_path_blocking(p);
#endif

	for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
		if (p->nodes[i] && p->locks[i])
			btrfs_clear_lock_blocking(p->nodes[i]);
	}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (held)
		btrfs_clear_lock_blocking(held);
#endif
}
/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
	if (!p)
		return;
	btrfs_release_path(NULL, p);
	kmem_cache_free(btrfs_path_cachep, p);
}
/*
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path
 *
 * It is safe to call this on paths that have no locks or extent buffers held.
 */
noinline void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		p->slots[i] = 0;
		if (!p->nodes[i])
			continue;
		if (p->locks[i]) {
			btrfs_tree_unlock(p->nodes[i]);
			p->locks[i] = 0;
		}
		free_extent_buffer(p->nodes[i]);
		p->nodes[i] = NULL;
	}
}
/*
 * safely gets a reference on the root node of a tree.  A lock
 * is not taken, so a concurrent writer may put a different node
 * at the root of the tree.  See btrfs_lock_root_node for the
 * looping required.
 *
 * The extent buffer returned by this has a reference taken, so
 * it won't disappear.  It may stop being the root of the tree
 * at any time because there are no locks held.
 */
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	spin_lock(&root->node_lock);
	eb = root->node;
	extent_buffer_get(eb);
	spin_unlock(&root->node_lock);
	return eb;
}
/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_lock(eb);

		spin_lock(&root->node_lock);
		if (eb == root->node) {
			spin_unlock(&root->node_lock);
			break;
		}
		spin_unlock(&root->node_lock);

		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}
/* cowonly root (everything not a reference counted cow subvolume), just get
 * put onto a simple dirty list.  transaction.c walks this to make sure they
 * get properly updated on disk.
 */
static void add_root_to_dirty_list(struct btrfs_root *root)
{
	if (root->track_dirty && list_empty(&root->dirty_list)) {
		list_add(&root->dirty_list,
			 &root->fs_info->dirty_cowonly_roots);
	}
}
/*
 * used by snapshot creation to make a copy of a root for a tree with
 * a given objectid.  The buffer with the new root node is returned in
 * cow_ret, and this func returns zero on success or a negative error code.
 */
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root,
		      struct extent_buffer *buf,
		      struct extent_buffer **cow_ret, u64 new_root_objectid)
{
	struct extent_buffer *cow;
	int ret = 0;
	int level;
	struct btrfs_disk_key disk_key;

	WARN_ON(root->ref_cows && trans->transid !=
		root->fs_info->running_transaction->transid);
	WARN_ON(root->ref_cows && trans->transid != root->last_trans);

	level = btrfs_header_level(buf);
	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	cow = btrfs_alloc_free_block(trans, root, buf->len, 0,
				     new_root_objectid, &disk_key, level,
				     buf->start, 0);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	copy_extent_buffer(cow, buf, 0, 0, cow->len);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, new_root_objectid);

	write_extent_buffer(cow, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(cow),
			    BTRFS_FSID_SIZE);

	WARN_ON(btrfs_header_generation(buf) > trans->transid);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		ret = btrfs_inc_ref(trans, root, cow, 1);
	else
		ret = btrfs_inc_ref(trans, root, cow, 0);

	if (ret)
		return ret;

	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}
/*
 * check if the tree block can be shared by multiple trees
 */
int btrfs_block_can_be_shared(struct btrfs_root *root,
			      struct extent_buffer *buf)
{
	/*
	 * Tree blocks not in reference counted trees and tree roots
	 * are never shared. If a block was allocated after the last
	 * snapshot and the block was not allocated by tree relocation,
	 * we know the block is not shared.
	 */
	if (root->ref_cows &&
	    buf != root->node && buf != root->commit_root &&
	    (btrfs_header_generation(buf) <=
	     btrfs_root_last_snapshot(&root->root_item) ||
	     btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
		return 1;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (root->ref_cows &&
	    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
		return 1;
#endif
	return 0;
}
static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct extent_buffer *buf,
				       struct extent_buffer *cow,
				       int *last_ref)
{
	u64 refs;
	u64 owner;
	u64 flags;
	u64 new_flags = 0;
	int ret;

	/*
	 * Backrefs update rules:
	 *
	 * Always use full backrefs for extent pointers in tree block
	 * allocated by tree relocation.
	 *
	 * If a shared tree block is no longer referenced by its owner
	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
	 * use full backrefs for extent pointers in tree block.
	 *
	 * If a tree block is being relocated
	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
	 * use full backrefs for extent pointers in tree block.
	 * The reason for this is some operations (such as drop tree)
	 * are only allowed for blocks that use full backrefs.
	 */

	if (btrfs_block_can_be_shared(root, buf)) {
		ret = btrfs_lookup_extent_info(trans, root, buf->start,
					       buf->len, &refs, &flags);
		BUG_ON(ret);
		BUG_ON(refs == 0);
	} else {
		refs = 1;
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
		else
			flags = 0;
	}

	owner = btrfs_header_owner(buf);
	BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
	       !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));

	if (refs > 1) {
		if ((owner == root->root_key.objectid ||
		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
			ret = btrfs_inc_ref(trans, root, buf, 1);
			BUG_ON(ret);

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID) {
				ret = btrfs_dec_ref(trans, root, buf, 0);
				BUG_ON(ret);
				ret = btrfs_inc_ref(trans, root, cow, 1);
				BUG_ON(ret);
			}
			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
		} else {

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			BUG_ON(ret);
		}
		if (new_flags != 0) {
			ret = btrfs_set_disk_extent_flags(trans, root,
							  buf->start,
							  buf->len,
							  new_flags, 0);
			BUG_ON(ret);
		}
	} else {
		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			BUG_ON(ret);
			ret = btrfs_dec_ref(trans, root, buf, 1);
			BUG_ON(ret);
		}
		clean_tree_block(trans, root, buf);
		*last_ref = 1;
	}
	return 0;
}
/*
 * does the dirty work in cow of a single block.  The parent block (if
 * supplied) is updated to point to the new cow copy.  The new buffer is marked
 * dirty and returned locked.  If you modify the block it needs to be marked
 * dirty again.
 *
 * search_start -- an allocation hint for the new block
 *
 * empty_size -- a hint that you plan on doing more cow.  This is the size in
 * bytes the allocator should try to find free next to the block it returns.
 * This is just a hint and may be ignored by the allocator.
 */
static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct extent_buffer *buf,
			     struct extent_buffer *parent, int parent_slot,
			     struct extent_buffer **cow_ret,
			     u64 search_start, u64 empty_size)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *cow;
	int level;
	int last_ref = 0;
	int unlock_orig = 0;
	u64 parent_start;

	if (*cow_ret == buf)
		unlock_orig = 1;

	btrfs_assert_tree_locked(buf);

	WARN_ON(root->ref_cows && trans->transid !=
		root->fs_info->running_transaction->transid);
	WARN_ON(root->ref_cows && trans->transid != root->last_trans);

	level = btrfs_header_level(buf);

	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
		if (parent)
			parent_start = parent->start;
		else
			parent_start = 0;
	} else
		parent_start = 0;

	cow = btrfs_alloc_free_block(trans, root, buf->len, parent_start,
				     root->root_key.objectid, &disk_key,
				     level, search_start, empty_size);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	/* cow is set to blocking by btrfs_init_new_buffer */

	copy_extent_buffer(cow, buf, 0, 0, cow->len);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, root->root_key.objectid);

	write_extent_buffer(cow, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(cow),
			    BTRFS_FSID_SIZE);

	update_ref_for_cow(trans, root, buf, cow, &last_ref);

	if (root->ref_cows)
		btrfs_reloc_cow_block(trans, root, buf, cow);

	if (buf == root->node) {
		WARN_ON(parent && parent != buf);
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			parent_start = buf->start;
		else
			parent_start = 0;

		spin_lock(&root->node_lock);
		root->node = cow;
		extent_buffer_get(cow);
		spin_unlock(&root->node_lock);

		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
		free_extent_buffer(buf);
		add_root_to_dirty_list(root);
	} else {
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
			parent_start = parent->start;
		else
			parent_start = 0;

		WARN_ON(trans->transid != btrfs_header_generation(parent));
		btrfs_set_node_blockptr(parent, parent_slot,
					cow->start);
		btrfs_set_node_ptr_generation(parent, parent_slot,
					      trans->transid);
		btrfs_mark_buffer_dirty(parent);
		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
	}
	if (unlock_orig)
		btrfs_tree_unlock(buf);
	free_extent_buffer(buf);
	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}
static inline int should_cow_block(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct extent_buffer *buf)
{
	if (btrfs_header_generation(buf) == trans->transid &&
	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
	    !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
		return 0;
	return 1;
}
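/*
 * Illustrative summary of the checks above: a block whose header
 * generation matches the running transaction and that has not been
 * flagged WRITTEN can still be modified in place, so no cow is
 * needed.  A block written out earlier in this transaction, or one
 * carrying the RELOC flag outside the relocation tree, must be
 * copied before it is changed.
 */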
/*
 * cows a single block, see __btrfs_cow_block for the real work.
 * This version of it has extra checks so that a block isn't cow'd more than
 * once per transaction, as long as it hasn't been written yet
 */
noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, struct extent_buffer *buf,
		    struct extent_buffer *parent, int parent_slot,
		    struct extent_buffer **cow_ret)
{
	u64 search_start;
	int ret;

	if (trans->transaction != root->fs_info->running_transaction) {
		printk(KERN_CRIT "trans %llu running %llu\n",
		       (unsigned long long)trans->transid,
		       (unsigned long long)
		       root->fs_info->running_transaction->transid);
		WARN_ON(1);
	}
	if (trans->transid != root->fs_info->generation) {
		printk(KERN_CRIT "trans %llu running %llu\n",
		       (unsigned long long)trans->transid,
		       (unsigned long long)root->fs_info->generation);
		WARN_ON(1);
	}

	if (!should_cow_block(trans, root, buf)) {
		*cow_ret = buf;
		return 0;
	}

	search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);

	if (parent)
		btrfs_set_lock_blocking(parent);
	btrfs_set_lock_blocking(buf);

	ret = __btrfs_cow_block(trans, root, buf, parent,
				 parent_slot, cow_ret, search_start, 0);
	return ret;
}
/*
 * helper function for defrag to decide if two blocks pointed to by a
 * node are actually close by
 */
static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
{
	if (blocknr < other && other - (blocknr + blocksize) < 32768)
		return 1;
	if (blocknr > other && blocknr - (other + blocksize) < 32768)
		return 1;
	return 0;
}
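/*
 * Worked example (illustrative): with a 4K blocksize, blocks at
 * bytenr 0 and bytenr 20480 are "close" because
 * 20480 - (0 + 4096) = 16384 < 32768.  Blocks at bytenr 0 and
 * 65536 are not, since 65536 - 4096 = 61440 >= 32768.
 */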
/*
 * compare two keys in a memcmp fashion
 */
static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
{
	struct btrfs_key k1;

	btrfs_disk_key_to_cpu(&k1, disk);

	return btrfs_comp_cpu_keys(&k1, k2);
}
/*
 * same as comp_keys only with two btrfs_key's
 */
int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
{
	if (k1->objectid > k2->objectid)
		return 1;
	if (k1->objectid < k2->objectid)
		return -1;
	if (k1->type > k2->type)
		return 1;
	if (k1->type < k2->type)
		return -1;
	if (k1->offset > k2->offset)
		return 1;
	if (k1->offset < k2->offset)
		return -1;
	return 0;
}
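/*
 * Keys sort by objectid, then type, then offset.  Worked example
 * (illustrative values): k1 = {256, 1, 0} sorts before
 * k2 = {256, 12, 0} because the objectids tie and 1 < 12, so
 * btrfs_comp_cpu_keys(&k1, &k2) returns -1.
 */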
/*
 * this is used by the defrag code to go through all the
 * leaves pointed to by a node and reallocate them so that
 * disk order is close to key order
 */
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct extent_buffer *parent,
		       int start_slot, int cache_only, u64 *last_ret,
		       struct btrfs_key *progress)
{
	struct extent_buffer *cur;
	u64 blocknr;
	u64 gen;
	u64 search_start = *last_ret;
	u64 last_block = 0;
	u64 other;
	u32 parent_nritems;
	int end_slot;
	int i;
	int err = 0;
	int parent_level;
	int uptodate;
	u32 blocksize;
	int progress_passed = 0;
	struct btrfs_disk_key disk_key;

	parent_level = btrfs_header_level(parent);
	if (cache_only && parent_level != 1)
		return 0;

	if (trans->transaction != root->fs_info->running_transaction)
		WARN_ON(1);
	if (trans->transid != root->fs_info->generation)
		WARN_ON(1);

	parent_nritems = btrfs_header_nritems(parent);
	blocksize = btrfs_level_size(root, parent_level - 1);
	end_slot = parent_nritems;

	if (parent_nritems == 1)
		return 0;

	btrfs_set_lock_blocking(parent);

	for (i = start_slot; i < end_slot; i++) {
		int close = 1;

		if (!parent->map_token) {
			map_extent_buffer(parent,
					btrfs_node_key_ptr_offset(i),
					sizeof(struct btrfs_key_ptr),
					&parent->map_token, &parent->kaddr,
					&parent->map_start, &parent->map_len,
					KM_USER1);
		}
		btrfs_node_key(parent, &disk_key, i);
		if (!progress_passed && comp_keys(&disk_key, progress) < 0)
			continue;

		progress_passed = 1;
		blocknr = btrfs_node_blockptr(parent, i);
		gen = btrfs_node_ptr_generation(parent, i);
		if (last_block == 0)
			last_block = blocknr;

		if (i > 0) {
			other = btrfs_node_blockptr(parent, i - 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (!close && i < end_slot - 2) {
			other = btrfs_node_blockptr(parent, i + 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (close) {
			last_block = blocknr;
			continue;
		}
		if (parent->map_token) {
			unmap_extent_buffer(parent, parent->map_token,
					    KM_USER1);
			parent->map_token = NULL;
		}

		cur = btrfs_find_tree_block(root, blocknr, blocksize);
		if (cur)
			uptodate = btrfs_buffer_uptodate(cur, gen);
		else
			uptodate = 0;
		if (!cur || !uptodate) {
			if (cache_only) {
				free_extent_buffer(cur);
				continue;
			}
			if (!cur) {
				cur = read_tree_block(root, blocknr,
							 blocksize, gen);
			} else if (!uptodate) {
				btrfs_read_buffer(cur, gen);
			}
		}
		if (search_start == 0)
			search_start = last_block;

		btrfs_tree_lock(cur);
		btrfs_set_lock_blocking(cur);
		err = __btrfs_cow_block(trans, root, cur, parent, i,
					&cur, search_start,
					min(16 * blocksize,
					    (end_slot - i) * blocksize));
		if (err) {
			btrfs_tree_unlock(cur);
			free_extent_buffer(cur);
			break;
		}
		search_start = cur->start;
		last_block = cur->start;
		*last_ret = search_start;
		btrfs_tree_unlock(cur);
		free_extent_buffer(cur);
	}
	if (parent->map_token) {
		unmap_extent_buffer(parent, parent->map_token,
				    KM_USER1);
		parent->map_token = NULL;
	}
	return err;
}
/*
 * The leaf data grows from end-to-front in the node.
 * this returns the address of the start of the last item,
 * which is the stop of the leaf data stack
 */
static inline unsigned int leaf_data_end(struct btrfs_root *root,
					 struct extent_buffer *leaf)
{
	u32 nr = btrfs_header_nritems(leaf);
	if (nr == 0)
		return BTRFS_LEAF_DATA_SIZE(root);
	return btrfs_item_offset_nr(leaf, nr - 1);
}
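/*
 * Sketch of the leaf layout this relies on (offsets grow left to
 * right):
 *
 *   [header][item 0][item 1]...[item N-1] .. free .. [data N-1]...[data 0]
 *
 * The item array grows toward the end of the block while the item
 * data grows from the end toward the front, so the data offset of
 * the last item marks where the free area stops.
 */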
/*
 * extra debugging checks to make sure all the items in a node are
 * well formed and in the proper order
 */
static int check_node(struct btrfs_root *root, struct btrfs_path *path,
		      int level)
{
	struct extent_buffer *parent = NULL;
	struct extent_buffer *node = path->nodes[level];
	struct btrfs_disk_key parent_key;
	struct btrfs_disk_key node_key;
	int parent_slot;
	int slot;
	struct btrfs_key cpukey;
	u32 nritems = btrfs_header_nritems(node);

	if (path->nodes[level + 1])
		parent = path->nodes[level + 1];

	slot = path->slots[level];
	BUG_ON(nritems == 0);
	if (parent) {
		parent_slot = path->slots[level + 1];
		btrfs_node_key(parent, &parent_key, parent_slot);
		btrfs_node_key(node, &node_key, 0);
		BUG_ON(memcmp(&parent_key, &node_key,
			      sizeof(struct btrfs_disk_key)));
		BUG_ON(btrfs_node_blockptr(parent, parent_slot) !=
		       btrfs_header_bytenr(node));
	}
	BUG_ON(nritems > BTRFS_NODEPTRS_PER_BLOCK(root));
	if (slot != 0) {
		btrfs_node_key_to_cpu(node, &cpukey, slot - 1);
		btrfs_node_key(node, &node_key, slot);
		BUG_ON(comp_keys(&node_key, &cpukey) <= 0);
	}
	if (slot < nritems - 1) {
		btrfs_node_key_to_cpu(node, &cpukey, slot + 1);
		btrfs_node_key(node, &node_key, slot);
		BUG_ON(comp_keys(&node_key, &cpukey) >= 0);
	}
	return 0;
}
/*
 * extra checking to make sure all the items in a leaf are
 * well formed and in the proper order
 */
static int check_leaf(struct btrfs_root *root, struct btrfs_path *path,
		      int level)
{
	struct extent_buffer *leaf = path->nodes[level];
	struct extent_buffer *parent = NULL;
	int parent_slot;
	struct btrfs_key cpukey;
	struct btrfs_disk_key parent_key;
	struct btrfs_disk_key leaf_key;
	int slot = path->slots[0];

	u32 nritems = btrfs_header_nritems(leaf);

	if (path->nodes[level + 1])
		parent = path->nodes[level + 1];

	if (nritems == 0)
		return 0;

	if (parent) {
		parent_slot = path->slots[level + 1];
		btrfs_node_key(parent, &parent_key, parent_slot);
		btrfs_item_key(leaf, &leaf_key, 0);

		BUG_ON(memcmp(&parent_key, &leaf_key,
		       sizeof(struct btrfs_disk_key)));
		BUG_ON(btrfs_node_blockptr(parent, parent_slot) !=
		       btrfs_header_bytenr(leaf));
	}
	if (slot != 0 && slot < nritems - 1) {
		btrfs_item_key(leaf, &leaf_key, slot);
		btrfs_item_key_to_cpu(leaf, &cpukey, slot - 1);
		if (comp_keys(&leaf_key, &cpukey) <= 0) {
			btrfs_print_leaf(root, leaf);
			printk(KERN_CRIT "slot %d offset bad key\n", slot);
			BUG_ON(1);
		}
		if (btrfs_item_offset_nr(leaf, slot - 1) !=
		       btrfs_item_end_nr(leaf, slot)) {
			btrfs_print_leaf(root, leaf);
			printk(KERN_CRIT "slot %d offset bad\n", slot);
			BUG_ON(1);
		}
	}
	if (slot < nritems - 1) {
		btrfs_item_key(leaf, &leaf_key, slot);
		btrfs_item_key_to_cpu(leaf, &cpukey, slot + 1);
		BUG_ON(comp_keys(&leaf_key, &cpukey) >= 0);
		if (btrfs_item_offset_nr(leaf, slot) !=
			btrfs_item_end_nr(leaf, slot + 1)) {
			btrfs_print_leaf(root, leaf);
			printk(KERN_CRIT "slot %d offset bad\n", slot);
			BUG_ON(1);
		}
	}
	BUG_ON(btrfs_item_offset_nr(leaf, 0) +
	       btrfs_item_size_nr(leaf, 0) != BTRFS_LEAF_DATA_SIZE(root));
	return 0;
}
static noinline int check_block(struct btrfs_root *root,
				struct btrfs_path *path, int level)
{
	return 0;
	if (level == 0)
		return check_leaf(root, path, level);
	return check_node(root, path, level);
}
/*
 * search for key in the extent_buffer.  The items start at offset p,
 * and they are item_size apart.  There are 'max' items in p.
 *
 * the slot in the array is returned via slot, and it points to
 * the place where you would insert key if it is not found in
 * the array.
 *
 * slot may point to max if the key is bigger than all of the keys
 */
static noinline int generic_bin_search(struct extent_buffer *eb,
				       unsigned long p,
				       int item_size, struct btrfs_key *key,
				       int max, int *slot)
{
	int low = 0;
	int high = max;
	int mid;
	int ret;
	struct btrfs_disk_key *tmp = NULL;
	struct btrfs_disk_key unaligned;
	unsigned long offset;
	char *map_token = NULL;
	char *kaddr = NULL;
	unsigned long map_start = 0;
	unsigned long map_len = 0;
	int err;

	while (low < high) {
		mid = (low + high) / 2;
		offset = p + mid * item_size;

		if (!map_token || offset < map_start ||
		    (offset + sizeof(struct btrfs_disk_key)) >
		    map_start + map_len) {
			if (map_token) {
				unmap_extent_buffer(eb, map_token, KM_USER0);
				map_token = NULL;
			}

			err = map_private_extent_buffer(eb, offset,
						sizeof(struct btrfs_disk_key),
						&map_token, &kaddr,
						&map_start, &map_len, KM_USER0);

			if (!err) {
				tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
			} else {
				read_extent_buffer(eb, &unaligned,
						   offset, sizeof(unaligned));
				tmp = &unaligned;
			}

		} else {
			tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
		}
		ret = comp_keys(tmp, key);

		if (ret < 0)
			low = mid + 1;
		else if (ret > 0)
			high = mid;
		else {
			*slot = mid;
			if (map_token)
				unmap_extent_buffer(eb, map_token, KM_USER0);
			return 0;
		}
	}
	*slot = low;
	if (map_token)
		unmap_extent_buffer(eb, map_token, KM_USER0);
	return 1;
}
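/*
 * Example of the return convention (illustrative): searching a node
 * whose keys are {4, 8, 15} for key 8 returns 0 with *slot == 1;
 * searching for key 9 returns 1 with *slot == 2, the position where
 * key 9 would be inserted.
 */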
/*
 * simple bin_search frontend that does the right thing for
 * leaves vs nodes
 */
static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
		      int level, int *slot)
{
	if (level == 0) {
		return generic_bin_search(eb,
					  offsetof(struct btrfs_leaf, items),
					  sizeof(struct btrfs_item),
					  key, btrfs_header_nritems(eb),
					  slot);
	} else {
		return generic_bin_search(eb,
					  offsetof(struct btrfs_node, ptrs),
					  sizeof(struct btrfs_key_ptr),
					  key, btrfs_header_nritems(eb),
					  slot);
	}
	return -1;
}

int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
		     int level, int *slot)
{
	return bin_search(eb, key, level, slot);
}
static void root_add_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) + size);
	spin_unlock(&root->accounting_lock);
}

static void root_sub_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) - size);
	spin_unlock(&root->accounting_lock);
}
/* given a node and slot number, this reads the blocks it points to.  The
 * extent buffer is returned with a reference taken (but unlocked).
 * NULL is returned on error.
 */
static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
				   struct extent_buffer *parent, int slot)
{
	int level = btrfs_header_level(parent);
	if (slot < 0)
		return NULL;
	if (slot >= btrfs_header_nritems(parent))
		return NULL;

	BUG_ON(level == 0);

	return read_tree_block(root, btrfs_node_blockptr(parent, slot),
		       btrfs_level_size(root, level - 1),
		       btrfs_node_ptr_generation(parent, slot));
}
/*
 * node level balancing, used to make sure nodes are in proper order for
 * item deletion.  We balance from the top down, so we have to make sure
 * that a deletion won't leave a node completely empty later on.
 */
static noinline int balance_level(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path, int level)
{
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];
	u64 orig_ptr;

	if (level == 0)
		return 0;

	mid = path->nodes[level];

	WARN_ON(!path->locks[level]);
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	orig_ptr = btrfs_node_blockptr(mid, orig_slot);

	if (level < BTRFS_MAX_LEVEL - 1)
		parent = path->nodes[level + 1];
	pslot = path->slots[level + 1];

	/*
	 * deal with the case where there is only one pointer in the root
	 * by promoting the node below to a root
	 */
	if (!parent) {
		struct extent_buffer *child;

		if (btrfs_header_nritems(mid) != 1)
			return 0;

		/* promote the child to a root */
		child = read_node_slot(root, mid, 0);
		BUG_ON(!child);
		btrfs_tree_lock(child);
		btrfs_set_lock_blocking(child);
		ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
		if (ret) {
			btrfs_tree_unlock(child);
			free_extent_buffer(child);
			goto enospc;
		}

		spin_lock(&root->node_lock);
		root->node = child;
		spin_unlock(&root->node_lock);

		add_root_to_dirty_list(root);
		btrfs_tree_unlock(child);

		path->locks[level] = 0;
		path->nodes[level] = NULL;
		clean_tree_block(trans, root, mid);
		btrfs_tree_unlock(mid);
		/* once for the path */
		free_extent_buffer(mid);

		root_sub_used(root, mid->len);
		btrfs_free_tree_block(trans, root, mid, 0, 1);
		/* once for the root ptr */
		free_extent_buffer(mid);
		return 0;
	}
	if (btrfs_header_nritems(mid) >
	    BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
		return 0;

	btrfs_header_nritems(mid);

	left = read_node_slot(root, parent, pslot - 1);
	if (left) {
		btrfs_tree_lock(left);
		btrfs_set_lock_blocking(left);
		wret = btrfs_cow_block(trans, root, left,
				       parent, pslot - 1, &left);
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}
	right = read_node_slot(root, parent, pslot + 1);
	if (right) {
		btrfs_tree_lock(right);
		btrfs_set_lock_blocking(right);
		wret = btrfs_cow_block(trans, root, right,
				       parent, pslot + 1, &right);
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}

	/* first, try to make some room in the middle buffer */
	if (left) {
		orig_slot += btrfs_header_nritems(left);
		wret = push_node_left(trans, root, left, mid, 1);
		if (wret < 0)
			ret = wret;
		btrfs_header_nritems(mid);
	}

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		wret = push_node_left(trans, root, mid, right, 1);
		if (wret < 0 && wret != -ENOSPC)
			ret = wret;
		if (btrfs_header_nritems(right) == 0) {
			clean_tree_block(trans, root, right);
			btrfs_tree_unlock(right);
			wret = del_ptr(trans, root, path, level + 1, pslot +
				       1);
			if (wret)
				ret = wret;
			root_sub_used(root, right->len);
			btrfs_free_tree_block(trans, root, right, 0, 1);
			free_extent_buffer(right);
			right = NULL;
		} else {
			struct btrfs_disk_key right_key;
			btrfs_node_key(right, &right_key, 0);
			btrfs_set_node_key(parent, &right_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);
		}
	}
	if (btrfs_header_nritems(mid) == 1) {
		/*
		 * we're not allowed to leave a node with one item in the
		 * tree during a delete.  A deletion from lower in the tree
		 * could try to delete the only pointer in this node.
		 * So, pull some keys from the left.
		 * There has to be a left pointer at this point because
		 * otherwise we would have pulled some pointers from the
		 * right
		 */
		BUG_ON(!left);
		wret = balance_node_right(trans, root, mid, left);
		if (wret < 0) {
			ret = wret;
			goto enospc;
		}
		if (wret == 1) {
			wret = push_node_left(trans, root, left, mid, 1);
			if (wret < 0)
				ret = wret;
		}
		BUG_ON(wret == 1);
	}
	if (btrfs_header_nritems(mid) == 0) {
		clean_tree_block(trans, root, mid);
		btrfs_tree_unlock(mid);
		wret = del_ptr(trans, root, path, level + 1, pslot);
		if (wret)
			ret = wret;
		root_sub_used(root, mid->len);
		btrfs_free_tree_block(trans, root, mid, 0, 1);
		free_extent_buffer(mid);
		mid = NULL;
	} else {
		/* update the parent key to reflect our changes */
		struct btrfs_disk_key mid_key;
		btrfs_node_key(mid, &mid_key, 0);
		btrfs_set_node_key(parent, &mid_key, pslot);
		btrfs_mark_buffer_dirty(parent);
	}

	/* update the path */
	if (left) {
		if (btrfs_header_nritems(left) > orig_slot) {
			extent_buffer_get(left);
			/* left was locked after cow */
			path->nodes[level] = left;
			path->slots[level + 1] -= 1;
			path->slots[level] = orig_slot;
			if (mid) {
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			}
		} else {
			orig_slot -= btrfs_header_nritems(left);
			path->slots[level] = orig_slot;
		}
	}
	/* double check we haven't messed things up */
	check_block(root, path, level);
	if (orig_ptr !=
	    btrfs_node_blockptr(path->nodes[level], path->slots[level]))
		BUG();
enospc:
	if (right) {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	if (left) {
		if (path->nodes[level] != left)
			btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}
	return ret;
}
/* Node balancing for insertion.  Here we only split or push nodes around
 * when they are completely full.  This is also done top down, so we
 * have to be pessimistic.
 */
static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path, int level)
{
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];

	if (level == 0)
		return 1;

	mid = path->nodes[level];
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	if (level < BTRFS_MAX_LEVEL - 1)
		parent = path->nodes[level + 1];
	pslot = path->slots[level + 1];

	if (!parent)
		return 1;

	left = read_node_slot(root, parent, pslot - 1);

	/* first, try to make some room in the middle buffer */
	if (left) {
		u32 left_nr;

		btrfs_tree_lock(left);
		btrfs_set_lock_blocking(left);

		left_nr = btrfs_header_nritems(left);
		if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, left, parent,
					      pslot - 1, &left);
			if (ret)
				wret = 1;
			else {
				wret = push_node_left(trans, root,
						      left, mid, 0);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;
			orig_slot += left_nr;
			btrfs_node_key(mid, &disk_key, 0);
			btrfs_set_node_key(parent, &disk_key, pslot);
			btrfs_mark_buffer_dirty(parent);
			if (btrfs_header_nritems(left) > orig_slot) {
				path->nodes[level] = left;
				path->slots[level + 1] -= 1;
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				orig_slot -=
					btrfs_header_nritems(left);
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(left);
				free_extent_buffer(left);
			}
			return 0;
		}
		btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}
	right = read_node_slot(root, parent, pslot + 1);

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		u32 right_nr;

		btrfs_tree_lock(right);
		btrfs_set_lock_blocking(right);

		right_nr = btrfs_header_nritems(right);
		if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, right,
					      parent, pslot + 1,
					      &right);
			if (ret)
				wret = 1;
			else {
				wret = balance_node_right(trans, root,
							  right, mid);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;

			btrfs_node_key(right, &disk_key, 0);
			btrfs_set_node_key(parent, &disk_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);

			if (btrfs_header_nritems(mid) <= orig_slot) {
				path->nodes[level] = right;
				path->slots[level + 1] += 1;
				path->slots[level] = orig_slot -
					btrfs_header_nritems(mid);
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				btrfs_tree_unlock(right);
				free_extent_buffer(right);
			}
			return 0;
		}
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	return 1;
}
/*
 * readahead one full node of leaves, finding things that are close
 * to the block in 'slot', and triggering ra on them.
 */
static void reada_for_search(struct btrfs_root *root,
			     struct btrfs_path *path,
			     int level, int slot, u64 objectid)
{
	struct extent_buffer *node;
	struct btrfs_disk_key disk_key;
	u32 nritems;
	u64 search;
	u64 target;
	u64 nread = 0;
	int direction = path->reada;
	struct extent_buffer *eb;
	u32 nr;
	u32 blocksize;
	u32 nscan = 0;

	if (level != 1)
		return;

	if (!path->nodes[level])
		return;

	node = path->nodes[level];

	search = btrfs_node_blockptr(node, slot);
	blocksize = btrfs_level_size(root, level - 1);
	eb = btrfs_find_tree_block(root, search, blocksize);
	if (eb) {
		free_extent_buffer(eb);
		return;
	}

	target = search;

	nritems = btrfs_header_nritems(node);
	nr = slot;
	while (1) {
		if (direction < 0) {
			if (nr == 0)
				break;
			nr--;
		} else if (direction > 0) {
			nr++;
			if (nr >= nritems)
				break;
		}
		if (path->reada < 0 && objectid) {
			btrfs_node_key(node, &disk_key, nr);
			if (btrfs_disk_key_objectid(&disk_key) != objectid)
				break;
		}
		search = btrfs_node_blockptr(node, nr);
		if ((search <= target && target - search <= 65536) ||
		    (search > target && search - target <= 65536)) {
			readahead_tree_block(root, search, blocksize,
				     btrfs_node_ptr_generation(node, nr));
			nread += blocksize;
		}
		nscan++;
		if ((nread > 65536 || nscan > 32))
			break;
	}
}
/*
 * returns -EAGAIN if it had to drop the path, or zero if everything was in
 * cache
 */
static noinline int reada_for_balance(struct btrfs_root *root,
				      struct btrfs_path *path, int level)
{
	int slot;
	int nritems;
	struct extent_buffer *parent;
	struct extent_buffer *eb;
	u64 gen;
	u64 block1 = 0;
	u64 block2 = 0;
	int ret = 0;
	int blocksize;

	parent = path->nodes[level + 1];
	if (!parent)
		return 0;

	nritems = btrfs_header_nritems(parent);
	slot = path->slots[level + 1];
	blocksize = btrfs_level_size(root, level);

	if (slot > 0) {
		block1 = btrfs_node_blockptr(parent, slot - 1);
		gen = btrfs_node_ptr_generation(parent, slot - 1);
		eb = btrfs_find_tree_block(root, block1, blocksize);
		if (eb && btrfs_buffer_uptodate(eb, gen))
			block1 = 0;
		free_extent_buffer(eb);
	}
	if (slot + 1 < nritems) {
		block2 = btrfs_node_blockptr(parent, slot + 1);
		gen = btrfs_node_ptr_generation(parent, slot + 1);
		eb = btrfs_find_tree_block(root, block2, blocksize);
		if (eb && btrfs_buffer_uptodate(eb, gen))
			block2 = 0;
		free_extent_buffer(eb);
	}
	if (block1 || block2) {
		ret = -EAGAIN;

		/* release the whole path */
		btrfs_release_path(root, path);

		/* read the blocks */
		if (block1)
			readahead_tree_block(root, block1, blocksize, 0);
		if (block2)
			readahead_tree_block(root, block2, blocksize, 0);

		if (block1) {
			eb = read_tree_block(root, block1, blocksize, 0);
			free_extent_buffer(eb);
		}
		if (block2) {
			eb = read_tree_block(root, block2, blocksize, 0);
			free_extent_buffer(eb);
		}
	}
	return ret;
}
/*
 * when we walk down the tree, it is usually safe to unlock the higher layers
 * in the tree.  The exceptions are when our path goes through slot 0, because
 * operations on the tree might require changing key pointers higher up in the
 * tree.
 *
 * callers might also have set path->keep_locks, which tells this code to keep
 * the lock if the path points to the last slot in the block.  This is part of
 * walking through the tree, and selecting the next slot in the higher block.
 *
 * lowest_unlock sets the lowest level in the tree we're allowed to unlock.  so
 * if lowest_unlock is 1, level 0 won't be unlocked
 */
static noinline void unlock_up(struct btrfs_path *path, int level,
			       int lowest_unlock)
{
	int i;
	int skip_level = level;
	int no_skips = 0;
	struct extent_buffer *t;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		if (!path->nodes[i])
			break;
		if (!path->locks[i])
			break;
		if (!no_skips && path->slots[i] == 0) {
			skip_level = i + 1;
			continue;
		}
		if (!no_skips && path->keep_locks) {
			u32 nritems;
			t = path->nodes[i];
			nritems = btrfs_header_nritems(t);
			if (nritems < 1 || path->slots[i] >= nritems - 1) {
				skip_level = i + 1;
				continue;
			}
		}
		if (skip_level < i && i >= lowest_unlock)
			no_skips = 1;

		t = path->nodes[i];
		if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
			btrfs_tree_unlock(t);
			path->locks[i] = 0;
		}
	}
}
/*
 * This releases any locks held in the path starting at level and
 * going all the way up to the root.
 *
 * btrfs_search_slot will keep the lock held on higher nodes in a few
 * corner cases, such as COW of the block at slot zero in the node.  This
 * ignores those rules, and it should only be called when there are no
 * more updates to be done higher up in the tree.
 */
noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
{
	int i;

	if (path->keep_locks)
		return;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		if (!path->nodes[i])
			continue;
		if (!path->locks[i])
			continue;
		btrfs_tree_unlock(path->nodes[i]);
		path->locks[i] = 0;
	}
}
/*
 * helper function for btrfs_search_slot.  The goal is to find a block
 * in cache without setting the path to blocking.  If we find the block
 * we return zero and the path is unchanged.
 *
 * If we can't find the block, we set the path blocking and do some
 * reada.  -EAGAIN is returned and the search must be repeated.
 */
static int
read_block_for_search(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *p,
		       struct extent_buffer **eb_ret, int level, int slot,
		       struct btrfs_key *key)
{
	u64 blocknr;
	u64 gen;
	u32 blocksize;
	struct extent_buffer *b = *eb_ret;
	struct extent_buffer *tmp;
	int ret;

	blocknr = btrfs_node_blockptr(b, slot);
	gen = btrfs_node_ptr_generation(b, slot);
	blocksize = btrfs_level_size(root, level - 1);

	tmp = btrfs_find_tree_block(root, blocknr, blocksize);
	if (tmp) {
		if (btrfs_buffer_uptodate(tmp, 0)) {
			if (btrfs_buffer_uptodate(tmp, gen)) {
				/*
				 * we found an up to date block without
				 * sleeping, return
				 * right away
				 */
				*eb_ret = tmp;
				return 0;
			}
			/* the pages were up to date, but we failed
			 * the generation number check.  Do a full
			 * read for the generation number that is correct.
			 * We must do this without dropping locks so
			 * we can trust our generation number
			 */
			free_extent_buffer(tmp);
			tmp = read_tree_block(root, blocknr, blocksize, gen);
			if (tmp && btrfs_buffer_uptodate(tmp, gen)) {
				*eb_ret = tmp;
				return 0;
			}
			free_extent_buffer(tmp);
			btrfs_release_path(NULL, p);
			return -EIO;
		}
	}

	/*
	 * reduce lock contention at high levels
	 * of the btree by dropping locks before
	 * we read.  Don't release the lock on the current
	 * level because we need to walk this node to figure
	 * out which blocks to read.
	 */
	btrfs_unlock_up_safe(p, level + 1);
	btrfs_set_path_blocking(p);

	free_extent_buffer(tmp);
	if (p->reada)
		reada_for_search(root, p, level, slot, key->objectid);

	btrfs_release_path(NULL, p);

	ret = -EAGAIN;
	tmp = read_tree_block(root, blocknr, blocksize, 0);
	if (tmp) {
		/*
		 * If the read above didn't mark this buffer up to date,
		 * it will never end up being up to date.  Set ret to EIO now
		 * and give up so that our caller doesn't loop forever
		 * on our EAGAINs.
		 */
		if (!btrfs_buffer_uptodate(tmp, 0))
			ret = -EIO;
		free_extent_buffer(tmp);
	}
	return ret;
}
/*
 * helper function for btrfs_search_slot.  This does all of the checks
 * for node-level blocks and does any balancing required based on
 * the ins_len.
 *
 * If no extra work was required, zero is returned.  If we had to
 * drop the path, -EAGAIN is returned and btrfs_search_slot must
 * start over
 */
static int
setup_nodes_for_search(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *p,
		       struct extent_buffer *b, int level, int ins_len)
{
	int ret;
	if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
	    BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
		int sret;

		sret = reada_for_balance(root, p, level);
		if (sret)
			goto again;

		btrfs_set_path_blocking(p);
		sret = split_node(trans, root, p, level);
		btrfs_clear_path_blocking(p, NULL);

		BUG_ON(sret > 0);
		if (sret) {
			ret = sret;
			goto done;
		}
		b = p->nodes[level];
	} else if (ins_len < 0 && btrfs_header_nritems(b) <
		   BTRFS_NODEPTRS_PER_BLOCK(root) / 2) {
		int sret;

		sret = reada_for_balance(root, p, level);
		if (sret)
			goto again;

		btrfs_set_path_blocking(p);
		sret = balance_level(trans, root, p, level);
		btrfs_clear_path_blocking(p, NULL);

		if (sret) {
			ret = sret;
			goto done;
		}
		b = p->nodes[level];
		if (!b) {
			btrfs_release_path(NULL, p);
			goto again;
		}
		BUG_ON(btrfs_header_nritems(b) == 1);
	}
	return 0;

again:
	ret = -EAGAIN;
done:
	return ret;
}
/*
 * look for key in the tree.  path is filled in with nodes along the way
 * if key is found, we return zero and you can find the item in the leaf
 * level of the path (level 0)
 *
 * If the key isn't found, the path points to the slot where it should
 * be inserted, and 1 is returned.  If there are other errors during the
 * search a negative error number is returned.
 *
 * if ins_len > 0, nodes and leaves will be split as we walk down the
 * tree.  if ins_len < 0, nodes will be merged as we walk down the tree (if
 * possible)
 */
int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *key, struct btrfs_path *p, int
		      ins_len, int cow)
{
	struct extent_buffer *b;
	int slot;
	int ret;
	int err;
	int level;
	int lowest_unlock = 1;
	u8 lowest_level = 0;

	lowest_level = p->lowest_level;
	WARN_ON(lowest_level && ins_len > 0);
	WARN_ON(p->nodes[0] != NULL);

	if (ins_len < 0)
		lowest_unlock = 2;

again:
	if (p->search_commit_root) {
		b = root->commit_root;
		extent_buffer_get(b);
		if (!p->skip_locking)
			btrfs_tree_lock(b);
	} else {
		if (p->skip_locking)
			b = btrfs_root_node(root);
		else
			b = btrfs_lock_root_node(root);
	}

	while (b) {
		level = btrfs_header_level(b);

		/*
		 * setup the path here so we can release it under lock
		 * contention with the cow code
		 */
		p->nodes[level] = b;
		if (!p->skip_locking)
			p->locks[level] = 1;

		if (cow) {
			/*
			 * if we don't really need to cow this block
			 * then we don't want to set the path blocking,
			 * so we test it here
			 */
			if (!should_cow_block(trans, root, b))
				goto cow_done;

			btrfs_set_path_blocking(p);

			err = btrfs_cow_block(trans, root, b,
					      p->nodes[level + 1],
					      p->slots[level + 1], &b);
			if (err) {
				ret = err;
				goto done;
			}
		}
cow_done:
		BUG_ON(!cow && ins_len);
		if (level != btrfs_header_level(b))
			WARN_ON(1);
		level = btrfs_header_level(b);

		p->nodes[level] = b;
		if (!p->skip_locking)
			p->locks[level] = 1;

		btrfs_clear_path_blocking(p, NULL);

		/*
		 * we have a lock on b and as long as we aren't changing
		 * the tree, there is no way for the items in b to change.
		 * It is safe to drop the lock on our parent before we
		 * go through the expensive btree search on b.
		 *
		 * If cow is true, then we might be changing slot zero,
		 * which may require changing the parent.  So, we can't
		 * drop the lock until after we know which slot we're
		 * operating on.
		 */
		if (!cow)
			btrfs_unlock_up_safe(p, level + 1);

		ret = check_block(root, p, level);
		if (ret) {
			ret = -1;
			goto done;
		}

		ret = bin_search(b, key, level, &slot);

		if (level != 0) {
			int dec = 0;
			if (ret && slot > 0) {
				dec = 1;
				slot -= 1;
			}
			p->slots[level] = slot;
			err = setup_nodes_for_search(trans, root, p, b, level,
						     ins_len);
			if (err == -EAGAIN)
				goto again;
			if (err) {
				ret = err;
				goto done;
			}
			b = p->nodes[level];
			slot = p->slots[level];

			unlock_up(p, level, lowest_unlock);

			if (level == lowest_level) {
				if (dec)
					p->slots[level]++;
				goto done;
			}

			err = read_block_for_search(trans, root, p,
						    &b, level, slot, key);
			if (err == -EAGAIN)
				goto again;
			if (err) {
				ret = err;
				goto done;
			}

			if (!p->skip_locking) {
				btrfs_clear_path_blocking(p, NULL);
				err = btrfs_try_spin_lock(b);

				if (!err) {
					btrfs_set_path_blocking(p);
					btrfs_tree_lock(b);
					btrfs_clear_path_blocking(p, b);
				}
			}
		} else {
			p->slots[level] = slot;
			if (ins_len > 0 &&
			    btrfs_leaf_free_space(root, b) < ins_len) {
				btrfs_set_path_blocking(p);
				err = split_leaf(trans, root, key,
						 p, ins_len, ret == 0);
				btrfs_clear_path_blocking(p, NULL);

				BUG_ON(err > 0);
				if (err) {
					ret = err;
					goto done;
				}
			}
			if (!p->search_for_split)
				unlock_up(p, level, lowest_unlock);
			goto done;
		}
	}
	ret = 1;
done:
	/*
	 * we don't really know what they plan on doing with the path
	 * from here on, so for now just mark it as blocking
	 */
	if (!p->leave_spinning)
		btrfs_set_path_blocking(p);
	if (ret < 0)
		btrfs_release_path(root, p);
	return ret;
}
/*
 * adjust the pointers going up the tree, starting at level
 * making sure the right key of each node points to 'key'.
 * This is used after shifting pointers to the left, so it stops
 * fixing up pointers when a given leaf/node is not in slot 0 of the
 * higher levels
 *
 * If this fails to write a tree block, it returns -1, but continues
 * fixing up the blocks in ram so the tree is consistent.
 */
static int fixup_low_keys(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct btrfs_path *path,
			  struct btrfs_disk_key *key, int level)
{
	int i;
	int ret = 0;
	struct extent_buffer *t;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		int tslot = path->slots[i];
		if (!path->nodes[i])
			break;
		t = path->nodes[i];
		btrfs_set_node_key(t, key, tslot);
		btrfs_mark_buffer_dirty(path->nodes[i]);
		if (tslot != 0)
			break;
	}
	return ret;
}
/*
 * update item key.
 *
 * This function isn't completely safe. It's the caller's responsibility
 * that the new key won't break the order
 */
int btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root, struct btrfs_path *path,
			    struct btrfs_key *new_key)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *eb;
	int slot;

	eb = path->nodes[0];
	slot = path->slots[0];
	if (slot > 0) {
		btrfs_item_key(eb, &disk_key, slot - 1);
		if (comp_keys(&disk_key, new_key) >= 0)
			return -1;
	}
	if (slot < btrfs_header_nritems(eb) - 1) {
		btrfs_item_key(eb, &disk_key, slot + 1);
		if (comp_keys(&disk_key, new_key) <= 0)
			return -1;
	}

	btrfs_cpu_key_to_disk(&disk_key, new_key);
	btrfs_set_item_key(eb, &disk_key, slot);
	btrfs_mark_buffer_dirty(eb);
	if (slot == 0)
		fixup_low_keys(trans, root, path, &disk_key, 1);
	return 0;
}
/*
 * try to push data from one node into the next node left in the
 * tree.
 *
 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
 * error, and > 0 if there was no room in the left hand block.
 */
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *dst,
			  struct extent_buffer *src, int empty)
{
	int push_items = 0;
	int src_nritems;
	int dst_nritems;
	int ret = 0;

	src_nritems = btrfs_header_nritems(src);
	dst_nritems = btrfs_header_nritems(dst);
	push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
	WARN_ON(btrfs_header_generation(src) != trans->transid);
	WARN_ON(btrfs_header_generation(dst) != trans->transid);

	if (!empty && src_nritems <= 8)
		return 1;

	if (push_items <= 0)
		return 1;

	if (empty) {
		push_items = min(src_nritems, push_items);
		if (push_items < src_nritems) {
			/* leave at least 8 pointers in the node if
			 * we aren't going to empty it
			 */
			if (src_nritems - push_items < 8) {
				if (push_items <= 8)
					return 1;
				push_items -= 8;
			}
		}
	} else
		push_items = min(src_nritems - 8, push_items);

	copy_extent_buffer(dst, src,
			   btrfs_node_key_ptr_offset(dst_nritems),
			   btrfs_node_key_ptr_offset(0),
			   push_items * sizeof(struct btrfs_key_ptr));

	if (push_items < src_nritems) {
		memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
				      btrfs_node_key_ptr_offset(push_items),
				      (src_nritems - push_items) *
				      sizeof(struct btrfs_key_ptr));
	}
	btrfs_set_header_nritems(src, src_nritems - push_items);
	btrfs_set_header_nritems(dst, dst_nritems + push_items);
	btrfs_mark_buffer_dirty(src);
	btrfs_mark_buffer_dirty(dst);

	return ret;
}
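/*
 * Example of the "leave at least 8" rule above (illustrative): with
 * empty == 0, src_nritems == 20 and room for 15 pointers in dst,
 * push_items becomes min(20 - 8, 15) == 12, so 8 pointers stay
 * behind in src.
 */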
/*
 * try to push data from one node into the next node right in the
 * tree.
 *
 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
 * error, and > 0 if there was no room in the right hand block.
 *
 * this will only push up to 1/2 the contents of the left node over
 */
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct extent_buffer *dst,
			      struct extent_buffer *src)
{
	int push_items = 0;
	int max_push;
	int src_nritems;
	int dst_nritems;
	int ret = 0;

	WARN_ON(btrfs_header_generation(src) != trans->transid);
	WARN_ON(btrfs_header_generation(dst) != trans->transid);

	src_nritems = btrfs_header_nritems(src);
	dst_nritems = btrfs_header_nritems(dst);
	push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
	if (push_items <= 0)
		return 1;

	if (src_nritems < 4)
		return 1;

	max_push = src_nritems / 2 + 1;
	/* don't try to empty the node */
	if (max_push >= src_nritems)
		return 1;

	if (max_push < push_items)
		push_items = max_push;

	memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
				      btrfs_node_key_ptr_offset(0),
				      (dst_nritems) *
				      sizeof(struct btrfs_key_ptr));

	copy_extent_buffer(dst, src,
			   btrfs_node_key_ptr_offset(0),
			   btrfs_node_key_ptr_offset(src_nritems - push_items),
			   push_items * sizeof(struct btrfs_key_ptr));

	btrfs_set_header_nritems(src, src_nritems - push_items);
	btrfs_set_header_nritems(dst, dst_nritems + push_items);

	btrfs_mark_buffer_dirty(src);
	btrfs_mark_buffer_dirty(dst);

	return ret;
}
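/*
 * Worked example (illustrative): src_nritems == 10 gives
 * max_push == 10 / 2 + 1 == 6, so at most 6 of the 10 pointers move
 * right even when dst has room for more; this path never empties
 * the source node.
 */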
/*
 * helper function to insert a new root level in the tree.
 * A new node is allocated, and a single item is inserted to
 * point to the existing root
 *
 * returns zero on success or < 0 on failure.
 */
static noinline int insert_new_root(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_path *path, int level)
{
	u64 lower_gen;
	struct extent_buffer *lower;
	struct extent_buffer *c;
	struct extent_buffer *old;
	struct btrfs_disk_key lower_key;

	BUG_ON(path->nodes[level]);
	BUG_ON(path->nodes[level-1] != root->node);

	lower = path->nodes[level-1];
	if (level == 1)
		btrfs_item_key(lower, &lower_key, 0);
	else
		btrfs_node_key(lower, &lower_key, 0);

	c = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
				   root->root_key.objectid, &lower_key,
				   level, root->node->start, 0);
	if (IS_ERR(c))
		return PTR_ERR(c);

	root_add_used(root, root->nodesize);

	memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
	btrfs_set_header_nritems(c, 1);
	btrfs_set_header_level(c, level);
	btrfs_set_header_bytenr(c, c->start);
	btrfs_set_header_generation(c, trans->transid);
	btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(c, root->root_key.objectid);

	write_extent_buffer(c, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(c),
			    BTRFS_FSID_SIZE);

	write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
			    (unsigned long)btrfs_header_chunk_tree_uuid(c),
			    BTRFS_UUID_SIZE);

	btrfs_set_node_key(c, &lower_key, 0);
	btrfs_set_node_blockptr(c, 0, lower->start);
	lower_gen = btrfs_header_generation(lower);
	WARN_ON(lower_gen != trans->transid);

	btrfs_set_node_ptr_generation(c, 0, lower_gen);

	btrfs_mark_buffer_dirty(c);

	spin_lock(&root->node_lock);
	old = root->node;
	root->node = c;
	spin_unlock(&root->node_lock);

	/* the super has an extra ref to root->node */
	free_extent_buffer(old);

	add_root_to_dirty_list(root);
	extent_buffer_get(c);
	path->nodes[level] = c;
	path->locks[level] = 1;
	path->slots[level] = 0;
	return 0;
}
/*
 * worker function to insert a single pointer in a node.
 * the node should have enough room for the pointer already
 *
 * slot and level indicate where you want the key to go, and
 * blocknr is the block the key points to.
 *
 * returns zero on success and < 0 on any error
 */
static int insert_ptr(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, struct btrfs_disk_key
		      *key, u64 bytenr, int slot, int level)
{
	struct extent_buffer *lower;
	int nritems;

	BUG_ON(!path->nodes[level]);
	btrfs_assert_tree_locked(path->nodes[level]);
	lower = path->nodes[level];
	nritems = btrfs_header_nritems(lower);
	BUG_ON(slot > nritems);
	if (nritems == BTRFS_NODEPTRS_PER_BLOCK(root))
		BUG();
	if (slot != nritems) {
		memmove_extent_buffer(lower,
			      btrfs_node_key_ptr_offset(slot + 1),
			      btrfs_node_key_ptr_offset(slot),
			      (nritems - slot) * sizeof(struct btrfs_key_ptr));
	}
	btrfs_set_node_key(lower, key, slot);
	btrfs_set_node_blockptr(lower, slot, bytenr);
	WARN_ON(trans->transid == 0);
	btrfs_set_node_ptr_generation(lower, slot, trans->transid);
	btrfs_set_header_nritems(lower, nritems + 1);
	btrfs_mark_buffer_dirty(lower);
	return 0;
}
/*
 * split the node at the specified level in path in two.
 * The path is corrected to point to the appropriate node after the split
 *
 * Before splitting this tries to make some room in the node by pushing
 * left and right, if either one works, it returns right away.
 *
 * returns 0 on success and < 0 on failure
 */
static noinline int split_node(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_path *path, int level)
{
	struct extent_buffer *c;
	struct extent_buffer *split;
	struct btrfs_disk_key disk_key;
	int mid;
	int ret;
	int wret;
	u32 c_nritems;

	c = path->nodes[level];
	WARN_ON(btrfs_header_generation(c) != trans->transid);
	if (c == root->node) {
		/* trying to split the root, lets make a new one */
		ret = insert_new_root(trans, root, path, level + 1);
		if (ret)
			return ret;
	} else {
		ret = push_nodes_for_insert(trans, root, path, level);
		c = path->nodes[level];
		if (!ret && btrfs_header_nritems(c) <
		    BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
			return 0;
		if (ret < 0)
			return ret;
	}

	c_nritems = btrfs_header_nritems(c);
	mid = (c_nritems + 1) / 2;
	btrfs_node_key(c, &disk_key, mid);

	split = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
					root->root_key.objectid,
					&disk_key, level, c->start, 0);
	if (IS_ERR(split))
		return PTR_ERR(split);

	root_add_used(root, root->nodesize);

	memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header));
	btrfs_set_header_level(split, btrfs_header_level(c));
	btrfs_set_header_bytenr(split, split->start);
	btrfs_set_header_generation(split, trans->transid);
	btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(split, root->root_key.objectid);
	write_extent_buffer(split, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(split),
			    BTRFS_FSID_SIZE);
	write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
			    (unsigned long)btrfs_header_chunk_tree_uuid(split),
			    BTRFS_UUID_SIZE);


	copy_extent_buffer(split, c,
			   btrfs_node_key_ptr_offset(0),
			   btrfs_node_key_ptr_offset(mid),
			   (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
	btrfs_set_header_nritems(split, c_nritems - mid);
	btrfs_set_header_nritems(c, mid);
	ret = 0;

	btrfs_mark_buffer_dirty(c);
	btrfs_mark_buffer_dirty(split);

	wret = insert_ptr(trans, root, path, &disk_key, split->start,
			  path->slots[level + 1] + 1,
			  level + 1);
	if (wret)
		ret = wret;

	if (path->slots[level] >= mid) {
		path->slots[level] -= mid;
		btrfs_tree_unlock(c);
		free_extent_buffer(c);
		path->nodes[level] = split;
		path->slots[level + 1] += 1;
	} else {
		btrfs_tree_unlock(split);
		free_extent_buffer(split);
	}
	return ret;
}
/*
 * how many bytes are required to store the items in a leaf.  start
 * and nr indicate which items in the leaf to check.  This totals up the
 * space used both by the item structs and the item data
 */
static int leaf_space_used(struct extent_buffer *l, int start, int nr)
{
	int data_len;
	int nritems = btrfs_header_nritems(l);
	int end = min(nritems, start + nr) - 1;

	if (!nr)
		return 0;
	data_len = btrfs_item_end_nr(l, start);
	data_len = data_len - btrfs_item_offset_nr(l, end);
	data_len += sizeof(struct btrfs_item) * nr;
	WARN_ON(data_len < 0);
	return data_len;
}
/*
 * The space between the end of the leaf items and
 * the start of the leaf data.  IOW, how much room
 * the leaf has left for both items and data
 */
noinline int btrfs_leaf_free_space(struct btrfs_root *root,
				   struct extent_buffer *leaf)
{
	int nritems = btrfs_header_nritems(leaf);
	int ret;
	ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
	if (ret < 0) {
		printk(KERN_CRIT "leaf free space ret %d, leaf data size %lu, "
		       "used %d nritems %d\n",
		       ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
		       leaf_space_used(leaf, 0, nritems), nritems);
	}
	return ret;
}
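/*
 * Worked example (illustrative, assuming a 4K leaf where
 * BTRFS_LEAF_DATA_SIZE comes to 3995 bytes and
 * sizeof(struct btrfs_item) == 25): two items with 25 bytes of data
 * each use 2 * 25 bytes of item headers plus 50 bytes of data, so
 * 3995 - 100 == 3895 bytes remain free.
 */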
/*
 * min slot controls the lowest index we're willing to push to the
 * right.  We'll push up to and including min_slot, but no lower
 */
static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      int data_size, int empty,
				      struct extent_buffer *right,
				      int free_space, u32 left_nritems,
				      u32 min_slot)
{
	struct extent_buffer *left = path->nodes[0];
	struct extent_buffer *upper = path->nodes[1];
	struct btrfs_disk_key disk_key;
	int slot;
	u32 i;
	int push_space = 0;
	int push_items = 0;
	struct btrfs_item *item;
	u32 nr;
	u32 right_nritems;
	u32 data_end;
	u32 this_item_size;

	if (empty)
		nr = 0;
	else
		nr = max_t(u32, 1, min_slot);

	if (path->slots[0] >= left_nritems)
		push_space += data_size;

	slot = path->slots[1];
	i = left_nritems - 1;
	while (i >= nr) {
		item = btrfs_item_nr(left, i);

		if (!empty && push_items > 0) {
			if (path->slots[0] > i)
				break;
			if (path->slots[0] == i) {
				int space = btrfs_leaf_free_space(root, left);
				if (space + push_space * 2 > free_space)
					break;
			}
		}

		if (path->slots[0] == i)
			push_space += data_size;

		if (!left->map_token) {
			map_extent_buffer(left, (unsigned long)item,
					sizeof(struct btrfs_item),
					&left->map_token, &left->kaddr,
					&left->map_start, &left->map_len,
					KM_USER1);
		}

		this_item_size = btrfs_item_size(left, item);
		if (this_item_size + sizeof(*item) + push_space > free_space)
			break;

		push_items++;
		push_space += this_item_size + sizeof(*item);
		if (i == 0)
			break;
		i--;
	}
	if (left->map_token) {
		unmap_extent_buffer(left, left->map_token, KM_USER1);
		left->map_token = NULL;
	}

	if (push_items == 0)
		goto out_unlock;

	if (!empty && push_items == left_nritems)
		WARN_ON(1);

	/* push left to right */
	right_nritems = btrfs_header_nritems(right);

	push_space = btrfs_item_end_nr(left, left_nritems - push_items);
	push_space -= leaf_data_end(root, left);

	/* make room in the right data area */
	data_end = leaf_data_end(root, right);
	memmove_extent_buffer(right,
			      btrfs_leaf_data(right) + data_end - push_space,
			      btrfs_leaf_data(right) + data_end,
			      BTRFS_LEAF_DATA_SIZE(root) - data_end);

	/* copy from the left data area */
	copy_extent_buffer(right, left, btrfs_leaf_data(right) +
		     BTRFS_LEAF_DATA_SIZE(root) - push_space,
		     btrfs_leaf_data(left) + leaf_data_end(root, left),
		     push_space);

	memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
			      btrfs_item_nr_offset(0),
			      right_nritems * sizeof(struct btrfs_item));

	/* copy the items from left to right */
	copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
		   btrfs_item_nr_offset(left_nritems - push_items),
		   push_items * sizeof(struct btrfs_item));

	/* update the item pointers */
	right_nritems += push_items;
	btrfs_set_header_nritems(right, right_nritems);
	push_space = BTRFS_LEAF_DATA_SIZE(root);
	for (i = 0; i < right_nritems; i++) {
		item = btrfs_item_nr(right, i);
		if (!right->map_token) {
			map_extent_buffer(right, (unsigned long)item,
					sizeof(struct btrfs_item),
					&right->map_token, &right->kaddr,
					&right->map_start, &right->map_len,
					KM_USER1);
		}
		push_space -= btrfs_item_size(right, item);
		btrfs_set_item_offset(right, item, push_space);
	}

	if (right->map_token) {
		unmap_extent_buffer(right, right->map_token, KM_USER1);
		right->map_token = NULL;
	}
	left_nritems -= push_items;
	btrfs_set_header_nritems(left, left_nritems);

	if (left_nritems)
		btrfs_mark_buffer_dirty(left);
	else
		clean_tree_block(trans, root, left);

	btrfs_mark_buffer_dirty(right);

	btrfs_item_key(right, &disk_key, 0);
	btrfs_set_node_key(upper, &disk_key, slot + 1);
	btrfs_mark_buffer_dirty(upper);

	/* then fixup the leaf pointer in the path */
	if (path->slots[0] >= left_nritems) {
		path->slots[0] -= left_nritems;
		if (btrfs_header_nritems(path->nodes[0]) == 0)
			clean_tree_block(trans, root, path->nodes[0]);
		btrfs_tree_unlock(path->nodes[0]);
		free_extent_buffer(path->nodes[0]);
		path->nodes[0] = right;
		path->slots[1] += 1;
	} else {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	return 0;

out_unlock:
	btrfs_tree_unlock(right);
	free_extent_buffer(right);
	return 1;
}
2486 * push some data in the path leaf to the right, trying to free up at
2487 * least data_size bytes.
2489 * returns 1 if the push failed because the other node didn't have enough
2490 * room, 0 if everything worked out and < 0 if there were major errors.
2492 * this will push starting from min_slot to the end of the leaf. It won't
2493 * push any slot lower than min_slot
2495 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
2496 *root, struct btrfs_path *path,
2497 int min_data_size, int data_size,
2498 int empty, u32 min_slot)
2500 struct extent_buffer *left = path->nodes[0];
2501 struct extent_buffer *right;
2502 struct extent_buffer *upper;
2508 if (!path->nodes[1])
2511 slot = path->slots[1];
2512 upper = path->nodes[1];
2513 if (slot >= btrfs_header_nritems(upper) - 1)
2516 btrfs_assert_tree_locked(path->nodes[1]);
2518 right = read_node_slot(root, upper, slot + 1);
2519 btrfs_tree_lock(right);
2520 btrfs_set_lock_blocking(right);
2522 free_space = btrfs_leaf_free_space(root, right);
2523 if (free_space < data_size)
2526 /* cow and double check */
2527 ret = btrfs_cow_block(trans, root, right, upper,
2532 free_space = btrfs_leaf_free_space(root, right);
2533 if (free_space < data_size)
2536 left_nritems = btrfs_header_nritems(left);
2537 if (left_nritems == 0)
2540 return __push_leaf_right(trans, root, path, min_data_size, empty,
2541 right, free_space, left_nritems, min_slot);
2543 btrfs_tree_unlock(right);
2544 free_extent_buffer(right);
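/*
 * A minimal caller sketch (an assumed pattern, mirroring split_leaf()
 * below): a return of 1 from push_leaf_right() only means the right
 * neighbor had no room, so the caller falls back rather than failing:
 *
 *	wret = push_leaf_right(trans, root, path, data_size,
 *			       data_size, 0, 0);
 *	if (wret < 0)
 *		return wret;
 *	if (wret == 1)
 *		... try push_leaf_left() or split the leaf ...
 */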
2549 * push some data in the path leaf to the left, trying to free up at
2550 * least data_size bytes. returns zero if the push worked, nonzero otherwise
2552 * max_slot can put a limit on how far into the leaf we'll push items. The
2553 * item at 'max_slot' won't be touched. Use (u32)-1 to make us do all the
2554 * items
2556 static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
2557 struct btrfs_root *root,
2558 struct btrfs_path *path, int data_size,
2559 int empty, struct extent_buffer *left,
2560 int free_space, u32 right_nritems,
2563 struct btrfs_disk_key disk_key;
2564 struct extent_buffer *right = path->nodes[0];
2568 struct btrfs_item *item;
2569 u32 old_left_nritems;
2574 u32 old_left_item_size;
2577 nr = min(right_nritems, max_slot);
2579 nr = min(right_nritems - 1, max_slot);
2581 for (i = 0; i < nr; i++) {
2582 item = btrfs_item_nr(right, i);
2583 if (!right->map_token) {
2584 map_extent_buffer(right, (unsigned long)item,
2585 sizeof(struct btrfs_item),
2586 &right->map_token, &right->kaddr,
2587 &right->map_start, &right->map_len,
2591 if (!empty && push_items > 0) {
2592 if (path->slots[0] < i)
2594 if (path->slots[0] == i) {
2595 int space = btrfs_leaf_free_space(root, right);
2596 if (space + push_space * 2 > free_space)
2601 if (path->slots[0] == i)
2602 push_space += data_size;
2604 this_item_size = btrfs_item_size(right, item);
2605 if (this_item_size + sizeof(*item) + push_space > free_space)
2609 push_space += this_item_size + sizeof(*item);
2612 if (right->map_token) {
2613 unmap_extent_buffer(right, right->map_token, KM_USER1);
2614 right->map_token = NULL;
2617 if (push_items == 0) {
2621 if (!empty && push_items == btrfs_header_nritems(right))
2624 /* push data from right to left */
2625 copy_extent_buffer(left, right,
2626 btrfs_item_nr_offset(btrfs_header_nritems(left)),
2627 btrfs_item_nr_offset(0),
2628 push_items * sizeof(struct btrfs_item));
2630 push_space = BTRFS_LEAF_DATA_SIZE(root) -
2631 btrfs_item_offset_nr(right, push_items - 1);
2633 copy_extent_buffer(left, right, btrfs_leaf_data(left) +
2634 leaf_data_end(root, left) - push_space,
2635 btrfs_leaf_data(right) +
2636 btrfs_item_offset_nr(right, push_items - 1),
2638 old_left_nritems = btrfs_header_nritems(left);
2639 BUG_ON(old_left_nritems <= 0);
2641 old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
2642 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
2645 item = btrfs_item_nr(left, i);
2646 if (!left->map_token) {
2647 map_extent_buffer(left, (unsigned long)item,
2648 sizeof(struct btrfs_item),
2649 &left->map_token, &left->kaddr,
2650 &left->map_start, &left->map_len,
2654 ioff = btrfs_item_offset(left, item);
2655 btrfs_set_item_offset(left, item,
2656 ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size));
2658 btrfs_set_header_nritems(left, old_left_nritems + push_items);
2659 if (left->map_token) {
2660 unmap_extent_buffer(left, left->map_token, KM_USER1);
2661 left->map_token = NULL;
2664 /* fixup right node */
2665 if (push_items > right_nritems) {
2666 printk(KERN_CRIT "push items %d nr %u\n", push_items,
2671 if (push_items < right_nritems) {
2672 push_space = btrfs_item_offset_nr(right, push_items - 1) -
2673 leaf_data_end(root, right);
2674 memmove_extent_buffer(right, btrfs_leaf_data(right) +
2675 BTRFS_LEAF_DATA_SIZE(root) - push_space,
2676 btrfs_leaf_data(right) +
2677 leaf_data_end(root, right), push_space);
2679 memmove_extent_buffer(right, btrfs_item_nr_offset(0),
2680 btrfs_item_nr_offset(push_items),
2681 (btrfs_header_nritems(right) - push_items) *
2682 sizeof(struct btrfs_item));
2684 right_nritems -= push_items;
2685 btrfs_set_header_nritems(right, right_nritems);
2686 push_space = BTRFS_LEAF_DATA_SIZE(root);
2687 for (i = 0; i < right_nritems; i++) {
2688 item = btrfs_item_nr(right, i);
2690 if (!right->map_token) {
2691 map_extent_buffer(right, (unsigned long)item,
2692 sizeof(struct btrfs_item),
2693 &right->map_token, &right->kaddr,
2694 &right->map_start, &right->map_len,
2698 push_space = push_space - btrfs_item_size(right, item);
2699 btrfs_set_item_offset(right, item, push_space);
2701 if (right->map_token) {
2702 unmap_extent_buffer(right, right->map_token, KM_USER1);
2703 right->map_token = NULL;
2706 btrfs_mark_buffer_dirty(left);
2708 btrfs_mark_buffer_dirty(right);
2710 clean_tree_block(trans, root, right);
2712 btrfs_item_key(right, &disk_key, 0);
2713 wret = fixup_low_keys(trans, root, path, &disk_key, 1);
2717 /* then fixup the leaf pointer in the path */
2718 if (path->slots[0] < push_items) {
2719 path->slots[0] += old_left_nritems;
2720 btrfs_tree_unlock(path->nodes[0]);
2721 free_extent_buffer(path->nodes[0]);
2722 path->nodes[0] = left;
2723 path->slots[1] -= 1;
2725 btrfs_tree_unlock(left);
2726 free_extent_buffer(left);
2727 path->slots[0] -= push_items;
2729 BUG_ON(path->slots[0] < 0);
2732 btrfs_tree_unlock(left);
2733 free_extent_buffer(left);
2738 * push some data in the path leaf to the left, trying to free up at
2739 * least data_size bytes. returns zero if the push worked, nonzero otherwise
2741 * max_slot can put a limit on how far into the leaf we'll push items. The
2742 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the
2743 * items
2745 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
2746 *root, struct btrfs_path *path, int min_data_size,
2747 int data_size, int empty, u32 max_slot)
2749 struct extent_buffer *right = path->nodes[0];
2750 struct extent_buffer *left;
2756 slot = path->slots[1];
2759 if (!path->nodes[1])
2762 right_nritems = btrfs_header_nritems(right);
2763 if (right_nritems == 0)
2766 btrfs_assert_tree_locked(path->nodes[1]);
2768 left = read_node_slot(root, path->nodes[1], slot - 1);
2769 btrfs_tree_lock(left);
2770 btrfs_set_lock_blocking(left);
2772 free_space = btrfs_leaf_free_space(root, left);
2773 if (free_space < data_size) {
2778 /* cow and double check */
2779 ret = btrfs_cow_block(trans, root, left,
2780 path->nodes[1], slot - 1, &left);
2782 /* we hit -ENOSPC, but it isn't fatal here */
2787 free_space = btrfs_leaf_free_space(root, left);
2788 if (free_space < data_size) {
2793 return __push_leaf_left(trans, root, path, min_data_size,
2794 empty, left, free_space, right_nritems,
2797 btrfs_tree_unlock(left);
2798 free_extent_buffer(left);
2803 * split the path's leaf in two, making sure there is at least data_size
2804 * available for the resulting leaf level of the path.
2806 * returns 0 if all went well and < 0 on failure.
2808 static noinline int copy_for_split(struct btrfs_trans_handle *trans,
2809 struct btrfs_root *root,
2810 struct btrfs_path *path,
2811 struct extent_buffer *l,
2812 struct extent_buffer *right,
2813 int slot, int mid, int nritems)
2820 struct btrfs_disk_key disk_key;
2822 nritems = nritems - mid;
2823 btrfs_set_header_nritems(right, nritems);
2824 data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);
2826 copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
2827 btrfs_item_nr_offset(mid),
2828 nritems * sizeof(struct btrfs_item));
2830 copy_extent_buffer(right, l,
2831 btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
2832 data_copy_size, btrfs_leaf_data(l) +
2833 leaf_data_end(root, l), data_copy_size);
2835 rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
2836 btrfs_item_end_nr(l, mid);
2838 for (i = 0; i < nritems; i++) {
2839 struct btrfs_item *item = btrfs_item_nr(right, i);
2842 if (!right->map_token) {
2843 map_extent_buffer(right, (unsigned long)item,
2844 sizeof(struct btrfs_item),
2845 &right->map_token, &right->kaddr,
2846 &right->map_start, &right->map_len,
2850 ioff = btrfs_item_offset(right, item);
2851 btrfs_set_item_offset(right, item, ioff + rt_data_off);
2854 if (right->map_token) {
2855 unmap_extent_buffer(right, right->map_token, KM_USER1);
2856 right->map_token = NULL;
2859 btrfs_set_header_nritems(l, mid);
2861 btrfs_item_key(right, &disk_key, 0);
2862 wret = insert_ptr(trans, root, path, &disk_key, right->start,
2863 path->slots[1] + 1, 1);
2867 btrfs_mark_buffer_dirty(right);
2868 btrfs_mark_buffer_dirty(l);
2869 BUG_ON(path->slots[0] != slot);
2872 btrfs_tree_unlock(path->nodes[0]);
2873 free_extent_buffer(path->nodes[0]);
2874 path->nodes[0] = right;
2875 path->slots[0] -= mid;
2876 path->slots[1] += 1;
2878 btrfs_tree_unlock(right);
2879 free_extent_buffer(right);
2882 BUG_ON(path->slots[0] < 0);
2888 * double splits happen when we need to insert a big item in the middle
2889 * of a leaf. A double split can leave us with 3 mostly empty leaves:
2890 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
2893 * We avoid this by trying to push the items on either side of our target
2894 * into the adjacent leaves. If all goes well we can avoid the double split
2895 * completely.
2897 static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
2898 struct btrfs_root *root,
2899 struct btrfs_path *path,
2907 slot = path->slots[0];
2910 * try to push all the items after our slot into the
2911 * next leaf
2913 ret = push_leaf_right(trans, root, path, 1, data_size, 0, slot);
2920 nritems = btrfs_header_nritems(path->nodes[0]);
2922 * our goal is to get our slot at the start or end of a leaf. If
2923 * we've done so we're done
2925 if (path->slots[0] == 0 || path->slots[0] == nritems)
2928 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
2931 /* try to push all the items before our slot into the next leaf */
2932 slot = path->slots[0];
2933 ret = push_leaf_left(trans, root, path, 1, data_size, 0, slot);
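/*
 * An illustration of the goal above (a sketch, not original text):
 * with the target in the middle,
 *
 *	[ items 0..slot-1 | target | items slot+1..N ]
 *
 * a straight split costs two splits and leaves three half-empty
 * leaves. After pushing into the neighbors the layout becomes
 *
 *	[ target | items ... ]   or   [ ... items | target ]
 *
 * and one ordinary split (or none, if enough space was freed) is
 * sufficient.
 */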
2946 * split the path's leaf in two, making sure there is at least data_size
2947 * available for the resulting leaf level of the path.
2949 * returns 0 if all went well and < 0 on failure.
2951 static noinline int split_leaf(struct btrfs_trans_handle *trans,
2952 struct btrfs_root *root,
2953 struct btrfs_key *ins_key,
2954 struct btrfs_path *path, int data_size,
2957 struct btrfs_disk_key disk_key;
2958 struct extent_buffer *l;
2962 struct extent_buffer *right;
2966 int num_doubles = 0;
2967 int tried_avoid_double = 0;
2970 slot = path->slots[0];
2971 if (extend && data_size + btrfs_item_size_nr(l, slot) +
2972 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root))
2975 /* first try to make some room by pushing left and right */
2977 wret = push_leaf_right(trans, root, path, data_size,
2982 wret = push_leaf_left(trans, root, path, data_size,
2983 data_size, 0, (u32)-1);
2989 /* did the pushes work? */
2990 if (btrfs_leaf_free_space(root, l) >= data_size)
2994 if (!path->nodes[1]) {
2995 ret = insert_new_root(trans, root, path, 1);
3002 slot = path->slots[0];
3003 nritems = btrfs_header_nritems(l);
3004 mid = (nritems + 1) / 2;
3008 leaf_space_used(l, mid, nritems - mid) + data_size >
3009 BTRFS_LEAF_DATA_SIZE(root)) {
3010 if (slot >= nritems) {
3014 if (mid != nritems &&
3015 leaf_space_used(l, mid, nritems - mid) +
3016 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
3017 if (data_size && !tried_avoid_double)
3018 goto push_for_double;
3024 if (leaf_space_used(l, 0, mid) + data_size >
3025 BTRFS_LEAF_DATA_SIZE(root)) {
3026 if (!extend && data_size && slot == 0) {
3028 } else if ((extend || !data_size) && slot == 0) {
3032 if (mid != nritems &&
3033 leaf_space_used(l, mid, nritems - mid) +
3034 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
3035 if (data_size && !tried_avoid_double)
3036 goto push_for_double;
3044 btrfs_cpu_key_to_disk(&disk_key, ins_key);
3046 btrfs_item_key(l, &disk_key, mid);
3048 right = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
3049 root->root_key.objectid,
3050 &disk_key, 0, l->start, 0);
3052 return PTR_ERR(right);
3054 root_add_used(root, root->leafsize);
3056 memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
3057 btrfs_set_header_bytenr(right, right->start);
3058 btrfs_set_header_generation(right, trans->transid);
3059 btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
3060 btrfs_set_header_owner(right, root->root_key.objectid);
3061 btrfs_set_header_level(right, 0);
3062 write_extent_buffer(right, root->fs_info->fsid,
3063 (unsigned long)btrfs_header_fsid(right),
3066 write_extent_buffer(right, root->fs_info->chunk_tree_uuid,
3067 (unsigned long)btrfs_header_chunk_tree_uuid(right),
3072 btrfs_set_header_nritems(right, 0);
3073 wret = insert_ptr(trans, root, path,
3074 &disk_key, right->start,
3075 path->slots[1] + 1, 1);
3079 btrfs_tree_unlock(path->nodes[0]);
3080 free_extent_buffer(path->nodes[0]);
3081 path->nodes[0] = right;
3083 path->slots[1] += 1;
3085 btrfs_set_header_nritems(right, 0);
3086 wret = insert_ptr(trans, root, path,
3092 btrfs_tree_unlock(path->nodes[0]);
3093 free_extent_buffer(path->nodes[0]);
3094 path->nodes[0] = right;
3096 if (path->slots[1] == 0) {
3097 wret = fixup_low_keys(trans, root,
3098 path, &disk_key, 1);
3103 btrfs_mark_buffer_dirty(right);
3107 ret = copy_for_split(trans, root, path, l, right, slot, mid, nritems);
3111 BUG_ON(num_doubles != 0);
3119 push_for_double_split(trans, root, path, data_size);
3120 tried_avoid_double = 1;
3121 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
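/*
 * A worked example of the split-point logic above (numbers are made
 * up): with nritems == 10, mid starts at (10 + 1) / 2 == 5. If items
 * [5, 10) plus the new data would still overflow
 * BTRFS_LEAF_DATA_SIZE(root), mid is pulled toward the insertion slot;
 * inserting at the very end (slot >= nritems) or the very front
 * (slot == 0) degenerates into handing the new item an empty leaf of
 * its own rather than copying anything.
 */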
3126 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
3127 struct btrfs_root *root,
3128 struct btrfs_path *path, int ins_len)
3130 struct btrfs_key key;
3131 struct extent_buffer *leaf;
3132 struct btrfs_file_extent_item *fi;
3137 leaf = path->nodes[0];
3138 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3140 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
3141 key.type != BTRFS_EXTENT_CSUM_KEY);
3143 if (btrfs_leaf_free_space(root, leaf) >= ins_len)
3146 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3147 if (key.type == BTRFS_EXTENT_DATA_KEY) {
3148 fi = btrfs_item_ptr(leaf, path->slots[0],
3149 struct btrfs_file_extent_item);
3150 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
3152 btrfs_release_path(root, path);
3154 path->keep_locks = 1;
3155 path->search_for_split = 1;
3156 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
3157 path->search_for_split = 0;
3162 leaf = path->nodes[0];
3163 /* if our item isn't there or got smaller, return now */
3164 if (ret > 0 || item_size != btrfs_item_size_nr(leaf, path->slots[0]))
3167 /* the leaf has changed, it now has room. return now */
3168 if (btrfs_leaf_free_space(root, path->nodes[0]) >= ins_len)
3171 if (key.type == BTRFS_EXTENT_DATA_KEY) {
3172 fi = btrfs_item_ptr(leaf, path->slots[0],
3173 struct btrfs_file_extent_item);
3174 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
3178 btrfs_set_path_blocking(path);
3179 ret = split_leaf(trans, root, &key, path, ins_len, 1);
3183 path->keep_locks = 0;
3184 btrfs_unlock_up_safe(path, 1);
3187 path->keep_locks = 0;
3191 static noinline int split_item(struct btrfs_trans_handle *trans,
3192 struct btrfs_root *root,
3193 struct btrfs_path *path,
3194 struct btrfs_key *new_key,
3195 unsigned long split_offset)
3197 struct extent_buffer *leaf;
3198 struct btrfs_item *item;
3199 struct btrfs_item *new_item;
3205 struct btrfs_disk_key disk_key;
3207 leaf = path->nodes[0];
3208 BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
3210 btrfs_set_path_blocking(path);
3212 item = btrfs_item_nr(leaf, path->slots[0]);
3213 orig_offset = btrfs_item_offset(leaf, item);
3214 item_size = btrfs_item_size(leaf, item);
3216 buf = kmalloc(item_size, GFP_NOFS);
3220 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
3221 path->slots[0]), item_size);
3223 slot = path->slots[0] + 1;
3224 nritems = btrfs_header_nritems(leaf);
3225 if (slot != nritems) {
3226 /* shift the items */
3227 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
3228 btrfs_item_nr_offset(slot),
3229 (nritems - slot) * sizeof(struct btrfs_item));
3232 btrfs_cpu_key_to_disk(&disk_key, new_key);
3233 btrfs_set_item_key(leaf, &disk_key, slot);
3235 new_item = btrfs_item_nr(leaf, slot);
3237 btrfs_set_item_offset(leaf, new_item, orig_offset);
3238 btrfs_set_item_size(leaf, new_item, item_size - split_offset);
3240 btrfs_set_item_offset(leaf, item,
3241 orig_offset + item_size - split_offset);
3242 btrfs_set_item_size(leaf, item, split_offset);
3244 btrfs_set_header_nritems(leaf, nritems + 1);
3246 /* write the data for the start of the original item */
3247 write_extent_buffer(leaf, buf,
3248 btrfs_item_ptr_offset(leaf, path->slots[0]),
3251 /* write the data for the new item */
3252 write_extent_buffer(leaf, buf + split_offset,
3253 btrfs_item_ptr_offset(leaf, slot),
3254 item_size - split_offset);
3255 btrfs_mark_buffer_dirty(leaf);
3257 BUG_ON(btrfs_leaf_free_space(root, leaf) < 0);
3263 * This function splits a single item into two items,
3264 * giving 'new_key' to the new item and splitting the
3265 * old one at split_offset (from the start of the item).
3267 * The path may be released by this operation. After
3268 * the split, the path is pointing to the old item. The
3269 * new item is going to be in the same node as the old one.
3271 * Note, the item being split must be small enough to live alone in
3272 * a tree block, with room for one extra struct btrfs_item
3274 * This allows us to split the item in place, keeping a lock on the
3275 * leaf the entire time.
3277 int btrfs_split_item(struct btrfs_trans_handle *trans,
3278 struct btrfs_root *root,
3279 struct btrfs_path *path,
3280 struct btrfs_key *new_key,
3281 unsigned long split_offset)
3284 ret = setup_leaf_for_split(trans, root, path,
3285 sizeof(struct btrfs_item));
3289 ret = split_item(trans, root, path, new_key, split_offset);
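/*
 * Example (a minimal sketch, assuming the path was positioned on the
 * item by a prior btrfs_search_slot() with cow == 1; the 100-byte
 * split point is made up, and how new_key.offset is derived from the
 * split point is item-type specific):
 *
 *	struct btrfs_key new_key;
 *
 *	btrfs_item_key_to_cpu(path->nodes[0], &new_key, path->slots[0]);
 *	new_key.offset += key_advance_for(100);	(hypothetical helper)
 *	ret = btrfs_split_item(trans, root, path, &new_key, 100);
 *
 * On success the path still points at the old (now 100 byte) item, and
 * the new item holding the remaining bytes sits next to it in the same
 * leaf.
 */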
3294 * This function duplicates an item, giving 'new_key' to the new item.
3295 * It guarantees both items live in the same tree leaf and the new item
3296 * is contiguous with the original item.
3298 * This allows us to split a file extent in place, keeping a lock on the
3299 * leaf the entire time.
3301 int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
3302 struct btrfs_root *root,
3303 struct btrfs_path *path,
3304 struct btrfs_key *new_key)
3306 struct extent_buffer *leaf;
3310 leaf = path->nodes[0];
3311 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3312 ret = setup_leaf_for_split(trans, root, path,
3313 item_size + sizeof(struct btrfs_item));
3318 ret = setup_items_for_insert(trans, root, path, new_key, &item_size,
3319 item_size, item_size +
3320 sizeof(struct btrfs_item), 1);
3323 leaf = path->nodes[0];
3324 memcpy_extent_buffer(leaf,
3325 btrfs_item_ptr_offset(leaf, path->slots[0]),
3326 btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
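/*
 * Example (a minimal sketch, assuming the path points at a file extent
 * item whose key is in 'key'; split_bytes is a made-up length). This
 * is the pattern used when splitting a file extent in two:
 *
 *	struct btrfs_key new_key = key;
 *
 *	new_key.offset = key.offset + split_bytes;
 *	ret = btrfs_duplicate_item(trans, root, path, &new_key);
 *	if (ret)
 *		goto out;
 *
 * On success path->slots[0] points at the new copy, one slot past the
 * original, and both items can then be trimmed to cover their halves.
 */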
3332 * make the item pointed to by the path smaller. new_size indicates
3333 * how small to make it, and from_end tells us if we just chop bytes
3334 * off the end of the item or if we shift the item to chop bytes off
3335 * the front.
3337 int btrfs_truncate_item(struct btrfs_trans_handle *trans,
3338 struct btrfs_root *root,
3339 struct btrfs_path *path,
3340 u32 new_size, int from_end)
3344 struct extent_buffer *leaf;
3345 struct btrfs_item *item;
3347 unsigned int data_end;
3348 unsigned int old_data_start;
3349 unsigned int old_size;
3350 unsigned int size_diff;
3353 leaf = path->nodes[0];
3354 slot = path->slots[0];
3356 old_size = btrfs_item_size_nr(leaf, slot);
3357 if (old_size == new_size)
3360 nritems = btrfs_header_nritems(leaf);
3361 data_end = leaf_data_end(root, leaf);
3363 old_data_start = btrfs_item_offset_nr(leaf, slot);
3365 size_diff = old_size - new_size;
3368 BUG_ON(slot >= nritems);
3371 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3373 /* first correct the data pointers */
3374 for (i = slot; i < nritems; i++) {
3376 item = btrfs_item_nr(leaf, i);
3378 if (!leaf->map_token) {
3379 map_extent_buffer(leaf, (unsigned long)item,
3380 sizeof(struct btrfs_item),
3381 &leaf->map_token, &leaf->kaddr,
3382 &leaf->map_start, &leaf->map_len,
3386 ioff = btrfs_item_offset(leaf, item);
3387 btrfs_set_item_offset(leaf, item, ioff + size_diff);
3390 if (leaf->map_token) {
3391 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
3392 leaf->map_token = NULL;
3395 /* shift the data */
3397 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3398 data_end + size_diff, btrfs_leaf_data(leaf) +
3399 data_end, old_data_start + new_size - data_end);
3401 struct btrfs_disk_key disk_key;
3404 btrfs_item_key(leaf, &disk_key, slot);
3406 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
3408 struct btrfs_file_extent_item *fi;
3410 fi = btrfs_item_ptr(leaf, slot,
3411 struct btrfs_file_extent_item);
3412 fi = (struct btrfs_file_extent_item *)(
3413 (unsigned long)fi - size_diff);
3415 if (btrfs_file_extent_type(leaf, fi) ==
3416 BTRFS_FILE_EXTENT_INLINE) {
3417 ptr = btrfs_item_ptr_offset(leaf, slot);
3418 memmove_extent_buffer(leaf, ptr,
3420 offsetof(struct btrfs_file_extent_item,
3425 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3426 data_end + size_diff, btrfs_leaf_data(leaf) +
3427 data_end, old_data_start - data_end);
3429 offset = btrfs_disk_key_offset(&disk_key);
3430 btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
3431 btrfs_set_item_key(leaf, &disk_key, slot);
3433 fixup_low_keys(trans, root, path, &disk_key, 1);
3436 item = btrfs_item_nr(leaf, slot);
3437 btrfs_set_item_size(leaf, item, new_size);
3438 btrfs_mark_buffer_dirty(leaf);
3441 if (btrfs_leaf_free_space(root, leaf) < 0) {
3442 btrfs_print_leaf(root, leaf);
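/*
 * Example (a minimal sketch; the 64-byte size is made up): shrinking
 * the current item to 64 payload bytes,
 *
 *	ret = btrfs_truncate_item(trans, root, path, 64, 1);
 *
 * with from_end == 1 the tail is chopped and the key is untouched;
 * with from_end == 0 the front is chopped and the key offset is moved
 * forward by the number of bytes removed, as the code above shows.
 */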
3449 * make the item pointed to by the path bigger; data_size is the number of bytes to add.
3451 int btrfs_extend_item(struct btrfs_trans_handle *trans,
3452 struct btrfs_root *root, struct btrfs_path *path,
3457 struct extent_buffer *leaf;
3458 struct btrfs_item *item;
3460 unsigned int data_end;
3461 unsigned int old_data;
3462 unsigned int old_size;
3465 leaf = path->nodes[0];
3467 nritems = btrfs_header_nritems(leaf);
3468 data_end = leaf_data_end(root, leaf);
3470 if (btrfs_leaf_free_space(root, leaf) < data_size) {
3471 btrfs_print_leaf(root, leaf);
3474 slot = path->slots[0];
3475 old_data = btrfs_item_end_nr(leaf, slot);
3478 if (slot >= nritems) {
3479 btrfs_print_leaf(root, leaf);
3480 printk(KERN_CRIT "slot %d too large, nritems %d\n",
3486 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3488 /* first correct the data pointers */
3489 for (i = slot; i < nritems; i++) {
3491 item = btrfs_item_nr(leaf, i);
3493 if (!leaf->map_token) {
3494 map_extent_buffer(leaf, (unsigned long)item,
3495 sizeof(struct btrfs_item),
3496 &leaf->map_token, &leaf->kaddr,
3497 &leaf->map_start, &leaf->map_len,
3500 ioff = btrfs_item_offset(leaf, item);
3501 btrfs_set_item_offset(leaf, item, ioff - data_size);
3504 if (leaf->map_token) {
3505 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
3506 leaf->map_token = NULL;
3509 /* shift the data */
3510 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3511 data_end - data_size, btrfs_leaf_data(leaf) +
3512 data_end, old_data - data_end);
3514 data_end = old_data;
3515 old_size = btrfs_item_size_nr(leaf, slot);
3516 item = btrfs_item_nr(leaf, slot);
3517 btrfs_set_item_size(leaf, item, old_size + data_size);
3518 btrfs_mark_buffer_dirty(leaf);
3521 if (btrfs_leaf_free_space(root, leaf) < 0) {
3522 btrfs_print_leaf(root, leaf);
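/*
 * Example (a minimal sketch; grow, buf and the other names are
 * hypothetical, declarations elided): the new room appears at the end
 * of the item, so the usual pattern is to extend and then write the
 * added region:
 *
 *	ret = btrfs_extend_item(trans, root, path, grow);
 *	if (ret == 0) {
 *		leaf = path->nodes[0];
 *		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]) +
 *		      btrfs_item_size_nr(leaf, path->slots[0]) - grow;
 *		write_extent_buffer(leaf, buf, ptr, grow);
 *	}
 */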
3529 * Given a key and some data, insert items into the tree.
3530 * This does all the path init required, making room in the tree if needed.
3531 * Returns the number of keys that were inserted.
3533 int btrfs_insert_some_items(struct btrfs_trans_handle *trans,
3534 struct btrfs_root *root,
3535 struct btrfs_path *path,
3536 struct btrfs_key *cpu_key, u32 *data_size,
3539 struct extent_buffer *leaf;
3540 struct btrfs_item *item;
3547 unsigned int data_end;
3548 struct btrfs_disk_key disk_key;
3549 struct btrfs_key found_key;
3551 for (i = 0; i < nr; i++) {
3552 if (total_size + data_size[i] + sizeof(struct btrfs_item) >
3553 BTRFS_LEAF_DATA_SIZE(root)) {
3557 total_data += data_size[i];
3558 total_size += data_size[i] + sizeof(struct btrfs_item);
3562 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
3568 leaf = path->nodes[0];
3570 nritems = btrfs_header_nritems(leaf);
3571 data_end = leaf_data_end(root, leaf);
3573 if (btrfs_leaf_free_space(root, leaf) < total_size) {
3574 for (i = nr - 1; i >= 0; i--) { /* data_size[] has nr entries */
3575 total_data -= data_size[i];
3576 total_size -= data_size[i] + sizeof(struct btrfs_item);
3577 if (total_size < btrfs_leaf_free_space(root, leaf))
3583 slot = path->slots[0];
3586 if (slot != nritems) {
3587 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
3589 item = btrfs_item_nr(leaf, slot);
3590 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3592 /* figure out how many keys we can insert in here */
3593 total_data = data_size[0];
3594 for (i = 1; i < nr; i++) {
3595 if (btrfs_comp_cpu_keys(&found_key, cpu_key + i) <= 0)
3597 total_data += data_size[i];
3601 if (old_data < data_end) {
3602 btrfs_print_leaf(root, leaf);
3603 printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
3604 slot, old_data, data_end);
3608 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3610 /* first correct the data pointers */
3611 WARN_ON(leaf->map_token);
3612 for (i = slot; i < nritems; i++) {
3615 item = btrfs_item_nr(leaf, i);
3616 if (!leaf->map_token) {
3617 map_extent_buffer(leaf, (unsigned long)item,
3618 sizeof(struct btrfs_item),
3619 &leaf->map_token, &leaf->kaddr,
3620 &leaf->map_start, &leaf->map_len,
3624 ioff = btrfs_item_offset(leaf, item);
3625 btrfs_set_item_offset(leaf, item, ioff - total_data);
3627 if (leaf->map_token) {
3628 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
3629 leaf->map_token = NULL;
3632 /* shift the items */
3633 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
3634 btrfs_item_nr_offset(slot),
3635 (nritems - slot) * sizeof(struct btrfs_item));
3637 /* shift the data */
3638 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3639 data_end - total_data, btrfs_leaf_data(leaf) +
3640 data_end, old_data - data_end);
3641 data_end = old_data;
3644 * this sucks but it has to be done: if we are inserting at
3645 * the end of the leaf, only insert 1 of the items, since we
3646 * have no way of knowing what's on the next leaf and we'd have
3647 * to drop our current locks to figure it out
3652 /* setup the item for the new data */
3653 for (i = 0; i < nr; i++) {
3654 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
3655 btrfs_set_item_key(leaf, &disk_key, slot + i);
3656 item = btrfs_item_nr(leaf, slot + i);
3657 btrfs_set_item_offset(leaf, item, data_end - data_size[i]);
3658 data_end -= data_size[i];
3659 btrfs_set_item_size(leaf, item, data_size[i]);
3661 btrfs_set_header_nritems(leaf, nritems + nr);
3662 btrfs_mark_buffer_dirty(leaf);
3666 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
3667 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
3670 if (btrfs_leaf_free_space(root, leaf) < 0) {
3671 btrfs_print_leaf(root, leaf);
3681 * this is a helper for btrfs_insert_empty_items; the main goal here is
3682 * to save stack depth by doing the bulk of the work in a function
3683 * that doesn't call btrfs_search_slot
3685 static noinline_for_stack int
3686 setup_items_for_insert(struct btrfs_trans_handle *trans,
3687 struct btrfs_root *root, struct btrfs_path *path,
3688 struct btrfs_key *cpu_key, u32 *data_size,
3689 u32 total_data, u32 total_size, int nr)
3691 struct btrfs_item *item;
3694 unsigned int data_end;
3695 struct btrfs_disk_key disk_key;
3697 struct extent_buffer *leaf;
3700 leaf = path->nodes[0];
3701 slot = path->slots[0];
3703 nritems = btrfs_header_nritems(leaf);
3704 data_end = leaf_data_end(root, leaf);
3706 if (btrfs_leaf_free_space(root, leaf) < total_size) {
3707 btrfs_print_leaf(root, leaf);
3708 printk(KERN_CRIT "not enough freespace need %u have %d\n",
3709 total_size, btrfs_leaf_free_space(root, leaf));
3713 if (slot != nritems) {
3714 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
3716 if (old_data < data_end) {
3717 btrfs_print_leaf(root, leaf);
3718 printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
3719 slot, old_data, data_end);
3723 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3725 /* first correct the data pointers */
3726 WARN_ON(leaf->map_token);
3727 for (i = slot; i < nritems; i++) {
3730 item = btrfs_item_nr(leaf, i);
3731 if (!leaf->map_token) {
3732 map_extent_buffer(leaf, (unsigned long)item,
3733 sizeof(struct btrfs_item),
3734 &leaf->map_token, &leaf->kaddr,
3735 &leaf->map_start, &leaf->map_len,
3739 ioff = btrfs_item_offset(leaf, item);
3740 btrfs_set_item_offset(leaf, item, ioff - total_data);
3742 if (leaf->map_token) {
3743 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
3744 leaf->map_token = NULL;
3747 /* shift the items */
3748 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
3749 btrfs_item_nr_offset(slot),
3750 (nritems - slot) * sizeof(struct btrfs_item));
3752 /* shift the data */
3753 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3754 data_end - total_data, btrfs_leaf_data(leaf) +
3755 data_end, old_data - data_end);
3756 data_end = old_data;
3759 /* setup the item for the new data */
3760 for (i = 0; i < nr; i++) {
3761 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
3762 btrfs_set_item_key(leaf, &disk_key, slot + i);
3763 item = btrfs_item_nr(leaf, slot + i);
3764 btrfs_set_item_offset(leaf, item, data_end - data_size[i]);
3765 data_end -= data_size[i];
3766 btrfs_set_item_size(leaf, item, data_size[i]);
3769 btrfs_set_header_nritems(leaf, nritems + nr);
3773 struct btrfs_disk_key disk_key;
3774 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
3775 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
3777 btrfs_unlock_up_safe(path, 1);
3778 btrfs_mark_buffer_dirty(leaf);
3780 if (btrfs_leaf_free_space(root, leaf) < 0) {
3781 btrfs_print_leaf(root, leaf);
3788 * Given a key and some data, insert items into the tree.
3789 * This does all the path init required, making room in the tree if needed.
3791 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
3792 struct btrfs_root *root,
3793 struct btrfs_path *path,
3794 struct btrfs_key *cpu_key, u32 *data_size,
3803 for (i = 0; i < nr; i++)
3804 total_data += data_size[i];
3806 total_size = total_data + (nr * sizeof(struct btrfs_item));
3807 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
3813 slot = path->slots[0];
3816 ret = setup_items_for_insert(trans, root, path, cpu_key, data_size,
3817 total_data, total_size, nr);
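/*
 * Example usage (a minimal sketch; the keys, sizes and buffers are made
 * up, and the keys must be adjacent in sort order so they land in one
 * leaf): inserting two empty items in one pass and filling them in:
 *
 *	struct btrfs_key keys[2];
 *	u32 sizes[2] = { 16, 32 };
 *
 *	... fill in keys[0] < keys[1] ...
 *	ret = btrfs_insert_empty_items(trans, root, path, keys, sizes, 2);
 *	if (ret == 0) {
 *		leaf = path->nodes[0];
 *		write_extent_buffer(leaf, buf0,
 *		    btrfs_item_ptr_offset(leaf, path->slots[0]), 16);
 *		write_extent_buffer(leaf, buf1,
 *		    btrfs_item_ptr_offset(leaf, path->slots[0] + 1), 32);
 *		btrfs_mark_buffer_dirty(leaf);
 *	}
 *	btrfs_release_path(root, path);
 */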
3824 * Given a key and some data, insert an item into the tree.
3825 * This does all the path init required, making room in the tree if needed.
3827 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
3828 *root, struct btrfs_key *cpu_key, void *data, u32
3832 struct btrfs_path *path;
3833 struct extent_buffer *leaf;
3836 path = btrfs_alloc_path();
3838 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
3840 leaf = path->nodes[0];
3841 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
3842 write_extent_buffer(leaf, data, ptr, data_size);
3843 btrfs_mark_buffer_dirty(leaf);
3845 btrfs_free_path(path);
3850 * delete the pointer from a given node.
3852 * the tree should have been previously balanced so the deletion does not
3853 * empty a node.
3855 static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3856 struct btrfs_path *path, int level, int slot)
3858 struct extent_buffer *parent = path->nodes[level];
3863 nritems = btrfs_header_nritems(parent);
3864 if (slot != nritems - 1) {
3865 memmove_extent_buffer(parent,
3866 btrfs_node_key_ptr_offset(slot),
3867 btrfs_node_key_ptr_offset(slot + 1),
3868 sizeof(struct btrfs_key_ptr) *
3869 (nritems - slot - 1));
3872 btrfs_set_header_nritems(parent, nritems);
3873 if (nritems == 0 && parent == root->node) {
3874 BUG_ON(btrfs_header_level(root->node) != 1);
3875 /* just turn the root into a leaf and break */
3876 btrfs_set_header_level(root->node, 0);
3877 } else if (slot == 0) {
3878 struct btrfs_disk_key disk_key;
3880 btrfs_node_key(parent, &disk_key, 0);
3881 wret = fixup_low_keys(trans, root, path, &disk_key, level + 1);
3885 btrfs_mark_buffer_dirty(parent);
3890 * a helper function to delete the leaf pointed to by path->slots[1] and
3891 * path->nodes[1].
3893 * This deletes the pointer in path->nodes[1] and frees the leaf
3894 * block extent. zero is returned if it all worked out, < 0 otherwise.
3896 * The path must have already been setup for deleting the leaf, including
3897 * all the proper balancing. path->nodes[1] must be locked.
3899 static noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans,
3900 struct btrfs_root *root,
3901 struct btrfs_path *path,
3902 struct extent_buffer *leaf)
3906 WARN_ON(btrfs_header_generation(leaf) != trans->transid);
3907 ret = del_ptr(trans, root, path, 1, path->slots[1]);
3912 * btrfs_free_extent is expensive, we want to make sure we
3913 * aren't holding any locks when we call it
3915 btrfs_unlock_up_safe(path, 0);
3917 root_sub_used(root, leaf->len);
3919 btrfs_free_tree_block(trans, root, leaf, 0, 1);
3923 * delete the item at the leaf level in path. If that empties
3924 * the leaf, remove it from the tree
3926 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3927 struct btrfs_path *path, int slot, int nr)
3929 struct extent_buffer *leaf;
3930 struct btrfs_item *item;
3938 leaf = path->nodes[0];
3939 last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
3941 for (i = 0; i < nr; i++)
3942 dsize += btrfs_item_size_nr(leaf, slot + i);
3944 nritems = btrfs_header_nritems(leaf);
3946 if (slot + nr != nritems) {
3947 int data_end = leaf_data_end(root, leaf);
3949 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3951 btrfs_leaf_data(leaf) + data_end,
3952 last_off - data_end);
3954 for (i = slot + nr; i < nritems; i++) {
3957 item = btrfs_item_nr(leaf, i);
3958 if (!leaf->map_token) {
3959 map_extent_buffer(leaf, (unsigned long)item,
3960 sizeof(struct btrfs_item),
3961 &leaf->map_token, &leaf->kaddr,
3962 &leaf->map_start, &leaf->map_len,
3965 ioff = btrfs_item_offset(leaf, item);
3966 btrfs_set_item_offset(leaf, item, ioff + dsize);
3969 if (leaf->map_token) {
3970 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
3971 leaf->map_token = NULL;
3974 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
3975 btrfs_item_nr_offset(slot + nr),
3976 sizeof(struct btrfs_item) *
3977 (nritems - slot - nr));
3979 btrfs_set_header_nritems(leaf, nritems - nr);
3982 /* delete the leaf if we've emptied it */
3984 if (leaf == root->node) {
3985 btrfs_set_header_level(leaf, 0);
3987 btrfs_set_path_blocking(path);
3988 clean_tree_block(trans, root, leaf);
3989 ret = btrfs_del_leaf(trans, root, path, leaf);
3993 int used = leaf_space_used(leaf, 0, nritems);
3995 struct btrfs_disk_key disk_key;
3997 btrfs_item_key(leaf, &disk_key, 0);
3998 wret = fixup_low_keys(trans, root, path,
4004 /* delete the leaf if it is mostly empty */
4005 if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) {
4006 /* push_leaf_left fixes the path.
4007 * make sure the path still points to our leaf
4008 * for a possible call to del_ptr below
4010 slot = path->slots[1];
4011 extent_buffer_get(leaf);
4013 btrfs_set_path_blocking(path);
4014 wret = push_leaf_left(trans, root, path, 1, 1,
4016 if (wret < 0 && wret != -ENOSPC)
4019 if (path->nodes[0] == leaf &&
4020 btrfs_header_nritems(leaf)) {
4021 wret = push_leaf_right(trans, root, path, 1,
4023 if (wret < 0 && wret != -ENOSPC)
4027 if (btrfs_header_nritems(leaf) == 0) {
4028 path->slots[1] = slot;
4029 ret = btrfs_del_leaf(trans, root, path, leaf);
4031 free_extent_buffer(leaf);
4033 /* if we're still in the path, make sure
4034 * we're dirty. Otherwise, one of the
4035 * push_leaf functions must have already
4036 * dirtied this buffer
4038 if (path->nodes[0] == leaf)
4039 btrfs_mark_buffer_dirty(leaf);
4040 free_extent_buffer(leaf);
4043 btrfs_mark_buffer_dirty(leaf);
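/*
 * Example usage (a minimal sketch): deleting the single item the path
 * currently points at, which is what the btrfs_del_item() wrapper in
 * ctree.h boils down to:
 *
 *	ret = btrfs_del_items(trans, root, path, path->slots[0], 1);
 */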
4050 * search the tree again to find a leaf with lesser keys
4051 * returns 0 if it found something or 1 if there are no lesser leaves.
4052 * returns < 0 on io errors.
4054 * This may release the path, and so you may lose any locks held at the
4055 * time you call it.
4057 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
4059 struct btrfs_key key;
4060 struct btrfs_disk_key found_key;
4063 btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
4067 else if (key.type > 0)
4069 else if (key.objectid > 0)
4074 btrfs_release_path(root, path);
4075 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4078 btrfs_item_key(path->nodes[0], &found_key, 0);
4079 ret = comp_keys(&found_key, &key);
4086 * A helper function to walk down the tree starting at min_key, and looking
4087 * for nodes or leaves that are either in cache or have a minimum
4088 * transaction id. This is used by the btree defrag code, and tree logging
4090 * This does not cow, but it does stuff the starting key it finds back
4091 * into min_key, so you can call btrfs_search_slot with cow=1 on the
4092 * key and get a writable path.
4094 * This does lock as it descends, and path->keep_locks should be set
4095 * to 1 by the caller.
4097 * This honors path->lowest_level to prevent descent past a given level
4098 * of the tree.
4100 * min_trans indicates the oldest transaction that you are interested
4101 * in walking through. Any nodes or leaves older than min_trans are
4102 * skipped over (without reading them).
4104 * returns zero if something useful was found, < 0 on error and 1 if there
4105 * was nothing in the tree that matched the search criteria.
4107 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
4108 struct btrfs_key *max_key,
4109 struct btrfs_path *path, int cache_only,
4112 struct extent_buffer *cur;
4113 struct btrfs_key found_key;
4120 WARN_ON(!path->keep_locks);
4122 cur = btrfs_lock_root_node(root);
4123 level = btrfs_header_level(cur);
4124 WARN_ON(path->nodes[level]);
4125 path->nodes[level] = cur;
4126 path->locks[level] = 1;
4128 if (btrfs_header_generation(cur) < min_trans) {
4133 nritems = btrfs_header_nritems(cur);
4134 level = btrfs_header_level(cur);
4135 sret = bin_search(cur, min_key, level, &slot);
4137 /* at the lowest level, we're done, setup the path and exit */
4138 if (level == path->lowest_level) {
4139 if (slot >= nritems)
4142 path->slots[level] = slot;
4143 btrfs_item_key_to_cpu(cur, &found_key, slot);
4146 if (sret && slot > 0)
4149 * check this node pointer against the cache_only and
4150 * min_trans parameters. If it isn't in cache or is too
4151 * old, skip to the next one.
4153 while (slot < nritems) {
4156 struct extent_buffer *tmp;
4157 struct btrfs_disk_key disk_key;
4159 blockptr = btrfs_node_blockptr(cur, slot);
4160 gen = btrfs_node_ptr_generation(cur, slot);
4161 if (gen < min_trans) {
4169 btrfs_node_key(cur, &disk_key, slot);
4170 if (comp_keys(&disk_key, max_key) >= 0) {
4176 tmp = btrfs_find_tree_block(root, blockptr,
4177 btrfs_level_size(root, level - 1));
4179 if (tmp && btrfs_buffer_uptodate(tmp, gen)) {
4180 free_extent_buffer(tmp);
4184 free_extent_buffer(tmp);
4189 * we didn't find a candidate key in this node, walk forward
4190 * and find another one
4192 if (slot >= nritems) {
4193 path->slots[level] = slot;
4194 btrfs_set_path_blocking(path);
4195 sret = btrfs_find_next_key(root, path, min_key, level,
4196 cache_only, min_trans);
4198 btrfs_release_path(root, path);
4204 /* save our key for returning to the caller */
4205 btrfs_node_key_to_cpu(cur, &found_key, slot);
4206 path->slots[level] = slot;
4207 if (level == path->lowest_level) {
4209 unlock_up(path, level, 1);
4212 btrfs_set_path_blocking(path);
4213 cur = read_node_slot(root, cur, slot);
4215 btrfs_tree_lock(cur);
4217 path->locks[level - 1] = 1;
4218 path->nodes[level - 1] = cur;
4219 unlock_up(path, level, 1);
4220 btrfs_clear_path_blocking(path, NULL);
4224 memcpy(min_key, &found_key, sizeof(found_key));
4225 btrfs_set_path_blocking(path);
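/*
 * Example usage (a minimal sketch in the style of the defrag/logging
 * callers; process_item() is hypothetical): visiting everything newer
 * than min_trans:
 *
 *	struct btrfs_key min_key = { 0, 0, 0 };
 *	struct btrfs_key max_key = { (u64)-1, (u8)-1, (u64)-1 };
 *
 *	path->keep_locks = 1;
 *	while (1) {
 *		ret = btrfs_search_forward(root, &min_key, &max_key,
 *					   path, 0, min_trans);
 *		if (ret != 0)
 *			break;
 *		process_item(root, path, &min_key);
 *		btrfs_release_path(root, path);
 *		if (min_key.offset == (u64)-1)
 *			break;
 *		min_key.offset++;
 *	}
 */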
4230 * this is similar to btrfs_next_leaf, but does not try to preserve
4231 * and fixup the path. It looks for and returns the next key in the
4232 * tree based on the current path and the cache_only and min_trans
4233 * parameters.
4235 * 0 is returned if another key is found, < 0 if there are any errors
4236 * and 1 is returned if there are no higher keys in the tree
4238 * path->keep_locks should be set to 1 on the search made before
4239 * calling this function.
4241 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
4242 struct btrfs_key *key, int level,
4243 int cache_only, u64 min_trans)
4246 struct extent_buffer *c;
4248 WARN_ON(!path->keep_locks);
4249 while (level < BTRFS_MAX_LEVEL) {
4250 if (!path->nodes[level])
4253 slot = path->slots[level] + 1;
4254 c = path->nodes[level];
4256 if (slot >= btrfs_header_nritems(c)) {
4259 struct btrfs_key cur_key;
4260 if (level + 1 >= BTRFS_MAX_LEVEL ||
4261 !path->nodes[level + 1])
4264 if (path->locks[level + 1]) {
4269 slot = btrfs_header_nritems(c) - 1;
4271 btrfs_item_key_to_cpu(c, &cur_key, slot);
4273 btrfs_node_key_to_cpu(c, &cur_key, slot);
4275 orig_lowest = path->lowest_level;
4276 btrfs_release_path(root, path);
4277 path->lowest_level = level;
4278 ret = btrfs_search_slot(NULL, root, &cur_key, path,
4280 path->lowest_level = orig_lowest;
4284 c = path->nodes[level];
4285 slot = path->slots[level];
4292 btrfs_item_key_to_cpu(c, key, slot);
4294 u64 blockptr = btrfs_node_blockptr(c, slot);
4295 u64 gen = btrfs_node_ptr_generation(c, slot);
4298 struct extent_buffer *cur;
4299 cur = btrfs_find_tree_block(root, blockptr,
4300 btrfs_level_size(root, level - 1));
4301 if (!cur || !btrfs_buffer_uptodate(cur, gen)) {
4304 free_extent_buffer(cur);
4307 free_extent_buffer(cur);
4309 if (gen < min_trans) {
4313 btrfs_node_key_to_cpu(c, key, slot);
4321 * search the tree again to find a leaf with greater keys
4322 * returns 0 if it found something or 1 if there are no greater leaves.
4323 * returns < 0 on io errors.
4325 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
4329 struct extent_buffer *c;
4330 struct extent_buffer *next;
4331 struct btrfs_key key;
4334 int old_spinning = path->leave_spinning;
4335 int force_blocking = 0;
4337 nritems = btrfs_header_nritems(path->nodes[0]);
4342 * we take the blocks in an order that upsets lockdep. Using
4343 * blocking mode is the only way around it.
4345 #ifdef CONFIG_DEBUG_LOCK_ALLOC
4349 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
4353 btrfs_release_path(root, path);
4355 path->keep_locks = 1;
4357 if (!force_blocking)
4358 path->leave_spinning = 1;
4360 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4361 path->keep_locks = 0;
4366 nritems = btrfs_header_nritems(path->nodes[0]);
4368 * by releasing the path above we dropped all our locks. A balance
4369 * could have added more items next to the key that used to be
4370 * at the very end of the block. So, check again here and
4371 * advance the path if there are now more items available.
4373 if (nritems > 0 && path->slots[0] < nritems - 1) {
4380 while (level < BTRFS_MAX_LEVEL) {
4381 if (!path->nodes[level]) {
4386 slot = path->slots[level] + 1;
4387 c = path->nodes[level];
4388 if (slot >= btrfs_header_nritems(c)) {
4390 if (level == BTRFS_MAX_LEVEL) {
4398 btrfs_tree_unlock(next);
4399 free_extent_buffer(next);
4403 ret = read_block_for_search(NULL, root, path, &next, level,
4409 btrfs_release_path(root, path);
4413 if (!path->skip_locking) {
4414 ret = btrfs_try_spin_lock(next);
4416 btrfs_set_path_blocking(path);
4417 btrfs_tree_lock(next);
4418 if (!force_blocking)
4419 btrfs_clear_path_blocking(path, next);
4422 btrfs_set_lock_blocking(next);
4426 path->slots[level] = slot;
4429 c = path->nodes[level];
4430 if (path->locks[level])
4431 btrfs_tree_unlock(c);
4433 free_extent_buffer(c);
4434 path->nodes[level] = next;
4435 path->slots[level] = 0;
4436 if (!path->skip_locking)
4437 path->locks[level] = 1;
4442 ret = read_block_for_search(NULL, root, path, &next, level,
4448 btrfs_release_path(root, path);
4452 if (!path->skip_locking) {
4453 btrfs_assert_tree_locked(path->nodes[level]);
4454 ret = btrfs_try_spin_lock(next);
4456 btrfs_set_path_blocking(path);
4457 btrfs_tree_lock(next);
4458 if (!force_blocking)
4459 btrfs_clear_path_blocking(path, next);
4462 btrfs_set_lock_blocking(next);
4467 unlock_up(path, 0, 1);
4468 path->leave_spinning = old_spinning;
4470 btrfs_set_path_blocking(path);
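/*
 * Example usage (a minimal sketch): the standard pattern for scanning
 * items forward from a starting key to the end of the tree:
 *
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret < 0)
 *		goto out;
 *	while (1) {
 *		leaf = path->nodes[0];
 *		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
 *			ret = btrfs_next_leaf(root, path);
 *			if (ret)
 *				break;	(1 means no more leaves)
 *			continue;
 *		}
 *		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
 *		... process the item at path->slots[0] ...
 *		path->slots[0]++;
 *	}
 */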
4476 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
4477 * searching until it gets past min_objectid or finds an item of 'type'
4479 * returns 0 if something is found, 1 if nothing was found and < 0 on error
4481 int btrfs_previous_item(struct btrfs_root *root,
4482 struct btrfs_path *path, u64 min_objectid,
4485 struct btrfs_key found_key;
4486 struct extent_buffer *leaf;
4491 if (path->slots[0] == 0) {
4492 btrfs_set_path_blocking(path);
4493 ret = btrfs_prev_leaf(root, path);
4499 leaf = path->nodes[0];
4500 nritems = btrfs_header_nritems(leaf);
4503 if (path->slots[0] == nritems)
4506 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4507 if (found_key.objectid < min_objectid)
4509 if (found_key.type == type)
4511 if (found_key.objectid == min_objectid &&
4512 found_key.type < type)