struct radix_tree_root block_group_data_radix;
struct radix_tree_root extent_map_radix;
- u64 extent_tree_insert[BTRFS_MAX_LEVEL * 3];
+ u64 extent_tree_insert[BTRFS_MAX_LEVEL * 6];
int extent_tree_insert_nr;
- u64 extent_tree_prealloc[BTRFS_MAX_LEVEL * 3];
+ u64 extent_tree_prealloc[BTRFS_MAX_LEVEL * 6];
int extent_tree_prealloc_nr;
u64 generation;
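
The two pending-extent arrays above are doubled from BTRFS_MAX_LEVEL * 3 to BTRFS_MAX_LEVEL * 6 entries, and a hunk further down adds a WARN_ON sized with ARRAY_SIZE(info->extent_tree_insert), so the overflow check tracks the declaration if the arrays are ever resized again. A minimal userspace sketch of that ARRAY_SIZE-guarded append idiom, with invented names (pending, record_pending); this is not btrfs code:

#include <stdio.h>
#include <stdint.h>

#define BTRFS_MAX_LEVEL 8
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static uint64_t pending[BTRFS_MAX_LEVEL * 6];	/* mirrors the doubled array */
static int pending_nr;

/* Append one block number, refusing to run past the end of the array.
 * The kernel hunk only warns on overflow; here we fail the call instead. */
static int record_pending(uint64_t blocknr)
{
	if (pending_nr >= (int)ARRAY_SIZE(pending)) {
		fprintf(stderr, "pending array full (%zu slots)\n",
			ARRAY_SIZE(pending));
		return -1;
	}
	pending[pending_nr++] = blocknr;
	return 0;
}

int main(void)
{
	/* Resizing pending[] automatically resizes the check above. */
	for (uint64_t i = 0; i < 100; i++) {
		if (record_pending(i) < 0)
			break;
	}
	printf("recorded %d entries\n", pending_nr);
	return 0;
}
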
if (num_blocks == 0) {
	fill_prealloc = 1;
	num_blocks = 1;
- 	total_needed = (min(level + 1, BTRFS_MAX_LEVEL) + 2) * 3;
+ 	total_needed = (min(level + 1, BTRFS_MAX_LEVEL)) * 6;
}
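
With BTRFS_MAX_LEVEL defined as 8 in ctree.h, the sizing changes line up: the old worst-case total_needed, (8 + 2) * 3 = 30, is larger than the old 8 * 3 = 24-entry prealloc array, while the new worst case, 8 * 6 = 48, exactly matches the enlarged array. A throwaway userspace check of that arithmetic (not part of the patch):

#include <assert.h>
#include <stdio.h>

#define BTRFS_MAX_LEVEL 8	/* value from btrfs ctree.h */

int main(void)
{
	int old_array  = BTRFS_MAX_LEVEL * 3;		/* 24 slots */
	int old_needed = (BTRFS_MAX_LEVEL + 2) * 3;	/* worst case: 30 */
	int new_array  = BTRFS_MAX_LEVEL * 6;		/* 48 slots */
	int new_needed = BTRFS_MAX_LEVEL * 6;		/* worst case: 48 */

	assert(old_needed > old_array);		/* old sizing could overflow */
	assert(new_needed == new_array);	/* new bound fits exactly */
	printf("old: need up to %d of %d, new: need up to %d of %d\n",
	       old_needed, old_array, new_needed, new_array);
	return 0;
}
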
if (fill_prealloc) {
	u64 first;
	ins->objectid;
	ret = update_block_group(trans, root,
				 ins->objectid, ins->offset, 1, 0, 0);
+ 	WARN_ON(info->extent_tree_insert_nr >
+ 		ARRAY_SIZE(info->extent_tree_insert));
	BUG_ON(ret);
	return 0;
}
if (wret < 0)
	ret = wret;
num_walks++;
- if (num_walks > 10) {
+ if (num_walks > 2) {
	ret = -EAGAIN;
	get_bh(root->node);
	break;
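
The walk is now cut off after two passes instead of ten: the function takes an extra reference on the root's buffer_head with get_bh(root->node), presumably so it survives until the next pass, and returns -EAGAIN so the caller can finish the deletion later. A rough userspace sketch of this do-a-little-then-yield shape, with invented names and a plain counter standing in for the buffer_head reference:

#include <errno.h>
#include <stdio.h>

struct partial_walk {
	int refs;	/* stands in for the get_bh() reference */
	int remaining;	/* nodes still to process */
};

/* Process at most max_steps nodes, then bail out with -EAGAIN so the
 * caller can flush, reschedule and retry.  Returns 0 when finished. */
static int walk_some(struct partial_walk *w, int max_steps)
{
	int steps = 0;

	while (w->remaining > 0) {
		w->remaining--;		/* one node's worth of real work */
		if (++steps >= max_steps && w->remaining > 0) {
			w->refs++;	/* keep the tree pinned for the retry */
			return -EAGAIN;
		}
	}
	return 0;
}

int main(void)
{
	struct partial_walk w = { .refs = 1, .remaining = 7 };
	int ret;

	while ((ret = walk_some(&w, 2)) == -EAGAIN) {
		printf("yielding with %d nodes left\n", w.remaining);
		w.refs--;	/* caller drops the retry reference */
	}
	printf("done: ret=%d refs=%d\n", ret, w.refs);
	return 0;
}
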
mutex_unlock(&info->fs_mutex);
btrfs_btree_balance_dirty(root);
+ cond_resched();
mutex_lock(&info->fs_mutex);
trans = btrfs_start_transaction(tree_root, 1);
ret = err;
ret = btrfs_end_transaction(trans, tree_root);
BUG_ON(ret);
+ mutex_unlock(&tree_root->fs_info->fs_mutex);
+
+ btrfs_btree_balance_dirty(tree_root);
+ schedule();
+
+ mutex_lock(&tree_root->fs_info->fs_mutex);
}
BUG_ON(ret);
ret = btrfs_del_root(trans, tree_root, &dirty->root->root_key);
kfree(dirty);
mutex_unlock(&tree_root->fs_info->fs_mutex);
btrfs_btree_balance_dirty(tree_root);
+ schedule();
}
return ret;
}
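
The hunks around btrfs_btree_balance_dirty() all follow the same shape: drop fs_mutex, flush with btrfs_btree_balance_dirty(), yield via schedule() or cond_resched(), then retake the mutex, so the throttling and the reschedule happen without the lock held. A small pthread-based sketch of that drop-flush-yield-retake ordering, with invented names; it is not the btrfs locking code:

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static pthread_mutex_t fs_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for btrfs_btree_balance_dirty(): may block while dirty
 * metadata is written back, so it must not run under fs_mutex. */
static void balance_dirty(void)
{
	puts("flushing dirty btree pages...");
}

static void modify_tree(int i)
{
	printf("tree modification %d under fs_mutex\n", i);
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		pthread_mutex_lock(&fs_mutex);
		modify_tree(i);
		pthread_mutex_unlock(&fs_mutex);

		/* throttle and give other tasks a turn, lock released */
		balance_dirty();
		sched_yield();
	}
	return 0;
}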