fs_info->generation = btrfs_super_generation(disk_super) + 1;
if (btrfs_super_num_devices(disk_super) > 0) {
- fs_info->data_alloc_profile = BTRFS_BLOCK_GROUP_RAID0;
- fs_info->metadata_alloc_profile = BTRFS_BLOCK_GROUP_RAID1;
- fs_info->system_alloc_profile = BTRFS_BLOCK_GROUP_RAID0;
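+ /* with multiple devices, allow striped or mirrored data and mirrored or duplicated metadata/system chunks */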
+ fs_info->data_alloc_profile = BTRFS_BLOCK_GROUP_RAID0 |
+ BTRFS_BLOCK_GROUP_RAID1;
+ fs_info->metadata_alloc_profile = BTRFS_BLOCK_GROUP_RAID1 |
+ BTRFS_BLOCK_GROUP_DUP;
+ fs_info->system_alloc_profile = fs_info->metadata_alloc_profile;
}
mutex_unlock(&fs_info->fs_mutex);
return tree_root;
if (start + num > total_fs_bytes)
goto new_group;
if (!block_group_bits(cache, data)) {
- printk("block group bits don't match %Lu %Lu\n", cache->flags, data);
+ printk("block group bits don't match %Lu %d\n", cache->flags, data);
}
*start_ret = start;
return 0;
static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
- BTRFS_BLOCK_GROUP_RAID1);
+ BTRFS_BLOCK_GROUP_RAID1 |
+ BTRFS_BLOCK_GROUP_DUP);
if (extra_flags) {
if (flags & BTRFS_BLOCK_GROUP_DATA)
fs_info->avail_data_alloc_bits |= extra_flags;
struct extent_map *em;
u64 physical;
u64 calc_size = 1024 * 1024 * 1024;
+ u64 min_free = calc_size;
u64 avail;
u64 max_avail = 0;
int num_stripes = 1;
if (type & (BTRFS_BLOCK_GROUP_RAID0))
num_stripes = btrfs_super_num_devices(&info->super_copy);
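+ /* DUP keeps two copies of each chunk on the same device */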
+ if (type & (BTRFS_BLOCK_GROUP_DUP))
+ num_stripes = 2;
if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
num_stripes = min_t(u64, 2,
btrfs_super_num_devices(&info->super_copy));
INIT_LIST_HEAD(&private_devs);
cur = dev_list->next;
index = 0;
+
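+ /* a DUP chunk puts both copies on one device, so it needs twice the free space */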
+ if (type & BTRFS_BLOCK_GROUP_DUP)
+ min_free = calc_size * 2;
+
/* build a private list of devices we will allocate from */
while(index < num_stripes) {
device = list_entry(cur, struct btrfs_device, dev_list);
+
avail = device->total_bytes - device->bytes_used;
cur = cur->next;
if (avail > max_avail)
max_avail = avail;
- if (avail >= calc_size) {
+ if (avail >= min_free) {
list_move_tail(&device->dev_list, &private_devs);
index++;
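+ /* a DUP chunk takes two stripes from the same device */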
+ if (type & BTRFS_BLOCK_GROUP_DUP)
+ index++;
}
if (cur == dev_list)
break;
stripes = &chunk->stripe;
- if (type & BTRFS_BLOCK_GROUP_RAID1)
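+ /* mirrored and duplicated chunks only expose one copy's worth of usable bytes */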
+ if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
*num_bytes = calc_size;
else
*num_bytes = calc_size * num_stripes;
index = 0;
+printk("new chunk type %Lu start %Lu size %Lu\n", type, key.objectid, *num_bytes);
while(index < num_stripes) {
BUG_ON(list_empty(&private_devs));
cur = private_devs.next;
device = list_entry(cur, struct btrfs_device, dev_list);
- list_move_tail(&device->dev_list, dev_list);
+
+ /* loop over this device again if we're doing a dup group */
+ if (!(type & BTRFS_BLOCK_GROUP_DUP) ||
+ (index == num_stripes - 1))
+ list_move_tail(&device->dev_list, dev_list);
ret = btrfs_alloc_dev_extent(trans, device,
key.objectid,
}
*total_devs = 1;
}
+ } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
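+ /* DUP: writes must go to every copy, reads are satisfied from the first stripe */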
+ if (rw == WRITE) {
+ *total_devs = map->num_stripes;
+ stripe_index = dev_nr;
+ } else {
+ stripe_index = 0;
+ *total_devs = 1;
+ }
} else {
/*
* after this do_div call, stripe_nr is the number of stripes
*phys = map->stripes[stripe_index].physical + stripe_offset +
stripe_nr * map->stripe_len;
- if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1)) {
+ if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
+ BTRFS_BLOCK_GROUP_DUP)) {
/* we limit the length of each bio to what fits in a stripe */
*length = min_t(u64, em->len - offset,
map->stripe_len - stripe_offset);