/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/iocontext.h>
#include <asm/div64.h>
#include "extent_map.h"
#include "transaction.h"
#include "print-tree.h"
#include "async-thread.h"
	struct btrfs_bio_stripe stripes[];
};

static int init_first_rw_device(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_device *device);
static int btrfs_relocate_sys_chunks(struct btrfs_root *root);

#define map_lookup_size(n) (sizeof(struct map_lookup) + \
			    (sizeof(struct btrfs_bio_stripe) * (n)))

static DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
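
/*
 * uuid_mutex protects the list of known filesystems (fs_uuids) and the
 * btrfs_fs_devices structures hanging off of it; lock_chunks/unlock_chunks
 * below take the per-filesystem chunk_mutex, which serializes chunk
 * allocation and device state changes within a single filesystem.
 */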
void btrfs_lock_volumes(void)
{
	mutex_lock(&uuid_mutex);
}

void btrfs_unlock_volumes(void)
{
	mutex_unlock(&uuid_mutex);
}

static void lock_chunks(struct btrfs_root *root)
{
	mutex_lock(&root->fs_info->chunk_mutex);
}

static void unlock_chunks(struct btrfs_root *root)
{
	mutex_unlock(&root->fs_info->chunk_mutex);
}

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;
	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		kfree(device->name);
		kfree(device);
	}
	kfree(fs_devices);
}

int btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, list);
		list_del(&fs_devices->list);
		free_fs_devices(fs_devices);
	}
	return 0;
}

static noinline struct btrfs_device *__find_device(struct list_head *head,
						   u64 devid, u8 *uuid)
{
	struct btrfs_device *dev;

	list_for_each_entry(dev, head, dev_list) {
		if (dev->devid == devid &&
		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
			return dev;
		}
	}
	return NULL;
}

static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, list) {
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}

static void requeue_list(struct btrfs_pending_bios *pending_bios,
			 struct bio *head, struct bio *tail)
{
	struct bio *old_head;

	old_head = pending_bios->head;
	pending_bios->head = head;
	if (pending_bios->tail)
		tail->bi_next = old_head;
	else
		pending_bios->tail = tail;
}

/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device.  This greatly
 * improves the scheduler's ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block.  The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested.  This way, multiple devices
 * can make progress from a single worker thread.
 */
static noinline int run_scheduled_bios(struct btrfs_device *device)
{
	struct bio *pending;
	struct backing_dev_info *bdi;
	struct btrfs_fs_info *fs_info;
	struct btrfs_pending_bios *pending_bios;
	struct bio *tail;
	struct bio *cur;
	int again = 0;
	unsigned long num_run;
	unsigned long num_sync_run;
	unsigned long batch_run = 0;
	unsigned long limit;
	unsigned long last_waited = 0;
	int force_reg = 0;

	bdi = blk_get_backing_dev_info(device->bdev);
	fs_info = device->dev_root->fs_info;
	limit = btrfs_async_submit_limit(fs_info);
	limit = limit * 2 / 3;

	/* we want to make sure that every time we switch from the sync
	 * list to the normal list, we unplug
	 */
	num_sync_run = 0;

loop:
	spin_lock(&device->io_lock);

loop_lock:
	num_run = 0;

	/* take all the bios off the list at once and process them
	 * later on (without the lock held).  But, remember the
	 * tail and other pointers so the bios can be properly reinserted
	 * into the list if we hit congestion
	 */
	if (!force_reg && device->pending_sync_bios.head) {
		pending_bios = &device->pending_sync_bios;
		force_reg = 1;
	} else {
		pending_bios = &device->pending_bios;
		force_reg = 0;
	}

	pending = pending_bios->head;
	tail = pending_bios->tail;
	WARN_ON(pending && !tail);

	/*
	 * if pending was null this time around, no bios need processing
	 * at all and we can stop.  Otherwise it'll loop back up again
	 * and do an additional check so no bios are missed.
	 *
	 * device->running_pending is used to synchronize with the
	 * schedule_bio code.
	 */
	if (device->pending_sync_bios.head == NULL &&
	    device->pending_bios.head == NULL) {
		again = 0;
		device->running_pending = 0;
	} else {
		again = 1;
		device->running_pending = 1;
	}

	pending_bios->head = NULL;
	pending_bios->tail = NULL;

	spin_unlock(&device->io_lock);

	/*
	 * if we're doing the regular priority list, make sure we unplug
	 * for any high prio bios we've sent down
	 */
	if (pending_bios == &device->pending_bios && num_sync_run > 0) {
		num_sync_run = 0;
		blk_run_backing_dev(bdi, NULL);
	}

	while (pending) {
		/* we want to work on both lists, but do more bios on the
		 * sync list than the regular list
		 */
		if ((num_run > 32 &&
		     pending_bios != &device->pending_sync_bios &&
		     device->pending_sync_bios.head) ||
		    (num_run > 64 && pending_bios == &device->pending_sync_bios &&
		     device->pending_bios.head)) {
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			goto loop_lock;
		}

		cur = pending;
		pending = pending->bi_next;
		cur->bi_next = NULL;
		atomic_dec(&fs_info->nr_async_bios);

		if (atomic_read(&fs_info->nr_async_bios) < limit &&
		    waitqueue_active(&fs_info->async_submit_wait))
			wake_up(&fs_info->async_submit_wait);

		BUG_ON(atomic_read(&cur->bi_cnt) == 0);

		if (bio_rw_flagged(cur, BIO_RW_SYNCIO))
			num_sync_run++;

		submit_bio(cur->bi_rw, cur);
		num_run++;
		batch_run++;
		if (need_resched()) {
			if (num_sync_run) {
				blk_run_backing_dev(bdi, NULL);
				num_sync_run = 0;
			}
			cond_resched();
		}

		/*
		 * we made progress, there is more work to do and the bdi
		 * is now congested.  Back off and let other work structs
		 * run instead
		 */
		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
		    fs_info->fs_devices->open_devices > 1) {
			struct io_context *ioc;

			ioc = current->io_context;

			/*
			 * the main goal here is that we don't want to
			 * block if we're going to be able to submit
			 * more requests without blocking.
			 *
			 * This code does two great things, it pokes into
			 * the elevator code from a filesystem _and_
			 * it makes assumptions about how batching works.
			 */
			if (ioc && ioc->nr_batch_requests > 0 &&
			    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
			    (last_waited == 0 ||
			     ioc->last_waited == last_waited)) {
				/*
				 * we want to go through our batch of
				 * requests and stop.  So, we copy out
				 * the ioc->last_waited time and test
				 * against it before looping
				 */
				last_waited = ioc->last_waited;
				if (need_resched()) {
					if (num_sync_run) {
						blk_run_backing_dev(bdi, NULL);
						num_sync_run = 0;
					}
					cond_resched();
				}
				continue;
			}
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			device->running_pending = 1;

			spin_unlock(&device->io_lock);
			btrfs_requeue_work(&device->work);
			goto done;
		}
	}

	if (num_sync_run) {
		num_sync_run = 0;
		blk_run_backing_dev(bdi, NULL);
	}

	/*
	 * IO has already been through a long path to get here.  Checksumming,
	 * async helper threads, perhaps compression.  We've done a pretty
	 * good job of collecting a batch of IO and should just unplug
	 * the device right away.
	 *
	 * This will help anyone who is waiting on the IO, they might have
	 * already unplugged, but managed to do so before the bio they
	 * cared about found its way down here.
	 */
	blk_run_backing_dev(bdi, NULL);

	cond_resched();
	if (again)
		goto loop;

	spin_lock(&device->io_lock);
	if (device->pending_bios.head || device->pending_sync_bios.head)
		goto loop_lock;
	spin_unlock(&device->io_lock);

done:
	return 0;
}

static void pending_bios_fn(struct btrfs_work *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, work);
	run_scheduled_bios(device);
}
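
/*
 * device_list_add records a device found by scanning: it creates a new
 * btrfs_fs_devices entry for a previously unseen fsid, allocates a new
 * btrfs_device when the devid/uuid pair is unknown, and otherwise just
 * updates the cached path and the latest devid/generation seen.
 */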
static noinline int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	u64 found_transid = btrfs_super_generation(disk_super);
	char *name;

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
		if (!fs_devices)
			return -ENOMEM;
		INIT_LIST_HEAD(&fs_devices->devices);
		INIT_LIST_HEAD(&fs_devices->alloc_list);
		list_add(&fs_devices->list, &fs_uuids);
		memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
		mutex_init(&fs_devices->device_list_mutex);
		device = NULL;
	} else {
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
	}
	if (!device) {
		if (fs_devices->opened)
			return -EBUSY;

		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device) {
			/* we can safely leave the fs_devices entry around */
			return -ENOMEM;
		}
		device->devid = devid;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, disk_super->dev_item.uuid,
		       BTRFS_UUID_SIZE);
		device->barriers = 1;
		spin_lock_init(&device->io_lock);
		device->name = kstrdup(path, GFP_NOFS);
		INIT_LIST_HEAD(&device->dev_alloc_list);

		mutex_lock(&fs_devices->device_list_mutex);
		list_add(&device->dev_list, &fs_devices->devices);
		mutex_unlock(&fs_devices->device_list_mutex);

		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	} else if (strcmp(device->name, path)) {
		name = kstrdup(path, GFP_NOFS);
		if (!name)
			return -ENOMEM;
		kfree(device->name);
		device->name = name;
	}

	if (found_transid > fs_devices->latest_trans) {
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
	}
	*fs_devices_ret = fs_devices;
	return 0;
}
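
/*
 * clone_fs_devices builds a private copy of an fs_devices list (devid,
 * uuid and name of every device); the seed code below uses it to keep a
 * snapshot of the seed filesystem's devices.
 */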
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;

	fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
	if (!fs_devices)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&fs_devices->devices);
	INIT_LIST_HEAD(&fs_devices->alloc_list);
	INIT_LIST_HEAD(&fs_devices->list);
	mutex_init(&fs_devices->device_list_mutex);
	fs_devices->latest_devid = orig->latest_devid;
	fs_devices->latest_trans = orig->latest_trans;
	memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));

	mutex_lock(&orig->device_list_mutex);
	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device)
			goto error;

		device->name = kstrdup(orig_dev->name, GFP_NOFS);
		if (!device->name) {
			kfree(device);
			goto error;
		}

		device->devid = orig_dev->devid;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
		device->barriers = 1;
		spin_lock_init(&device->io_lock);
		INIT_LIST_HEAD(&device->dev_list);
		INIT_LIST_HEAD(&device->dev_alloc_list);

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	mutex_unlock(&orig->device_list_mutex);
	return fs_devices;
error:
	mutex_unlock(&orig->device_list_mutex);
	free_fs_devices(fs_devices);
	return ERR_PTR(-ENOMEM);
}
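
/*
 * after scanning we can end up with devices on the list that are no
 * longer present; drop everything that is not in_fs_metadata, closing
 * and freeing it, and repeat for any seed devices chained off this fs.
 */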
int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *next;

	mutex_lock(&uuid_mutex);
again:
	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (device->in_fs_metadata)
			continue;

		if (device->bdev) {
			close_bdev_exclusive(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			device->writeable = 0;
			fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	if (fs_devices->seed) {
		fs_devices = fs_devices->seed;
		goto again;
	}

	mutex_unlock(&uuid_mutex);
	return 0;
}

static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	if (--fs_devices->opened > 0)
		return 0;

	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		if (device->bdev) {
			close_bdev_exclusive(device->bdev, device->mode);
			fs_devices->open_devices--;
		}
		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			fs_devices->rw_devices--;
		}

		device->bdev = NULL;
		device->writeable = 0;
		device->in_fs_metadata = 0;
	}
	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = 0;

	return 0;
}

int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_fs_devices *seed_devices = NULL;
	int ret;

	mutex_lock(&uuid_mutex);
	ret = __btrfs_close_devices(fs_devices);
	if (!fs_devices->opened) {
		seed_devices = fs_devices->seed;
		fs_devices->seed = NULL;
	}
	mutex_unlock(&uuid_mutex);

	while (seed_devices) {
		fs_devices = seed_devices;
		seed_devices = fs_devices->seed;
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
	return ret;
}
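
/*
 * open every device on the list, read its super block, and remember the
 * device with the highest generation as latest_bdev.  A device whose
 * super carries BTRFS_SUPER_FLAG_SEEDING is kept read-only; writeable
 * devices are also added to the alloc_list.
 */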
static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
				fmode_t flags, void *holder)
{
	struct block_device *bdev;
	struct list_head *head = &fs_devices->devices;
	struct btrfs_device *device;
	struct block_device *latest_bdev = NULL;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	u64 latest_devid = 0;
	u64 latest_transid = 0;
	u64 devid;
	int seeding = 1;
	int ret = 0;

	list_for_each_entry(device, head, dev_list) {
		if (device->bdev)
			continue;
		if (!device->name)
			continue;

		bdev = open_bdev_exclusive(device->name, flags, holder);
		if (IS_ERR(bdev)) {
			printk(KERN_INFO "open %s failed\n", device->name);
			goto error;
		}
		set_blocksize(bdev, 4096);

		bh = btrfs_read_dev_super(bdev);
		if (!bh)
			goto error_close;

		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		if (devid != device->devid)
			goto error_brelse;

		if (memcmp(device->uuid, disk_super->dev_item.uuid,
			   BTRFS_UUID_SIZE))
			goto error_brelse;

		device->generation = btrfs_super_generation(disk_super);
		if (!latest_transid || device->generation > latest_transid) {
			latest_devid = devid;
			latest_transid = device->generation;
			latest_bdev = bdev;
		}

		if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
			device->writeable = 0;
		} else {
			device->writeable = !bdev_read_only(bdev);
			seeding = 0;
		}

		device->bdev = bdev;
		device->in_fs_metadata = 0;
		device->mode = flags;

		if (!blk_queue_nonrot(bdev_get_queue(bdev)))
			fs_devices->rotating = 1;

		fs_devices->open_devices++;
		if (device->writeable) {
			fs_devices->rw_devices++;
			list_add(&device->dev_alloc_list,
				 &fs_devices->alloc_list);
		}
		continue;

error_brelse:
		brelse(bh);
error_close:
		close_bdev_exclusive(bdev, FMODE_READ);
error:
		continue;
	}
	if (fs_devices->open_devices == 0) {
		ret = -EIO;
		goto out;
	}
	fs_devices->seeding = seeding;
	fs_devices->opened = 1;
	fs_devices->latest_bdev = latest_bdev;
	fs_devices->latest_devid = latest_devid;
	fs_devices->latest_trans = latest_transid;
	fs_devices->total_rw_bytes = 0;
out:
	return ret;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	mutex_lock(&uuid_mutex);
	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		ret = __btrfs_open_devices(fs_devices, flags, holder);
	}
	mutex_unlock(&uuid_mutex);
	return ret;
}
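
/*
 * read the super block off one device and, if it looks like btrfs,
 * register it with device_list_add under the uuid_mutex.
 */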
int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
			  struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_super_block *disk_super;
	struct block_device *bdev;
	struct buffer_head *bh;
	int ret;
	u64 devid;
	u64 transid;

	mutex_lock(&uuid_mutex);

	bdev = open_bdev_exclusive(path, flags, holder);
	if (IS_ERR(bdev)) {
		ret = PTR_ERR(bdev);
		goto error;
	}

	ret = set_blocksize(bdev, 4096);
	if (ret)
		goto error_close;
	bh = btrfs_read_dev_super(bdev);

	disk_super = (struct btrfs_super_block *)bh->b_data;
	devid = btrfs_stack_device_id(&disk_super->dev_item);
	transid = btrfs_super_generation(disk_super);
	if (disk_super->label[0])
		printk(KERN_INFO "device label %s ", disk_super->label);
	else {
		/* FIXME, make a real uuid parser */
		printk(KERN_INFO "device fsid %llx-%llx ",
		       *(unsigned long long *)disk_super->fsid,
		       *(unsigned long long *)(disk_super->fsid + 8));
	}
	printk(KERN_CONT "devid %llu transid %llu %s\n",
	       (unsigned long long)devid, (unsigned long long)transid, path);
	ret = device_list_add(path, disk_super, devid, fs_devices_ret);

	brelse(bh);
error_close:
	close_bdev_exclusive(bdev, flags);
error:
	mutex_unlock(&uuid_mutex);
	return ret;
}

/*
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 */
int find_free_dev_extent(struct btrfs_trans_handle *trans,
			 struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *max_avail)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	u64 hole_size = 0;
	u64 last_byte = 0;
	u64 search_start = 0;
	u64 search_end = device->total_bytes;
	int ret;
	int slot = 0;
	int start_found;
	struct extent_buffer *l;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	start_found = 0;

	/* FIXME use last free of some kind */

	/* we don't want to overwrite the superblock on the drive,
	 * so we make sure to start at an offset of at least 1MB
	 */
	search_start = max((u64)1024 * 1024, search_start);

	if (root->fs_info->alloc_start + num_bytes <= device->total_bytes)
		search_start = max(root->fs_info->alloc_start, search_start);

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	ret = btrfs_previous_item(root, path, key.objectid, key.type);
	if (ret > 0) {
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &key, path->slots[0]);
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;

			if (!start_found) {
				if (search_start >= search_end) {
					ret = -ENOSPC;
					goto error;
				}
				*start = search_start;
				start_found = 1;
				goto check_pending;
			}
			*start = last_byte > search_start ?
				 last_byte : search_start;
			if (search_end <= *start) {
				ret = -ENOSPC;
				goto error;
			}
			goto check_pending;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.offset >= search_start && key.offset > last_byte &&
		    start_found) {
			if (last_byte < search_start)
				last_byte = search_start;
			hole_size = key.offset - last_byte;

			if (hole_size > *max_avail)
				*max_avail = hole_size;

			if (key.offset > last_byte &&
			    hole_size >= num_bytes) {
				*start = last_byte;
				goto check_pending;
			}
		}
		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
			goto next;

		start_found = 1;
		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		last_byte = key.offset + btrfs_dev_extent_length(l, dev_extent);
next:
		path->slots[0]++;
	}
check_pending:
	/* we have to make sure we didn't find an extent that has already
	 * been allocated by the map tree or the original allocation
	 */
	BUG_ON(*start < search_start);

	if (*start + num_bytes > search_end) {
		ret = -ENOSPC;
		goto error;
	}
	/* check for pending inserts here */
	ret = 0;

error:
	btrfs_free_path(path);
	return ret;
}
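
/*
 * remove the dev extent item for this device that covers @start, and
 * give the space back to the device by shrinking bytes_used.
 */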
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_device *device,
				 u64 start)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	}

	if (device->bytes_used > 0)
		device->bytes_used -= btrfs_dev_extent_length(leaf, extent);
	ret = btrfs_del_item(trans, root, path);

	btrfs_free_path(path);
	return ret;
}
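
/*
 * insert a dev extent item recording that the range [start, start +
 * num_bytes) of this device backs the chunk at chunk_offset.
 */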
int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
			   struct btrfs_device *device,
			   u64 chunk_tree, u64 chunk_objectid,
			   u64 chunk_offset, u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	WARN_ON(!device->in_fs_metadata);
	path = btrfs_alloc_path();

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
		    BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);
	return ret;
}
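
/*
 * find the offset right after the last chunk item for @objectid, by
 * searching backwards from the largest possible key.
 */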
static noinline int find_next_chunk(struct btrfs_root *root,
				    u64 objectid, u64 *offset)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_key found_key;

	path = btrfs_alloc_path();

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);

	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
	if (ret) {
		*offset = 0;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != objectid)
			*offset = 0;
		else {
			chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
					       struct btrfs_chunk);
			*offset = found_key.offset +
				btrfs_chunk_length(path->nodes[0], chunk);
		}
	}

	btrfs_free_path(path);
	return ret;
}
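
/*
 * find the next free devid by locating the last device item in the
 * chunk tree and adding one to its key offset.
 */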
static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);

	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*objectid = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*objectid = found_key.offset + 1;
	}

	btrfs_free_path(path);
	return ret;
}

/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = (unsigned long)btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_free_path(path);
	return ret;
}

static int btrfs_rm_dev_item(struct btrfs_root *root,
			     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();

	trans = btrfs_start_transaction(root, 1);
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;
	lock_chunks(root);

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);

	ret = btrfs_del_item(trans, root, path);

	btrfs_free_path(path);
	unlock_chunks(root);
	btrfs_commit_transaction(trans, root);
	return ret;
}
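
/*
 * remove a device from the filesystem: check that the raid constraints
 * still hold, move all of its extents away with btrfs_shrink_device(device,
 * 0), delete its device item, drop it from the in-memory lists, and
 * finally wipe the btrfs magic from its super block.
 */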
int btrfs_rm_device(struct btrfs_root *root, char *device_path)
{
	struct btrfs_device *device;
	struct btrfs_device *next_device;
	struct block_device *bdev;
	struct buffer_head *bh = NULL;
	struct btrfs_super_block *disk_super;
	u64 all_avail;
	u64 devid;
	u64 num_devices;
	u8 *dev_uuid;
	int ret = 0;

	mutex_lock(&uuid_mutex);
	mutex_lock(&root->fs_info->volume_mutex);

	all_avail = root->fs_info->avail_data_alloc_bits |
		    root->fs_info->avail_system_alloc_bits |
		    root->fs_info->avail_metadata_alloc_bits;

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
	    root->fs_info->fs_devices->num_devices <= 4) {
		printk(KERN_ERR "btrfs: unable to go below four devices "
		       "on raid10\n");
		ret = -EINVAL;
		goto out;
	}

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
	    root->fs_info->fs_devices->num_devices <= 2) {
		printk(KERN_ERR "btrfs: unable to go below two "
		       "devices on raid1\n");
		ret = -EINVAL;
		goto out;
	}

	if (strcmp(device_path, "missing") == 0) {
		struct list_head *devices;
		struct btrfs_device *tmp;

		device = NULL;
		devices = &root->fs_info->fs_devices->devices;
		mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
		list_for_each_entry(tmp, devices, dev_list) {
			if (tmp->in_fs_metadata && !tmp->bdev) {
				device = tmp;
				break;
			}
		}
		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

		if (!device) {
			printk(KERN_ERR "btrfs: no missing devices found to "
			       "remove\n");
			goto out;
		}
	} else {
		bdev = open_bdev_exclusive(device_path, FMODE_READ,
					   root->fs_info->bdev_holder);
		if (IS_ERR(bdev)) {
			ret = PTR_ERR(bdev);
			goto out;
		}

		set_blocksize(bdev, 4096);
		bh = btrfs_read_dev_super(bdev);

		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		dev_uuid = disk_super->dev_item.uuid;
		device = btrfs_find_device(root, devid, dev_uuid,
					   disk_super->fsid);
	}

	if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
		printk(KERN_ERR "btrfs: unable to remove the only writeable "
		       "device\n");
		ret = -EINVAL;
		goto out;
	}

	if (device->writeable) {
		list_del_init(&device->dev_alloc_list);
		root->fs_info->fs_devices->rw_devices--;
	}

	ret = btrfs_shrink_device(device, 0);

	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);

	device->in_fs_metadata = 0;

	/*
	 * the device list mutex makes sure that we don't change
	 * the device list while someone else is writing out all
	 * the device supers.
	 */
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_del_init(&device->dev_list);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	device->fs_devices->num_devices--;

	next_device = list_entry(root->fs_info->fs_devices->devices.next,
				 struct btrfs_device, dev_list);
	if (device->bdev == root->fs_info->sb->s_bdev)
		root->fs_info->sb->s_bdev = next_device->bdev;
	if (device->bdev == root->fs_info->fs_devices->latest_bdev)
		root->fs_info->fs_devices->latest_bdev = next_device->bdev;

	if (device->bdev) {
		close_bdev_exclusive(device->bdev, device->mode);
		device->bdev = NULL;
		device->fs_devices->open_devices--;
	}

	num_devices = btrfs_super_num_devices(&root->fs_info->super_copy) - 1;
	btrfs_set_super_num_devices(&root->fs_info->super_copy, num_devices);

	if (device->fs_devices->open_devices == 0) {
		struct btrfs_fs_devices *fs_devices;
		fs_devices = root->fs_info->fs_devices;
		while (fs_devices) {
			if (fs_devices->seed == device->fs_devices)
				break;
			fs_devices = fs_devices->seed;
		}
		fs_devices->seed = device->fs_devices->seed;
		device->fs_devices->seed = NULL;
		__btrfs_close_devices(device->fs_devices);
		free_fs_devices(device->fs_devices);
	}

	/*
	 * at this point, the device is zero sized.  We want to
	 * remove it from the devices list and zero out the old super
	 */
	if (device->writeable) {
		/* make sure this device isn't detected as part of
		 * the FS anymore
		 */
		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
		set_buffer_dirty(bh);
		sync_dirty_buffer(bh);
	}

	kfree(device->name);

	close_bdev_exclusive(bdev, FMODE_READ);
out:
	mutex_unlock(&root->fs_info->volume_mutex);
	mutex_unlock(&uuid_mutex);
	return ret;
}

/*
 * does all the dirty work required for changing the file system's UUID.
 */
static int btrfs_prepare_sprout(struct btrfs_trans_handle *trans,
				struct btrfs_root *root)
{
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	struct btrfs_fs_devices *old_devices;
	struct btrfs_fs_devices *seed_devices;
	struct btrfs_super_block *disk_super = &root->fs_info->super_copy;
	struct btrfs_device *device;
	u64 super_flags;

	BUG_ON(!mutex_is_locked(&uuid_mutex));
	if (!fs_devices->seeding)
		return -EINVAL;

	seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);

	old_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(old_devices)) {
		kfree(seed_devices);
		return PTR_ERR(old_devices);
	}

	list_add(&old_devices->list, &fs_uuids);

	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
	seed_devices->opened = 1;
	INIT_LIST_HEAD(&seed_devices->devices);
	INIT_LIST_HEAD(&seed_devices->alloc_list);
	mutex_init(&seed_devices->device_list_mutex);
	list_splice_init(&fs_devices->devices, &seed_devices->devices);
	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
	list_for_each_entry(device, &seed_devices->devices, dev_list) {
		device->fs_devices = seed_devices;
	}

	fs_devices->seeding = 0;
	fs_devices->num_devices = 0;
	fs_devices->open_devices = 0;
	fs_devices->seed = seed_devices;

	generate_random_uuid(fs_devices->fsid);
	memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	super_flags = btrfs_super_flags(disk_super) &
		      ~BTRFS_SUPER_FLAG_SEEDING;
	btrfs_set_super_flags(disk_super, super_flags);

/*
 * store the expected generation for seed devices in device items.
 */
static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dev_item *dev_item;
	struct btrfs_device *device;
	struct btrfs_key key;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];
	u64 devid;

	path = btrfs_alloc_path();

	root = root->fs_info->chunk_root;
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = BTRFS_DEV_ITEM_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);

	leaf = path->nodes[0];
next:
	if (path->slots[0] >= btrfs_header_nritems(leaf)) {
		ret = btrfs_next_leaf(root, path);

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		btrfs_release_path(root, path);
	}

	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
	    key.type != BTRFS_DEV_ITEM_KEY)
		break;

	dev_item = btrfs_item_ptr(leaf, path->slots[0],
				  struct btrfs_dev_item);
	devid = btrfs_device_id(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid,
			   (unsigned long)btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	read_extent_buffer(leaf, fs_uuid,
			   (unsigned long)btrfs_device_fsid(dev_item),
			   BTRFS_UUID_SIZE);
	device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);

	if (device->fs_devices->seeding) {
		btrfs_set_device_generation(leaf, dev_item,
					    device->generation);
		btrfs_mark_buffer_dirty(leaf);
	}

	btrfs_free_path(path);
	return ret;
}
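
/*
 * add a new device to a mounted filesystem.  If the filesystem is a
 * seed, this is also where the sprout is set up: the seed devices are
 * split off via btrfs_prepare_sprout() and the new device becomes the
 * first writeable device of the new filesystem.
 */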
int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device;
	struct block_device *bdev;
	struct list_head *devices;
	struct super_block *sb = root->fs_info->sb;
	u64 total_bytes;
	int seeding_dev = 0;
	int ret = 0;

	if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
		return -EINVAL;

	bdev = open_bdev_exclusive(device_path, 0, root->fs_info->bdev_holder);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	if (root->fs_info->fs_devices->seeding) {
		seeding_dev = 1;
		down_write(&sb->s_umount);
		mutex_lock(&uuid_mutex);
	}

	filemap_write_and_wait(bdev->bd_inode->i_mapping);
	mutex_lock(&root->fs_info->volume_mutex);

	devices = &root->fs_info->fs_devices->devices;
	/*
	 * we have the volume lock, so we don't need the extra
	 * device list mutex while reading the list here.
	 */
	list_for_each_entry(device, devices, dev_list) {
		if (device->bdev == bdev) {
			ret = -EEXIST;
			goto error;
		}
	}

	device = kzalloc(sizeof(*device), GFP_NOFS);
	if (!device) {
		/* we can safely leave the fs_devices entry around */
		ret = -ENOMEM;
		goto error;
	}

	device->name = kstrdup(device_path, GFP_NOFS);
	if (!device->name) {
		kfree(device);
		ret = -ENOMEM;
		goto error;
	}

	ret = find_next_devid(root, &device->devid);

	trans = btrfs_start_transaction(root, 1);
	lock_chunks(root);

	device->barriers = 1;
	device->writeable = 1;
	device->work.func = pending_bios_fn;
	generate_random_uuid(device->uuid);
	spin_lock_init(&device->io_lock);
	device->generation = trans->transid;
	device->io_width = root->sectorsize;
	device->io_align = root->sectorsize;
	device->sector_size = root->sectorsize;
	device->total_bytes = i_size_read(bdev->bd_inode);
	device->disk_total_bytes = device->total_bytes;
	device->dev_root = root->fs_info->dev_root;
	device->bdev = bdev;
	device->in_fs_metadata = 1;
	set_blocksize(device->bdev, 4096);

	if (seeding_dev) {
		sb->s_flags &= ~MS_RDONLY;
		ret = btrfs_prepare_sprout(trans, root);
		BUG_ON(ret);
	}

	device->fs_devices = root->fs_info->fs_devices;

	/*
	 * we don't want write_supers to jump in here with our device
	 * half setup
	 */
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_add(&device->dev_list, &root->fs_info->fs_devices->devices);
	list_add(&device->dev_alloc_list,
		 &root->fs_info->fs_devices->alloc_list);
	root->fs_info->fs_devices->num_devices++;
	root->fs_info->fs_devices->open_devices++;
	root->fs_info->fs_devices->rw_devices++;
	root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;

	if (!blk_queue_nonrot(bdev_get_queue(bdev)))
		root->fs_info->fs_devices->rotating = 1;

	total_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
	btrfs_set_super_total_bytes(&root->fs_info->super_copy,
				    total_bytes + device->total_bytes);

	total_bytes = btrfs_super_num_devices(&root->fs_info->super_copy);
	btrfs_set_super_num_devices(&root->fs_info->super_copy,
				    total_bytes + 1);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	if (seeding_dev) {
		ret = init_first_rw_device(trans, root, device);
		ret = btrfs_finish_sprout(trans, root);
	} else {
		ret = btrfs_add_device(trans, root, device);
	}

	/*
	 * we've got more storage, clear any full flags on the space
	 * infos
	 */
	btrfs_clear_space_info_full(root->fs_info);

	unlock_chunks(root);
	btrfs_commit_transaction(trans, root);

	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);

		ret = btrfs_relocate_sys_chunks(root);
	}
out:
	mutex_unlock(&root->fs_info->volume_mutex);
	return ret;
error:
	close_bdev_exclusive(bdev, 0);
	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);
	}
	goto out;
}

static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
					struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = device->dev_root->fs_info->chunk_root;

	path = btrfs_alloc_path();

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_mark_buffer_dirty(leaf);

out:
	btrfs_free_path(path);
	return ret;
}

static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
			       struct btrfs_device *device, u64 new_size)
{
	struct btrfs_super_block *super_copy =
		&device->dev_root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 diff = new_size - device->total_bytes;

	if (!device->writeable)
		return -EACCES;
	if (new_size <= device->total_bytes)
		return -EINVAL;

	btrfs_set_super_total_bytes(super_copy, old_total + diff);
	device->fs_devices->total_rw_bytes += diff;

	device->total_bytes = new_size;
	device->disk_total_bytes = new_size;
	btrfs_clear_space_info_full(device->dev_root->fs_info);

	return btrfs_update_device(trans, device);
}

int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size)
{
	int ret;

	lock_chunks(device->dev_root);
	ret = __btrfs_grow_device(trans, device, new_size);
	unlock_chunks(device->dev_root);

	return ret;
}

static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    u64 chunk_tree, u64 chunk_objectid,
			    u64 chunk_offset)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	root = root->fs_info->chunk_root;
	path = btrfs_alloc_path();

	key.objectid = chunk_objectid;
	key.offset = chunk_offset;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);

	ret = btrfs_del_item(trans, root, path);

	btrfs_free_path(path);
	return ret;
}
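
/*
 * remove the copy of this chunk item that lives in the super block's
 * sys_chunk_array, shifting the rest of the array down over it.
 */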
static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
			       chunk_offset)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)(ptr + len);
			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
			len += btrfs_chunk_item_size(num_stripes);
		}
		if (key.objectid == chunk_objectid &&
		    key.offset == chunk_offset) {
			memmove(ptr, ptr + len, array_size - (cur + len));
			array_size -= len;
			btrfs_set_super_sys_array_size(super_copy, array_size);
		} else {
			ptr += len;
			cur += len;
		}
	}
	return ret;
}
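
/*
 * relocating a chunk is a two step operation: first all of its extents
 * are moved away by the relocation code, then the device extents, the
 * chunk item (and, for system chunks, the sys_chunk_array copy) and the
 * block group are deleted and the extent mapping is dropped.
 */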
static int btrfs_relocate_chunk(struct btrfs_root *root,
				u64 chunk_tree, u64 chunk_objectid,
				u64 chunk_offset)
{
	struct extent_map_tree *em_tree;
	struct btrfs_root *extent_root;
	struct btrfs_trans_handle *trans;
	struct extent_map *em;
	struct map_lookup *map;
	int ret;
	int i;

	root = root->fs_info->chunk_root;
	extent_root = root->fs_info->extent_root;
	em_tree = &root->fs_info->mapping_tree.map_tree;

	ret = btrfs_can_relocate(extent_root, chunk_offset);

	/* step one, relocate all the extents inside this chunk */
	ret = btrfs_relocate_block_group(extent_root, chunk_offset);

	trans = btrfs_start_transaction(root, 1);
	lock_chunks(root);

	/*
	 * step two, delete the device extents and the
	 * chunk tree entries
	 */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	read_unlock(&em_tree->lock);

	BUG_ON(em->start > chunk_offset ||
	       em->start + em->len < chunk_offset);
	map = (struct map_lookup *)em->bdev;

	for (i = 0; i < map->num_stripes; i++) {
		ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
					    map->stripes[i].physical);

		if (map->stripes[i].dev) {
			ret = btrfs_update_device(trans, map->stripes[i].dev);
		}
	}
	ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
			       chunk_offset);

	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
	}

	ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);

	write_lock(&em_tree->lock);
	remove_extent_mapping(em_tree, em);
	write_unlock(&em_tree->lock);

	/* once for the tree */
	free_extent_map(em);
	/* once for us */
	free_extent_map(em);

	unlock_chunks(root);
	btrfs_end_transaction(trans, root);
	return 0;
}
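
/*
 * walk all chunk items backwards and relocate every SYSTEM chunk.  If
 * some relocations fail with -ENOSPC, the whole pass is retried once
 * (see the retried/failed handling below).
 */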
static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
{
	struct btrfs_root *chunk_root = root->fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_chunk *chunk;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u64 chunk_tree = chunk_root->root_key.objectid;
	u64 chunk_type;
	bool retried = false;
	int failed = 0;
	int ret;

	path = btrfs_alloc_path();

again:
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);

		ret = btrfs_previous_item(chunk_root, path, key.objectid,
					  key.type);

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		chunk = btrfs_item_ptr(leaf, path->slots[0],
				       struct btrfs_chunk);
		chunk_type = btrfs_chunk_type(leaf, chunk);
		btrfs_release_path(chunk_root, path);

		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
			ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
						   found_key.objectid,
						   found_key.offset);
		}

		if (found_key.offset == 0)
			break;
		key.offset = found_key.offset - 1;
	}

	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
	} else if (failed && retried) {
		WARN_ON(1);
		ret = -ENOSPC;
	}

	btrfs_free_path(path);
	return ret;
}

static u64 div_factor(u64 num, int factor)
{
	if (factor == 10)
		return num;
	num *= factor;
	do_div(num, 10);
	return num;
}
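
/*
 * balance spreads the allocated chunks over the devices again: step one
 * shrinks and regrows each device to free a little room, step two walks
 * the chunk tree from the end and relocates every chunk.
 */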
int btrfs_balance(struct btrfs_root *dev_root)
{
	int ret;
	struct list_head *devices = &dev_root->fs_info->fs_devices->devices;
	struct btrfs_device *device;
	u64 old_size;
	u64 size_to_free;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_root *chunk_root = dev_root->fs_info->chunk_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_key found_key;

	if (dev_root->fs_info->sb->s_flags & MS_RDONLY)
		return -EROFS;

	mutex_lock(&dev_root->fs_info->volume_mutex);
	dev_root = dev_root->fs_info->dev_root;

	/* step one, make some room on all the devices */
	list_for_each_entry(device, devices, dev_list) {
		old_size = device->total_bytes;
		size_to_free = div_factor(old_size, 1);
		size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
		if (!device->writeable ||
		    device->total_bytes - device->bytes_used > size_to_free)
			continue;

		ret = btrfs_shrink_device(device, old_size - size_to_free);

		trans = btrfs_start_transaction(dev_root, 1);

		ret = btrfs_grow_device(trans, device, old_size);

		btrfs_end_transaction(trans, dev_root);
	}

	/* step two, relocate all the chunks */
	path = btrfs_alloc_path();

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);

		/*
		 * this shouldn't happen, it means the last relocate
		 * failed
		 */
		if (ret == 0)
			break;

		ret = btrfs_previous_item(chunk_root, path, 0,
					  BTRFS_CHUNK_ITEM_KEY);
		if (ret)
			break;

		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != key.objectid)
			break;

		chunk = btrfs_item_ptr(path->nodes[0],
				       path->slots[0],
				       struct btrfs_chunk);
		/* chunk zero is special */
		if (found_key.offset == 0)
			break;

		btrfs_release_path(chunk_root, path);
		ret = btrfs_relocate_chunk(chunk_root,
					   chunk_root->root_key.objectid,
					   found_key.objectid,
					   found_key.offset);
		BUG_ON(ret && ret != -ENOSPC);
		key.offset = found_key.offset - 1;
	}

	btrfs_free_path(path);
	mutex_unlock(&dev_root->fs_info->volume_mutex);
	return ret;
}

/*
 * shrinking a device means finding all of the device extents past
 * the new size, and then following the back refs to the chunks.
 * The chunk relocation code actually frees the device extent
 */
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	u64 length;
	u64 chunk_tree;
	u64 chunk_objectid;
	u64 chunk_offset;
	int ret;
	int slot;
	int failed = 0;
	bool retried = false;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 old_size = device->total_bytes;
	u64 diff = device->total_bytes - new_size;

	if (new_size >= device->total_bytes)
		return -EINVAL;

	path = btrfs_alloc_path();

	lock_chunks(root);

	device->total_bytes = new_size;
	if (device->writeable)
		device->fs_devices->total_rw_bytes -= diff;
	unlock_chunks(root);

again:
	key.objectid = device->devid;
	key.offset = (u64)-1;
	key.type = BTRFS_DEV_EXTENT_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);

		ret = btrfs_previous_item(root, path, 0, key.type);
		if (ret) {
			btrfs_release_path(root, path);
			break;
		}

		l = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(l, &key, path->slots[0]);

		if (key.objectid != device->devid) {
			btrfs_release_path(root, path);
			break;
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (key.offset + length <= new_size) {
			btrfs_release_path(root, path);
			break;
		}

		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
		btrfs_release_path(root, path);

		ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
					   chunk_offset);
		if (ret && ret != -ENOSPC)
			goto done;
		if (ret == -ENOSPC)
			failed++;
		key.offset -= 1;
	}

	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
	} else if (failed && retried) {
		ret = -ENOSPC;
		lock_chunks(root);

		device->total_bytes = old_size;
		if (device->writeable)
			device->fs_devices->total_rw_bytes += diff;
		unlock_chunks(root);
		goto done;
	}

	/* Shrinking succeeded, else we would be at "done". */
	trans = btrfs_start_transaction(root, 1);

	lock_chunks(root);

	device->disk_total_bytes = new_size;
	/* Now btrfs_update_device() will change the on-disk size. */
	ret = btrfs_update_device(trans, device);
	if (ret) {
		unlock_chunks(root);
		btrfs_end_transaction(trans, root);
		goto done;
	}
	WARN_ON(diff > old_total);
	btrfs_set_super_total_bytes(super_copy, old_total - diff);
	unlock_chunks(root);
	btrfs_end_transaction(trans, root);
done:
	btrfs_free_path(path);
	return ret;
}

static int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_key *key,
				  struct btrfs_chunk *chunk, int item_size)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct btrfs_disk_key disk_key;
	u32 array_size;
	u8 *ptr;

	array_size = btrfs_super_sys_array_size(super_copy);
	if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
		return -EFBIG;

	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
	return 0;
}
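
/*
 * chunk_bytes_by_type maps the per-stripe allocation (calc_size) to the
 * logical size of the chunk: RAID1 and DUP mirror the same calc_size on
 * every stripe, RAID10 gets calc_size per sub-stripe group, and
 * everything else (single/RAID0) is striped with no redundancy.  For
 * example, a RAID10 chunk with num_stripes = 4, sub_stripes = 2 and a
 * 1GB calc_size covers (4 / 2) * 1GB = 2GB of logical space.
 */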
static noinline u64 chunk_bytes_by_type(u64 type, u64 calc_size,
					int num_stripes, int sub_stripes)
{
	if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
		return calc_size;
	else if (type & BTRFS_BLOCK_GROUP_RAID10)
		return calc_size * (num_stripes / sub_stripes);
	else
		return calc_size * num_stripes;
}
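
/*
 * pick the stripe count and size for a new chunk from the requested
 * block group type and the writeable devices, grab a free dev extent on
 * each chosen device, and publish the resulting map_lookup in the
 * mapping tree.  This is the first, chunk-tree-free half of chunk
 * allocation (see the comment above btrfs_alloc_chunk below).
 */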
static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
			       struct btrfs_root *extent_root,
			       struct map_lookup **map_ret,
			       u64 *num_bytes, u64 *stripe_size,
			       u64 start, u64 type)
{
	struct btrfs_fs_info *info = extent_root->fs_info;
	struct btrfs_device *device = NULL;
	struct btrfs_fs_devices *fs_devices = info->fs_devices;
	struct list_head *cur;
	struct map_lookup *map = NULL;
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct list_head private_devs;
	int min_stripe_size = 1 * 1024 * 1024;
	u64 calc_size = 1024 * 1024 * 1024;
	u64 max_chunk_size = calc_size;
	u64 min_free;
	u64 avail;
	u64 max_avail = 0;
	u64 dev_offset;
	int num_stripes = 1;
	int min_stripes = 1;
	int sub_stripes = 0;
	int looped = 0;
	int ret;
	int index;
	int stripe_len = 64 * 1024;

	if ((type & BTRFS_BLOCK_GROUP_RAID1) &&
	    (type & BTRFS_BLOCK_GROUP_DUP)) {
		WARN_ON(1);
		type &= ~BTRFS_BLOCK_GROUP_DUP;
	}
	if (list_empty(&fs_devices->alloc_list))
		return -ENOSPC;

	if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
		num_stripes = fs_devices->rw_devices;
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_DUP)) {
		num_stripes = 2;
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
		num_stripes = min_t(u64, 2, fs_devices->rw_devices);
		if (num_stripes < 2)
			return -ENOSPC;
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
		num_stripes = fs_devices->rw_devices;
		if (num_stripes < 4)
			return -ENOSPC;
		num_stripes &= ~(u32)1;
		sub_stripes = 2;
		min_stripes = 4;
	}

	if (type & BTRFS_BLOCK_GROUP_DATA) {
		max_chunk_size = 10 * calc_size;
		min_stripe_size = 64 * 1024 * 1024;
	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
		max_chunk_size = 256 * 1024 * 1024;
		min_stripe_size = 32 * 1024 * 1024;
	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
		calc_size = 8 * 1024 * 1024;
		max_chunk_size = calc_size * 2;
		min_stripe_size = 1 * 1024 * 1024;
	}

	/* we don't want a chunk larger than 10% of writeable space */
	max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
			     max_chunk_size);

again:
	if (!map || map->num_stripes != num_stripes) {
		kfree(map);
		map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
		if (!map)
			return -ENOMEM;
		map->num_stripes = num_stripes;
	}

	if (calc_size * num_stripes > max_chunk_size) {
		calc_size = max_chunk_size;
		do_div(calc_size, num_stripes);
		do_div(calc_size, stripe_len);
		calc_size *= stripe_len;
	}
	/* we don't want tiny stripes */
	calc_size = max_t(u64, min_stripe_size, calc_size);

	do_div(calc_size, stripe_len);
	calc_size *= stripe_len;

	cur = fs_devices->alloc_list.next;
	index = 0;

	if (type & BTRFS_BLOCK_GROUP_DUP)
		min_free = calc_size * 2;
	else
		min_free = calc_size;

	/*
	 * we add 1MB because we never use the first 1MB of the device, unless
	 * we've looped, then we are likely allocating the maximum amount of
	 * space left already
	 */
	if (!looped)
		min_free += 1024 * 1024;

	INIT_LIST_HEAD(&private_devs);
	while (index < num_stripes) {
		device = list_entry(cur, struct btrfs_device, dev_alloc_list);
		BUG_ON(!device->writeable);
		if (device->total_bytes > device->bytes_used)
			avail = device->total_bytes - device->bytes_used;
		else
			avail = 0;
		cur = cur->next;

		if (device->in_fs_metadata && avail >= min_free) {
			ret = find_free_dev_extent(trans, device,
						   min_free, &dev_offset,
						   &max_avail);
			if (ret == 0) {
				list_move_tail(&device->dev_alloc_list,
					       &private_devs);
				map->stripes[index].dev = device;
				map->stripes[index].physical = dev_offset;
				index++;
				if (type & BTRFS_BLOCK_GROUP_DUP) {
					map->stripes[index].dev = device;
					map->stripes[index].physical =
						dev_offset + calc_size;
					index++;
				}
			}
		} else if (device->in_fs_metadata && avail > max_avail)
			max_avail = avail;
		if (cur == &fs_devices->alloc_list)
			break;
	}
	list_splice(&private_devs, &fs_devices->alloc_list);
	if (index < num_stripes) {
		if (index >= min_stripes) {
			num_stripes = index;
			if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
				num_stripes /= sub_stripes;
				num_stripes *= sub_stripes;
			}
			looped = 1;
			goto again;
		}
		if (!looped && max_avail > 0) {
			looped = 1;
			calc_size = max_avail;
			goto again;
		}
		return -ENOSPC;
	}
	map->sector_size = extent_root->sectorsize;
	map->stripe_len = stripe_len;
	map->io_align = stripe_len;
	map->io_width = stripe_len;
	map->type = type;
	map->num_stripes = num_stripes;
	map->sub_stripes = sub_stripes;

	*map_ret = map;
	*stripe_size = calc_size;
	*num_bytes = chunk_bytes_by_type(type, calc_size,
					 num_stripes, sub_stripes);

	em = alloc_extent_map(GFP_NOFS);

	em->bdev = (struct block_device *)map;
	em->start = start;
	em->len = *num_bytes;
	em->block_start = 0;
	em->block_len = em->len;

	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em);
	write_unlock(&em_tree->lock);
	BUG_ON(ret);
	free_extent_map(em);

	ret = btrfs_make_block_group(trans, extent_root, 0, type,
				     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
				     start, *num_bytes);

	index = 0;
	while (index < map->num_stripes) {
		device = map->stripes[index].dev;
		dev_offset = map->stripes[index].physical;

		ret = btrfs_alloc_dev_extent(trans, device,
				info->chunk_root->root_key.objectid,
				BTRFS_FIRST_CHUNK_TREE_OBJECTID,
				start, dev_offset, calc_size);
		index++;
	}

	return 0;
}
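
/*
 * the second half of chunk allocation: update the device items with the
 * new bytes_used, build the on-disk chunk item from the map_lookup and
 * insert it into the chunk tree (and the sys_chunk_array for SYSTEM
 * chunks).
 */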
static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
				struct btrfs_root *extent_root,
				struct map_lookup *map, u64 chunk_offset,
				u64 chunk_size, u64 stripe_size)
{
	u64 dev_offset;
	struct btrfs_key key;
	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
	struct btrfs_device *device;
	struct btrfs_chunk *chunk;
	struct btrfs_stripe *stripe;
	size_t item_size = btrfs_chunk_item_size(map->num_stripes);
	int index = 0;
	int ret;

	chunk = kzalloc(item_size, GFP_NOFS);
	if (!chunk)
		return -ENOMEM;

	index = 0;
	while (index < map->num_stripes) {
		device = map->stripes[index].dev;
		device->bytes_used += stripe_size;
		ret = btrfs_update_device(trans, device);
		index++;
	}

	index = 0;
	stripe = &chunk->stripe;
	while (index < map->num_stripes) {
		device = map->stripes[index].dev;
		dev_offset = map->stripes[index].physical;

		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
		stripe++;
		index++;
	}

	btrfs_set_stack_chunk_length(chunk, chunk_size);
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
	btrfs_set_stack_chunk_type(chunk, map->type);
	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	key.offset = chunk_offset;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);

	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_add_system_chunk(trans, chunk_root, &key, chunk,
					     item_size);
	}
	kfree(chunk);
	return 0;
}

/*
 * Chunk allocation falls into two parts.  The first part does the work
 * that makes the newly allocated chunk usable, but does not do any
 * operation that modifies the chunk tree.  The second part does the work
 * that requires modifying the chunk tree.  This division is important for
 * the bootstrap process of adding storage to a seed btrfs.
 */
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
		      struct btrfs_root *extent_root, u64 type)
{
	u64 chunk_offset;
	u64 chunk_size;
	u64 stripe_size;
	struct map_lookup *map;
	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
	int ret;

	ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
			      &chunk_offset);

	ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
				  &stripe_size, chunk_offset, type);

	ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
				   chunk_size, stripe_size);
	BUG_ON(ret);
	return 0;
}

static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root,
					 struct btrfs_device *device)
{
	u64 chunk_offset;
	u64 sys_chunk_offset;
	u64 chunk_size;
	u64 sys_chunk_size;
	u64 stripe_size;
	u64 sys_stripe_size;
	u64 alloc_profile;
	struct map_lookup *map;
	struct map_lookup *sys_map;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *extent_root = fs_info->extent_root;
	int ret;

	ret = find_next_chunk(fs_info->chunk_root,
			      BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset);

	alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
			(fs_info->metadata_alloc_profile &
			 fs_info->avail_metadata_alloc_bits);
	alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);

	ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
				  &stripe_size, chunk_offset, alloc_profile);

	sys_chunk_offset = chunk_offset + chunk_size;

	alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM |
			(fs_info->system_alloc_profile &
			 fs_info->avail_system_alloc_bits);
	alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);

	ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
				  &sys_chunk_size, &sys_stripe_size,
				  sys_chunk_offset, alloc_profile);

	ret = btrfs_add_device(trans, fs_info->chunk_root, device);

	/*
	 * Modifying the chunk tree requires allocating new blocks from both
	 * the system block group and the metadata block group, so we can
	 * only do operations that modify the chunk tree after both block
	 * groups have been created.
	 */
	ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
				   chunk_size, stripe_size);

	ret = __finish_chunk_alloc(trans, extent_root, sys_map,
				   sys_chunk_offset, sys_chunk_size,
				   sys_stripe_size);
	return 0;
}
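
/*
 * a chunk is considered read-only when any of the devices backing its
 * stripes is missing or not writeable (unless we are mounted degraded).
 */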
int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	int readonly = 0;
	int i;

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
	read_unlock(&map_tree->map_tree.lock);
	if (!em)
		return 1;

	if (btrfs_test_opt(root, DEGRADED)) {
		free_extent_map(em);
		return 0;
	}

	map = (struct map_lookup *)em->bdev;
	for (i = 0; i < map->num_stripes; i++) {
		if (!map->stripes[i].dev->writeable) {
			readonly = 1;
			break;
		}
	}
	free_extent_map(em);
	return readonly;
}
2564 void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
2566 extent_map_tree_init(&tree->map_tree, GFP_NOFS);
2569 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
2571 struct extent_map *em;
2574 write_lock(&tree->map_tree.lock);
2575 em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
2577 remove_extent_mapping(&tree->map_tree, em);
2578 write_unlock(&tree->map_tree.lock);
2579 if (!em)
2580 break;
2581 kfree(em->bdev);
2582 /* once for us */
2583 free_extent_map(em);
2584 /* once for the tree */
2585 free_extent_map(em);
2589 int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
2591 struct extent_map *em;
2592 struct map_lookup *map;
2593 struct extent_map_tree *em_tree = &map_tree->map_tree;
2596 read_lock(&em_tree->lock);
2597 em = lookup_extent_mapping(em_tree, logical, len);
2598 read_unlock(&em_tree->lock);
2601 BUG_ON(em->start > logical || em->start + em->len < logical);
2602 map = (struct map_lookup *)em->bdev;
2603 if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
2604 ret = map->num_stripes;
2605 else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
2606 ret = map->sub_stripes;
2607 else
2608 ret = 1;
2609 free_extent_map(em);
2613 static int find_live_mirror(struct map_lookup *map, int first, int num,
2614 int optimal)
2615 {
2616 int i;
2617 if (map->stripes[optimal].dev->bdev)
2618 return optimal;
2619 for (i = first; i < first + num; i++) {
2620 if (map->stripes[i].dev->bdev)
2621 return i;
2622 }
2623 /* we couldn't find one that doesn't fail. Just return something
2624 * and the io error handling code will clean up eventually
2625 */
2626 return optimal;
2627 }
2629 static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
2630 u64 logical, u64 *length,
2631 struct btrfs_multi_bio **multi_ret,
2632 int mirror_num, struct page *unplug_page)
2634 struct extent_map *em;
2635 struct map_lookup *map;
2636 struct extent_map_tree *em_tree = &map_tree->map_tree;
2640 int stripes_allocated = 8;
2641 int stripes_required = 1;
2646 struct btrfs_multi_bio *multi = NULL;
2648 if (multi_ret && !(rw & (1 << BIO_RW)))
2649 stripes_allocated = 1;
2652 multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
2657 atomic_set(&multi->error, 0);
2660 read_lock(&em_tree->lock);
2661 em = lookup_extent_mapping(em_tree, logical, *length);
2662 read_unlock(&em_tree->lock);
2664 if (!em && unplug_page) {
2670 printk(KERN_CRIT "unable to find logical %llu len %llu\n",
2671 (unsigned long long)logical,
2672 (unsigned long long)*length);
2676 BUG_ON(em->start > logical || em->start + em->len < logical);
2677 map = (struct map_lookup *)em->bdev;
2678 offset = logical - em->start;
2680 if (mirror_num > map->num_stripes)
2681 mirror_num = 0;
2683 /* if our multi bio struct is too small, back off and try again */
2684 if (rw & (1 << BIO_RW)) {
2685 if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
2686 BTRFS_BLOCK_GROUP_DUP)) {
2687 stripes_required = map->num_stripes;
2689 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
2690 stripes_required = map->sub_stripes;
2694 if (multi_ret && (rw & (1 << BIO_RW)) &&
2695 stripes_allocated < stripes_required) {
2696 stripes_allocated = map->num_stripes;
2697 free_extent_map(em);
2703 * stripe_nr counts the total number of stripes we have to stride
2704 * to get to this block
2706 do_div(stripe_nr, map->stripe_len);
2708 stripe_offset = stripe_nr * map->stripe_len;
2709 BUG_ON(offset < stripe_offset);
2711 /* stripe_offset is the offset of this block in its stripe */
2712 stripe_offset = offset - stripe_offset;
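/*
 * Worked example: with stripe_len = 64K and offset = 200K, stripe_nr is
 * 200K / 64K = 3 and stripe_offset is 200K - 3 * 64K = 8K, i.e. the
 * block lives 8K into the fourth stripe.
 */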
2714 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
2715 BTRFS_BLOCK_GROUP_RAID10 |
2716 BTRFS_BLOCK_GROUP_DUP)) {
2717 /* we limit the length of each bio to what fits in a stripe */
2718 *length = min_t(u64, em->len - offset,
2719 map->stripe_len - stripe_offset);
2721 *length = em->len - offset;
2724 if (!multi_ret && !unplug_page)
2725 goto out;
2727 num_stripes = 1;
2728 stripe_index = 0;
2729 if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
2730 if (unplug_page || (rw & (1 << BIO_RW)))
2731 num_stripes = map->num_stripes;
2732 else if (mirror_num)
2733 stripe_index = mirror_num - 1;
2735 stripe_index = find_live_mirror(map, 0,
2737 current->pid % map->num_stripes);
2740 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
2741 if (rw & (1 << BIO_RW))
2742 num_stripes = map->num_stripes;
2743 else if (mirror_num)
2744 stripe_index = mirror_num - 1;
2746 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
2747 int factor = map->num_stripes / map->sub_stripes;
2749 stripe_index = do_div(stripe_nr, factor);
2750 stripe_index *= map->sub_stripes;
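/*
 * do_div() leaves the quotient in its first argument and returns the
 * remainder. Worked example: num_stripes = 4 and sub_stripes = 2 give
 * factor = 2; stripe_nr = 5 yields stripe_index = (5 % 2) * 2 = 2 and
 * stripe_nr = 5 / 2 = 2, so the block sits on the second mirror pair,
 * two full stripes down each device.
 */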
2752 if (unplug_page || (rw & (1 << BIO_RW)))
2753 num_stripes = map->sub_stripes;
2754 else if (mirror_num)
2755 stripe_index += mirror_num - 1;
2757 stripe_index = find_live_mirror(map, stripe_index,
2758 map->sub_stripes, stripe_index +
2759 current->pid % map->sub_stripes);
2763 * after this do_div call, stripe_nr is the number of stripes
2764 * on this device we have to walk to find the data, and
2765 * stripe_index is the number of our device in the stripe array
2767 stripe_index = do_div(stripe_nr, map->num_stripes);
2769 BUG_ON(stripe_index >= map->num_stripes);
2771 for (i = 0; i < num_stripes; i++) {
2773 struct btrfs_device *device;
2774 struct backing_dev_info *bdi;
2776 device = map->stripes[stripe_index].dev;
2778 bdi = blk_get_backing_dev_info(device->bdev);
2779 if (bdi->unplug_io_fn)
2780 bdi->unplug_io_fn(bdi, unplug_page);
2783 multi->stripes[i].physical =
2784 map->stripes[stripe_index].physical +
2785 stripe_offset + stripe_nr * map->stripe_len;
2786 multi->stripes[i].dev = map->stripes[stripe_index].dev;
2792 multi->num_stripes = num_stripes;
2793 multi->max_errors = max_errors;
2795 out:
2796 free_extent_map(em);
2797 return 0;
2798 }
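/*
 * A minimal userspace sketch of the mapping arithmetic above, covering
 * only the RAID0 fall-through case. It is an illustration, not part of
 * the kernel code, and STRIPE_MATH_DEMO is a hypothetical guard macro so
 * the file compiles unchanged.
 */
#ifdef STRIPE_MATH_DEMO
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t stripe_len = 65536;	/* 64K stripes */
	uint64_t num_stripes = 4;	/* RAID0 over four devices */
	uint64_t offset = 200 * 1024;	/* offset of the block in the chunk */

	uint64_t stripe_nr = offset / stripe_len;			/* 3 */
	uint64_t stripe_offset = offset - stripe_nr * stripe_len;	/* 8K */
	uint64_t stripe_index = stripe_nr % num_stripes;	/* device 3 */

	stripe_nr /= num_stripes;	/* full stripes to walk per device */
	printf("device %llu, stripe %llu, offset in stripe %llu\n",
	       (unsigned long long)stripe_index,
	       (unsigned long long)stripe_nr,
	       (unsigned long long)stripe_offset);
	return 0;
}
#endif /* STRIPE_MATH_DEMO */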
2800 int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
2801 u64 logical, u64 *length,
2802 struct btrfs_multi_bio **multi_ret, int mirror_num)
2804 return __btrfs_map_block(map_tree, rw, logical, length, multi_ret,
2808 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
2809 u64 chunk_start, u64 physical, u64 devid,
2810 u64 **logical, int *naddrs, int *stripe_len)
2812 struct extent_map_tree *em_tree = &map_tree->map_tree;
2813 struct extent_map *em;
2814 struct map_lookup *map;
2821 read_lock(&em_tree->lock);
2822 em = lookup_extent_mapping(em_tree, chunk_start, 1);
2823 read_unlock(&em_tree->lock);
2825 BUG_ON(!em || em->start != chunk_start);
2826 map = (struct map_lookup *)em->bdev;
2829 if (map->type & BTRFS_BLOCK_GROUP_RAID10)
2830 do_div(length, map->num_stripes / map->sub_stripes);
2831 else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
2832 do_div(length, map->num_stripes);
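/*
 * length has now been scaled from the chunk size down to one device
 * extent: RAID10 spreads the data over num_stripes / sub_stripes stripes
 * and RAID0 over num_stripes, while other profiles keep the full length
 * on every device.
 */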
2834 buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
2837 for (i = 0; i < map->num_stripes; i++) {
2838 if (devid && map->stripes[i].dev->devid != devid)
2840 if (map->stripes[i].physical > physical ||
2841 map->stripes[i].physical + length <= physical)
2844 stripe_nr = physical - map->stripes[i].physical;
2845 do_div(stripe_nr, map->stripe_len);
2847 if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
2848 stripe_nr = stripe_nr * map->num_stripes + i;
2849 do_div(stripe_nr, map->sub_stripes);
2850 } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
2851 stripe_nr = stripe_nr * map->num_stripes + i;
2853 bytenr = chunk_start + stripe_nr * map->stripe_len;
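/*
 * Worked RAID0 example: stripe_len = 64K, num_stripes = 2, and a hit on
 * stripe i = 1 at 128K into the device extent gives stripe_nr = 2,
 * rescaled to 2 * 2 + 1 = 5, so bytenr = chunk_start + 5 * 64K, which
 * maps forward to the same physical spot.
 */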
2854 WARN_ON(nr >= map->num_stripes);
2855 for (j = 0; j < nr; j++) {
2856 if (buf[j] == bytenr)
2860 WARN_ON(nr >= map->num_stripes);
2867 *stripe_len = map->stripe_len;
2869 free_extent_map(em);
2873 int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree,
2874 u64 logical, struct page *page)
2876 u64 length = PAGE_CACHE_SIZE;
2877 return __btrfs_map_block(map_tree, READ, logical, &length,
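/*
 * Completion handling for multi-stripe IO: every cloned bio lands here,
 * failures are tallied in multi->error, and only the last completion
 * (stripes_pending reaching zero) restores the original bio's private
 * data and end_io before finishing it. An error is passed upward only
 * once the tally exceeds multi->max_errors, so e.g. one failed copy of
 * a RAID1 write does not fail the write.
 */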
2881 static void end_bio_multi_stripe(struct bio *bio, int err)
2883 struct btrfs_multi_bio *multi = bio->bi_private;
2884 int is_orig_bio = 0;
2886 if (err)
2887 atomic_inc(&multi->error);
2889 if (bio == multi->orig_bio)
2890 is_orig_bio = 1;
2892 if (atomic_dec_and_test(&multi->stripes_pending)) {
2893 if (!is_orig_bio) {
2894 bio_put(bio);
2895 bio = multi->orig_bio;
2896 }
2897 bio->bi_private = multi->private;
2898 bio->bi_end_io = multi->end_io;
2899 /* only send an error to the higher layers if it is
2900 * beyond the tolerance of the multi-bio
2902 if (atomic_read(&multi->error) > multi->max_errors) {
2903 err = -EIO;
2904 } else {
2906 * this bio is actually up to date, we didn't
2907 * go over the max number of errors
2909 set_bit(BIO_UPTODATE, &bio->bi_flags);
2910 err = 0;
2911 }
2913 kfree(multi);
2914 bio_endio(bio, err);
2915 } else if (!is_orig_bio) {
2916 bio_put(bio);
2917 }
2918 }
2920 struct async_sched {
2923 struct btrfs_fs_info *info;
2924 struct btrfs_work work;
2928 * see run_scheduled_bios for a description of why bios are collected for
2929 * benefit of batching
2931 * This will add one bio to the pending list for a device and make sure
2932 * the work struct is scheduled.
2934 static noinline int schedule_bio(struct btrfs_root *root,
2935 struct btrfs_device *device,
2936 int rw, struct bio *bio)
2938 int should_queue = 1;
2939 struct btrfs_pending_bios *pending_bios;
2941 /* don't bother with additional async steps for reads, right now */
2942 if (!(rw & (1 << BIO_RW))) {
2943 bio_get(bio);
2944 submit_bio(rw, bio);
2945 bio_put(bio);
2946 return 0;
2947 }
2950 * nr_async_bios allows us to reliably return congestion to the
2951 * higher layers. Otherwise, the async bio makes it appear we have
2952 * made progress against dirty pages when we've really just put it
2953 * on a queue for later
2955 atomic_inc(&root->fs_info->nr_async_bios);
2956 WARN_ON(bio->bi_next);
2957 bio->bi_next = NULL;
2960 spin_lock(&device->io_lock);
2961 if (bio_rw_flagged(bio, BIO_RW_SYNCIO))
2962 pending_bios = &device->pending_sync_bios;
2964 pending_bios = &device->pending_bios;
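/*
 * Sync bios get a separate pending list so WRITE_SYNC requests are not
 * queued behind a long backlog of async writeback; the worker drains
 * both lists.
 */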
2966 if (pending_bios->tail)
2967 pending_bios->tail->bi_next = bio;
2969 pending_bios->tail = bio;
2970 if (!pending_bios->head)
2971 pending_bios->head = bio;
2972 if (device->running_pending)
2973 should_queue = 0;
2975 spin_unlock(&device->io_lock);
2977 if (should_queue)
2978 btrfs_queue_worker(&root->fs_info->submit_workers,
2979 &device->work);
2980 return 0;
2981 }
2983 int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
2984 int mirror_num, int async_submit)
2986 struct btrfs_mapping_tree *map_tree;
2987 struct btrfs_device *dev;
2988 struct bio *first_bio = bio;
2989 u64 logical = (u64)bio->bi_sector << 9;
2992 struct btrfs_multi_bio *multi = NULL;
2997 length = bio->bi_size;
2998 map_tree = &root->fs_info->mapping_tree;
2999 map_length = length;
3001 ret = btrfs_map_block(map_tree, rw, logical, &map_length, &multi,
3005 total_devs = multi->num_stripes;
3006 if (map_length < length) {
3007 printk(KERN_CRIT "mapping failed logical %llu bio len %llu "
3008 "len %llu\n", (unsigned long long)logical,
3009 (unsigned long long)length,
3010 (unsigned long long)map_length);
3013 multi->end_io = first_bio->bi_end_io;
3014 multi->private = first_bio->bi_private;
3015 multi->orig_bio = first_bio;
3016 atomic_set(&multi->stripes_pending, multi->num_stripes);
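/*
 * Submit one bio per stripe: every stripe before the last gets a clone
 * of the original bio, and the last stripe reuses first_bio itself, so
 * the original bio is always submitted exactly once.
 */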
3018 while (dev_nr < total_devs) {
3019 if (total_devs > 1) {
3020 if (dev_nr < total_devs - 1) {
3021 bio = bio_clone(first_bio, GFP_NOFS);
3026 bio->bi_private = multi;
3027 bio->bi_end_io = end_bio_multi_stripe;
3029 bio->bi_sector = multi->stripes[dev_nr].physical >> 9;
3030 dev = multi->stripes[dev_nr].dev;
3031 BUG_ON(rw == WRITE && !dev->writeable);
3032 if (dev && dev->bdev) {
3033 bio->bi_bdev = dev->bdev;
3034 if (async_submit)
3035 schedule_bio(root, dev, rw, bio);
3036 else
3037 submit_bio(rw, bio);
3038 } else {
3039 bio->bi_bdev = root->fs_info->fs_devices->latest_bdev;
3040 bio->bi_sector = logical >> 9;
3041 bio_endio(bio, -EIO);
3045 if (total_devs == 1)
3046 kfree(multi);
3047 return 0;
3048 }
3050 struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
3053 struct btrfs_device *device;
3054 struct btrfs_fs_devices *cur_devices;
3056 cur_devices = root->fs_info->fs_devices;
3057 while (cur_devices) {
3058 if (!fsid ||
3059 !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
3060 device = __find_device(&cur_devices->devices,
3065 cur_devices = cur_devices->seed;
3070 static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
3071 u64 devid, u8 *dev_uuid)
3073 struct btrfs_device *device;
3074 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
3076 device = kzalloc(sizeof(*device), GFP_NOFS);
3079 list_add(&device->dev_list,
3080 &fs_devices->devices);
3081 device->barriers = 1;
3082 device->dev_root = root->fs_info->dev_root;
3083 device->devid = devid;
3084 device->work.func = pending_bios_fn;
3085 device->fs_devices = fs_devices;
3086 fs_devices->num_devices++;
3087 spin_lock_init(&device->io_lock);
3088 INIT_LIST_HEAD(&device->dev_alloc_list);
3089 memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
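/*
 * The device created here is a placeholder with no bdev: it lets a
 * degraded mount assemble its chunk maps, while find_live_mirror() above
 * steers reads away from stripes whose device has no bdev.
 */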
3093 static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
3094 struct extent_buffer *leaf,
3095 struct btrfs_chunk *chunk)
3097 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
3098 struct map_lookup *map;
3099 struct extent_map *em;
3103 u8 uuid[BTRFS_UUID_SIZE];
3108 logical = key->offset;
3109 length = btrfs_chunk_length(leaf, chunk);
3111 read_lock(&map_tree->map_tree.lock);
3112 em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
3113 read_unlock(&map_tree->map_tree.lock);
3115 /* already mapped? */
3116 if (em && em->start <= logical && em->start + em->len > logical) {
3117 free_extent_map(em);
3120 free_extent_map(em);
3123 em = alloc_extent_map(GFP_NOFS);
3126 num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3127 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
3129 free_extent_map(em);
3133 em->bdev = (struct block_device *)map;
3134 em->start = logical;
3135 em->len = length;
3136 em->block_start = 0;
3137 em->block_len = em->len;
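/*
 * The extent map now spans [logical, logical + length) in the logical
 * address space, and em->bdev is overloaded to smuggle the map_lookup
 * pointer, which __btrfs_map_block() and friends cast back out.
 */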
3139 map->num_stripes = num_stripes;
3140 map->io_width = btrfs_chunk_io_width(leaf, chunk);
3141 map->io_align = btrfs_chunk_io_align(leaf, chunk);
3142 map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
3143 map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
3144 map->type = btrfs_chunk_type(leaf, chunk);
3145 map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
3146 for (i = 0; i < num_stripes; i++) {
3147 map->stripes[i].physical =
3148 btrfs_stripe_offset_nr(leaf, chunk, i);
3149 devid = btrfs_stripe_devid_nr(leaf, chunk, i);
3150 read_extent_buffer(leaf, uuid, (unsigned long)
3151 btrfs_stripe_dev_uuid_nr(chunk, i),
3153 map->stripes[i].dev = btrfs_find_device(root, devid, uuid,
3155 if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
3157 free_extent_map(em);
3160 if (!map->stripes[i].dev) {
3161 map->stripes[i].dev =
3162 add_missing_dev(root, devid, uuid);
3163 if (!map->stripes[i].dev) {
3165 free_extent_map(em);
3169 map->stripes[i].dev->in_fs_metadata = 1;
3172 write_lock(&map_tree->map_tree.lock);
3173 ret = add_extent_mapping(&map_tree->map_tree, em);
3174 write_unlock(&map_tree->map_tree.lock);
3176 free_extent_map(em);
3181 static int fill_device_from_item(struct extent_buffer *leaf,
3182 struct btrfs_dev_item *dev_item,
3183 struct btrfs_device *device)
3187 device->devid = btrfs_device_id(leaf, dev_item);
3188 device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
3189 device->total_bytes = device->disk_total_bytes;
3190 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
3191 device->type = btrfs_device_type(leaf, dev_item);
3192 device->io_align = btrfs_device_io_align(leaf, dev_item);
3193 device->io_width = btrfs_device_io_width(leaf, dev_item);
3194 device->sector_size = btrfs_device_sector_size(leaf, dev_item);
3196 ptr = (unsigned long)btrfs_device_uuid(dev_item);
3197 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
3202 static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
3204 struct btrfs_fs_devices *fs_devices;
3207 mutex_lock(&uuid_mutex);
3209 fs_devices = root->fs_info->fs_devices->seed;
3210 while (fs_devices) {
3211 if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
3215 fs_devices = fs_devices->seed;
3218 fs_devices = find_fsid(fsid);
3224 fs_devices = clone_fs_devices(fs_devices);
3225 if (IS_ERR(fs_devices)) {
3226 ret = PTR_ERR(fs_devices);
3230 ret = __btrfs_open_devices(fs_devices, FMODE_READ,
3231 root->fs_info->bdev_holder);
3235 if (!fs_devices->seeding) {
3236 __btrfs_close_devices(fs_devices);
3237 free_fs_devices(fs_devices);
3242 fs_devices->seed = root->fs_info->fs_devices->seed;
3243 root->fs_info->fs_devices->seed = fs_devices;
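/*
 * Seed device sets are chained off the writable fs_devices through
 * ->seed; this is the chain btrfs_find_device() walks when it follows
 * cur_devices->seed above.
 */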
3245 mutex_unlock(&uuid_mutex);
3249 static int read_one_dev(struct btrfs_root *root,
3250 struct extent_buffer *leaf,
3251 struct btrfs_dev_item *dev_item)
3253 struct btrfs_device *device;
3256 u8 fs_uuid[BTRFS_UUID_SIZE];
3257 u8 dev_uuid[BTRFS_UUID_SIZE];
3259 devid = btrfs_device_id(leaf, dev_item);
3260 read_extent_buffer(leaf, dev_uuid,
3261 (unsigned long)btrfs_device_uuid(dev_item),
3263 read_extent_buffer(leaf, fs_uuid,
3264 (unsigned long)btrfs_device_fsid(dev_item),
3267 if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
3268 ret = open_seed_devices(root, fs_uuid);
3269 if (ret && !btrfs_test_opt(root, DEGRADED))
3273 device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
3274 if (!device || !device->bdev) {
3275 if (!btrfs_test_opt(root, DEGRADED))
3279 printk(KERN_WARNING "warning devid %llu missing\n",
3280 (unsigned long long)devid);
3281 device = add_missing_dev(root, devid, dev_uuid);
3287 if (device->fs_devices != root->fs_info->fs_devices) {
3288 BUG_ON(device->writeable);
3289 if (device->generation !=
3290 btrfs_device_generation(leaf, dev_item))
3294 fill_device_from_item(leaf, dev_item, device);
3295 device->dev_root = root->fs_info->dev_root;
3296 device->in_fs_metadata = 1;
3297 if (device->writeable)
3298 device->fs_devices->total_rw_bytes += device->total_bytes;
3303 int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf)
3305 struct btrfs_dev_item *dev_item;
3307 dev_item = (struct btrfs_dev_item *)offsetof(struct btrfs_super_block,
3309 return read_one_dev(root, buf, dev_item);
3312 int btrfs_read_sys_array(struct btrfs_root *root)
3314 struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
3315 struct extent_buffer *sb;
3316 struct btrfs_disk_key *disk_key;
3317 struct btrfs_chunk *chunk;
3319 unsigned long sb_ptr;
3325 struct btrfs_key key;
3327 sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
3328 BTRFS_SUPER_INFO_SIZE);
3331 btrfs_set_buffer_uptodate(sb);
3332 btrfs_set_buffer_lockdep_class(sb, 0);
3334 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
3335 array_size = btrfs_super_sys_array_size(super_copy);
3337 ptr = super_copy->sys_chunk_array;
3338 sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
3341 while (cur < array_size) {
3342 disk_key = (struct btrfs_disk_key *)ptr;
3343 btrfs_disk_key_to_cpu(&key, disk_key);
3345 len = sizeof(*disk_key); ptr += len;
3346 sb_ptr += len;
3347 cur += len;
3349 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
3350 chunk = (struct btrfs_chunk *)sb_ptr;
3351 ret = read_one_chunk(root, &key, sb, chunk);
3354 num_stripes = btrfs_chunk_num_stripes(sb, chunk);
3355 len = btrfs_chunk_item_size(num_stripes);
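/*
 * sys_chunk_array is a packed run of (disk key, chunk item) pairs, and
 * the chunk item size depends on num_stripes, so the cursor advances by
 * sizeof(*disk_key) plus btrfs_chunk_item_size(num_stripes) per entry.
 */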
3364 free_extent_buffer(sb);
3368 int btrfs_read_chunk_tree(struct btrfs_root *root)
3370 struct btrfs_path *path;
3371 struct extent_buffer *leaf;
3372 struct btrfs_key key;
3373 struct btrfs_key found_key;
3377 root = root->fs_info->chunk_root;
3379 path = btrfs_alloc_path();
3383 /* first we search for all of the device items, and then we
3384 * read in all of the chunk items. This way we can create chunk
3385 * mappings that reference all of the devices that are found
3387 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
3388 key.offset = 0;
3389 key.type = 0;
3390 again:
3391 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3393 leaf = path->nodes[0];
3394 slot = path->slots[0];
3395 if (slot >= btrfs_header_nritems(leaf)) {
3396 ret = btrfs_next_leaf(root, path);
3403 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3404 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
3405 if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
3407 if (found_key.type == BTRFS_DEV_ITEM_KEY) {
3408 struct btrfs_dev_item *dev_item;
3409 dev_item = btrfs_item_ptr(leaf, slot,
3410 struct btrfs_dev_item);
3411 ret = read_one_dev(root, leaf, dev_item);
3415 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
3416 struct btrfs_chunk *chunk;
3417 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
3418 ret = read_one_chunk(root, &found_key, leaf, chunk);
3424 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
3425 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3426 btrfs_release_path(root, path);
3427 goto again;
3428 }
3429 ret = 0;
3430 error:
3431 btrfs_free_path(path);
3432 return ret;
3433 }