/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <asm/div64.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "async-thread.h"
static int init_first_rw_device(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                struct btrfs_device *device);
static int btrfs_relocate_sys_chunks(struct btrfs_root *root);

static DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
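
/*
 * Chunk allocation and device state changes are serialized by the
 * per-filesystem chunk_mutex; these helpers keep the call sites short.
 */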
static void lock_chunks(struct btrfs_root *root)
{
        mutex_lock(&root->fs_info->chunk_mutex);
}

static void unlock_chunks(struct btrfs_root *root)
{
        mutex_unlock(&root->fs_info->chunk_mutex);
}
static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
        struct btrfs_device *device;

        WARN_ON(fs_devices->opened);
        while (!list_empty(&fs_devices->devices)) {
                device = list_entry(fs_devices->devices.next,
                                    struct btrfs_device, dev_list);
                list_del(&device->dev_list);
                /* each device owns its name string */
                kfree(device->name);
                kfree(device);
        }
        kfree(fs_devices);
}
int btrfs_cleanup_fs_uuids(void)
{
        struct btrfs_fs_devices *fs_devices;

        while (!list_empty(&fs_uuids)) {
                fs_devices = list_entry(fs_uuids.next,
                                        struct btrfs_fs_devices, list);
                list_del(&fs_devices->list);
                free_fs_devices(fs_devices);
        }
        return 0;
}
static noinline struct btrfs_device *__find_device(struct list_head *head,
                                                   u64 devid, u8 *uuid)
{
        struct btrfs_device *dev;

        list_for_each_entry(dev, head, dev_list) {
                if (dev->devid == devid &&
                    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
                        return dev;
                }
        }
        return NULL;
}
static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
        struct btrfs_fs_devices *fs_devices;

        list_for_each_entry(fs_devices, &fs_uuids, list) {
                if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
                        return fs_devices;
        }
        return NULL;
}
static void requeue_list(struct btrfs_pending_bios *pending_bios,
                         struct bio *head, struct bio *tail)
{
        struct bio *old_head;

        old_head = pending_bios->head;
        pending_bios->head = head;
        if (pending_bios->tail)
                tail->bi_next = old_head;
        else
                pending_bios->tail = tail;
}
/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device.  This greatly
 * improves the scheduler's ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block.  The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested.  This way, multiple devices
 * can make progress from a single worker thread.
 */
static noinline int run_scheduled_bios(struct btrfs_device *device)
{
        struct bio *pending;
        struct backing_dev_info *bdi;
        struct btrfs_fs_info *fs_info;
        struct btrfs_pending_bios *pending_bios;
        struct bio *tail;
        struct bio *cur;
        int again = 0;
        unsigned long num_run;
        unsigned long batch_run = 0;
        unsigned long limit;
        unsigned long last_waited = 0;
        int force_reg = 0;
        int sync_pending = 0;
        struct blk_plug plug;

        /*
         * this function runs all the bios we've collected for
         * a particular device.  We don't want to wander off to
         * another device without first sending all of these down.
         * So, setup a plug here and finish it off before we return
         */
        blk_start_plug(&plug);

        bdi = blk_get_backing_dev_info(device->bdev);
        fs_info = device->dev_root->fs_info;
        limit = btrfs_async_submit_limit(fs_info);
        limit = limit * 2 / 3;

loop:
        spin_lock(&device->io_lock);

loop_lock:
        num_run = 0;

        /* take all the bios off the list at once and process them
         * later on (without the lock held).  But, remember the
         * tail and other pointers so the bios can be properly reinserted
         * into the list if we hit congestion
         */
        if (!force_reg && device->pending_sync_bios.head) {
                pending_bios = &device->pending_sync_bios;
                force_reg = 1;
        } else {
                pending_bios = &device->pending_bios;
                force_reg = 0;
        }

        pending = pending_bios->head;
        tail = pending_bios->tail;
        WARN_ON(pending && !tail);

        /*
         * if pending was null this time around, no bios need processing
         * at all and we can stop.  Otherwise it'll loop back up again
         * and do an additional check so no bios are missed.
         *
         * device->running_pending is used to synchronize with the
         * schedule_bio code.
         */
        if (device->pending_sync_bios.head == NULL &&
            device->pending_bios.head == NULL) {
                again = 0;
                device->running_pending = 0;
        } else {
                again = 1;
                device->running_pending = 1;
        }

        pending_bios->head = NULL;
        pending_bios->tail = NULL;

        spin_unlock(&device->io_lock);

        while (pending) {
                rmb();
                /* we want to work on both lists, but do more bios on the
                 * sync list than the regular list
                 */
                if ((num_run > 32 &&
                    pending_bios != &device->pending_sync_bios &&
                    device->pending_sync_bios.head) ||
                   (num_run > 64 && pending_bios == &device->pending_sync_bios &&
                    device->pending_bios.head)) {
                        spin_lock(&device->io_lock);
                        requeue_list(pending_bios, pending, tail);
                        goto loop_lock;
                }

                cur = pending;
                pending = pending->bi_next;
                cur->bi_next = NULL;
                atomic_dec(&fs_info->nr_async_bios);

                if (atomic_read(&fs_info->nr_async_bios) < limit &&
                    waitqueue_active(&fs_info->async_submit_wait))
                        wake_up(&fs_info->async_submit_wait);

                BUG_ON(atomic_read(&cur->bi_cnt) == 0);

                /*
                 * if we're doing the sync list, record that our
                 * plug has some sync requests on it
                 *
                 * If we're doing the regular list and there are
                 * sync requests sitting around, unplug before
                 * we add more
                 */
                if (pending_bios == &device->pending_sync_bios) {
                        sync_pending = 1;
                } else if (sync_pending) {
                        blk_finish_plug(&plug);
                        blk_start_plug(&plug);
                        sync_pending = 0;
                }

                submit_bio(cur->bi_rw, cur);
                num_run++;
                batch_run++;
                if (need_resched())
                        cond_resched();

                /*
                 * we made progress, there is more work to do and the bdi
                 * is now congested.  Back off and let other work structs
                 * run instead
                 */
                if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
                    fs_info->fs_devices->open_devices > 1) {
                        struct io_context *ioc;

                        ioc = current->io_context;

                        /*
                         * the main goal here is that we don't want to
                         * block if we're going to be able to submit
                         * more requests without blocking.
                         *
                         * This code does two great things, it pokes into
                         * the elevator code from a filesystem _and_
                         * it makes assumptions about how batching works.
                         */
                        if (ioc && ioc->nr_batch_requests > 0 &&
                            time_before(jiffies, ioc->last_waited + HZ/50UL) &&
                            (last_waited == 0 ||
                             ioc->last_waited == last_waited)) {
                                /*
                                 * we want to go through our batch of
                                 * requests and stop.  So, we copy out
                                 * the ioc->last_waited time and test
                                 * against it before looping
                                 */
                                last_waited = ioc->last_waited;
                                if (need_resched())
                                        cond_resched();
                                continue;
                        }
                        spin_lock(&device->io_lock);
                        requeue_list(pending_bios, pending, tail);
                        device->running_pending = 1;

                        spin_unlock(&device->io_lock);
                        btrfs_requeue_work(&device->work);
                        goto done;
                }
                /* unplug every 64 requests just for good measure */
                if (batch_run % 64 == 0) {
                        blk_finish_plug(&plug);
                        blk_start_plug(&plug);
                        sync_pending = 0;
                }
        }

        cond_resched();
        if (again)
                goto loop;

        spin_lock(&device->io_lock);
        if (device->pending_bios.head || device->pending_sync_bios.head)
                goto loop_lock;
        spin_unlock(&device->io_lock);

done:
        blk_finish_plug(&plug);
        return 0;
}
static void pending_bios_fn(struct btrfs_work *work)
{
        struct btrfs_device *device;

        device = container_of(work, struct btrfs_device, work);
        run_scheduled_bios(device);
}
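
/*
 * Called for each device found by a scan.  Adds the device to the
 * in-memory list for its fsid, allocating a new btrfs_fs_devices when
 * this is the first device seen for that filesystem, and refreshing the
 * cached path and generation when the device is already known.
 */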
static noinline int device_list_add(const char *path,
                           struct btrfs_super_block *disk_super,
                           u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
        struct btrfs_device *device;
        struct btrfs_fs_devices *fs_devices;
        char *name;
        u64 found_transid = btrfs_super_generation(disk_super);

        fs_devices = find_fsid(disk_super->fsid);
        if (!fs_devices) {
                fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
                if (!fs_devices)
                        return -ENOMEM;
                INIT_LIST_HEAD(&fs_devices->devices);
                INIT_LIST_HEAD(&fs_devices->alloc_list);
                list_add(&fs_devices->list, &fs_uuids);
                memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
                fs_devices->latest_devid = devid;
                fs_devices->latest_trans = found_transid;
                mutex_init(&fs_devices->device_list_mutex);
                device = NULL;
        } else {
                device = __find_device(&fs_devices->devices, devid,
                                       disk_super->dev_item.uuid);
        }
        if (!device) {
                if (fs_devices->opened)
                        return -EBUSY;

                device = kzalloc(sizeof(*device), GFP_NOFS);
                if (!device) {
                        /* we can safely leave the fs_devices entry around */
                        return -ENOMEM;
                }
                device->devid = devid;
                device->work.func = pending_bios_fn;
                memcpy(device->uuid, disk_super->dev_item.uuid,
                       BTRFS_UUID_SIZE);
                spin_lock_init(&device->io_lock);
                device->name = kstrdup(path, GFP_NOFS);
                if (!device->name) {
                        kfree(device);
                        return -ENOMEM;
                }
                INIT_LIST_HEAD(&device->dev_alloc_list);

                /* init readahead state */
                spin_lock_init(&device->reada_lock);
                device->reada_curr_zone = NULL;
                atomic_set(&device->reada_in_flight, 0);
                device->reada_next = 0;
                INIT_RADIX_TREE(&device->reada_zones, GFP_NOFS & ~__GFP_WAIT);
                INIT_RADIX_TREE(&device->reada_extents, GFP_NOFS & ~__GFP_WAIT);

                mutex_lock(&fs_devices->device_list_mutex);
                list_add_rcu(&device->dev_list, &fs_devices->devices);
                mutex_unlock(&fs_devices->device_list_mutex);

                device->fs_devices = fs_devices;
                fs_devices->num_devices++;
        } else if (!device->name || strcmp(device->name, path)) {
                name = kstrdup(path, GFP_NOFS);
                if (!name)
                        return -ENOMEM;
                kfree(device->name);
                device->name = name;
                if (device->missing) {
                        fs_devices->missing_devices--;
                        device->missing = 0;
                }
        }

        if (found_transid > fs_devices->latest_trans) {
                fs_devices->latest_devid = devid;
                fs_devices->latest_trans = found_transid;
        }
        *fs_devices_ret = fs_devices;
        return 0;
}
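
/*
 * Duplicate the in-memory device list for a filesystem.  Used on the
 * seeding paths so the original list can be handed off while a copy
 * stays on fs_uuids.
 */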
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
        struct btrfs_fs_devices *fs_devices;
        struct btrfs_device *device;
        struct btrfs_device *orig_dev;

        fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
        if (!fs_devices)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&fs_devices->devices);
        INIT_LIST_HEAD(&fs_devices->alloc_list);
        INIT_LIST_HEAD(&fs_devices->list);
        mutex_init(&fs_devices->device_list_mutex);
        fs_devices->latest_devid = orig->latest_devid;
        fs_devices->latest_trans = orig->latest_trans;
        memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));

        /* We hold the volume lock; it is safe to walk the devices. */
        list_for_each_entry(orig_dev, &orig->devices, dev_list) {
                device = kzalloc(sizeof(*device), GFP_NOFS);
                if (!device)
                        goto error;

                device->name = kstrdup(orig_dev->name, GFP_NOFS);
                if (!device->name) {
                        kfree(device);
                        goto error;
                }

                device->devid = orig_dev->devid;
                device->work.func = pending_bios_fn;
                memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
                spin_lock_init(&device->io_lock);
                INIT_LIST_HEAD(&device->dev_list);
                INIT_LIST_HEAD(&device->dev_alloc_list);

                list_add(&device->dev_list, &fs_devices->devices);
                device->fs_devices = fs_devices;
                fs_devices->num_devices++;
        }
        return fs_devices;
error:
        free_fs_devices(fs_devices);
        return ERR_PTR(-ENOMEM);
}
int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
{
        struct btrfs_device *device, *next;

        mutex_lock(&uuid_mutex);
again:
        /* This is the initialized path, it is safe to release the devices. */
        list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
                if (device->in_fs_metadata)
                        continue;

                if (device->bdev) {
                        blkdev_put(device->bdev, device->mode);
                        device->bdev = NULL;
                        fs_devices->open_devices--;
                }
                if (device->writeable) {
                        list_del_init(&device->dev_alloc_list);
                        device->writeable = 0;
                        fs_devices->rw_devices--;
                }
                list_del_init(&device->dev_list);
                fs_devices->num_devices--;
                kfree(device->name);
                kfree(device);
        }

        if (fs_devices->seed) {
                fs_devices = fs_devices->seed;
                goto again;
        }

        mutex_unlock(&uuid_mutex);
        return 0;
}
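
/*
 * Devices on the rcu-protected list are freed in two hops: free_device()
 * runs as the RCU callback and only schedules __free_device() on a
 * workqueue, because blkdev_put() may sleep and therefore cannot be
 * called from RCU callback (softirq) context.
 */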
static void __free_device(struct work_struct *work)
{
        struct btrfs_device *device;

        device = container_of(work, struct btrfs_device, rcu_work);

        if (device->bdev)
                blkdev_put(device->bdev, device->mode);

        kfree(device->name);
        kfree(device);
}

static void free_device(struct rcu_head *head)
{
        struct btrfs_device *device;

        device = container_of(head, struct btrfs_device, rcu);

        INIT_WORK(&device->rcu_work, __free_device);
        schedule_work(&device->rcu_work);
}
static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
        struct btrfs_device *device;

        if (--fs_devices->opened > 0)
                return 0;

        mutex_lock(&fs_devices->device_list_mutex);
        list_for_each_entry(device, &fs_devices->devices, dev_list) {
                struct btrfs_device *new_device;

                if (device->bdev)
                        fs_devices->open_devices--;

                if (device->writeable) {
                        list_del_init(&device->dev_alloc_list);
                        fs_devices->rw_devices--;
                }

                if (device->can_discard)
                        fs_devices->num_can_discard--;

                /*
                 * replace the device with a bare copy so readers still see
                 * a valid list entry until the RCU grace period ends
                 */
                new_device = kmalloc(sizeof(*new_device), GFP_NOFS);
                BUG_ON(!new_device);
                memcpy(new_device, device, sizeof(*new_device));
                new_device->name = kstrdup(device->name, GFP_NOFS);
                BUG_ON(device->name && !new_device->name);
                new_device->bdev = NULL;
                new_device->writeable = 0;
                new_device->in_fs_metadata = 0;
                new_device->can_discard = 0;
                list_replace_rcu(&device->dev_list, &new_device->dev_list);

                call_rcu(&device->rcu, free_device);
        }
        mutex_unlock(&fs_devices->device_list_mutex);

        WARN_ON(fs_devices->open_devices);
        WARN_ON(fs_devices->rw_devices);
        fs_devices->opened = 0;
        fs_devices->seeding = 0;

        return 0;
}
int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
        struct btrfs_fs_devices *seed_devices = NULL;
        int ret;

        mutex_lock(&uuid_mutex);
        ret = __btrfs_close_devices(fs_devices);
        if (!fs_devices->opened) {
                seed_devices = fs_devices->seed;
                fs_devices->seed = NULL;
        }
        mutex_unlock(&uuid_mutex);

        while (seed_devices) {
                fs_devices = seed_devices;
                seed_devices = fs_devices->seed;
                __btrfs_close_devices(fs_devices);
                free_fs_devices(fs_devices);
        }
        return ret;
}
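
/*
 * Open every device in the list, read and verify its super block, and
 * remember which device carries the highest generation; that one backs
 * latest_bdev and supplies the primary super block at mount.
 */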
static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
                                fmode_t flags, void *holder)
{
        struct request_queue *q;
        struct block_device *bdev;
        struct list_head *head = &fs_devices->devices;
        struct btrfs_device *device;
        struct block_device *latest_bdev = NULL;
        struct buffer_head *bh;
        struct btrfs_super_block *disk_super;
        u64 latest_devid = 0;
        u64 latest_transid = 0;
        u64 devid;
        int seeding = 1;
        int ret = 0;

        flags |= FMODE_EXCL;

        list_for_each_entry(device, head, dev_list) {
                if (device->bdev)
                        continue;
                if (!device->name)
                        continue;

                bdev = blkdev_get_by_path(device->name, flags, holder);
                if (IS_ERR(bdev)) {
                        printk(KERN_INFO "open %s failed\n", device->name);
                        goto error;
                }
                set_blocksize(bdev, 4096);

                bh = btrfs_read_dev_super(bdev);
                if (!bh)
                        goto error_close;

                disk_super = (struct btrfs_super_block *)bh->b_data;
                devid = btrfs_stack_device_id(&disk_super->dev_item);
                if (devid != device->devid)
                        goto error_brelse;

                if (memcmp(device->uuid, disk_super->dev_item.uuid,
                           BTRFS_UUID_SIZE))
                        goto error_brelse;

                device->generation = btrfs_super_generation(disk_super);
                if (!latest_transid || device->generation > latest_transid) {
                        latest_devid = devid;
                        latest_transid = device->generation;
                        latest_bdev = bdev;
                }

                if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
                        device->writeable = 0;
                } else {
                        device->writeable = !bdev_read_only(bdev);
                        seeding = 0;
                }

                q = bdev_get_queue(bdev);
                if (blk_queue_discard(q)) {
                        device->can_discard = 1;
                        fs_devices->num_can_discard++;
                }

                device->bdev = bdev;
                device->in_fs_metadata = 0;
                device->mode = flags;

                if (!blk_queue_nonrot(bdev_get_queue(bdev)))
                        fs_devices->rotating = 1;

                fs_devices->open_devices++;
                if (device->writeable) {
                        fs_devices->rw_devices++;
                        list_add(&device->dev_alloc_list,
                                 &fs_devices->alloc_list);
                }
                brelse(bh);
                continue;

error_brelse:
                brelse(bh);
error_close:
                blkdev_put(bdev, flags);
error:
                continue;
        }
        if (fs_devices->open_devices == 0) {
                ret = -EINVAL;
                goto out;
        }
        fs_devices->seeding = seeding;
        fs_devices->opened = 1;
        fs_devices->latest_bdev = latest_bdev;
        fs_devices->latest_devid = latest_devid;
        fs_devices->latest_trans = latest_transid;
        fs_devices->total_rw_bytes = 0;
out:
        return ret;
}
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
                       fmode_t flags, void *holder)
{
        int ret;

        mutex_lock(&uuid_mutex);
        if (fs_devices->opened) {
                fs_devices->opened++;
                ret = 0;
        } else {
                ret = __btrfs_open_devices(fs_devices, flags, holder);
        }
        mutex_unlock(&uuid_mutex);
        return ret;
}
int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
                          struct btrfs_fs_devices **fs_devices_ret)
{
        struct btrfs_super_block *disk_super;
        struct block_device *bdev;
        struct buffer_head *bh;
        int ret;
        u64 devid;
        u64 transid;

        mutex_lock(&uuid_mutex);

        flags |= FMODE_EXCL;
        bdev = blkdev_get_by_path(path, flags, holder);
        if (IS_ERR(bdev)) {
                ret = PTR_ERR(bdev);
                goto error;
        }

        ret = set_blocksize(bdev, 4096);
        if (ret)
                goto error_close;
        bh = btrfs_read_dev_super(bdev);
        if (!bh) {
                ret = -EINVAL;
                goto error_close;
        }
        disk_super = (struct btrfs_super_block *)bh->b_data;
        devid = btrfs_stack_device_id(&disk_super->dev_item);
        transid = btrfs_super_generation(disk_super);
        if (disk_super->label[0])
                printk(KERN_INFO "device label %s ", disk_super->label);
        else
                printk(KERN_INFO "device fsid %pU ", disk_super->fsid);
        printk(KERN_CONT "devid %llu transid %llu %s\n",
               (unsigned long long)devid, (unsigned long long)transid, path);
        ret = device_list_add(path, disk_super, devid, fs_devices_ret);

        brelse(bh);
error_close:
        blkdev_put(bdev, flags);
error:
        mutex_unlock(&uuid_mutex);
        return ret;
}
/* helper to account the used device space in the range [start, end] */
int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
                                   u64 end, u64 *length)
{
        struct btrfs_key key;
        struct btrfs_root *root = device->dev_root;
        struct btrfs_dev_extent *dev_extent;
        struct btrfs_path *path;
        u64 extent_end;
        int ret;
        int slot;
        struct extent_buffer *l;

        *length = 0;
        if (start >= device->total_bytes)
                return 0;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = device->devid;
        key.offset = start;
        key.type = BTRFS_DEV_EXTENT_KEY;

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto out;
        if (ret > 0) {
                ret = btrfs_previous_item(root, path, key.objectid, key.type);
                if (ret < 0)
                        goto out;
        }

        while (1) {
                l = path->nodes[0];
                slot = path->slots[0];
                if (slot >= btrfs_header_nritems(l)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret == 0)
                                continue;
                        if (ret < 0)
                                goto out;
                        break;
                }
                btrfs_item_key_to_cpu(l, &key, slot);

                if (key.objectid < device->devid)
                        goto next;
                if (key.objectid > device->devid)
                        break;
                if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
                        goto next;

                dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
                extent_end = key.offset + btrfs_dev_extent_length(l,
                                                                  dev_extent);
                if (key.offset <= start && extent_end > end) {
                        *length = end - start + 1;
                        break;
                } else if (key.offset <= start && extent_end > start)
                        *length += extent_end - start;
                else if (key.offset > start && extent_end <= end)
                        *length += extent_end - key.offset;
                else if (key.offset > start && key.offset <= end) {
                        *length += end - key.offset + 1;
                        break;
                } else if (key.offset > end)
                        break;
next:
                path->slots[0]++;
        }
        ret = 0;
out:
        btrfs_free_path(path);
        return ret;
}
/*
 * find_free_dev_extent - find free space in the specified device
 * @trans:	transaction handle
 * @device:	the device which we search the free space in
 * @num_bytes:	the size of the free space that we need
 * @start:	store the start of the free space.
 * @len:	the size of the free space that we find, or the size of the
 *		max free space if we don't find suitable free space
 *
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 *
 * @start is used to store the start of the free space if we find
 * suitable free space.  Otherwise it stores the start position
 * of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * If we don't find suitable free space, it stores the size of
 * the max free space.
 */
int find_free_dev_extent(struct btrfs_trans_handle *trans,
                         struct btrfs_device *device, u64 num_bytes,
                         u64 *start, u64 *len)
{
        struct btrfs_key key;
        struct btrfs_root *root = device->dev_root;
        struct btrfs_dev_extent *dev_extent;
        struct btrfs_path *path;
        u64 hole_size;
        u64 max_hole_start;
        u64 max_hole_size;
        u64 extent_end;
        u64 search_start;
        u64 search_end = device->total_bytes;
        int ret;
        int slot;
        struct extent_buffer *l;

        /* FIXME use last free of some kind */

        /* we don't want to overwrite the superblock on the drive,
         * so we make sure to start at an offset of at least 1MB
         */
        search_start = max(root->fs_info->alloc_start, 1024ull * 1024);

        max_hole_start = search_start;
        max_hole_size = 0;
        hole_size = 0;

        if (search_start >= search_end) {
                ret = -ENOSPC;
                goto error;
        }

        path = btrfs_alloc_path();
        if (!path) {
                ret = -ENOMEM;
                goto error;
        }

        key.objectid = device->devid;
        key.offset = search_start;
        key.type = BTRFS_DEV_EXTENT_KEY;

        ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
        if (ret < 0)
                goto out;
        if (ret > 0) {
                ret = btrfs_previous_item(root, path, key.objectid, key.type);
                if (ret < 0)
                        goto out;
        }

        while (1) {
                l = path->nodes[0];
                slot = path->slots[0];
                if (slot >= btrfs_header_nritems(l)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret == 0)
                                continue;
                        if (ret < 0)
                                goto out;
                        break;
                }
                btrfs_item_key_to_cpu(l, &key, slot);

                if (key.objectid < device->devid)
                        goto next;
                if (key.objectid > device->devid)
                        break;
                if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
                        goto next;

                if (key.offset > search_start) {
                        hole_size = key.offset - search_start;

                        if (hole_size > max_hole_size) {
                                max_hole_start = search_start;
                                max_hole_size = hole_size;
                        }

                        /*
                         * If this free space is greater than what we need,
                         * it must be the max free space that we have found
                         * until now, so max_hole_start must point to the start
                         * of this free space and the length of this free space
                         * is stored in max_hole_size.  Thus, we return
                         * max_hole_start and max_hole_size and go back to the
                         * caller.
                         */
                        if (hole_size >= num_bytes) {
                                ret = 0;
                                goto out;
                        }
                }

                dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
                extent_end = key.offset + btrfs_dev_extent_length(l,
                                                                  dev_extent);
                if (extent_end > search_start)
                        search_start = extent_end;
next:
                path->slots[0]++;
                cond_resched();
        }

        /*
         * At this point, search_start should be the end of
         * allocated dev extents, and when shrinking the device,
         * search_end may be smaller than search_start.
         */
        if (search_end > search_start)
                hole_size = search_end - search_start;

        if (hole_size > max_hole_size) {
                max_hole_start = search_start;
                max_hole_size = hole_size;
        }

        if (hole_size < num_bytes)
                ret = -ENOSPC;
        else
                ret = 0;

out:
        btrfs_free_path(path);
error:
        *start = max_hole_start;
        if (len)
                *len = max_hole_size;
        return ret;
}
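
/*
 * Remove the dev extent item that backs a chunk stripe on @device and
 * return the space to the device's free-space accounting.
 */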
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
                                 struct btrfs_device *device,
                                 u64 start)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_root *root = device->dev_root;
        struct btrfs_key key;
        struct btrfs_key found_key;
        struct extent_buffer *leaf = NULL;
        struct btrfs_dev_extent *extent = NULL;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = device->devid;
        key.offset = start;
        key.type = BTRFS_DEV_EXTENT_KEY;

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret > 0) {
                ret = btrfs_previous_item(root, path, key.objectid,
                                          BTRFS_DEV_EXTENT_KEY);
                if (ret)
                        goto out;
                leaf = path->nodes[0];
                btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
                extent = btrfs_item_ptr(leaf, path->slots[0],
                                        struct btrfs_dev_extent);
                BUG_ON(found_key.offset > start || found_key.offset +
                       btrfs_dev_extent_length(leaf, extent) < start);
        } else if (ret == 0) {
                leaf = path->nodes[0];
                extent = btrfs_item_ptr(leaf, path->slots[0],
                                        struct btrfs_dev_extent);
        }
        BUG_ON(ret);

        if (device->bytes_used > 0) {
                u64 len = btrfs_dev_extent_length(leaf, extent);
                device->bytes_used -= len;
                spin_lock(&root->fs_info->free_chunk_lock);
                root->fs_info->free_chunk_space += len;
                spin_unlock(&root->fs_info->free_chunk_lock);
        }
        ret = btrfs_del_item(trans, root, path);

out:
        btrfs_free_path(path);
        return ret;
}
int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
                           struct btrfs_device *device,
                           u64 chunk_tree, u64 chunk_objectid,
                           u64 chunk_offset, u64 start, u64 num_bytes)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_root *root = device->dev_root;
        struct btrfs_dev_extent *extent;
        struct extent_buffer *leaf;
        struct btrfs_key key;

        WARN_ON(!device->in_fs_metadata);
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = device->devid;
        key.offset = start;
        key.type = BTRFS_DEV_EXTENT_KEY;
        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                      sizeof(*extent));
        BUG_ON(ret);

        leaf = path->nodes[0];
        extent = btrfs_item_ptr(leaf, path->slots[0],
                                struct btrfs_dev_extent);
        btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
        btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
        btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

        write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
                    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
                    BTRFS_UUID_SIZE);

        btrfs_set_dev_extent_length(leaf, extent, num_bytes);
        btrfs_mark_buffer_dirty(leaf);
        btrfs_free_path(path);
        return ret;
}
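
/*
 * Find the logical offset just past the last chunk item for @objectid,
 * i.e. where the next chunk of that kind can be placed.
 */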
static noinline int find_next_chunk(struct btrfs_root *root,
                                    u64 objectid, u64 *offset)
{
        struct btrfs_path *path;
        int ret;
        struct btrfs_key key;
        struct btrfs_chunk *chunk;
        struct btrfs_key found_key;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = objectid;
        key.offset = (u64)-1;
        key.type = BTRFS_CHUNK_ITEM_KEY;

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto error;
        BUG_ON(ret == 0);

        ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
        if (ret) {
                *offset = 0;
        } else {
                btrfs_item_key_to_cpu(path->nodes[0], &found_key,
                                      path->slots[0]);
                if (found_key.objectid != objectid)
                        *offset = 0;
                else {
                        chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
                                               struct btrfs_chunk);
                        *offset = found_key.offset +
                                btrfs_chunk_length(path->nodes[0], chunk);
                }
        }
        ret = 0;
error:
        btrfs_free_path(path);
        return ret;
}
static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid)
{
        int ret;
        struct btrfs_key key;
        struct btrfs_key found_key;
        struct btrfs_path *path;

        root = root->fs_info->chunk_root;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
        key.type = BTRFS_DEV_ITEM_KEY;
        key.offset = (u64)-1;

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto error;
        BUG_ON(ret == 0);

        ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
                                  BTRFS_DEV_ITEM_KEY);
        if (ret) {
                *objectid = 1;
        } else {
                btrfs_item_key_to_cpu(path->nodes[0], &found_key,
                                      path->slots[0]);
                *objectid = found_key.offset + 1;
        }
        ret = 0;
error:
        btrfs_free_path(path);
        return ret;
}
/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
                     struct btrfs_root *root,
                     struct btrfs_device *device)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_dev_item *dev_item;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        unsigned long ptr;

        root = root->fs_info->chunk_root;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
        key.type = BTRFS_DEV_ITEM_KEY;
        key.offset = device->devid;

        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                      sizeof(*dev_item));
        if (ret)
                goto out;

        leaf = path->nodes[0];
        dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

        btrfs_set_device_id(leaf, dev_item, device->devid);
        btrfs_set_device_generation(leaf, dev_item, 0);
        btrfs_set_device_type(leaf, dev_item, device->type);
        btrfs_set_device_io_align(leaf, dev_item, device->io_align);
        btrfs_set_device_io_width(leaf, dev_item, device->io_width);
        btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
        btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
        btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
        btrfs_set_device_group(leaf, dev_item, 0);
        btrfs_set_device_seek_speed(leaf, dev_item, 0);
        btrfs_set_device_bandwidth(leaf, dev_item, 0);
        btrfs_set_device_start_offset(leaf, dev_item, 0);

        ptr = (unsigned long)btrfs_device_uuid(dev_item);
        write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
        ptr = (unsigned long)btrfs_device_fsid(dev_item);
        write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
        btrfs_mark_buffer_dirty(leaf);

        ret = 0;
out:
        btrfs_free_path(path);
        return ret;
}
static int btrfs_rm_dev_item(struct btrfs_root *root,
                             struct btrfs_device *device)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_key key;
        struct btrfs_trans_handle *trans;

        root = root->fs_info->chunk_root;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        trans = btrfs_start_transaction(root, 0);
        if (IS_ERR(trans)) {
                btrfs_free_path(path);
                return PTR_ERR(trans);
        }
        key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
        key.type = BTRFS_DEV_ITEM_KEY;
        key.offset = device->devid;
        lock_chunks(root);

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0)
                goto out;
        if (ret > 0) {
                ret = -ENOENT;
                goto out;
        }

        ret = btrfs_del_item(trans, root, path);
out:
        btrfs_free_path(path);
        unlock_chunks(root);
        btrfs_commit_transaction(trans, root);
        return ret;
}
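
/*
 * Remove one device from a mounted filesystem: check the redundancy
 * constraints, migrate all of its extents away by shrinking it to zero,
 * drop its dev item and list entries, and finally wipe the super block
 * magic on disk so it is no longer detected as part of the filesystem.
 */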
int btrfs_rm_device(struct btrfs_root *root, char *device_path)
{
        struct btrfs_device *device;
        struct btrfs_device *next_device;
        struct block_device *bdev;
        struct buffer_head *bh = NULL;
        struct btrfs_super_block *disk_super;
        struct btrfs_fs_devices *cur_devices;
        u64 all_avail;
        u64 devid;
        u64 num_devices;
        u8 *dev_uuid;
        int ret = 0;
        bool clear_super = false;

        mutex_lock(&uuid_mutex);

        all_avail = root->fs_info->avail_data_alloc_bits |
                root->fs_info->avail_system_alloc_bits |
                root->fs_info->avail_metadata_alloc_bits;

        if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
            root->fs_info->fs_devices->num_devices <= 4) {
                printk(KERN_ERR "btrfs: unable to go below four devices "
                       "on raid10\n");
                ret = -EINVAL;
                goto out;
        }

        if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
            root->fs_info->fs_devices->num_devices <= 2) {
                printk(KERN_ERR "btrfs: unable to go below two "
                       "devices on raid1\n");
                ret = -EINVAL;
                goto out;
        }

        if (strcmp(device_path, "missing") == 0) {
                struct list_head *devices;
                struct btrfs_device *tmp;

                device = NULL;
                devices = &root->fs_info->fs_devices->devices;
                /*
                 * It is safe to read the devices since the volume_mutex
                 * is held.
                 */
                list_for_each_entry(tmp, devices, dev_list) {
                        if (tmp->in_fs_metadata && !tmp->bdev) {
                                device = tmp;
                                break;
                        }
                }
                bdev = NULL;
                bh = NULL;
                disk_super = NULL;
                if (!device) {
                        printk(KERN_ERR "btrfs: no missing devices found to "
                               "remove\n");
                        goto out;
                }
        } else {
                bdev = blkdev_get_by_path(device_path, FMODE_READ | FMODE_EXCL,
                                          root->fs_info->bdev_holder);
                if (IS_ERR(bdev)) {
                        ret = PTR_ERR(bdev);
                        goto out;
                }

                set_blocksize(bdev, 4096);
                bh = btrfs_read_dev_super(bdev);
                if (!bh) {
                        ret = -EINVAL;
                        goto error_close;
                }
                disk_super = (struct btrfs_super_block *)bh->b_data;
                devid = btrfs_stack_device_id(&disk_super->dev_item);
                dev_uuid = disk_super->dev_item.uuid;
                device = btrfs_find_device(root, devid, dev_uuid,
                                           disk_super->fsid);
                if (!device) {
                        ret = -ENOENT;
                        goto error_brelse;
                }
        }

        if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
                printk(KERN_ERR "btrfs: unable to remove the only writeable "
                       "device\n");
                ret = -EINVAL;
                goto error_brelse;
        }

        if (device->writeable) {
                lock_chunks(root);
                list_del_init(&device->dev_alloc_list);
                unlock_chunks(root);
                root->fs_info->fs_devices->rw_devices--;
                clear_super = true;
        }

        ret = btrfs_shrink_device(device, 0);
        if (ret)
                goto error_undo;

        ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
        if (ret)
                goto error_undo;

        spin_lock(&root->fs_info->free_chunk_lock);
        root->fs_info->free_chunk_space = device->total_bytes -
                device->bytes_used;
        spin_unlock(&root->fs_info->free_chunk_lock);

        device->in_fs_metadata = 0;
        btrfs_scrub_cancel_dev(root, device);

        /*
         * the device list mutex makes sure that we don't change
         * the device list while someone else is writing out all
         * the device supers.
         */
        cur_devices = device->fs_devices;
        mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
        list_del_rcu(&device->dev_list);

        device->fs_devices->num_devices--;

        if (device->missing)
                root->fs_info->fs_devices->missing_devices--;

        next_device = list_entry(root->fs_info->fs_devices->devices.next,
                                 struct btrfs_device, dev_list);
        if (device->bdev == root->fs_info->sb->s_bdev)
                root->fs_info->sb->s_bdev = next_device->bdev;
        if (device->bdev == root->fs_info->fs_devices->latest_bdev)
                root->fs_info->fs_devices->latest_bdev = next_device->bdev;

        if (device->bdev)
                device->fs_devices->open_devices--;

        call_rcu(&device->rcu, free_device);
        mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

        num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
        btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);

        if (cur_devices->open_devices == 0) {
                struct btrfs_fs_devices *fs_devices;
                fs_devices = root->fs_info->fs_devices;
                while (fs_devices) {
                        if (fs_devices->seed == cur_devices)
                                break;
                        fs_devices = fs_devices->seed;
                }
                fs_devices->seed = cur_devices->seed;
                cur_devices->seed = NULL;
                lock_chunks(root);
                __btrfs_close_devices(cur_devices);
                unlock_chunks(root);
                free_fs_devices(cur_devices);
        }

        /*
         * at this point, the device is zero sized.  We want to
         * remove it from the devices list and zero out the old super
         */
        if (clear_super) {
                /* make sure this device isn't detected as part of
                 * the FS anymore
                 */
                memset(&disk_super->magic, 0, sizeof(disk_super->magic));
                set_buffer_dirty(bh);
                sync_dirty_buffer(bh);
        }

        ret = 0;

error_brelse:
        brelse(bh);
error_close:
        if (bdev)
                blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
out:
        mutex_unlock(&uuid_mutex);
        return ret;
error_undo:
        if (device->writeable) {
                lock_chunks(root);
                list_add(&device->dev_alloc_list,
                         &root->fs_info->fs_devices->alloc_list);
                unlock_chunks(root);
                root->fs_info->fs_devices->rw_devices++;
        }
        goto error_brelse;
}
/*
 * does all the dirty work required for changing the filesystem's UUID:
 * the currently mounted devices are moved onto a new seed fs_devices
 * and the mounted filesystem gets a freshly generated fsid.
 */
static int btrfs_prepare_sprout(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root)
{
        struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
        struct btrfs_fs_devices *old_devices;
        struct btrfs_fs_devices *seed_devices;
        struct btrfs_super_block *disk_super = root->fs_info->super_copy;
        struct btrfs_device *device;
        u64 super_flags;

        BUG_ON(!mutex_is_locked(&uuid_mutex));
        if (!fs_devices->seeding)
                return -EINVAL;

        seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
        if (!seed_devices)
                return -ENOMEM;

        old_devices = clone_fs_devices(fs_devices);
        if (IS_ERR(old_devices)) {
                kfree(seed_devices);
                return PTR_ERR(old_devices);
        }

        list_add(&old_devices->list, &fs_uuids);

        memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
        seed_devices->opened = 1;
        INIT_LIST_HEAD(&seed_devices->devices);
        INIT_LIST_HEAD(&seed_devices->alloc_list);
        mutex_init(&seed_devices->device_list_mutex);

        mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
        list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
                              synchronize_rcu);
        mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

        list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
        list_for_each_entry(device, &seed_devices->devices, dev_list) {
                device->fs_devices = seed_devices;
        }

        fs_devices->seeding = 0;
        fs_devices->num_devices = 0;
        fs_devices->open_devices = 0;
        fs_devices->seed = seed_devices;

        generate_random_uuid(fs_devices->fsid);
        memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
        memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
        super_flags = btrfs_super_flags(disk_super) &
                      ~BTRFS_SUPER_FLAG_SEEDING;
        btrfs_set_super_flags(disk_super, super_flags);

        return 0;
}
/*
 * store the expected generation for seed devices in device items.
 */
static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root)
{
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_dev_item *dev_item;
        struct btrfs_device *device;
        struct btrfs_key key;
        u8 fs_uuid[BTRFS_UUID_SIZE];
        u8 dev_uuid[BTRFS_UUID_SIZE];
        u64 devid;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        root = root->fs_info->chunk_root;
        key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
        key.offset = 0;
        key.type = BTRFS_DEV_ITEM_KEY;

        while (1) {
                ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
                if (ret < 0)
                        goto error;

                leaf = path->nodes[0];
next_slot:
                if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret > 0)
                                break;
                        if (ret < 0)
                                goto error;
                        leaf = path->nodes[0];
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                        btrfs_release_path(path);
                        continue;
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
                    key.type != BTRFS_DEV_ITEM_KEY)
                        break;

                dev_item = btrfs_item_ptr(leaf, path->slots[0],
                                          struct btrfs_dev_item);
                devid = btrfs_device_id(leaf, dev_item);
                read_extent_buffer(leaf, dev_uuid,
                                   (unsigned long)btrfs_device_uuid(dev_item),
                                   BTRFS_UUID_SIZE);
                read_extent_buffer(leaf, fs_uuid,
                                   (unsigned long)btrfs_device_fsid(dev_item),
                                   BTRFS_UUID_SIZE);
                device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
                BUG_ON(!device);

                if (device->fs_devices->seeding) {
                        btrfs_set_device_generation(leaf, dev_item,
                                                    device->generation);
                        btrfs_mark_buffer_dirty(leaf);
                }

                path->slots[0]++;
                goto next_slot;
        }
        ret = 0;
error:
        btrfs_free_path(path);
        return ret;
}
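
/*
 * Add a new device to a mounted filesystem: open it exclusively, build
 * a btrfs_device for it and wire it into the superblock counters and
 * the allocation lists inside one transaction.  If the mounted
 * filesystem is a seed, sprout a new fsid first.
 */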
int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
{
        struct request_queue *q;
        struct btrfs_trans_handle *trans;
        struct btrfs_device *device;
        struct block_device *bdev;
        struct list_head *devices;
        struct super_block *sb = root->fs_info->sb;
        u64 total_bytes;
        int seeding_dev = 0;
        int ret = 0;

        if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
                return -EINVAL;

        bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
                                  root->fs_info->bdev_holder);
        if (IS_ERR(bdev))
                return PTR_ERR(bdev);

        if (root->fs_info->fs_devices->seeding) {
                seeding_dev = 1;
                down_write(&sb->s_umount);
                mutex_lock(&uuid_mutex);
        }

        filemap_write_and_wait(bdev->bd_inode->i_mapping);

        devices = &root->fs_info->fs_devices->devices;
        /*
         * we have the volume lock, so we don't need the extra
         * device list mutex while reading the list here.
         */
        list_for_each_entry(device, devices, dev_list) {
                if (device->bdev == bdev) {
                        ret = -EEXIST;
                        goto error;
                }
        }

        device = kzalloc(sizeof(*device), GFP_NOFS);
        if (!device) {
                /* we can safely leave the fs_devices entry around */
                ret = -ENOMEM;
                goto error;
        }

        device->name = kstrdup(device_path, GFP_NOFS);
        if (!device->name) {
                kfree(device);
                ret = -ENOMEM;
                goto error;
        }

        ret = find_next_devid(root, &device->devid);
        if (ret) {
                kfree(device->name);
                kfree(device);
                goto error;
        }

        trans = btrfs_start_transaction(root, 0);
        if (IS_ERR(trans)) {
                kfree(device->name);
                kfree(device);
                ret = PTR_ERR(trans);
                goto error;
        }

        lock_chunks(root);

        q = bdev_get_queue(bdev);
        if (blk_queue_discard(q))
                device->can_discard = 1;
        device->writeable = 1;
        device->work.func = pending_bios_fn;
        generate_random_uuid(device->uuid);
        spin_lock_init(&device->io_lock);
        device->generation = trans->transid;
        device->io_width = root->sectorsize;
        device->io_align = root->sectorsize;
        device->sector_size = root->sectorsize;
        device->total_bytes = i_size_read(bdev->bd_inode);
        device->disk_total_bytes = device->total_bytes;
        device->dev_root = root->fs_info->dev_root;
        device->bdev = bdev;
        device->in_fs_metadata = 1;
        device->mode = FMODE_EXCL;
        set_blocksize(device->bdev, 4096);

        if (seeding_dev) {
                sb->s_flags &= ~MS_RDONLY;
                ret = btrfs_prepare_sprout(trans, root);
                BUG_ON(ret);
        }

        device->fs_devices = root->fs_info->fs_devices;

        /*
         * we don't want write_supers to jump in here with our device
         * half setup
         */
        mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
        list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
        list_add(&device->dev_alloc_list,
                 &root->fs_info->fs_devices->alloc_list);
        root->fs_info->fs_devices->num_devices++;
        root->fs_info->fs_devices->open_devices++;
        root->fs_info->fs_devices->rw_devices++;
        if (device->can_discard)
                root->fs_info->fs_devices->num_can_discard++;
        root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;

        spin_lock(&root->fs_info->free_chunk_lock);
        root->fs_info->free_chunk_space += device->total_bytes;
        spin_unlock(&root->fs_info->free_chunk_lock);

        if (!blk_queue_nonrot(bdev_get_queue(bdev)))
                root->fs_info->fs_devices->rotating = 1;

        total_bytes = btrfs_super_total_bytes(root->fs_info->super_copy);
        btrfs_set_super_total_bytes(root->fs_info->super_copy,
                                    total_bytes + device->total_bytes);

        total_bytes = btrfs_super_num_devices(root->fs_info->super_copy);
        btrfs_set_super_num_devices(root->fs_info->super_copy,
                                    total_bytes + 1);
        mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

        if (seeding_dev) {
                ret = init_first_rw_device(trans, root, device);
                BUG_ON(ret);
                ret = btrfs_finish_sprout(trans, root);
                BUG_ON(ret);
        } else {
                ret = btrfs_add_device(trans, root, device);
        }

        /*
         * we've got more storage, clear any full flags on the space
         * infos
         */
        btrfs_clear_space_info_full(root->fs_info);

        unlock_chunks(root);
        btrfs_commit_transaction(trans, root);

        if (seeding_dev) {
                mutex_unlock(&uuid_mutex);
                up_write(&sb->s_umount);

                ret = btrfs_relocate_sys_chunks(root);
                BUG_ON(ret);
        }
        return ret;

error:
        blkdev_put(bdev, FMODE_EXCL);
        if (seeding_dev) {
                mutex_unlock(&uuid_mutex);
                up_write(&sb->s_umount);
        }
        return ret;
}
static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
                                        struct btrfs_device *device)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_root *root;
        struct btrfs_dev_item *dev_item;
        struct extent_buffer *leaf;
        struct btrfs_key key;

        root = device->dev_root->fs_info->chunk_root;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
        key.type = BTRFS_DEV_ITEM_KEY;
        key.offset = device->devid;

        ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
        if (ret < 0)
                goto out;
        if (ret > 0) {
                ret = -ENOENT;
                goto out;
        }

        leaf = path->nodes[0];
        dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

        btrfs_set_device_id(leaf, dev_item, device->devid);
        btrfs_set_device_type(leaf, dev_item, device->type);
        btrfs_set_device_io_align(leaf, dev_item, device->io_align);
        btrfs_set_device_io_width(leaf, dev_item, device->io_width);
        btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
        btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes);
        btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
        btrfs_mark_buffer_dirty(leaf);

out:
        btrfs_free_path(path);
        return ret;
}
static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
                               struct btrfs_device *device, u64 new_size)
{
        struct btrfs_super_block *super_copy =
                device->dev_root->fs_info->super_copy;
        u64 old_total = btrfs_super_total_bytes(super_copy);
        u64 diff = new_size - device->total_bytes;

        if (!device->writeable)
                return -EACCES;
        if (new_size <= device->total_bytes)
                return -EINVAL;

        btrfs_set_super_total_bytes(super_copy, old_total + diff);
        device->fs_devices->total_rw_bytes += diff;

        device->total_bytes = new_size;
        device->disk_total_bytes = new_size;
        btrfs_clear_space_info_full(device->dev_root->fs_info);

        return btrfs_update_device(trans, device);
}

int btrfs_grow_device(struct btrfs_trans_handle *trans,
                      struct btrfs_device *device, u64 new_size)
{
        int ret;
        lock_chunks(device->dev_root);
        ret = __btrfs_grow_device(trans, device, new_size);
        unlock_chunks(device->dev_root);
        return ret;
}
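
/*
 * Delete the chunk item for (chunk_objectid, chunk_offset) from the
 * chunk tree; the caller has already freed the device extents.
 */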
static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
                            struct btrfs_root *root,
                            u64 chunk_tree, u64 chunk_objectid,
                            u64 chunk_offset)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_key key;

        root = root->fs_info->chunk_root;
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = chunk_objectid;
        key.offset = chunk_offset;
        key.type = BTRFS_CHUNK_ITEM_KEY;

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        BUG_ON(ret);

        ret = btrfs_del_item(trans, root, path);

        btrfs_free_path(path);
        return ret;
}
static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
                        chunk_offset)
{
        struct btrfs_super_block *super_copy = root->fs_info->super_copy;
        struct btrfs_disk_key *disk_key;
        struct btrfs_chunk *chunk;
        u8 *ptr;
        int ret = 0;
        u32 num_stripes;
        u32 array_size;
        u32 len = 0;
        u32 cur;
        struct btrfs_key key;

        array_size = btrfs_super_sys_array_size(super_copy);

        ptr = super_copy->sys_chunk_array;
        cur = 0;

        while (cur < array_size) {
                disk_key = (struct btrfs_disk_key *)ptr;
                btrfs_disk_key_to_cpu(&key, disk_key);

                len = sizeof(*disk_key);

                if (key.type == BTRFS_CHUNK_ITEM_KEY) {
                        chunk = (struct btrfs_chunk *)(ptr + len);
                        num_stripes = btrfs_stack_chunk_num_stripes(chunk);
                        len += btrfs_chunk_item_size(num_stripes);
                } else {
                        ret = -EIO;
                        break;
                }
                if (key.objectid == chunk_objectid &&
                    key.offset == chunk_offset) {
                        memmove(ptr, ptr + len, array_size - (cur + len));
                        array_size -= len;
                        btrfs_set_super_sys_array_size(super_copy, array_size);
                } else {
                        ptr += len;
                        cur += len;
                }
        }
        return ret;
}
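
/*
 * Relocate one chunk: move every extent it holds into other chunks,
 * then delete its device extents, its chunk item (plus the copy in the
 * superblock array for system chunks), its block group and its cached
 * extent mapping.
 */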
static int btrfs_relocate_chunk(struct btrfs_root *root,
                                u64 chunk_tree, u64 chunk_objectid,
                                u64 chunk_offset)
{
        struct extent_map_tree *em_tree;
        struct btrfs_root *extent_root;
        struct btrfs_trans_handle *trans;
        struct extent_map *em;
        struct map_lookup *map;
        int ret;
        int i;

        root = root->fs_info->chunk_root;
        extent_root = root->fs_info->extent_root;
        em_tree = &root->fs_info->mapping_tree.map_tree;

        ret = btrfs_can_relocate(extent_root, chunk_offset);
        if (ret)
                return -ENOSPC;

        /* step one, relocate all the extents inside this chunk */
        ret = btrfs_relocate_block_group(extent_root, chunk_offset);
        if (ret)
                return ret;

        trans = btrfs_start_transaction(root, 0);
        BUG_ON(IS_ERR(trans));

        lock_chunks(root);

        /*
         * step two, delete the device extents and the
         * chunk tree entries
         */
        read_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree, chunk_offset, 1);
        read_unlock(&em_tree->lock);

        BUG_ON(em->start > chunk_offset ||
               em->start + em->len < chunk_offset);
        map = (struct map_lookup *)em->bdev;

        for (i = 0; i < map->num_stripes; i++) {
                ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
                                            map->stripes[i].physical);
                BUG_ON(ret);

                if (map->stripes[i].dev) {
                        ret = btrfs_update_device(trans, map->stripes[i].dev);
                        BUG_ON(ret);
                }
        }
        ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
                               chunk_offset);
        BUG_ON(ret);

        trace_btrfs_chunk_free(root, map, chunk_offset, em->len);

        if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
                ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
                BUG_ON(ret);
        }

        ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
        BUG_ON(ret);

        write_lock(&em_tree->lock);
        remove_extent_mapping(em_tree, em);
        write_unlock(&em_tree->lock);

        kfree(map);
        em->bdev = NULL;

        /* once for the tree */
        free_extent_map(em);
        /* once for us */
        free_extent_map(em);

        unlock_chunks(root);
        btrfs_end_transaction(trans, root);
        return 0;
}
static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
{
        struct btrfs_root *chunk_root = root->fs_info->chunk_root;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_chunk *chunk;
        struct btrfs_key key;
        struct btrfs_key found_key;
        u64 chunk_tree = chunk_root->root_key.objectid;
        u64 chunk_type;
        bool retried = false;
        int failed = 0;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
again:
        key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
        key.offset = (u64)-1;
        key.type = BTRFS_CHUNK_ITEM_KEY;

        while (1) {
                ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
                if (ret < 0)
                        goto error;
                BUG_ON(ret == 0);

                ret = btrfs_previous_item(chunk_root, path, key.objectid,
                                          key.type);
                if (ret < 0)
                        goto error;
                if (ret > 0)
                        break;

                leaf = path->nodes[0];
                btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

                chunk = btrfs_item_ptr(leaf, path->slots[0],
                                       struct btrfs_chunk);
                chunk_type = btrfs_chunk_type(leaf, chunk);
                btrfs_release_path(path);

                if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
                        ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
                                                   found_key.objectid,
                                                   found_key.offset);
                        if (ret == -ENOSPC)
                                failed++;
                        else if (ret)
                                BUG();
                }

                if (found_key.offset == 0)
                        break;
                key.offset = found_key.offset - 1;
        }
        ret = 0;
        if (failed && !retried) {
                failed = 0;
                retried = true;
                goto again;
        } else if (failed && retried) {
                WARN_ON(1);
                ret = -ENOSPC;
        }
error:
        btrfs_free_path(path);
        return ret;
}
/*
 * Should be called with both balance and volume mutexes held to
 * serialize other volume operations (add_dev/rm_dev/resize) with
 * restriper.  Same goes for unset_balance_control.
 */
static void set_balance_control(struct btrfs_balance_control *bctl)
{
        struct btrfs_fs_info *fs_info = bctl->fs_info;

        BUG_ON(fs_info->balance_ctl);

        spin_lock(&fs_info->balance_lock);
        fs_info->balance_ctl = bctl;
        spin_unlock(&fs_info->balance_lock);
}

static void unset_balance_control(struct btrfs_fs_info *fs_info)
{
        struct btrfs_balance_control *bctl = fs_info->balance_ctl;

        BUG_ON(!fs_info->balance_ctl);

        spin_lock(&fs_info->balance_lock);
        fs_info->balance_ctl = NULL;
        spin_unlock(&fs_info->balance_lock);

        kfree(bctl);
}
/*
 * Balance filters.  Return 1 if chunk should be filtered out
 * (should not be balanced).
 */
static int chunk_profiles_filter(u64 chunk_profile,
                                 struct btrfs_balance_args *bargs)
{
        chunk_profile &= BTRFS_BLOCK_GROUP_PROFILE_MASK;

        if (chunk_profile == 0)
                chunk_profile = BTRFS_AVAIL_ALLOC_BIT_SINGLE;

        if (bargs->profiles & chunk_profile)
                return 0;

        return 1;
}
static u64 div_factor_fine(u64 num, int factor)
{
        if (factor <= 0)
                return 0;
        if (factor >= 100)
                return num;

        num *= factor;
        do_div(num, 100);
        return num;
}
static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
                              struct btrfs_balance_args *bargs)
{
        struct btrfs_block_group_cache *cache;
        u64 chunk_used, user_thresh;
        int ret = 1;

        cache = btrfs_lookup_block_group(fs_info, chunk_offset);
        chunk_used = btrfs_block_group_used(&cache->item);

        user_thresh = div_factor_fine(cache->key.offset, bargs->usage);
        if (chunk_used < user_thresh)
                ret = 0;

        btrfs_put_block_group(cache);
        return ret;
}
static int chunk_devid_filter(struct extent_buffer *leaf,
                              struct btrfs_chunk *chunk,
                              struct btrfs_balance_args *bargs)
{
        struct btrfs_stripe *stripe;
        int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
        int i;

        for (i = 0; i < num_stripes; i++) {
                stripe = btrfs_stripe_nr(chunk, i);
                if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
                        return 0;
        }

        return 1;
}
/* [pstart, pend) */
static int chunk_drange_filter(struct extent_buffer *leaf,
                               struct btrfs_chunk *chunk,
                               u64 chunk_offset,
                               struct btrfs_balance_args *bargs)
{
        struct btrfs_stripe *stripe;
        int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
        u64 stripe_offset;
        u64 stripe_length;
        int factor;
        int i;

        if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
                return 0;

        if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
             BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10))
                factor = 2;
        else
                factor = 1;
        factor = num_stripes / factor;

        for (i = 0; i < num_stripes; i++) {
                stripe = btrfs_stripe_nr(chunk, i);
                if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
                        continue;

                stripe_offset = btrfs_stripe_offset(leaf, stripe);
                stripe_length = btrfs_chunk_length(leaf, chunk);
                do_div(stripe_length, factor);

                if (stripe_offset < bargs->pend &&
                    stripe_offset + stripe_length > bargs->pstart)
                        return 0;
        }

        return 1;
}
/* [vstart, vend) */
static int chunk_vrange_filter(struct extent_buffer *leaf,
                               struct btrfs_chunk *chunk,
                               u64 chunk_offset,
                               struct btrfs_balance_args *bargs)
{
        if (chunk_offset < bargs->vend &&
            chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
                /* at least part of the chunk is inside this vrange */
                return 0;

        return 1;
}
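
/*
 * Run a chunk through every filter the user enabled for its block
 * group type; the chunk is balanced only if none of them filters it
 * out.
 */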
static int should_balance_chunk(struct btrfs_root *root,
                                struct extent_buffer *leaf,
                                struct btrfs_chunk *chunk, u64 chunk_offset)
{
        struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
        struct btrfs_balance_args *bargs = NULL;
        u64 chunk_type = btrfs_chunk_type(leaf, chunk);

        /* type filter */
        if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
              (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
                return 0;
        }

        if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
                bargs = &bctl->data;
        else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
                bargs = &bctl->sys;
        else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
                bargs = &bctl->meta;

        /* profiles filter */
        if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
            chunk_profiles_filter(chunk_type, bargs)) {
                return 0;
        }
        /* usage filter */
        if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
            chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) {
                return 0;
        }
        /* devid filter */
        if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
            chunk_devid_filter(leaf, chunk, bargs)) {
                return 0;
        }
        /* drange filter, makes sense only with devid filter */
        if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
            chunk_drange_filter(leaf, chunk, chunk_offset, bargs)) {
                return 0;
        }
        /* vrange filter */
        if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
            chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
                return 0;
        }

        return 1;
}
static u64 div_factor(u64 num, int factor)
{
        if (factor == 10)
                return num;
        num *= factor;
        do_div(num, 10);
        return num;
}
static int __btrfs_balance(struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *chunk_root = fs_info->chunk_root;
        struct btrfs_root *dev_root = fs_info->dev_root;
        struct list_head *devices;
        struct btrfs_device *device;
        u64 old_size;
        u64 size_to_free;
        struct btrfs_chunk *chunk;
        struct btrfs_path *path;
        struct btrfs_key key;
        struct btrfs_key found_key;
        struct btrfs_trans_handle *trans;
        struct extent_buffer *leaf;
        int slot;
        int ret;
        int enospc_errors = 0;

        /* step one make some room on all the devices */
        devices = &fs_info->fs_devices->devices;
        list_for_each_entry(device, devices, dev_list) {
                old_size = device->total_bytes;
                size_to_free = div_factor(old_size, 1);
                size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
                if (!device->writeable ||
                    device->total_bytes - device->bytes_used > size_to_free)
                        continue;

                ret = btrfs_shrink_device(device, old_size - size_to_free);
                if (ret == -ENOSPC)
                        break;
                BUG_ON(ret);

                trans = btrfs_start_transaction(dev_root, 0);
                BUG_ON(IS_ERR(trans));

                ret = btrfs_grow_device(trans, device, old_size);
                BUG_ON(ret);

                btrfs_end_transaction(trans, dev_root);
        }

        /* step two, relocate all the chunks */
        path = btrfs_alloc_path();
        if (!path) {
                ret = -ENOMEM;
                goto error;
        }
        key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
        key.offset = (u64)-1;
        key.type = BTRFS_CHUNK_ITEM_KEY;

        while (1) {
                ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
                if (ret < 0)
                        goto error;
                /*
                 * this shouldn't happen, it means the last relocate
                 * failed
                 */
                if (ret == 0)
                        BUG(); /* FIXME break ? */

                ret = btrfs_previous_item(chunk_root, path, 0,
                                          BTRFS_CHUNK_ITEM_KEY);
                if (ret) {
                        ret = 0;
                        break;
                }

                leaf = path->nodes[0];
                slot = path->slots[0];
                btrfs_item_key_to_cpu(leaf, &found_key, slot);

                if (found_key.objectid != key.objectid)
                        break;

                /* chunk zero is special */
                if (found_key.offset == 0)
                        break;

                chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);

                ret = should_balance_chunk(chunk_root, leaf, chunk,
                                           found_key.offset);
                btrfs_release_path(path);
                if (!ret)
                        goto loop;

                ret = btrfs_relocate_chunk(chunk_root,
                                           chunk_root->root_key.objectid,
                                           found_key.objectid,
                                           found_key.offset);
                if (ret && ret != -ENOSPC)
                        goto error;
                if (ret == -ENOSPC)
                        enospc_errors++;
loop:
                key.offset = found_key.offset - 1;
        }

error:
        btrfs_free_path(path);
        if (enospc_errors) {
                printk(KERN_INFO "btrfs: %d enospc errors during balance\n",
                       enospc_errors);
                if (!ret)
                        ret = -ENOSPC;
        }
        return ret;
}
static void __cancel_balance(struct btrfs_fs_info *fs_info)
{
        unset_balance_control(fs_info);
}

void update_ioctl_balance_args(struct btrfs_fs_info *fs_info,
                               struct btrfs_ioctl_balance_args *bargs);
/*
 * Should be called with both balance and volume mutexes held
 */
int btrfs_balance(struct btrfs_balance_control *bctl,
                  struct btrfs_ioctl_balance_args *bargs)
{
        struct btrfs_fs_info *fs_info = bctl->fs_info;
        u64 allowed;
        int ret;

        if (btrfs_fs_closing(fs_info)) {
                ret = -EINVAL;
                goto out;
        }

        /*
         * In case of mixed groups both data and meta should be picked,
         * and identical options should be given for both of them.
         */
        allowed = btrfs_super_incompat_flags(fs_info->super_copy);
        if ((allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
            (bctl->flags & (BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA))) {
                if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
                    !(bctl->flags & BTRFS_BALANCE_METADATA) ||
                    memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
                        printk(KERN_ERR "btrfs: with mixed groups data and "
                               "metadata balance options must be the same\n");
                        ret = -EINVAL;
                        goto out;
                }
        }

        set_balance_control(bctl);

        mutex_unlock(&fs_info->balance_mutex);

        ret = __btrfs_balance(fs_info);

        mutex_lock(&fs_info->balance_mutex);

        if (bargs) {
                memset(bargs, 0, sizeof(*bargs));
                update_ioctl_balance_args(fs_info, bargs);
        }

        __cancel_balance(fs_info);

        return ret;
out:
        kfree(bctl);
        return ret;
}
/*
 * shrinking a device means finding all of the device extents past
 * the new size, and then following the back refs to the chunks.
 * The chunk relocation code actually frees the device extent
 */
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
{
        struct btrfs_trans_handle *trans;
        struct btrfs_root *root = device->dev_root;
        struct btrfs_dev_extent *dev_extent = NULL;
        struct btrfs_path *path;
        u64 length;
        u64 chunk_tree;
        u64 chunk_objectid;
        u64 chunk_offset;
        int ret;
        int slot;
        int failed = 0;
        bool retried = false;
        struct extent_buffer *l;
        struct btrfs_key key;
        struct btrfs_super_block *super_copy = root->fs_info->super_copy;
        u64 old_total = btrfs_super_total_bytes(super_copy);
        u64 old_size = device->total_bytes;
        u64 diff = device->total_bytes - new_size;

        if (new_size >= device->total_bytes)
                return -EINVAL;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        lock_chunks(root);

        device->total_bytes = new_size;
        if (device->writeable) {
                device->fs_devices->total_rw_bytes -= diff;
                spin_lock(&root->fs_info->free_chunk_lock);
                root->fs_info->free_chunk_space -= diff;
                spin_unlock(&root->fs_info->free_chunk_lock);
        }
        unlock_chunks(root);

again:
        key.objectid = device->devid;
        key.offset = (u64)-1;
        key.type = BTRFS_DEV_EXTENT_KEY;

        while (1) {
                ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
                if (ret < 0)
                        goto done;

                ret = btrfs_previous_item(root, path, 0, key.type);
                if (ret < 0)
                        goto done;
                if (ret) {
                        ret = 0;
                        btrfs_release_path(path);
                        break;
                }

                l = path->nodes[0];
                slot = path->slots[0];
                btrfs_item_key_to_cpu(l, &key, path->slots[0]);

                if (key.objectid != device->devid) {
                        btrfs_release_path(path);
                        break;
                }

                dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
                length = btrfs_dev_extent_length(l, dev_extent);

                if (key.offset + length <= new_size) {
                        btrfs_release_path(path);
                        break;
                }

                chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
                chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
                chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
                btrfs_release_path(path);

                ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
                                           chunk_offset);
                if (ret && ret != -ENOSPC)
                        goto done;
                if (ret == -ENOSPC)
                        failed++;
                key.offset -= 1;
        }

        if (failed && !retried) {
                failed = 0;
                retried = true;
                goto again;
        } else if (failed && retried) {
                ret = -ENOSPC;
                lock_chunks(root);

                device->total_bytes = old_size;
                if (device->writeable)
                        device->fs_devices->total_rw_bytes += diff;
                spin_lock(&root->fs_info->free_chunk_lock);
                root->fs_info->free_chunk_space += diff;
                spin_unlock(&root->fs_info->free_chunk_lock);
                unlock_chunks(root);
                goto done;
        }

        /* Shrinking succeeded, else we would be at "done". */
        trans = btrfs_start_transaction(root, 0);
        if (IS_ERR(trans)) {
                ret = PTR_ERR(trans);
                goto done;
        }

        lock_chunks(root);

        device->disk_total_bytes = new_size;
        /* Now btrfs_update_device() will change the on-disk size. */
        ret = btrfs_update_device(trans, device);
        if (ret) {
                unlock_chunks(root);
                btrfs_end_transaction(trans, root);
                goto done;
        }
        WARN_ON(diff > old_total);
        btrfs_set_super_total_bytes(super_copy, old_total - diff);
        unlock_chunks(root);
        btrfs_end_transaction(trans, root);
done:
        btrfs_free_path(path);
        return ret;
}
2602 static int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
2603 struct btrfs_root *root,
2604 struct btrfs_key *key,
2605 struct btrfs_chunk *chunk, int item_size)
2607 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
2608 struct btrfs_disk_key disk_key;
2612 array_size = btrfs_super_sys_array_size(super_copy);
2613 if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
2616 ptr = super_copy->sys_chunk_array + array_size;
2617 btrfs_cpu_key_to_disk(&disk_key, key);
2618 memcpy(ptr, &disk_key, sizeof(disk_key));
2619 ptr += sizeof(disk_key);
2620 memcpy(ptr, chunk, item_size);
2621 item_size += sizeof(disk_key);
2622 btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
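/*
 * The system chunk array is a packed sequence of
 * (struct btrfs_disk_key, struct btrfs_chunk) pairs:
 *
 *   [disk_key][chunk + stripes][disk_key][chunk + stripes]...
 *
 * btrfs_read_sys_array() below walks it with exactly this layout,
 * advancing by sizeof(disk_key) + btrfs_chunk_item_size(num_stripes)
 * per entry.
 */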
2627 * sort the devices in descending order by max_avail, total_avail
2629 static int btrfs_cmp_device_info(const void *a, const void *b)
2631 const struct btrfs_device_info *di_a = a;
2632 const struct btrfs_device_info *di_b = b;
2634 if (di_a->max_avail > di_b->max_avail)
2636 if (di_a->max_avail < di_b->max_avail)
2638 if (di_a->total_avail > di_b->total_avail)
2640 if (di_a->total_avail < di_b->total_avail)
2645 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
2646 struct btrfs_root *extent_root,
2647 struct map_lookup **map_ret,
2648 u64 *num_bytes_out, u64 *stripe_size_out,
2649 u64 start, u64 type)
2651 struct btrfs_fs_info *info = extent_root->fs_info;
2652 struct btrfs_fs_devices *fs_devices = info->fs_devices;
2653 struct list_head *cur;
2654 struct map_lookup *map = NULL;
2655 struct extent_map_tree *em_tree;
2656 struct extent_map *em;
2657 struct btrfs_device_info *devices_info = NULL;
2659 int num_stripes; /* total number of stripes to allocate */
2660 int sub_stripes; /* sub_stripes info for map */
2661 int dev_stripes; /* stripes per dev */
2662 int devs_max; /* max devs to use */
2663 int devs_min; /* min devs needed */
2664 int devs_increment; /* ndevs has to be a multiple of this */
2665 int ncopies; /* how many copies of the data there are */
2667 u64 max_stripe_size;
2675 if ((type & BTRFS_BLOCK_GROUP_RAID1) &&
2676 (type & BTRFS_BLOCK_GROUP_DUP)) {
2678 type &= ~BTRFS_BLOCK_GROUP_DUP;
2681 if (list_empty(&fs_devices->alloc_list))
2688 devs_max = 0; /* 0 == as many as possible */
2692 * define the properties of each RAID type.
2693 * FIXME: move this to a global table and use it in all RAID allocation functions.
2696 if (type & (BTRFS_BLOCK_GROUP_DUP)) {
2700 } else if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
2702 } else if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
2707 } else if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
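/*
 * For reference (typical values in this generation of the code; treat
 * as approximate): DUP sets dev_stripes = 2, ncopies = 2, devs_max = 1;
 * RAID0 sets devs_min = 2; RAID1 sets devs_min = 2, devs_max = 2,
 * ncopies = 2; RAID10 sets sub_stripes = 2, devs_increment = 2,
 * devs_min = 4, ncopies = 2. Other profiles keep the defaults above.
 */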
2716 if (type & BTRFS_BLOCK_GROUP_DATA) {
2717 max_stripe_size = 1024 * 1024 * 1024;
2718 max_chunk_size = 10 * max_stripe_size;
2719 } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
2720 max_stripe_size = 256 * 1024 * 1024;
2721 max_chunk_size = max_stripe_size;
2722 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
2723 max_stripe_size = 8 * 1024 * 1024;
2724 max_chunk_size = 2 * max_stripe_size;
2726 printk(KERN_ERR "btrfs: invalid chunk type 0x%llx requested\n",
2731 /* we don't want a chunk larger than 10% of writeable space */
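/*
 * div_factor(n, f) is assumed here to compute n * f / 10, so
 * div_factor(total_rw_bytes, 1) is 10% of the writeable space: e.g. a
 * filesystem with 1 TiB writeable caps a single chunk at ~102 GiB,
 * before the per-type max_chunk_size above clamps it further.
 */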
2732 max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
2735 devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices,
2740 cur = fs_devices->alloc_list.next;
2743 * in the first pass through the devices list, we gather information
2744 * about the available holes on each device.
2747 while (cur != &fs_devices->alloc_list) {
2748 struct btrfs_device *device;
2752 device = list_entry(cur, struct btrfs_device, dev_alloc_list);
2756 if (!device->writeable) {
2758 "btrfs: read-only device in alloc_list\n");
2763 if (!device->in_fs_metadata)
2766 if (device->total_bytes > device->bytes_used)
2767 total_avail = device->total_bytes - device->bytes_used;
2771 /* If there is no space on this device, skip it. */
2772 if (total_avail == 0)
2775 ret = find_free_dev_extent(trans, device,
2776 max_stripe_size * dev_stripes,
2777 &dev_offset, &max_avail);
2778 if (ret && ret != -ENOSPC)
2782 max_avail = max_stripe_size * dev_stripes;
2784 if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
2787 devices_info[ndevs].dev_offset = dev_offset;
2788 devices_info[ndevs].max_avail = max_avail;
2789 devices_info[ndevs].total_avail = total_avail;
2790 devices_info[ndevs].dev = device;
2795 * now sort the devices by hole size / available space
2797 sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
2798 btrfs_cmp_device_info, NULL);
2800 /* round down to number of usable stripes */
2801 ndevs -= ndevs % devs_increment;
2803 if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
2808 if (devs_max && ndevs > devs_max)
2811 * the primary goal is to maximize the number of stripes, so use as many
2812 * devices as possible, even if the stripes are not maximum sized.
2814 stripe_size = devices_info[ndevs-1].max_avail;
2815 num_stripes = ndevs * dev_stripes;
2817 if (stripe_size * num_stripes > max_chunk_size * ncopies) {
2818 stripe_size = max_chunk_size * ncopies;
2819 do_div(stripe_size, num_stripes);
2822 do_div(stripe_size, dev_stripes);
2823 do_div(stripe_size, BTRFS_STRIPE_LEN);
2824 stripe_size *= BTRFS_STRIPE_LEN;
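/*
 * Worked example for the do_div/multiply pair above, assuming
 * BTRFS_STRIPE_LEN is 64KiB: a raw stripe_size of 1,000,000 bytes
 * becomes 1,000,000 / 65,536 = 15 (truncated), then 15 * 65,536 =
 * 983,040, i.e. stripe_size is rounded down to a whole number of
 * stripe lengths.
 */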
2826 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
2831 map->num_stripes = num_stripes;
2833 for (i = 0; i < ndevs; ++i) {
2834 for (j = 0; j < dev_stripes; ++j) {
2835 int s = i * dev_stripes + j;
2836 map->stripes[s].dev = devices_info[i].dev;
2837 map->stripes[s].physical = devices_info[i].dev_offset +
2841 map->sector_size = extent_root->sectorsize;
2842 map->stripe_len = BTRFS_STRIPE_LEN;
2843 map->io_align = BTRFS_STRIPE_LEN;
2844 map->io_width = BTRFS_STRIPE_LEN;
2846 map->sub_stripes = sub_stripes;
2849 num_bytes = stripe_size * (num_stripes / ncopies);
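/*
 * Example: RAID1 across 2 devices has num_stripes = 2 and ncopies = 2,
 * so the chunk exposes num_bytes = stripe_size of usable space; RAID0
 * across 3 devices has num_stripes = 3, ncopies = 1, and exposes
 * 3 * stripe_size.
 */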
2851 *stripe_size_out = stripe_size;
2852 *num_bytes_out = num_bytes;
2854 trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);
2856 em = alloc_extent_map();
2861 em->bdev = (struct block_device *)map;
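/*
 * Note the trick here: the extent_map's otherwise-unused bdev pointer
 * is reused to stash the struct map_lookup; readers such as
 * __btrfs_map_block() cast it back with (struct map_lookup *)em->bdev.
 */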
2863 em->len = num_bytes;
2864 em->block_start = 0;
2865 em->block_len = em->len;
2867 em_tree = &extent_root->fs_info->mapping_tree.map_tree;
2868 write_lock(&em_tree->lock);
2869 ret = add_extent_mapping(em_tree, em);
2870 write_unlock(&em_tree->lock);
2872 free_extent_map(em);
2874 ret = btrfs_make_block_group(trans, extent_root, 0, type,
2875 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
2879 for (i = 0; i < map->num_stripes; ++i) {
2880 struct btrfs_device *device;
2883 device = map->stripes[i].dev;
2884 dev_offset = map->stripes[i].physical;
2886 ret = btrfs_alloc_dev_extent(trans, device,
2887 info->chunk_root->root_key.objectid,
2888 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
2889 start, dev_offset, stripe_size);
2893 kfree(devices_info);
2898 kfree(devices_info);
2902 static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
2903 struct btrfs_root *extent_root,
2904 struct map_lookup *map, u64 chunk_offset,
2905 u64 chunk_size, u64 stripe_size)
2908 struct btrfs_key key;
2909 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
2910 struct btrfs_device *device;
2911 struct btrfs_chunk *chunk;
2912 struct btrfs_stripe *stripe;
2913 size_t item_size = btrfs_chunk_item_size(map->num_stripes);
2917 chunk = kzalloc(item_size, GFP_NOFS);
2922 while (index < map->num_stripes) {
2923 device = map->stripes[index].dev;
2924 device->bytes_used += stripe_size;
2925 ret = btrfs_update_device(trans, device);
2930 spin_lock(&extent_root->fs_info->free_chunk_lock);
2931 extent_root->fs_info->free_chunk_space -= (stripe_size *
2933 spin_unlock(&extent_root->fs_info->free_chunk_lock);
2936 stripe = &chunk->stripe;
2937 while (index < map->num_stripes) {
2938 device = map->stripes[index].dev;
2939 dev_offset = map->stripes[index].physical;
2941 btrfs_set_stack_stripe_devid(stripe, device->devid);
2942 btrfs_set_stack_stripe_offset(stripe, dev_offset);
2943 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
2948 btrfs_set_stack_chunk_length(chunk, chunk_size);
2949 btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
2950 btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
2951 btrfs_set_stack_chunk_type(chunk, map->type);
2952 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
2953 btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
2954 btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
2955 btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
2956 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
2958 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2959 key.type = BTRFS_CHUNK_ITEM_KEY;
2960 key.offset = chunk_offset;
2962 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
2965 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
2966 ret = btrfs_add_system_chunk(trans, chunk_root, &key, chunk,
2976 * Chunk allocation falls into two parts. The first part does the work
2977 * that makes the newly allocated chunk usable, but does not do any
2978 * operation that modifies the chunk tree. The second part does the work
2979 * that requires modifying the chunk tree. This division is important for
2980 * the bootstrap process of adding storage to a seed btrfs.
2982 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
2983 struct btrfs_root *extent_root, u64 type)
2988 struct map_lookup *map;
2989 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
2992 ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
2997 ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
2998 &stripe_size, chunk_offset, type);
3002 ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
3003 chunk_size, stripe_size);
3008 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
3009 struct btrfs_root *root,
3010 struct btrfs_device *device)
3013 u64 sys_chunk_offset;
3017 u64 sys_stripe_size;
3019 struct map_lookup *map;
3020 struct map_lookup *sys_map;
3021 struct btrfs_fs_info *fs_info = root->fs_info;
3022 struct btrfs_root *extent_root = fs_info->extent_root;
3025 ret = find_next_chunk(fs_info->chunk_root,
3026 BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset);
3030 alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
3031 fs_info->avail_metadata_alloc_bits;
3032 alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
3034 ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
3035 &stripe_size, chunk_offset, alloc_profile);
3038 sys_chunk_offset = chunk_offset + chunk_size;
3040 alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM |
3041 fs_info->avail_system_alloc_bits;
3042 alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
3044 ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
3045 &sys_chunk_size, &sys_stripe_size,
3046 sys_chunk_offset, alloc_profile);
3049 ret = btrfs_add_device(trans, fs_info->chunk_root, device);
3053 * Modifying the chunk tree requires allocating new blocks from both
3054 * the system block group and the metadata block group. So we can
3055 * only do operations that modify the chunk tree after both
3056 * block groups have been created.
3058 ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
3059 chunk_size, stripe_size);
3062 ret = __finish_chunk_alloc(trans, extent_root, sys_map,
3063 sys_chunk_offset, sys_chunk_size,
3069 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
3071 struct extent_map *em;
3072 struct map_lookup *map;
3073 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
3077 read_lock(&map_tree->map_tree.lock);
3078 em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
3079 read_unlock(&map_tree->map_tree.lock);
3083 if (btrfs_test_opt(root, DEGRADED)) {
3084 free_extent_map(em);
3088 map = (struct map_lookup *)em->bdev;
3089 for (i = 0; i < map->num_stripes; i++) {
3090 if (!map->stripes[i].dev->writeable) {
3095 free_extent_map(em);
3099 void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
3101 extent_map_tree_init(&tree->map_tree);
3104 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
3106 struct extent_map *em;
3109 write_lock(&tree->map_tree.lock);
3110 em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
3112 remove_extent_mapping(&tree->map_tree, em);
3113 write_unlock(&tree->map_tree.lock);
3118 free_extent_map(em);
3119 /* once for the tree */
3120 free_extent_map(em);
3124 int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
3126 struct extent_map *em;
3127 struct map_lookup *map;
3128 struct extent_map_tree *em_tree = &map_tree->map_tree;
3131 read_lock(&em_tree->lock);
3132 em = lookup_extent_mapping(em_tree, logical, len);
3133 read_unlock(&em_tree->lock);
3136 BUG_ON(em->start > logical || em->start + em->len < logical);
3137 map = (struct map_lookup *)em->bdev;
3138 if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
3139 ret = map->num_stripes;
3140 else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
3141 ret = map->sub_stripes;
3144 free_extent_map(em);
3148 static int find_live_mirror(struct map_lookup *map, int first, int num,
3152 if (map->stripes[optimal].dev->bdev)
3154 for (i = first; i < first + num; i++) {
3155 if (map->stripes[i].dev->bdev)
3158 /* we couldn't find one that doesn't fail. Just return something
3159 * and the io error handling code will clean up eventually
3164 static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
3165 u64 logical, u64 *length,
3166 struct btrfs_bio **bbio_ret,
3169 struct extent_map *em;
3170 struct map_lookup *map;
3171 struct extent_map_tree *em_tree = &map_tree->map_tree;
3174 u64 stripe_end_offset;
3178 int stripes_allocated = 8;
3179 int stripes_required = 1;
3184 struct btrfs_bio *bbio = NULL;
3186 if (bbio_ret && !(rw & (REQ_WRITE | REQ_DISCARD)))
3187 stripes_allocated = 1;
3190 bbio = kzalloc(btrfs_bio_size(stripes_allocated),
3195 atomic_set(&bbio->error, 0);
3198 read_lock(&em_tree->lock);
3199 em = lookup_extent_mapping(em_tree, logical, *length);
3200 read_unlock(&em_tree->lock);
3203 printk(KERN_CRIT "unable to find logical %llu len %llu\n",
3204 (unsigned long long)logical,
3205 (unsigned long long)*length);
3209 BUG_ON(em->start > logical || em->start + em->len < logical);
3210 map = (struct map_lookup *)em->bdev;
3211 offset = logical - em->start;
3213 if (mirror_num > map->num_stripes)
3216 /* if our btrfs_bio struct is too small, back off and try again */
3217 if (rw & REQ_WRITE) {
3218 if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
3219 BTRFS_BLOCK_GROUP_DUP)) {
3220 stripes_required = map->num_stripes;
3222 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3223 stripes_required = map->sub_stripes;
3227 if (rw & REQ_DISCARD) {
3228 if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK)
3229 stripes_required = map->num_stripes;
3231 if (bbio_ret && (rw & (REQ_WRITE | REQ_DISCARD)) &&
3232 stripes_allocated < stripes_required) {
3233 stripes_allocated = map->num_stripes;
3234 free_extent_map(em);
3240 * stripe_nr counts the total number of stripes we have to stride
3241 * to get to this block
3243 do_div(stripe_nr, map->stripe_len);
3245 stripe_offset = stripe_nr * map->stripe_len;
3246 BUG_ON(offset < stripe_offset);
3248 /* stripe_offset is the offset of this block in its stripe */
3249 stripe_offset = offset - stripe_offset;
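/*
 * stripe_nr starts out as the chunk-relative offset. Worked example,
 * assuming a 64KiB stripe_len: offset = 200KiB gives stripe_nr = 3
 * (do_div leaves the quotient in place) and
 * stripe_offset = 200KiB - 3 * 64KiB = 8KiB into that stripe.
 */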
3251 if (rw & REQ_DISCARD)
3252 *length = min_t(u64, em->len - offset, *length);
3253 else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
3254 /* we limit the length of each bio to what fits in a stripe */
3255 *length = min_t(u64, em->len - offset,
3256 map->stripe_len - stripe_offset);
3258 *length = em->len - offset;
3266 stripe_nr_orig = stripe_nr;
3267 stripe_nr_end = (offset + *length + map->stripe_len - 1) &
3268 (~(map->stripe_len - 1));
3269 do_div(stripe_nr_end, map->stripe_len);
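/*
 * Worked example, again assuming a 64KiB stripe_len: offset = 100KiB
 * and *length = 200KiB round offset + *length = 300KiB up to 320KiB,
 * so stripe_nr_end = 5; the stripe_end_offset computed next is then
 * 5 * 64KiB - 300KiB = 20KiB of slack in the final stripe.
 */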
3270 stripe_end_offset = stripe_nr_end * map->stripe_len -
3272 if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3273 if (rw & REQ_DISCARD)
3274 num_stripes = min_t(u64, map->num_stripes,
3275 stripe_nr_end - stripe_nr_orig);
3276 stripe_index = do_div(stripe_nr, map->num_stripes);
3277 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
3278 if (rw & (REQ_WRITE | REQ_DISCARD))
3279 num_stripes = map->num_stripes;
3280 else if (mirror_num)
3281 stripe_index = mirror_num - 1;
3283 stripe_index = find_live_mirror(map, 0,
3285 current->pid % map->num_stripes);
3286 mirror_num = stripe_index + 1;
3289 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
3290 if (rw & (REQ_WRITE | REQ_DISCARD)) {
3291 num_stripes = map->num_stripes;
3292 } else if (mirror_num) {
3293 stripe_index = mirror_num - 1;
3298 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3299 int factor = map->num_stripes / map->sub_stripes;
3301 stripe_index = do_div(stripe_nr, factor);
3302 stripe_index *= map->sub_stripes;
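/*
 * Example with num_stripes = 4 and sub_stripes = 2 (factor = 2):
 * logical stripe 5 gives stripe_index = 5 % 2 = 1, scaled to device
 * slot 2, and stripe_nr becomes 2 full rows down that mirror pair.
 */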
3305 num_stripes = map->sub_stripes;
3306 else if (rw & REQ_DISCARD)
3307 num_stripes = min_t(u64, map->sub_stripes *
3308 (stripe_nr_end - stripe_nr_orig),
3310 else if (mirror_num)
3311 stripe_index += mirror_num - 1;
3313 stripe_index = find_live_mirror(map, stripe_index,
3314 map->sub_stripes, stripe_index +
3315 current->pid % map->sub_stripes);
3316 mirror_num = stripe_index + 1;
3320 * after this do_div call, stripe_nr is the number of stripes
3321 * on this device we have to walk to find the data, and
3322 * stripe_index is the number of our device in the stripe array
3324 stripe_index = do_div(stripe_nr, map->num_stripes);
3325 mirror_num = stripe_index + 1;
3327 BUG_ON(stripe_index >= map->num_stripes);
3329 if (rw & REQ_DISCARD) {
3330 for (i = 0; i < num_stripes; i++) {
3331 bbio->stripes[i].physical =
3332 map->stripes[stripe_index].physical +
3333 stripe_offset + stripe_nr * map->stripe_len;
3334 bbio->stripes[i].dev = map->stripes[stripe_index].dev;
3336 if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3338 u32 last_stripe = 0;
3341 div_u64_rem(stripe_nr_end - 1,
3345 for (j = 0; j < map->num_stripes; j++) {
3348 div_u64_rem(stripe_nr_end - 1 - j,
3349 map->num_stripes, &test);
3350 if (test == stripe_index)
3353 stripes = stripe_nr_end - 1 - j;
3354 do_div(stripes, map->num_stripes);
3355 bbio->stripes[i].length = map->stripe_len *
3356 (stripes - stripe_nr + 1);
3359 bbio->stripes[i].length -=
3363 if (stripe_index == last_stripe)
3364 bbio->stripes[i].length -=
3366 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3369 int factor = map->num_stripes /
3371 u32 last_stripe = 0;
3373 div_u64_rem(stripe_nr_end - 1,
3374 factor, &last_stripe);
3375 last_stripe *= map->sub_stripes;
3377 for (j = 0; j < factor; j++) {
3380 div_u64_rem(stripe_nr_end - 1 - j,
3384 stripe_index / map->sub_stripes)
3387 stripes = stripe_nr_end - 1 - j;
3388 do_div(stripes, factor);
3389 bbio->stripes[i].length = map->stripe_len *
3390 (stripes - stripe_nr + 1);
3392 if (i < map->sub_stripes) {
3393 bbio->stripes[i].length -=
3395 if (i == map->sub_stripes - 1)
3398 if (stripe_index >= last_stripe &&
3399 stripe_index <= (last_stripe +
3400 map->sub_stripes - 1)) {
3401 bbio->stripes[i].length -=
3405 bbio->stripes[i].length = *length;
3408 if (stripe_index == map->num_stripes) {
3409 /* This could only happen for RAID0/10 */
3415 for (i = 0; i < num_stripes; i++) {
3416 bbio->stripes[i].physical =
3417 map->stripes[stripe_index].physical +
3419 stripe_nr * map->stripe_len;
3420 bbio->stripes[i].dev =
3421 map->stripes[stripe_index].dev;
3427 bbio->num_stripes = num_stripes;
3428 bbio->max_errors = max_errors;
3429 bbio->mirror_num = mirror_num;
3432 free_extent_map(em);
3436 int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
3437 u64 logical, u64 *length,
3438 struct btrfs_bio **bbio_ret, int mirror_num)
3440 return __btrfs_map_block(map_tree, rw, logical, length, bbio_ret,
3444 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
3445 u64 chunk_start, u64 physical, u64 devid,
3446 u64 **logical, int *naddrs, int *stripe_len)
3448 struct extent_map_tree *em_tree = &map_tree->map_tree;
3449 struct extent_map *em;
3450 struct map_lookup *map;
3457 read_lock(&em_tree->lock);
3458 em = lookup_extent_mapping(em_tree, chunk_start, 1);
3459 read_unlock(&em_tree->lock);
3461 BUG_ON(!em || em->start != chunk_start);
3462 map = (struct map_lookup *)em->bdev;
3465 if (map->type & BTRFS_BLOCK_GROUP_RAID10)
3466 do_div(length, map->num_stripes / map->sub_stripes);
3467 else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
3468 do_div(length, map->num_stripes);
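/*
 * Example: a 1GiB RAID0 chunk striped over 4 devices occupies
 * 1GiB / 4 = 256MiB of each device, which is the per-device length
 * used for the physical-range check below.
 */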
3470 buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
3473 for (i = 0; i < map->num_stripes; i++) {
3474 if (devid && map->stripes[i].dev->devid != devid)
3476 if (map->stripes[i].physical > physical ||
3477 map->stripes[i].physical + length <= physical)
3480 stripe_nr = physical - map->stripes[i].physical;
3481 do_div(stripe_nr, map->stripe_len);
3483 if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3484 stripe_nr = stripe_nr * map->num_stripes + i;
3485 do_div(stripe_nr, map->sub_stripes);
3486 } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3487 stripe_nr = stripe_nr * map->num_stripes + i;
3489 bytenr = chunk_start + stripe_nr * map->stripe_len;
3490 WARN_ON(nr >= map->num_stripes);
3491 for (j = 0; j < nr; j++) {
3492 if (buf[j] == bytenr)
3496 WARN_ON(nr >= map->num_stripes);
3503 *stripe_len = map->stripe_len;
3505 free_extent_map(em);
3509 static void btrfs_end_bio(struct bio *bio, int err)
3511 struct btrfs_bio *bbio = bio->bi_private;
3512 int is_orig_bio = 0;
3515 atomic_inc(&bbio->error);
3517 if (bio == bbio->orig_bio)
3520 if (atomic_dec_and_test(&bbio->stripes_pending)) {
3523 bio = bbio->orig_bio;
3525 bio->bi_private = bbio->private;
3526 bio->bi_end_io = bbio->end_io;
3527 bio->bi_bdev = (struct block_device *)
3528 (unsigned long)bbio->mirror_num;
3529 /* only send an error to the higher layers if it is
3530 * beyond the tolerance of the multi-bio
3532 if (atomic_read(&bbio->error) > bbio->max_errors) {
3536 * this bio is actually up to date, we didn't
3537 * go over the max number of errors
3539 set_bit(BIO_UPTODATE, &bio->bi_flags);
3544 bio_endio(bio, err);
3545 } else if (!is_orig_bio) {
3550 struct async_sched {
3553 struct btrfs_fs_info *info;
3554 struct btrfs_work work;
3558 * see run_scheduled_bios for a description of why bios are collected for the benefit of the scheduler.
3561 * This will add one bio to the pending list for a device and make sure
3562 * the work struct is scheduled.
3564 static noinline int schedule_bio(struct btrfs_root *root,
3565 struct btrfs_device *device,
3566 int rw, struct bio *bio)
3568 int should_queue = 1;
3569 struct btrfs_pending_bios *pending_bios;
3571 /* don't bother with additional async steps for reads, right now */
3572 if (!(rw & REQ_WRITE)) {
3574 submit_bio(rw, bio);
3580 * nr_async_bios allows us to reliably return congestion to the
3581 * higher layers. Otherwise, the async bio makes it appear we have
3582 * made progress against dirty pages when we've really just put it
3583 * on a queue for later
3585 atomic_inc(&root->fs_info->nr_async_bios);
3586 WARN_ON(bio->bi_next);
3587 bio->bi_next = NULL;
3590 spin_lock(&device->io_lock);
3591 if (bio->bi_rw & REQ_SYNC)
3592 pending_bios = &device->pending_sync_bios;
3594 pending_bios = &device->pending_bios;
3596 if (pending_bios->tail)
3597 pending_bios->tail->bi_next = bio;
3599 pending_bios->tail = bio;
3600 if (!pending_bios->head)
3601 pending_bios->head = bio;
3602 if (device->running_pending)
3605 spin_unlock(&device->io_lock);
3608 btrfs_queue_worker(&root->fs_info->submit_workers,
3613 int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
3614 int mirror_num, int async_submit)
3616 struct btrfs_mapping_tree *map_tree;
3617 struct btrfs_device *dev;
3618 struct bio *first_bio = bio;
3619 u64 logical = (u64)bio->bi_sector << 9;
3625 struct btrfs_bio *bbio = NULL;
3627 length = bio->bi_size;
3628 map_tree = &root->fs_info->mapping_tree;
3629 map_length = length;
3631 ret = btrfs_map_block(map_tree, rw, logical, &map_length, &bbio,
3635 total_devs = bbio->num_stripes;
3636 if (map_length < length) {
3637 printk(KERN_CRIT "mapping failed logical %llu bio len %llu "
3638 "len %llu\n", (unsigned long long)logical,
3639 (unsigned long long)length,
3640 (unsigned long long)map_length);
3644 bbio->orig_bio = first_bio;
3645 bbio->private = first_bio->bi_private;
3646 bbio->end_io = first_bio->bi_end_io;
3647 atomic_set(&bbio->stripes_pending, bbio->num_stripes);
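/*
 * One bio is sent per stripe: every stripe but the last gets a clone
 * of first_bio, the original is sent last, and btrfs_end_bio() only
 * completes the original once stripes_pending drops to zero.
 */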
3649 while (dev_nr < total_devs) {
3650 if (dev_nr < total_devs - 1) {
3651 bio = bio_clone(first_bio, GFP_NOFS);
3656 bio->bi_private = bbio;
3657 bio->bi_end_io = btrfs_end_bio;
3658 bio->bi_sector = bbio->stripes[dev_nr].physical >> 9;
3659 dev = bbio->stripes[dev_nr].dev;
3660 if (dev && dev->bdev && (rw != WRITE || dev->writeable)) {
3661 pr_debug("btrfs_map_bio: rw %d, sector=%llu, dev=%lu "
3662 "(%s id %llu), size=%u\n", rw,
3663 (u64)bio->bi_sector, (u_long)dev->bdev->bd_dev,
3664 dev->name, dev->devid, bio->bi_size);
3665 bio->bi_bdev = dev->bdev;
3667 schedule_bio(root, dev, rw, bio);
3669 submit_bio(rw, bio);
3671 bio->bi_bdev = root->fs_info->fs_devices->latest_bdev;
3672 bio->bi_sector = logical >> 9;
3673 bio_endio(bio, -EIO);
3680 struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
3683 struct btrfs_device *device;
3684 struct btrfs_fs_devices *cur_devices;
3686 cur_devices = root->fs_info->fs_devices;
3687 while (cur_devices) {
3689 !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
3690 device = __find_device(&cur_devices->devices,
3695 cur_devices = cur_devices->seed;
3700 static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
3701 u64 devid, u8 *dev_uuid)
3703 struct btrfs_device *device;
3704 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
3706 device = kzalloc(sizeof(*device), GFP_NOFS);
3709 list_add(&device->dev_list,
3710 &fs_devices->devices);
3711 device->dev_root = root->fs_info->dev_root;
3712 device->devid = devid;
3713 device->work.func = pending_bios_fn;
3714 device->fs_devices = fs_devices;
3715 device->missing = 1;
3716 fs_devices->num_devices++;
3717 fs_devices->missing_devices++;
3718 spin_lock_init(&device->io_lock);
3719 INIT_LIST_HEAD(&device->dev_alloc_list);
3720 memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
3724 static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
3725 struct extent_buffer *leaf,
3726 struct btrfs_chunk *chunk)
3728 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
3729 struct map_lookup *map;
3730 struct extent_map *em;
3734 u8 uuid[BTRFS_UUID_SIZE];
3739 logical = key->offset;
3740 length = btrfs_chunk_length(leaf, chunk);
3742 read_lock(&map_tree->map_tree.lock);
3743 em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
3744 read_unlock(&map_tree->map_tree.lock);
3746 /* already mapped? */
3747 if (em && em->start <= logical && em->start + em->len > logical) {
3748 free_extent_map(em);
3751 free_extent_map(em);
3754 em = alloc_extent_map();
3757 num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3758 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
3760 free_extent_map(em);
3764 em->bdev = (struct block_device *)map;
3765 em->start = logical;
3767 em->block_start = 0;
3768 em->block_len = em->len;
3770 map->num_stripes = num_stripes;
3771 map->io_width = btrfs_chunk_io_width(leaf, chunk);
3772 map->io_align = btrfs_chunk_io_align(leaf, chunk);
3773 map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
3774 map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
3775 map->type = btrfs_chunk_type(leaf, chunk);
3776 map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
3777 for (i = 0; i < num_stripes; i++) {
3778 map->stripes[i].physical =
3779 btrfs_stripe_offset_nr(leaf, chunk, i);
3780 devid = btrfs_stripe_devid_nr(leaf, chunk, i);
3781 read_extent_buffer(leaf, uuid, (unsigned long)
3782 btrfs_stripe_dev_uuid_nr(chunk, i),
3784 map->stripes[i].dev = btrfs_find_device(root, devid, uuid,
3786 if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
3788 free_extent_map(em);
3791 if (!map->stripes[i].dev) {
3792 map->stripes[i].dev =
3793 add_missing_dev(root, devid, uuid);
3794 if (!map->stripes[i].dev) {
3796 free_extent_map(em);
3800 map->stripes[i].dev->in_fs_metadata = 1;
3803 write_lock(&map_tree->map_tree.lock);
3804 ret = add_extent_mapping(&map_tree->map_tree, em);
3805 write_unlock(&map_tree->map_tree.lock);
3807 free_extent_map(em);
3812 static int fill_device_from_item(struct extent_buffer *leaf,
3813 struct btrfs_dev_item *dev_item,
3814 struct btrfs_device *device)
3818 device->devid = btrfs_device_id(leaf, dev_item);
3819 device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
3820 device->total_bytes = device->disk_total_bytes;
3821 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
3822 device->type = btrfs_device_type(leaf, dev_item);
3823 device->io_align = btrfs_device_io_align(leaf, dev_item);
3824 device->io_width = btrfs_device_io_width(leaf, dev_item);
3825 device->sector_size = btrfs_device_sector_size(leaf, dev_item);
3827 ptr = (unsigned long)btrfs_device_uuid(dev_item);
3828 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
3833 static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
3835 struct btrfs_fs_devices *fs_devices;
3838 mutex_lock(&uuid_mutex);
3840 fs_devices = root->fs_info->fs_devices->seed;
3841 while (fs_devices) {
3842 if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
3846 fs_devices = fs_devices->seed;
3849 fs_devices = find_fsid(fsid);
3855 fs_devices = clone_fs_devices(fs_devices);
3856 if (IS_ERR(fs_devices)) {
3857 ret = PTR_ERR(fs_devices);
3861 ret = __btrfs_open_devices(fs_devices, FMODE_READ,
3862 root->fs_info->bdev_holder);
3866 if (!fs_devices->seeding) {
3867 __btrfs_close_devices(fs_devices);
3868 free_fs_devices(fs_devices);
3873 fs_devices->seed = root->fs_info->fs_devices->seed;
3874 root->fs_info->fs_devices->seed = fs_devices;
3876 mutex_unlock(&uuid_mutex);
3880 static int read_one_dev(struct btrfs_root *root,
3881 struct extent_buffer *leaf,
3882 struct btrfs_dev_item *dev_item)
3884 struct btrfs_device *device;
3887 u8 fs_uuid[BTRFS_UUID_SIZE];
3888 u8 dev_uuid[BTRFS_UUID_SIZE];
3890 devid = btrfs_device_id(leaf, dev_item);
3891 read_extent_buffer(leaf, dev_uuid,
3892 (unsigned long)btrfs_device_uuid(dev_item),
3894 read_extent_buffer(leaf, fs_uuid,
3895 (unsigned long)btrfs_device_fsid(dev_item),
3898 if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
3899 ret = open_seed_devices(root, fs_uuid);
3900 if (ret && !btrfs_test_opt(root, DEGRADED))
3904 device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
3905 if (!device || !device->bdev) {
3906 if (!btrfs_test_opt(root, DEGRADED))
3910 printk(KERN_WARNING "btrfs: devid %llu missing\n",
3911 (unsigned long long)devid);
3912 device = add_missing_dev(root, devid, dev_uuid);
3915 } else if (!device->missing) {
3917 * this happens when a device that was properly set up
3918 * in the device info lists suddenly goes bad.
3919 * device->bdev is NULL, and so we have to set
3920 * device->missing to one here
3922 root->fs_info->fs_devices->missing_devices++;
3923 device->missing = 1;
3927 if (device->fs_devices != root->fs_info->fs_devices) {
3928 BUG_ON(device->writeable);
3929 if (device->generation !=
3930 btrfs_device_generation(leaf, dev_item))
3934 fill_device_from_item(leaf, dev_item, device);
3935 device->dev_root = root->fs_info->dev_root;
3936 device->in_fs_metadata = 1;
3937 if (device->writeable) {
3938 device->fs_devices->total_rw_bytes += device->total_bytes;
3939 spin_lock(&root->fs_info->free_chunk_lock);
3940 root->fs_info->free_chunk_space += device->total_bytes -
3942 spin_unlock(&root->fs_info->free_chunk_lock);
3948 int btrfs_read_sys_array(struct btrfs_root *root)
3950 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3951 struct extent_buffer *sb;
3952 struct btrfs_disk_key *disk_key;
3953 struct btrfs_chunk *chunk;
3955 unsigned long sb_ptr;
3961 struct btrfs_key key;
3963 sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
3964 BTRFS_SUPER_INFO_SIZE);
3967 btrfs_set_buffer_uptodate(sb);
3968 btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
3970 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
3971 array_size = btrfs_super_sys_array_size(super_copy);
3973 ptr = super_copy->sys_chunk_array;
3974 sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
3977 while (cur < array_size) {
3978 disk_key = (struct btrfs_disk_key *)ptr;
3979 btrfs_disk_key_to_cpu(&key, disk_key);
3981 len = sizeof(*disk_key); ptr += len;
3985 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
3986 chunk = (struct btrfs_chunk *)sb_ptr;
3987 ret = read_one_chunk(root, &key, sb, chunk);
3990 num_stripes = btrfs_chunk_num_stripes(sb, chunk);
3991 len = btrfs_chunk_item_size(num_stripes);
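/*
 * Each iteration consumes one (disk_key, chunk) pair: ptr, sb_ptr and
 * cur all advance by sizeof(*disk_key) and then by the
 * num_stripes-dependent chunk item size, mirroring the layout written
 * by btrfs_add_system_chunk().
 */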
4000 free_extent_buffer(sb);
4004 int btrfs_read_chunk_tree(struct btrfs_root *root)
4006 struct btrfs_path *path;
4007 struct extent_buffer *leaf;
4008 struct btrfs_key key;
4009 struct btrfs_key found_key;
4013 root = root->fs_info->chunk_root;
4015 path = btrfs_alloc_path();
4019 /* first we search for all of the device items, and then we
4020 * read in all of the chunk items. This way we can create chunk
4021 * mappings that reference all of the devices that are found
4023 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
4027 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4031 leaf = path->nodes[0];
4032 slot = path->slots[0];
4033 if (slot >= btrfs_header_nritems(leaf)) {
4034 ret = btrfs_next_leaf(root, path);
4041 btrfs_item_key_to_cpu(leaf, &found_key, slot);
4042 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
4043 if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
4045 if (found_key.type == BTRFS_DEV_ITEM_KEY) {
4046 struct btrfs_dev_item *dev_item;
4047 dev_item = btrfs_item_ptr(leaf, slot,
4048 struct btrfs_dev_item);
4049 ret = read_one_dev(root, leaf, dev_item);
4053 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
4054 struct btrfs_chunk *chunk;
4055 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
4056 ret = read_one_chunk(root, &found_key, leaf, chunk);
4062 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
4064 btrfs_release_path(path);
4069 btrfs_free_path(path);