/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <asm/div64.h>
#include "compat.h"
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "async-thread.h"
static int init_first_rw_device(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_device *device);
static int btrfs_relocate_sys_chunks(struct btrfs_root *root);

static DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
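
/*
 * Every btrfs_fs_devices structure the module knows about sits on
 * fs_uuids, one entry per filesystem fsid seen during device scan.
 * uuid_mutex serializes additions and removals on that list against
 * the open/close paths below.
 */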
static void lock_chunks(struct btrfs_root *root)
{
	mutex_lock(&root->fs_info->chunk_mutex);
}

static void unlock_chunks(struct btrfs_root *root)
{
	mutex_unlock(&root->fs_info->chunk_mutex);
}
static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;
	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		kfree(device->name);
		kfree(device);
	}
	kfree(fs_devices);
}
int btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, list);
		list_del(&fs_devices->list);
		free_fs_devices(fs_devices);
	}
	return 0;
}
static noinline struct btrfs_device *__find_device(struct list_head *head,
						   u64 devid, u8 *uuid)
{
	struct btrfs_device *dev;

	list_for_each_entry(dev, head, dev_list) {
		if (dev->devid == devid &&
		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
			return dev;
		}
	}
	return NULL;
}
static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, list) {
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}
static void requeue_list(struct btrfs_pending_bios *pending_bios,
			struct bio *head, struct bio *tail)
{
	struct bio *old_head;

	old_head = pending_bios->head;
	pending_bios->head = head;
	if (pending_bios->tail)
		tail->bi_next = old_head;
	else
		pending_bios->tail = tail;
}
/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device.  This greatly
 * improves the scheduler's ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block.  The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested.  This way, multiple devices
 * can make progress from a single worker thread.
 */
static noinline int run_scheduled_bios(struct btrfs_device *device)
{
	struct bio *pending;
	struct backing_dev_info *bdi;
	struct btrfs_fs_info *fs_info;
	struct btrfs_pending_bios *pending_bios;
	struct bio *tail;
	struct bio *cur;
	int again = 0;
	unsigned long num_run;
	unsigned long batch_run = 0;
	unsigned long limit;
	unsigned long last_waited = 0;
	int force_reg = 0;
	int sync_pending = 0;
	struct blk_plug plug;

	/*
	 * this function runs all the bios we've collected for
	 * a particular device.  We don't want to wander off to
	 * another device without first sending all of these down.
	 * So, setup a plug here and finish it off before we return
	 */
	blk_start_plug(&plug);

	bdi = blk_get_backing_dev_info(device->bdev);
	fs_info = device->dev_root->fs_info;
	limit = btrfs_async_submit_limit(fs_info);
	limit = limit * 2 / 3;

loop:
	spin_lock(&device->io_lock);

loop_lock:
	num_run = 0;

	/* take all the bios off the list at once and process them
	 * later on (without the lock held).  But, remember the
	 * tail and other pointers so the bios can be properly reinserted
	 * into the list if we hit congestion
	 */
	if (!force_reg && device->pending_sync_bios.head) {
		pending_bios = &device->pending_sync_bios;
		force_reg = 1;
	} else {
		pending_bios = &device->pending_bios;
		force_reg = 0;
	}

	pending = pending_bios->head;
	tail = pending_bios->tail;
	WARN_ON(pending && !tail);

	/*
	 * if pending was null this time around, no bios need processing
	 * at all and we can stop.  Otherwise it'll loop back up again
	 * and do an additional check so no bios are missed.
	 *
	 * device->running_pending is used to synchronize with the
	 * schedule_bio code.
	 */
	if (device->pending_sync_bios.head == NULL &&
	    device->pending_bios.head == NULL) {
		again = 0;
		device->running_pending = 0;
	} else {
		again = 1;
		device->running_pending = 1;
	}

	pending_bios->head = NULL;
	pending_bios->tail = NULL;

	spin_unlock(&device->io_lock);

	while (pending) {

		rmb();
		/* we want to work on both lists, but do more bios on the
		 * sync list than the regular list
		 */
		if ((num_run > 32 &&
		    pending_bios != &device->pending_sync_bios &&
		    device->pending_sync_bios.head) ||
		   (num_run > 64 && pending_bios == &device->pending_sync_bios &&
		    device->pending_bios.head)) {
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			goto loop_lock;
		}

		cur = pending;
		pending = pending->bi_next;
		cur->bi_next = NULL;
		atomic_dec(&fs_info->nr_async_bios);

		if (atomic_read(&fs_info->nr_async_bios) < limit &&
		    waitqueue_active(&fs_info->async_submit_wait))
			wake_up(&fs_info->async_submit_wait);

		BUG_ON(atomic_read(&cur->bi_cnt) == 0);

		/*
		 * if we're doing the sync list, record that our
		 * plug has some sync requests on it
		 *
		 * If we're doing the regular list and there are
		 * sync requests sitting around, unplug before
		 * we add more
		 */
		if (pending_bios == &device->pending_sync_bios) {
			sync_pending = 1;
		} else if (sync_pending) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}

		submit_bio(cur->bi_rw, cur);
		num_run++;
		batch_run++;
		if (need_resched())
			cond_resched();

		/*
		 * we made progress, there is more work to do and the bdi
		 * is now congested.  Back off and let other work structs
		 * run instead
		 */
		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
		    fs_info->fs_devices->open_devices > 1) {
			struct io_context *ioc;

			ioc = current->io_context;

			/*
			 * the main goal here is that we don't want to
			 * block if we're going to be able to submit
			 * more requests without blocking.
			 *
			 * This code does two great things, it pokes into
			 * the elevator code from a filesystem _and_
			 * it makes assumptions about how batching works.
			 */
			if (ioc && ioc->nr_batch_requests > 0 &&
			    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
			    (last_waited == 0 ||
			     ioc->last_waited == last_waited)) {
				/*
				 * we want to go through our batch of
				 * requests and stop. So, we copy out
				 * the ioc->last_waited time and test
				 * against it before looping
				 */
				last_waited = ioc->last_waited;
				if (need_resched())
					cond_resched();
				continue;
			}
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			device->running_pending = 1;

			spin_unlock(&device->io_lock);
			btrfs_requeue_work(&device->work);
			goto done;
		}
		/* unplug every 64 requests just for good measure */
		if (batch_run % 64 == 0) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}
	}

	cond_resched();
	if (again)
		goto loop;

	spin_lock(&device->io_lock);
	if (device->pending_bios.head || device->pending_sync_bios.head)
		goto loop_lock;
	spin_unlock(&device->io_lock);

done:
	blk_finish_plug(&plug);
	return 0;
}
static void pending_bios_fn(struct btrfs_work *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, work);
	run_scheduled_bios(device);
}
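
/*
 * pending_bios_fn is the btrfs_worker entry point: the bio scheduling
 * code queues bios on a device's pending lists and hands this work
 * struct to the async submission workers, which drain the device
 * through run_scheduled_bios() above.
 */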
static noinline int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	u64 found_transid = btrfs_super_generation(disk_super);
	char *name;

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
		if (!fs_devices)
			return -ENOMEM;
		INIT_LIST_HEAD(&fs_devices->devices);
		INIT_LIST_HEAD(&fs_devices->alloc_list);
		list_add(&fs_devices->list, &fs_uuids);
		memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
		mutex_init(&fs_devices->device_list_mutex);
		device = NULL;
	} else {
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
	}
	if (!device) {
		if (fs_devices->opened)
			return -EBUSY;

		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device) {
			/* we can safely leave the fs_devices entry around */
			return -ENOMEM;
		}
		device->devid = devid;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, disk_super->dev_item.uuid,
		       BTRFS_UUID_SIZE);
		spin_lock_init(&device->io_lock);
		device->name = kstrdup(path, GFP_NOFS);
		if (!device->name) {
			kfree(device);
			return -ENOMEM;
		}
		INIT_LIST_HEAD(&device->dev_alloc_list);

		/* init readahead state */
		spin_lock_init(&device->reada_lock);
		device->reada_curr_zone = NULL;
		atomic_set(&device->reada_in_flight, 0);
		device->reada_next = 0;
		INIT_RADIX_TREE(&device->reada_zones, GFP_NOFS & ~__GFP_WAIT);
		INIT_RADIX_TREE(&device->reada_extents, GFP_NOFS & ~__GFP_WAIT);

		mutex_lock(&fs_devices->device_list_mutex);
		list_add_rcu(&device->dev_list, &fs_devices->devices);
		mutex_unlock(&fs_devices->device_list_mutex);

		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	} else if (!device->name || strcmp(device->name, path)) {
		name = kstrdup(path, GFP_NOFS);
		if (!name)
			return -ENOMEM;
		kfree(device->name);
		device->name = name;
		if (device->missing) {
			fs_devices->missing_devices--;
			device->missing = 0;
		}
	}

	if (found_transid > fs_devices->latest_trans) {
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
	}
	*fs_devices_ret = fs_devices;
	return 0;
}
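
/*
 * Illustrative call sequence (a sketch, not an additional code path):
 * btrfs_scan_one_device() below reads the superblock off a candidate
 * block device and feeds it here, so rescanning a device the registry
 * already knows only refreshes the cached name and latest transid
 * instead of allocating a second btrfs_device.
 */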
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;

	fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
	if (!fs_devices)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&fs_devices->devices);
	INIT_LIST_HEAD(&fs_devices->alloc_list);
	INIT_LIST_HEAD(&fs_devices->list);
	mutex_init(&fs_devices->device_list_mutex);
	fs_devices->latest_devid = orig->latest_devid;
	fs_devices->latest_trans = orig->latest_trans;
	memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));

	/* We have held the volume lock, it is safe to get the devices. */
	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device)
			goto error;

		device->name = kstrdup(orig_dev->name, GFP_NOFS);
		if (!device->name) {
			kfree(device);
			goto error;
		}

		device->devid = orig_dev->devid;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
		spin_lock_init(&device->io_lock);
		INIT_LIST_HEAD(&device->dev_list);
		INIT_LIST_HEAD(&device->dev_alloc_list);

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	return fs_devices;
error:
	free_fs_devices(fs_devices);
	return ERR_PTR(-ENOMEM);
}
int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *next;

	mutex_lock(&uuid_mutex);
again:
	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (device->in_fs_metadata)
			continue;

		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			device->writeable = 0;
			fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		kfree(device->name);
		kfree(device);
	}

	if (fs_devices->seed) {
		fs_devices = fs_devices->seed;
		goto again;
	}

	mutex_unlock(&uuid_mutex);
	return 0;
}
static void __free_device(struct work_struct *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, rcu_work);

	if (device->bdev)
		blkdev_put(device->bdev, device->mode);

	kfree(device->name);
	kfree(device);
}
static void free_device(struct rcu_head *head)
{
	struct btrfs_device *device;

	device = container_of(head, struct btrfs_device, rcu);

	INIT_WORK(&device->rcu_work, __free_device);
	schedule_work(&device->rcu_work);
}
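
/*
 * Freeing is deliberately split in two stages: RCU callbacks run in
 * softirq context where blkdev_put() (which may sleep) is not allowed,
 * so free_device() only bounces the real cleanup to process context
 * via the rcu_work workqueue item handled by __free_device().
 */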
static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	if (--fs_devices->opened > 0)
		return 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		struct btrfs_device *new_device;

		if (device->bdev)
			fs_devices->open_devices--;

		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			fs_devices->rw_devices--;
		}

		if (device->can_discard)
			fs_devices->num_can_discard--;

		new_device = kmalloc(sizeof(*new_device), GFP_NOFS);
		BUG_ON(!new_device);
		memcpy(new_device, device, sizeof(*new_device));
		new_device->name = kstrdup(device->name, GFP_NOFS);
		BUG_ON(device->name && !new_device->name);
		new_device->bdev = NULL;
		new_device->writeable = 0;
		new_device->in_fs_metadata = 0;
		new_device->can_discard = 0;
		list_replace_rcu(&device->dev_list, &new_device->dev_list);

		call_rcu(&device->rcu, free_device);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = 0;

	return 0;
}
int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_fs_devices *seed_devices = NULL;
	int ret;

	mutex_lock(&uuid_mutex);
	ret = __btrfs_close_devices(fs_devices);
	if (!fs_devices->opened) {
		seed_devices = fs_devices->seed;
		fs_devices->seed = NULL;
	}
	mutex_unlock(&uuid_mutex);

	while (seed_devices) {
		fs_devices = seed_devices;
		seed_devices = fs_devices->seed;
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
	return ret;
}
static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
				fmode_t flags, void *holder)
{
	struct request_queue *q;
	struct block_device *bdev;
	struct list_head *head = &fs_devices->devices;
	struct btrfs_device *device;
	struct block_device *latest_bdev = NULL;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	u64 latest_devid = 0;
	u64 latest_transid = 0;
	u64 devid;
	int seeding = 1;
	int ret = 0;

	flags |= FMODE_EXCL;

	list_for_each_entry(device, head, dev_list) {
		if (device->bdev)
			continue;
		if (!device->name)
			continue;

		bdev = blkdev_get_by_path(device->name, flags, holder);
		if (IS_ERR(bdev)) {
			printk(KERN_INFO "open %s failed\n", device->name);
			goto error;
		}
		set_blocksize(bdev, 4096);

		bh = btrfs_read_dev_super(bdev);
		if (!bh)
			goto error_close;

		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		if (devid != device->devid)
			goto error_brelse;

		if (memcmp(device->uuid, disk_super->dev_item.uuid,
			   BTRFS_UUID_SIZE))
			goto error_brelse;

		device->generation = btrfs_super_generation(disk_super);
		if (!latest_transid || device->generation > latest_transid) {
			latest_devid = devid;
			latest_transid = device->generation;
			latest_bdev = bdev;
		}

		if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
			device->writeable = 0;
		} else {
			device->writeable = !bdev_read_only(bdev);
			seeding = 0;
		}

		q = bdev_get_queue(bdev);
		if (blk_queue_discard(q)) {
			device->can_discard = 1;
			fs_devices->num_can_discard++;
		}

		device->bdev = bdev;
		device->in_fs_metadata = 0;
		device->mode = flags;

		if (!blk_queue_nonrot(bdev_get_queue(bdev)))
			fs_devices->rotating = 1;

		fs_devices->open_devices++;
		if (device->writeable) {
			fs_devices->rw_devices++;
			list_add(&device->dev_alloc_list,
				 &fs_devices->alloc_list);
		}
		brelse(bh);
		continue;

error_brelse:
		brelse(bh);
error_close:
		blkdev_put(bdev, flags);
error:
		continue;
	}
	if (fs_devices->open_devices == 0) {
		ret = -EINVAL;
		goto out;
	}
	fs_devices->seeding = seeding;
	fs_devices->opened = 1;
	fs_devices->latest_bdev = latest_bdev;
	fs_devices->latest_devid = latest_devid;
	fs_devices->latest_trans = latest_transid;
	fs_devices->total_rw_bytes = 0;
out:
	return ret;
}
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	mutex_lock(&uuid_mutex);
	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		ret = __btrfs_open_devices(fs_devices, flags, holder);
	}
	mutex_unlock(&uuid_mutex);
	return ret;
}
int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
			  struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_super_block *disk_super;
	struct block_device *bdev;
	struct buffer_head *bh;
	int ret;
	u64 devid;
	u64 transid;

	mutex_lock(&uuid_mutex);

	flags |= FMODE_EXCL;
	bdev = blkdev_get_by_path(path, flags, holder);

	if (IS_ERR(bdev)) {
		ret = PTR_ERR(bdev);
		goto error;
	}

	ret = set_blocksize(bdev, 4096);
	if (ret)
		goto error_close;
	bh = btrfs_read_dev_super(bdev);
	if (!bh) {
		ret = -EINVAL;
		goto error_close;
	}
	disk_super = (struct btrfs_super_block *)bh->b_data;
	devid = btrfs_stack_device_id(&disk_super->dev_item);
	transid = btrfs_super_generation(disk_super);
	if (disk_super->label[0])
		printk(KERN_INFO "device label %s ", disk_super->label);
	else
		printk(KERN_INFO "device fsid %pU ", disk_super->fsid);
	printk(KERN_CONT "devid %llu transid %llu %s\n",
	       (unsigned long long)devid, (unsigned long long)transid, path);
	ret = device_list_add(path, disk_super, devid, fs_devices_ret);

	brelse(bh);
error_close:
	blkdev_put(bdev, flags);
error:
	mutex_unlock(&uuid_mutex);
	return ret;
}
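
/*
 * Typical caller (a sketch; the actual ioctl plumbing lives in
 * super.c): the btrfs control device's scan ioctl passes a
 * user-supplied path here with FMODE_READ and the module's
 * file_system_type as holder.  The device is closed again before
 * returning -- only the fs_uuids registration side effect survives.
 */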
/* helper to account the used device space in the range */
int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
				   u64 end, u64 *length)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 extent_end;
	int ret;
	int slot;
	struct extent_buffer *l;

	*length = 0;

	if (start >= device->total_bytes)
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = 2;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
			goto next;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (key.offset <= start && extent_end > end) {
			*length = end - start + 1;
			break;
		} else if (key.offset <= start && extent_end > start)
			*length += extent_end - start;
		else if (key.offset > start && extent_end <= end)
			*length += extent_end - key.offset;
		else if (key.offset > start && key.offset <= end) {
			*length += end - key.offset + 1;
			break;
		} else if (key.offset > end)
			break;

next:
		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * find_free_dev_extent - find free space in the specified device
 * @trans:	transaction handler
 * @device:	the device which we search the free space in
 * @num_bytes:	the size of the free space that we need
 * @start:	store the start of the free space.
 * @len:	the size of the free space that we find, or the size of the max
 * 		free space if we don't find suitable free space
 *
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 *
 * @start is used to store the start of the free space if we find it. But if
 * we don't find suitable free space, it will be used to store the start
 * position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 */
int find_free_dev_extent(struct btrfs_trans_handle *trans,
			 struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_start;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;

	/* FIXME use last free of some kind */

	/* we don't want to overwrite the superblock on the drive,
	 * so we make sure to start at an offset of at least 1MB
	 */
	search_start = max(root->fs_info->alloc_start, 1024ull * 1024);

	max_hole_start = search_start;
	max_hole_size = 0;
	hole_size = 0;

	if (search_start >= search_end) {
		ret = -ENOSPC;
		goto error;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto error;
	}
	path->reada = 2;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than which we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start)
		hole_size = search_end - search_start;

	if (hole_size > max_hole_size) {
		max_hole_start = search_start;
		max_hole_size = hole_size;
	}

	if (hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

out:
	btrfs_free_path(path);
error:
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}
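
/*
 * Worked example: with dev extents at [4M, 8M) and [12M, 20M), a
 * search for num_bytes = 5M starting at 1M visits holes [1M, 4M) and
 * [8M, 12M), both too small, and falls through to the tail hole at
 * 20M.  If the tail is also smaller than 5M, the result is -ENOSPC
 * with *start/*len describing the 4M hole at 8M, the largest seen.
 */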
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_device *device,
				 u64 start)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	}
	BUG_ON(ret);

	if (device->bytes_used > 0) {
		u64 len = btrfs_dev_extent_length(leaf, extent);
		device->bytes_used -= len;
		spin_lock(&root->fs_info->free_chunk_lock);
		root->fs_info->free_chunk_space += len;
		spin_unlock(&root->fs_info->free_chunk_lock);
	}
	ret = btrfs_del_item(trans, root, path);

out:
	btrfs_free_path(path);
	return ret;
}
int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
			   struct btrfs_device *device,
			   u64 chunk_tree, u64 chunk_objectid,
			   u64 chunk_offset, u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	WARN_ON(!device->in_fs_metadata);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	BUG_ON(ret);

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
		    BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);
	return ret;
}
static noinline int find_next_chunk(struct btrfs_root *root,
				    u64 objectid, u64 *offset)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_key found_key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
	if (ret) {
		*offset = 0;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != objectid)
			*offset = 0;
		else {
			chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
					       struct btrfs_chunk);
			*offset = found_key.offset +
				btrfs_chunk_length(path->nodes[0], chunk);
		}
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*objectid = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*objectid = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
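
/*
 * Both helpers above rely on the same trick: search for offset
 * (u64)-1, which always lands just past the last matching item, then
 * step back with btrfs_previous_item() to read the highest existing
 * key and derive the next free chunk offset / device id from it.
 */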
/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = (unsigned long)btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
static int btrfs_rm_dev_item(struct btrfs_root *root,
			     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;
	lock_chunks(root);

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret)
		goto out;
out:
	btrfs_free_path(path);
	unlock_chunks(root);
	btrfs_commit_transaction(trans, root);
	return ret;
}
int btrfs_rm_device(struct btrfs_root *root, char *device_path)
{
	struct btrfs_device *device;
	struct btrfs_device *next_device;
	struct block_device *bdev;
	struct buffer_head *bh = NULL;
	struct btrfs_super_block *disk_super;
	struct btrfs_fs_devices *cur_devices;
	u64 all_avail;
	u64 devid;
	u64 num_devices;
	u8 *dev_uuid;
	int ret = 0;
	bool clear_super = false;

	mutex_lock(&uuid_mutex);

	all_avail = root->fs_info->avail_data_alloc_bits |
		root->fs_info->avail_system_alloc_bits |
		root->fs_info->avail_metadata_alloc_bits;

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
	    root->fs_info->fs_devices->num_devices <= 4) {
		printk(KERN_ERR "btrfs: unable to go below four devices "
		       "on raid10\n");
		ret = -EINVAL;
		goto out;
	}

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
	    root->fs_info->fs_devices->num_devices <= 2) {
		printk(KERN_ERR "btrfs: unable to go below two "
		       "devices on raid1\n");
		ret = -EINVAL;
		goto out;
	}

	if (strcmp(device_path, "missing") == 0) {
		struct list_head *devices;
		struct btrfs_device *tmp;

		device = NULL;
		devices = &root->fs_info->fs_devices->devices;
		/*
		 * It is safe to read the devices since the volume_mutex
		 * is held.
		 */
		list_for_each_entry(tmp, devices, dev_list) {
			if (tmp->in_fs_metadata && !tmp->bdev) {
				device = tmp;
				break;
			}
		}
		bdev = NULL;
		bh = NULL;
		disk_super = NULL;
		if (!device) {
			printk(KERN_ERR "btrfs: no missing devices found to "
			       "remove\n");
			goto out;
		}
	} else {
		bdev = blkdev_get_by_path(device_path, FMODE_READ | FMODE_EXCL,
					  root->fs_info->bdev_holder);
		if (IS_ERR(bdev)) {
			ret = PTR_ERR(bdev);
			goto out;
		}

		set_blocksize(bdev, 4096);
		bh = btrfs_read_dev_super(bdev);
		if (!bh) {
			ret = -EINVAL;
			goto error_close;
		}
		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		dev_uuid = disk_super->dev_item.uuid;
		device = btrfs_find_device(root, devid, dev_uuid,
					   disk_super->fsid);
		if (!device) {
			ret = -ENOENT;
			goto error_brelse;
		}
	}

	if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
		printk(KERN_ERR "btrfs: unable to remove the only writeable "
		       "device\n");
		ret = -EINVAL;
		goto error_brelse;
	}

	if (device->writeable) {
		lock_chunks(root);
		list_del_init(&device->dev_alloc_list);
		unlock_chunks(root);
		root->fs_info->fs_devices->rw_devices--;
		clear_super = true;
	}

	ret = btrfs_shrink_device(device, 0);
	if (ret)
		goto error_undo;

	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
	if (ret)
		goto error_undo;

	spin_lock(&root->fs_info->free_chunk_lock);
	root->fs_info->free_chunk_space = device->total_bytes -
		device->bytes_used;
	spin_unlock(&root->fs_info->free_chunk_lock);

	device->in_fs_metadata = 0;
	btrfs_scrub_cancel_dev(root, device);

	/*
	 * the device list mutex makes sure that we don't change
	 * the device list while someone else is writing out all
	 * the device supers.
	 */

	cur_devices = device->fs_devices;
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_del_rcu(&device->dev_list);

	device->fs_devices->num_devices--;

	if (device->missing)
		root->fs_info->fs_devices->missing_devices--;

	next_device = list_entry(root->fs_info->fs_devices->devices.next,
				 struct btrfs_device, dev_list);
	if (device->bdev == root->fs_info->sb->s_bdev)
		root->fs_info->sb->s_bdev = next_device->bdev;
	if (device->bdev == root->fs_info->fs_devices->latest_bdev)
		root->fs_info->fs_devices->latest_bdev = next_device->bdev;

	if (device->bdev)
		device->fs_devices->open_devices--;

	call_rcu(&device->rcu, free_device);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
	btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);

	if (cur_devices->open_devices == 0) {
		struct btrfs_fs_devices *fs_devices;
		fs_devices = root->fs_info->fs_devices;
		while (fs_devices) {
			if (fs_devices->seed == cur_devices)
				break;
			fs_devices = fs_devices->seed;
		}
		fs_devices->seed = cur_devices->seed;
		cur_devices->seed = NULL;
		lock_chunks(root);
		__btrfs_close_devices(cur_devices);
		unlock_chunks(root);
		free_fs_devices(cur_devices);
	}

	/*
	 * at this point, the device is zero sized.  We want to
	 * remove it from the devices list and zero out the old super
	 */
	if (clear_super) {
		/* make sure this device isn't detected as part of
		 * the FS anymore
		 */
		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
		set_buffer_dirty(bh);
		sync_dirty_buffer(bh);
	}

	ret = 0;

error_brelse:
	brelse(bh);
error_close:
	if (bdev)
		blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
out:
	mutex_unlock(&uuid_mutex);
	return ret;
error_undo:
	if (device->writeable) {
		lock_chunks(root);
		list_add(&device->dev_alloc_list,
			 &root->fs_info->fs_devices->alloc_list);
		unlock_chunks(root);
		root->fs_info->fs_devices->rw_devices++;
	}
	goto error_brelse;
}
/*
 * does all the dirty work required for changing file system's UUID.
 */
static int btrfs_prepare_sprout(struct btrfs_trans_handle *trans,
				struct btrfs_root *root)
{
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	struct btrfs_fs_devices *old_devices;
	struct btrfs_fs_devices *seed_devices;
	struct btrfs_super_block *disk_super = root->fs_info->super_copy;
	struct btrfs_device *device;
	u64 super_flags;

	BUG_ON(!mutex_is_locked(&uuid_mutex));
	if (!fs_devices->seeding)
		return -EINVAL;

	seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
	if (!seed_devices)
		return -ENOMEM;

	old_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(old_devices)) {
		kfree(seed_devices);
		return PTR_ERR(old_devices);
	}

	list_add(&old_devices->list, &fs_uuids);

	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
	seed_devices->opened = 1;
	INIT_LIST_HEAD(&seed_devices->devices);
	INIT_LIST_HEAD(&seed_devices->alloc_list);
	mutex_init(&seed_devices->device_list_mutex);

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
			      synchronize_rcu);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
	list_for_each_entry(device, &seed_devices->devices, dev_list) {
		device->fs_devices = seed_devices;
	}

	fs_devices->seeding = 0;
	fs_devices->num_devices = 0;
	fs_devices->open_devices = 0;
	fs_devices->seed = seed_devices;

	generate_random_uuid(fs_devices->fsid);
	memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	super_flags = btrfs_super_flags(disk_super) &
		      ~BTRFS_SUPER_FLAG_SEEDING;
	btrfs_set_super_flags(disk_super, super_flags);

	return 0;
}
/*
 * store the expected generation for seed devices in device items.
 */
static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dev_item *dev_item;
	struct btrfs_device *device;
	struct btrfs_key key;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];
	u64 devid;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	root = root->fs_info->chunk_root;
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = BTRFS_DEV_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
		if (ret < 0)
			goto error;

		leaf = path->nodes[0];
next_slot:
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret > 0)
				break;
			if (ret < 0)
				goto error;
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
			btrfs_release_path(path);
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
		    key.type != BTRFS_DEV_ITEM_KEY)
			break;

		dev_item = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_dev_item);
		devid = btrfs_device_id(leaf, dev_item);
		read_extent_buffer(leaf, dev_uuid,
				   (unsigned long)btrfs_device_uuid(dev_item),
				   BTRFS_UUID_SIZE);
		read_extent_buffer(leaf, fs_uuid,
				   (unsigned long)btrfs_device_fsid(dev_item),
				   BTRFS_UUID_SIZE);
		device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
		BUG_ON(!device);

		if (device->fs_devices->seeding) {
			btrfs_set_device_generation(leaf, dev_item,
						    device->generation);
			btrfs_mark_buffer_dirty(leaf);
		}

		path->slots[0]++;
		goto next_slot;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
{
	struct request_queue *q;
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device;
	struct block_device *bdev;
	struct list_head *devices;
	struct super_block *sb = root->fs_info->sb;
	u64 total_bytes;
	int seeding_dev = 0;
	int ret = 0;

	if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
		return -EINVAL;

	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
				  root->fs_info->bdev_holder);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	if (root->fs_info->fs_devices->seeding) {
		seeding_dev = 1;
		down_write(&sb->s_umount);
		mutex_lock(&uuid_mutex);
	}

	filemap_write_and_wait(bdev->bd_inode->i_mapping);

	devices = &root->fs_info->fs_devices->devices;
	/*
	 * we have the volume lock, so we don't need the extra
	 * device list mutex while reading the list here.
	 */
	list_for_each_entry(device, devices, dev_list) {
		if (device->bdev == bdev) {
			ret = -EEXIST;
			goto error;
		}
	}

	device = kzalloc(sizeof(*device), GFP_NOFS);
	if (!device) {
		/* we can safely leave the fs_devices entry around */
		ret = -ENOMEM;
		goto error;
	}

	device->name = kstrdup(device_path, GFP_NOFS);
	if (!device->name) {
		kfree(device);
		ret = -ENOMEM;
		goto error;
	}

	ret = find_next_devid(root, &device->devid);
	if (ret) {
		kfree(device->name);
		kfree(device);
		goto error;
	}

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		kfree(device->name);
		kfree(device);
		ret = PTR_ERR(trans);
		goto error;
	}

	lock_chunks(root);

	q = bdev_get_queue(bdev);
	if (blk_queue_discard(q))
		device->can_discard = 1;
	device->writeable = 1;
	device->work.func = pending_bios_fn;
	generate_random_uuid(device->uuid);
	spin_lock_init(&device->io_lock);
	device->generation = trans->transid;
	device->io_width = root->sectorsize;
	device->io_align = root->sectorsize;
	device->sector_size = root->sectorsize;
	device->total_bytes = i_size_read(bdev->bd_inode);
	device->disk_total_bytes = device->total_bytes;
	device->dev_root = root->fs_info->dev_root;
	device->bdev = bdev;
	device->in_fs_metadata = 1;
	device->mode = FMODE_EXCL;
	set_blocksize(device->bdev, 4096);

	if (seeding_dev) {
		sb->s_flags &= ~MS_RDONLY;
		ret = btrfs_prepare_sprout(trans, root);
		BUG_ON(ret);
	}

	device->fs_devices = root->fs_info->fs_devices;

	/*
	 * we don't want write_supers to jump in here with our device
	 * half setup
	 */
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
	list_add(&device->dev_alloc_list,
		 &root->fs_info->fs_devices->alloc_list);
	root->fs_info->fs_devices->num_devices++;
	root->fs_info->fs_devices->open_devices++;
	root->fs_info->fs_devices->rw_devices++;
	if (device->can_discard)
		root->fs_info->fs_devices->num_can_discard++;
	root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;

	spin_lock(&root->fs_info->free_chunk_lock);
	root->fs_info->free_chunk_space += device->total_bytes;
	spin_unlock(&root->fs_info->free_chunk_lock);

	if (!blk_queue_nonrot(bdev_get_queue(bdev)))
		root->fs_info->fs_devices->rotating = 1;

	total_bytes = btrfs_super_total_bytes(root->fs_info->super_copy);
	btrfs_set_super_total_bytes(root->fs_info->super_copy,
				    total_bytes + device->total_bytes);

	total_bytes = btrfs_super_num_devices(root->fs_info->super_copy);
	btrfs_set_super_num_devices(root->fs_info->super_copy,
				    total_bytes + 1);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	if (seeding_dev) {
		ret = init_first_rw_device(trans, root, device);
		BUG_ON(ret);
		ret = btrfs_finish_sprout(trans, root);
		BUG_ON(ret);
	} else {
		ret = btrfs_add_device(trans, root, device);
	}

	/*
	 * we've got more storage, clear any full flags on the space
	 * infos
	 */
	btrfs_clear_space_info_full(root->fs_info);

	unlock_chunks(root);
	btrfs_commit_transaction(trans, root);

	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);

		ret = btrfs_relocate_sys_chunks(root);
		BUG_ON(ret);
	}

	return ret;
error:
	blkdev_put(bdev, FMODE_EXCL);
	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);
	}
	return ret;
}
static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
					struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = device->dev_root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_mark_buffer_dirty(leaf);

out:
	btrfs_free_path(path);
	return ret;
}
static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size)
{
	struct btrfs_super_block *super_copy =
		device->dev_root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 diff = new_size - device->total_bytes;

	if (!device->writeable)
		return -EACCES;
	if (new_size <= device->total_bytes)
		return -EINVAL;

	btrfs_set_super_total_bytes(super_copy, old_total + diff);
	device->fs_devices->total_rw_bytes += diff;

	device->total_bytes = new_size;
	device->disk_total_bytes = new_size;
	btrfs_clear_space_info_full(device->dev_root->fs_info);

	return btrfs_update_device(trans, device);
}
int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size)
{
	int ret;
	lock_chunks(device->dev_root);
	ret = __btrfs_grow_device(trans, device, new_size);
	unlock_chunks(device->dev_root);
	return ret;
}
static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    u64 chunk_tree, u64 chunk_objectid,
			    u64 chunk_offset)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	root = root->fs_info->chunk_root;
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = chunk_objectid;
	key.offset = chunk_offset;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	BUG_ON(ret);

	ret = btrfs_del_item(trans, root, path);

	btrfs_free_path(path);
	return ret;
}
static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
			chunk_offset)
{
	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)(ptr + len);
			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
			len += btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
		if (key.objectid == chunk_objectid &&
		    key.offset == chunk_offset) {
			memmove(ptr, ptr + len, array_size - (cur + len));
			array_size -= len;
			btrfs_set_super_sys_array_size(super_copy, array_size);
		} else {
			ptr += len;
			cur += len;
		}
	}
	return ret;
}
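
/*
 * The sys_chunk_array layout the loop above walks is a packed stream
 * of entries with no padding:
 *
 *   [btrfs_disk_key][btrfs_chunk + stripes][btrfs_disk_key][...]
 *
 * so the only way to delete an entry is to memmove() the tail down
 * over it and shrink sys_array_size, exactly as done here.
 */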
static int btrfs_relocate_chunk(struct btrfs_root *root,
				u64 chunk_tree, u64 chunk_objectid,
				u64 chunk_offset)
{
	struct extent_map_tree *em_tree;
	struct btrfs_root *extent_root;
	struct btrfs_trans_handle *trans;
	struct extent_map *em;
	struct map_lookup *map;
	int ret;
	int i;

	root = root->fs_info->chunk_root;
	extent_root = root->fs_info->extent_root;
	em_tree = &root->fs_info->mapping_tree.map_tree;

	ret = btrfs_can_relocate(extent_root, chunk_offset);
	if (ret)
		return -ENOSPC;

	/* step one, relocate all the extents inside this chunk */
	ret = btrfs_relocate_block_group(extent_root, chunk_offset);
	if (ret)
		return ret;

	trans = btrfs_start_transaction(root, 0);
	BUG_ON(IS_ERR(trans));

	lock_chunks(root);

	/*
	 * step two, delete the device extents and the
	 * chunk tree entries
	 */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	read_unlock(&em_tree->lock);

	BUG_ON(em->start > chunk_offset ||
	       em->start + em->len < chunk_offset);
	map = (struct map_lookup *)em->bdev;

	for (i = 0; i < map->num_stripes; i++) {
		ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
					    map->stripes[i].physical);
		BUG_ON(ret);

		if (map->stripes[i].dev) {
			ret = btrfs_update_device(trans, map->stripes[i].dev);
			BUG_ON(ret);
		}
	}
	ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
			       chunk_offset);
	BUG_ON(ret);

	trace_btrfs_chunk_free(root, map, chunk_offset, em->len);

	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
		BUG_ON(ret);
	}

	ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
	BUG_ON(ret);

	write_lock(&em_tree->lock);
	remove_extent_mapping(em_tree, em);
	write_unlock(&em_tree->lock);

	kfree(map);
	em->bdev = NULL;

	/* once for the tree */
	free_extent_map(em);
	/* once for us */
	free_extent_map(em);

	unlock_chunks(root);
	btrfs_end_transaction(trans, root);
	return 0;
}
static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
{
	struct btrfs_root *chunk_root = root->fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_chunk *chunk;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u64 chunk_tree = chunk_root->root_key.objectid;
	u64 chunk_type;
	bool retried = false;
	int failed = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

again:
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0)
			goto error;
		BUG_ON(ret == 0);

		ret = btrfs_previous_item(chunk_root, path, key.objectid,
					  key.type);
		if (ret < 0)
			goto error;
		if (ret > 0)
			break;

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		chunk = btrfs_item_ptr(leaf, path->slots[0],
				       struct btrfs_chunk);
		chunk_type = btrfs_chunk_type(leaf, chunk);
		btrfs_release_path(path);

		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
			ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
						   found_key.objectid,
						   found_key.offset);
			if (ret == -ENOSPC)
				failed++;
			else if (ret)
				BUG();
		}

		if (found_key.offset == 0)
			break;
		key.offset = found_key.offset - 1;
	}
	ret = 0;
	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
	} else if (failed && retried) {
		WARN_ON(1);
		ret = -ENOSPC;
	}
error:
	btrfs_free_path(path);
	return ret;
}
/*
 * Should be called with both balance and volume mutexes held to
 * serialize other volume operations (add_dev/rm_dev/resize) with
 * restriper.  Same goes for unset_balance_control.
 */
static void set_balance_control(struct btrfs_balance_control *bctl)
{
	struct btrfs_fs_info *fs_info = bctl->fs_info;

	BUG_ON(fs_info->balance_ctl);

	spin_lock(&fs_info->balance_lock);
	fs_info->balance_ctl = bctl;
	spin_unlock(&fs_info->balance_lock);
}

static void unset_balance_control(struct btrfs_fs_info *fs_info)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;

	BUG_ON(!fs_info->balance_ctl);

	spin_lock(&fs_info->balance_lock);
	fs_info->balance_ctl = NULL;
	spin_unlock(&fs_info->balance_lock);

	kfree(bctl);
}
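
/*
 * Balance lifecycle in brief: btrfs_balance() below validates the
 * request, publishes it with set_balance_control(), drops
 * balance_mutex for the duration of __btrfs_balance(), and finally
 * tears the control structure down again via __cancel_balance() ->
 * unset_balance_control().
 */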
/*
 * Balance filters.  Return 1 if chunk should be filtered out
 * (should not be balanced).
 */
static int chunk_profiles_filter(u64 chunk_profile,
				 struct btrfs_balance_args *bargs)
{
	chunk_profile &= BTRFS_BLOCK_GROUP_PROFILE_MASK;

	if (chunk_profile == 0)
		chunk_profile = BTRFS_AVAIL_ALLOC_BIT_SINGLE;

	if (bargs->profiles & chunk_profile)
		return 0;

	return 1;
}
static u64 div_factor_fine(u64 num, int factor)
{
	if (factor <= 0)
		return 0;
	if (factor >= 100)
		return num;

	num *= factor;
	do_div(num, 100);

	return num;
}
static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
			      struct btrfs_balance_args *bargs)
{
	struct btrfs_block_group_cache *cache;
	u64 chunk_used, user_thresh;
	int ret = 1;

	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
	chunk_used = btrfs_block_group_used(&cache->item);

	user_thresh = div_factor_fine(cache->key.offset, bargs->usage);
	if (chunk_used < user_thresh)
		ret = 0;

	btrfs_put_block_group(cache);
	return ret;
}
static int chunk_devid_filter(struct extent_buffer *leaf,
			      struct btrfs_chunk *chunk,
			      struct btrfs_balance_args *bargs)
{
	struct btrfs_stripe *stripe;
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	int i;

	for (i = 0; i < num_stripes; i++) {
		stripe = btrfs_stripe_nr(chunk, i);
		if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
			return 0;
	}

	return 1;
}
/* [pstart, pend) */
static int chunk_drange_filter(struct extent_buffer *leaf,
			       struct btrfs_chunk *chunk,
			       u64 chunk_offset,
			       struct btrfs_balance_args *bargs)
{
	struct btrfs_stripe *stripe;
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	u64 stripe_offset;
	u64 stripe_length;
	int factor;
	int i;

	if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
		return 0;

	if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
	     BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10))
		factor = 2;
	else
		factor = 1;
	factor = num_stripes / factor;

	for (i = 0; i < num_stripes; i++) {
		stripe = btrfs_stripe_nr(chunk, i);
		if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
			continue;

		stripe_offset = btrfs_stripe_offset(leaf, stripe);
		stripe_length = btrfs_chunk_length(leaf, chunk);
		do_div(stripe_length, factor);

		if (stripe_offset < bargs->pend &&
		    stripe_offset + stripe_length > bargs->pstart)
			return 0;
	}

	return 1;
}
2211 static int chunk_vrange_filter(struct extent_buffer *leaf,
2212 struct btrfs_chunk *chunk,
2214 struct btrfs_balance_args *bargs)
2216 if (chunk_offset < bargs->vend &&
2217 chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
2218 /* at least part of the chunk is inside this vrange */
static int chunk_soft_convert_filter(u64 chunk_profile,
				     struct btrfs_balance_args *bargs)
{
	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
		return 0;

	chunk_profile &= BTRFS_BLOCK_GROUP_PROFILE_MASK;

	if (chunk_profile == 0)
		chunk_profile = BTRFS_AVAIL_ALLOC_BIT_SINGLE;

	if (bargs->target & chunk_profile)
		return 1;

	return 0;
}
static int should_balance_chunk(struct btrfs_root *root,
				struct extent_buffer *leaf,
				struct btrfs_chunk *chunk, u64 chunk_offset)
{
	struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
	struct btrfs_balance_args *bargs = NULL;
	u64 chunk_type = btrfs_chunk_type(leaf, chunk);

	/* type filter */
	if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
	      (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
		return 0;
	}

	if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
		bargs = &bctl->data;
	else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
		bargs = &bctl->sys;
	else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
		bargs = &bctl->meta;

	/* profiles filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
	    chunk_profiles_filter(chunk_type, bargs)) {
		return 0;
	}

	/* usage filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) {
		return 0;
	}

	/* devid filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
	    chunk_devid_filter(leaf, chunk, bargs)) {
		return 0;
	}

	/* drange filter, makes sense only with devid filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
	    chunk_drange_filter(leaf, chunk, chunk_offset, bargs)) {
		return 0;
	}

	/* vrange filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
	    chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
		return 0;
	}

	/* soft profile changing mode */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
	    chunk_soft_convert_filter(chunk_type, bargs)) {
		return 0;
	}

	return 1;
}
static u64 div_factor(u64 num, int factor)
{
	if (factor == 10)
		return num;
	num *= factor;
	do_div(num, 10);
	return num;
}
static int __btrfs_balance(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *chunk_root = fs_info->chunk_root;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct list_head *devices;
	struct btrfs_device *device;
	u64 old_size;
	u64 size_to_free;
	struct btrfs_chunk *chunk;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_trans_handle *trans;
	struct extent_buffer *leaf;
	int slot;
	int ret;
	int enospc_errors = 0;

	/* step one make some room on all the devices */
	devices = &fs_info->fs_devices->devices;
	list_for_each_entry(device, devices, dev_list) {
		old_size = device->total_bytes;
		size_to_free = div_factor(old_size, 1);
		size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
		if (!device->writeable ||
		    device->total_bytes - device->bytes_used > size_to_free)
			continue;

		ret = btrfs_shrink_device(device, old_size - size_to_free);
		if (ret == -ENOSPC)
			break;
		BUG_ON(ret);

		trans = btrfs_start_transaction(dev_root, 0);
		BUG_ON(IS_ERR(trans));

		ret = btrfs_grow_device(trans, device, old_size);
		BUG_ON(ret);

		btrfs_end_transaction(trans, dev_root);
	}

	/* step two, relocate all the chunks */
	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto error;
	}
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0)
			goto error;

		/*
		 * this shouldn't happen, it means the last relocate
		 * failed
		 */
		if (ret == 0)
			BUG(); /* FIXME break ? */

		ret = btrfs_previous_item(chunk_root, path, 0,
					  BTRFS_CHUNK_ITEM_KEY);
		if (ret) {
			ret = 0;
			break;
		}

		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.objectid != key.objectid)
			break;

		/* chunk zero is special */
		if (found_key.offset == 0)
			break;

		chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);

		ret = should_balance_chunk(chunk_root, leaf, chunk,
					   found_key.offset);
		btrfs_release_path(path);
		if (!ret)
			goto loop;

		ret = btrfs_relocate_chunk(chunk_root,
					   chunk_root->root_key.objectid,
					   found_key.objectid,
					   found_key.offset);
		if (ret && ret != -ENOSPC)
			goto error;
		if (ret == -ENOSPC)
			enospc_errors++;
loop:
		key.offset = found_key.offset - 1;
	}

error:
	btrfs_free_path(path);
	if (enospc_errors) {
		printk(KERN_INFO "btrfs: %d enospc errors during balance\n",
		       enospc_errors);
		if (!ret)
			ret = -ENOSPC;
	}

	return ret;
}
static void __cancel_balance(struct btrfs_fs_info *fs_info)
{
	unset_balance_control(fs_info);
}

void update_ioctl_balance_args(struct btrfs_fs_info *fs_info,
			       struct btrfs_ioctl_balance_args *bargs);
/*
 * Should be called with both balance and volume mutexes held
 */
int btrfs_balance(struct btrfs_balance_control *bctl,
		  struct btrfs_ioctl_balance_args *bargs)
{
	struct btrfs_fs_info *fs_info = bctl->fs_info;
	u64 allowed;
	int ret;

	if (btrfs_fs_closing(fs_info)) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * In case of mixed groups both data and meta should be picked,
	 * and identical options should be given for both of them.
	 */
	allowed = btrfs_super_incompat_flags(fs_info->super_copy);
	if ((allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
	    (bctl->flags & (BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA))) {
		if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
		    !(bctl->flags & BTRFS_BALANCE_METADATA) ||
		    memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
			printk(KERN_ERR "btrfs: with mixed groups data and "
			       "metadata balance options must be the same\n");
			ret = -EINVAL;
			goto out;
		}
	}

	/*
	 * Profile changing sanity checks.  Skip them if a simple
	 * balance is requested.
	 */
	if (!((bctl->data.flags | bctl->sys.flags | bctl->meta.flags) &
	      BTRFS_BALANCE_ARGS_CONVERT))
		goto do_balance;

	allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
	if (fs_info->fs_devices->num_devices == 1)
		allowed |= BTRFS_BLOCK_GROUP_DUP;
	else if (fs_info->fs_devices->num_devices < 4)
		allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
	else
		allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
				BTRFS_BLOCK_GROUP_RAID10);

	if (!profile_is_valid(bctl->data.target, 1) ||
	    bctl->data.target & ~allowed) {
		printk(KERN_ERR "btrfs: unable to start balance with target "
		       "data profile %llu\n",
		       (unsigned long long)bctl->data.target);
		ret = -EINVAL;
		goto out;
	}
	if (!profile_is_valid(bctl->meta.target, 1) ||
	    bctl->meta.target & ~allowed) {
		printk(KERN_ERR "btrfs: unable to start balance with target "
		       "metadata profile %llu\n",
		       (unsigned long long)bctl->meta.target);
		ret = -EINVAL;
		goto out;
	}
	if (!profile_is_valid(bctl->sys.target, 1) ||
	    bctl->sys.target & ~allowed) {
		printk(KERN_ERR "btrfs: unable to start balance with target "
		       "system profile %llu\n",
		       (unsigned long long)bctl->sys.target);
		ret = -EINVAL;
		goto out;
	}

	if (bctl->data.target & BTRFS_BLOCK_GROUP_DUP) {
		printk(KERN_ERR "btrfs: dup for data is not allowed\n");
		ret = -EINVAL;
		goto out;
	}

	/* allow to reduce meta or sys integrity only if force set */
	allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
			BTRFS_BLOCK_GROUP_RAID10;
	if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
	     (fs_info->avail_system_alloc_bits & allowed) &&
	     !(bctl->sys.target & allowed)) ||
	    ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
	     (fs_info->avail_metadata_alloc_bits & allowed) &&
	     !(bctl->meta.target & allowed))) {
		if (bctl->flags & BTRFS_BALANCE_FORCE) {
			printk(KERN_INFO "btrfs: force reducing metadata "
			       "integrity\n");
		} else {
			printk(KERN_ERR "btrfs: balance will reduce metadata "
			       "integrity, use force if you want this\n");
			ret = -EINVAL;
			goto out;
		}
	}

do_balance:
	set_balance_control(bctl);

	mutex_unlock(&fs_info->balance_mutex);

	ret = __btrfs_balance(fs_info);

	mutex_lock(&fs_info->balance_mutex);

	if (bargs) {
		memset(bargs, 0, sizeof(*bargs));
		update_ioctl_balance_args(fs_info, bargs);
	}

	__cancel_balance(fs_info);

	return ret;
out:
	kfree(bctl);
	return ret;
}
2555 * shrinking a device means finding all of the device extents past
2556 * the new size, and then following the back refs to the chunks.
2557 * The chunk relocation code actually frees the device extent
2559 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
2561 struct btrfs_trans_handle *trans;
2562 struct btrfs_root *root = device->dev_root;
2563 struct btrfs_dev_extent *dev_extent = NULL;
2564 struct btrfs_path *path;
2572 bool retried = false;
2573 struct extent_buffer *l;
2574 struct btrfs_key key;
2575 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
2576 u64 old_total = btrfs_super_total_bytes(super_copy);
2577 u64 old_size = device->total_bytes;
2578 u64 diff = device->total_bytes - new_size;
2580 if (new_size >= device->total_bytes)
2583 path = btrfs_alloc_path();
2591 device->total_bytes = new_size;
2592 if (device->writeable) {
2593 device->fs_devices->total_rw_bytes -= diff;
2594 spin_lock(&root->fs_info->free_chunk_lock);
2595 root->fs_info->free_chunk_space -= diff;
2596 spin_unlock(&root->fs_info->free_chunk_lock);
2598 unlock_chunks(root);
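/*
 * the in-memory size is shrunk up front so the allocator stops
 * placing new extents past new_size; the on-disk size is only
 * updated after every device extent beyond the boundary has been
 * relocated
 */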
2601 key.objectid = device->devid;
2602 key.offset = (u64)-1;
2603 key.type = BTRFS_DEV_EXTENT_KEY;
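/*
 * walk this device's extents from the highest offset down: searching
 * for key.offset (u64)-1 and stepping back one item lands on the
 * last dev extent, and relocating its chunk frees the extent for us
 */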
2606 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2610 ret = btrfs_previous_item(root, path, 0, key.type);
2615 btrfs_release_path(path);
2620 slot = path->slots[0];
2621 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
2623 if (key.objectid != device->devid) {
2624 btrfs_release_path(path);
2628 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
2629 length = btrfs_dev_extent_length(l, dev_extent);
2631 if (key.offset + length <= new_size) {
2632 btrfs_release_path(path);
2636 chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
2637 chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
2638 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
2639 btrfs_release_path(path);
2641 ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
2643 if (ret && ret != -ENOSPC)
2650 if (failed && !retried) {
2654 } else if (failed && retried) {
2658 device->total_bytes = old_size;
2659 if (device->writeable)
2660 device->fs_devices->total_rw_bytes += diff;
2661 spin_lock(&root->fs_info->free_chunk_lock);
2662 root->fs_info->free_chunk_space += diff;
2663 spin_unlock(&root->fs_info->free_chunk_lock);
2664 unlock_chunks(root);
2668 /* Shrinking succeeded, else we would be at "done". */
2669 trans = btrfs_start_transaction(root, 0);
2670 if (IS_ERR(trans)) {
2671 ret = PTR_ERR(trans);
2677 device->disk_total_bytes = new_size;
2678 /* Now btrfs_update_device() will change the on-disk size. */
2679 ret = btrfs_update_device(trans, device);
2681 unlock_chunks(root);
2682 btrfs_end_transaction(trans, root);
2685 WARN_ON(diff > old_total);
2686 btrfs_set_super_total_bytes(super_copy, old_total - diff);
2687 unlock_chunks(root);
2688 btrfs_end_transaction(trans, root);
2690 btrfs_free_path(path);
2694 static int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
2695 struct btrfs_root *root,
2696 struct btrfs_key *key,
2697 struct btrfs_chunk *chunk, int item_size)
2699 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
2700 struct btrfs_disk_key disk_key;
2704 array_size = btrfs_super_sys_array_size(super_copy);
2705 if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
2708 ptr = super_copy->sys_chunk_array + array_size;
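/*
 * the sys_chunk_array is a packed sequence of (disk key, chunk item)
 * pairs; append the new pair at the current end and grow the
 * recorded array size to match
 */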
2709 btrfs_cpu_key_to_disk(&disk_key, key);
2710 memcpy(ptr, &disk_key, sizeof(disk_key));
2711 ptr += sizeof(disk_key);
2712 memcpy(ptr, chunk, item_size);
2713 item_size += sizeof(disk_key);
2714 btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
2719 * sort the devices in descending order by max_avail, total_avail
2721 static int btrfs_cmp_device_info(const void *a, const void *b)
2723 const struct btrfs_device_info *di_a = a;
2724 const struct btrfs_device_info *di_b = b;
2726 if (di_a->max_avail > di_b->max_avail)
2728 if (di_a->max_avail < di_b->max_avail)
2730 if (di_a->total_avail > di_b->total_avail)
2732 if (di_a->total_avail < di_b->total_avail)
2737 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
2738 struct btrfs_root *extent_root,
2739 struct map_lookup **map_ret,
2740 u64 *num_bytes_out, u64 *stripe_size_out,
2741 u64 start, u64 type)
2743 struct btrfs_fs_info *info = extent_root->fs_info;
2744 struct btrfs_fs_devices *fs_devices = info->fs_devices;
2745 struct list_head *cur;
2746 struct map_lookup *map = NULL;
2747 struct extent_map_tree *em_tree;
2748 struct extent_map *em;
2749 struct btrfs_device_info *devices_info = NULL;
2751 int num_stripes; /* total number of stripes to allocate */
2752 int sub_stripes; /* sub_stripes info for map */
2753 int dev_stripes; /* stripes per dev */
2754 int devs_max; /* max devs to use */
2755 int devs_min; /* min devs needed */
2756 int devs_increment; /* ndevs has to be a multiple of this */
2757 int ncopies; /* how many copies of the data there are */
2759 u64 max_stripe_size;
2767 if ((type & BTRFS_BLOCK_GROUP_RAID1) &&
2768 (type & BTRFS_BLOCK_GROUP_DUP)) {
2770 type &= ~BTRFS_BLOCK_GROUP_DUP;
2773 if (list_empty(&fs_devices->alloc_list))
2780 devs_max = 0; /* 0 == as many as possible */
2784 * define the properties of each RAID type.
2785 * FIXME: move this to a global table and use it in all RAID allocation functions
2788 if (type & (BTRFS_BLOCK_GROUP_DUP)) {
2792 } else if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
2794 } else if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
2799 } else if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
2808 if (type & BTRFS_BLOCK_GROUP_DATA) {
2809 max_stripe_size = 1024 * 1024 * 1024;
2810 max_chunk_size = 10 * max_stripe_size;
2811 } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
2812 max_stripe_size = 256 * 1024 * 1024;
2813 max_chunk_size = max_stripe_size;
2814 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
2815 max_stripe_size = 8 * 1024 * 1024;
2816 max_chunk_size = 2 * max_stripe_size;
2818 printk(KERN_ERR "btrfs: invalid chunk type 0x%llx requested\n",
2823 /* we don't want a chunk larger than 10% of writeable space */
2824 max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
2827 devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices,
2832 cur = fs_devices->alloc_list.next;
2835 * in the first pass through the devices list, we gather information
2836 * about the available holes on each device.
2839 while (cur != &fs_devices->alloc_list) {
2840 struct btrfs_device *device;
2844 device = list_entry(cur, struct btrfs_device, dev_alloc_list);
2848 if (!device->writeable) {
2850 "btrfs: read-only device in alloc_list\n");
2855 if (!device->in_fs_metadata)
2858 if (device->total_bytes > device->bytes_used)
2859 total_avail = device->total_bytes - device->bytes_used;
2863 /* If there is no space on this device, skip it. */
2864 if (total_avail == 0)
2867 ret = find_free_dev_extent(trans, device,
2868 max_stripe_size * dev_stripes,
2869 &dev_offset, &max_avail);
2870 if (ret && ret != -ENOSPC)
2874 max_avail = max_stripe_size * dev_stripes;
2876 if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
2879 devices_info[ndevs].dev_offset = dev_offset;
2880 devices_info[ndevs].max_avail = max_avail;
2881 devices_info[ndevs].total_avail = total_avail;
2882 devices_info[ndevs].dev = device;
2887 * now sort the devices by hole size / available space
2889 sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
2890 btrfs_cmp_device_info, NULL);
2892 /* round down to number of usable stripes */
2893 ndevs -= ndevs % devs_increment;
2895 if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
2900 if (devs_max && ndevs > devs_max)
2903 * the primary goal is to maximize the number of stripes, so use as many
2904 * devices as possible, even if the stripes are not maximum sized.
2906 stripe_size = devices_info[ndevs-1].max_avail;
2907 num_stripes = ndevs * dev_stripes;
2909 if (stripe_size * num_stripes > max_chunk_size * ncopies) {
2910 stripe_size = max_chunk_size * ncopies;
2911 do_div(stripe_size, num_stripes);
2914 do_div(stripe_size, dev_stripes);
2915 do_div(stripe_size, BTRFS_STRIPE_LEN);
2916 stripe_size *= BTRFS_STRIPE_LEN;
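/*
 * do_div() divides in place and discards the remainder here, so the
 * two divisions plus the multiply above round stripe_size down to a
 * whole multiple of BTRFS_STRIPE_LEN
 */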
2918 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
2923 map->num_stripes = num_stripes;
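/*
 * lay the stripes out so that device i (in sorted order) holds
 * dev_stripes consecutive stripes, placed stripe_size apart inside
 * the free extent found on that device
 */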
2925 for (i = 0; i < ndevs; ++i) {
2926 for (j = 0; j < dev_stripes; ++j) {
2927 int s = i * dev_stripes + j;
2928 map->stripes[s].dev = devices_info[i].dev;
2929 map->stripes[s].physical = devices_info[i].dev_offset + j * stripe_size;
2933 map->sector_size = extent_root->sectorsize;
2934 map->stripe_len = BTRFS_STRIPE_LEN;
2935 map->io_align = BTRFS_STRIPE_LEN;
2936 map->io_width = BTRFS_STRIPE_LEN;
2938 map->sub_stripes = sub_stripes;
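/*
 * num_stripes counts physical stripes across all copies, so the
 * logical size of the chunk is the stripe size times the stripe
 * count divided by the number of copies
 */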
2941 num_bytes = stripe_size * (num_stripes / ncopies);
2943 *stripe_size_out = stripe_size;
2944 *num_bytes_out = num_bytes;
2946 trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);
2948 em = alloc_extent_map();
2953 em->bdev = (struct block_device *)map;
2955 em->len = num_bytes;
2956 em->block_start = 0;
2957 em->block_len = em->len;
2959 em_tree = &extent_root->fs_info->mapping_tree.map_tree;
2960 write_lock(&em_tree->lock);
2961 ret = add_extent_mapping(em_tree, em);
2962 write_unlock(&em_tree->lock);
2964 free_extent_map(em);
2966 ret = btrfs_make_block_group(trans, extent_root, 0, type,
2967 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
2971 for (i = 0; i < map->num_stripes; ++i) {
2972 struct btrfs_device *device;
2975 device = map->stripes[i].dev;
2976 dev_offset = map->stripes[i].physical;
2978 ret = btrfs_alloc_dev_extent(trans, device,
2979 info->chunk_root->root_key.objectid,
2980 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
2981 start, dev_offset, stripe_size);
2985 kfree(devices_info);
2990 kfree(devices_info);
2994 static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
2995 struct btrfs_root *extent_root,
2996 struct map_lookup *map, u64 chunk_offset,
2997 u64 chunk_size, u64 stripe_size)
3000 struct btrfs_key key;
3001 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
3002 struct btrfs_device *device;
3003 struct btrfs_chunk *chunk;
3004 struct btrfs_stripe *stripe;
3005 size_t item_size = btrfs_chunk_item_size(map->num_stripes);
3009 chunk = kzalloc(item_size, GFP_NOFS);
3014 while (index < map->num_stripes) {
3015 device = map->stripes[index].dev;
3016 device->bytes_used += stripe_size;
3017 ret = btrfs_update_device(trans, device);
3022 spin_lock(&extent_root->fs_info->free_chunk_lock);
3023 extent_root->fs_info->free_chunk_space -= (stripe_size *
3025 spin_unlock(&extent_root->fs_info->free_chunk_lock);
3028 stripe = &chunk->stripe;
3029 while (index < map->num_stripes) {
3030 device = map->stripes[index].dev;
3031 dev_offset = map->stripes[index].physical;
3033 btrfs_set_stack_stripe_devid(stripe, device->devid);
3034 btrfs_set_stack_stripe_offset(stripe, dev_offset);
3035 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
3040 btrfs_set_stack_chunk_length(chunk, chunk_size);
3041 btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
3042 btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
3043 btrfs_set_stack_chunk_type(chunk, map->type);
3044 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
3045 btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
3046 btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
3047 btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
3048 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
3050 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3051 key.type = BTRFS_CHUNK_ITEM_KEY;
3052 key.offset = chunk_offset;
3054 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
3057 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
3058 ret = btrfs_add_system_chunk(trans, chunk_root, &key, chunk,
3068 * Chunk allocation falls into two parts. The first part does the work
3069 * that makes the newly allocated chunk usable, but does not do any
3070 * operation that modifies the chunk tree. The second part does the work
3071 * that requires modifying the chunk tree. This division is important for
3072 * the bootstrap process of adding storage to a seed btrfs.
3074 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3075 struct btrfs_root *extent_root, u64 type)
3080 struct map_lookup *map;
3081 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
3084 ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3089 ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
3090 &stripe_size, chunk_offset, type);
3094 ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
3095 chunk_size, stripe_size);
3100 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
3101 struct btrfs_root *root,
3102 struct btrfs_device *device)
3105 u64 sys_chunk_offset;
3109 u64 sys_stripe_size;
3111 struct map_lookup *map;
3112 struct map_lookup *sys_map;
3113 struct btrfs_fs_info *fs_info = root->fs_info;
3114 struct btrfs_root *extent_root = fs_info->extent_root;
3117 ret = find_next_chunk(fs_info->chunk_root,
3118 BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset);
3122 alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
3123 fs_info->avail_metadata_alloc_bits;
3124 alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
3126 ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
3127 &stripe_size, chunk_offset, alloc_profile);
3130 sys_chunk_offset = chunk_offset + chunk_size;
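/*
 * place the first system chunk directly after the metadata chunk in
 * the logical address space; both must exist before the chunk tree
 * can be modified (see the comment below)
 */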
3132 alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM |
3133 fs_info->avail_system_alloc_bits;
3134 alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
3136 ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
3137 &sys_chunk_size, &sys_stripe_size,
3138 sys_chunk_offset, alloc_profile);
3141 ret = btrfs_add_device(trans, fs_info->chunk_root, device);
3145 * Modifying the chunk tree requires allocating new blocks from both
3146 * the system block group and the metadata block group. So we can only
3147 * do operations that modify the chunk tree after both block
3148 * groups have been created.
3150 ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
3151 chunk_size, stripe_size);
3154 ret = __finish_chunk_alloc(trans, extent_root, sys_map,
3155 sys_chunk_offset, sys_chunk_size,
3161 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
3163 struct extent_map *em;
3164 struct map_lookup *map;
3165 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
3169 read_lock(&map_tree->map_tree.lock);
3170 em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
3171 read_unlock(&map_tree->map_tree.lock);
3175 if (btrfs_test_opt(root, DEGRADED)) {
3176 free_extent_map(em);
3180 map = (struct map_lookup *)em->bdev;
3181 for (i = 0; i < map->num_stripes; i++) {
3182 if (!map->stripes[i].dev->writeable) {
3187 free_extent_map(em);
3191 void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
3193 extent_map_tree_init(&tree->map_tree);
3196 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
3198 struct extent_map *em;
3201 write_lock(&tree->map_tree.lock);
3202 em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
3204 remove_extent_mapping(&tree->map_tree, em);
3205 write_unlock(&tree->map_tree.lock);
3210 free_extent_map(em);
3211 /* once for the tree */
3212 free_extent_map(em);
3216 int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
3218 struct extent_map *em;
3219 struct map_lookup *map;
3220 struct extent_map_tree *em_tree = &map_tree->map_tree;
3223 read_lock(&em_tree->lock);
3224 em = lookup_extent_mapping(em_tree, logical, len);
3225 read_unlock(&em_tree->lock);
3228 BUG_ON(em->start > logical || em->start + em->len < logical);
3229 map = (struct map_lookup *)em->bdev;
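/*
 * every stripe is a full copy for RAID1 and DUP, RAID10 keeps
 * sub_stripes copies per stripe set, and everything else stores a
 * single copy
 */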
3230 if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
3231 ret = map->num_stripes;
3232 else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
3233 ret = map->sub_stripes;
3236 free_extent_map(em);
3240 static int find_live_mirror(struct map_lookup *map, int first, int num,
3244 if (map->stripes[optimal].dev->bdev)
3246 for (i = first; i < first + num; i++) {
3247 if (map->stripes[i].dev->bdev)
3250 /* we couldn't find one that doesn't fail. Just return something
3251 * and the I/O error handling code will clean up eventually
3256 static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
3257 u64 logical, u64 *length,
3258 struct btrfs_bio **bbio_ret,
3261 struct extent_map *em;
3262 struct map_lookup *map;
3263 struct extent_map_tree *em_tree = &map_tree->map_tree;
3266 u64 stripe_end_offset;
3270 int stripes_allocated = 8;
3271 int stripes_required = 1;
3276 struct btrfs_bio *bbio = NULL;
3278 if (bbio_ret && !(rw & (REQ_WRITE | REQ_DISCARD)))
3279 stripes_allocated = 1;
3282 bbio = kzalloc(btrfs_bio_size(stripes_allocated),
3287 atomic_set(&bbio->error, 0);
3290 read_lock(&em_tree->lock);
3291 em = lookup_extent_mapping(em_tree, logical, *length);
3292 read_unlock(&em_tree->lock);
3295 printk(KERN_CRIT "unable to find logical %llu len %llu\n",
3296 (unsigned long long)logical,
3297 (unsigned long long)*length);
3301 BUG_ON(em->start > logical || em->start + em->len < logical);
3302 map = (struct map_lookup *)em->bdev;
3303 offset = logical - em->start;
3305 if (mirror_num > map->num_stripes)
3308 /* if our btrfs_bio struct is too small, back off and try again */
3309 if (rw & REQ_WRITE) {
3310 if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
3311 BTRFS_BLOCK_GROUP_DUP)) {
3312 stripes_required = map->num_stripes;
3314 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3315 stripes_required = map->sub_stripes;
3319 if (rw & REQ_DISCARD) {
3320 if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK)
3321 stripes_required = map->num_stripes;
3323 if (bbio_ret && (rw & (REQ_WRITE | REQ_DISCARD)) &&
3324 stripes_allocated < stripes_required) {
3325 stripes_allocated = map->num_stripes;
3326 free_extent_map(em);
3332 * stripe_nr counts the total number of stripes we have to stride
3333 * to get to this block
3335 do_div(stripe_nr, map->stripe_len);
3337 stripe_offset = stripe_nr * map->stripe_len;
3338 BUG_ON(offset < stripe_offset);
3340 /* stripe_offset is the offset of this block in its stripe */
3341 stripe_offset = offset - stripe_offset;
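/*
 * worked example, assuming the default 64K stripe_len: an offset of
 * 200K into the chunk gives stripe_nr = 3 and stripe_offset = 8K,
 * i.e. the block starts 8K into the fourth stripe
 */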
3343 if (rw & REQ_DISCARD)
3344 *length = min_t(u64, em->len - offset, *length);
3345 else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
3346 /* we limit the length of each bio to what fits in a stripe */
3347 *length = min_t(u64, em->len - offset,
3348 map->stripe_len - stripe_offset);
3350 *length = em->len - offset;
3358 stripe_nr_orig = stripe_nr;
3359 stripe_nr_end = (offset + *length + map->stripe_len - 1) &
3360 (~(map->stripe_len - 1));
3361 do_div(stripe_nr_end, map->stripe_len);
3362 stripe_end_offset = stripe_nr_end * map->stripe_len -
3364 if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3365 if (rw & REQ_DISCARD)
3366 num_stripes = min_t(u64, map->num_stripes,
3367 stripe_nr_end - stripe_nr_orig);
3368 stripe_index = do_div(stripe_nr, map->num_stripes);
3369 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
3370 if (rw & (REQ_WRITE | REQ_DISCARD))
3371 num_stripes = map->num_stripes;
3372 else if (mirror_num)
3373 stripe_index = mirror_num - 1;
3375 stripe_index = find_live_mirror(map, 0,
3377 current->pid % map->num_stripes);
3378 mirror_num = stripe_index + 1;
3381 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
3382 if (rw & (REQ_WRITE | REQ_DISCARD)) {
3383 num_stripes = map->num_stripes;
3384 } else if (mirror_num) {
3385 stripe_index = mirror_num - 1;
3390 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3391 int factor = map->num_stripes / map->sub_stripes;
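/*
 * RAID10 stripes across num_stripes / sub_stripes sets of mirrored
 * devices: do_div() picks the stripe set, and scaling by sub_stripes
 * points stripe_index at the first device of that set
 */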
3393 stripe_index = do_div(stripe_nr, factor);
3394 stripe_index *= map->sub_stripes;
3397 num_stripes = map->sub_stripes;
3398 else if (rw & REQ_DISCARD)
3399 num_stripes = min_t(u64, map->sub_stripes *
3400 (stripe_nr_end - stripe_nr_orig),
3402 else if (mirror_num)
3403 stripe_index += mirror_num - 1;
3405 stripe_index = find_live_mirror(map, stripe_index,
3406 map->sub_stripes, stripe_index +
3407 current->pid % map->sub_stripes);
3408 mirror_num = stripe_index + 1;
3412 * after this do_div call, stripe_nr is the number of stripes
3413 * on this device we have to walk to find the data, and
3414 * stripe_index is the number of our device in the stripe array
3416 stripe_index = do_div(stripe_nr, map->num_stripes);
3417 mirror_num = stripe_index + 1;
3419 BUG_ON(stripe_index >= map->num_stripes);
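/*
 * a discard may span many stripes, so each returned stripe carries
 * its own length; the striped profiles below work out how much of
 * the range lands on each device
 */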
3421 if (rw & REQ_DISCARD) {
3422 for (i = 0; i < num_stripes; i++) {
3423 bbio->stripes[i].physical =
3424 map->stripes[stripe_index].physical +
3425 stripe_offset + stripe_nr * map->stripe_len;
3426 bbio->stripes[i].dev = map->stripes[stripe_index].dev;
3428 if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3430 u32 last_stripe = 0;
3433 div_u64_rem(stripe_nr_end - 1,
3437 for (j = 0; j < map->num_stripes; j++) {
3440 div_u64_rem(stripe_nr_end - 1 - j,
3441 map->num_stripes, &test);
3442 if (test == stripe_index)
3445 stripes = stripe_nr_end - 1 - j;
3446 do_div(stripes, map->num_stripes);
3447 bbio->stripes[i].length = map->stripe_len *
3448 (stripes - stripe_nr + 1);
3451 bbio->stripes[i].length -=
3455 if (stripe_index == last_stripe)
3456 bbio->stripes[i].length -=
3458 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3461 int factor = map->num_stripes /
3463 u32 last_stripe = 0;
3465 div_u64_rem(stripe_nr_end - 1,
3466 factor, &last_stripe);
3467 last_stripe *= map->sub_stripes;
3469 for (j = 0; j < factor; j++) {
3472 div_u64_rem(stripe_nr_end - 1 - j,
3476 stripe_index / map->sub_stripes)
3479 stripes = stripe_nr_end - 1 - j;
3480 do_div(stripes, factor);
3481 bbio->stripes[i].length = map->stripe_len *
3482 (stripes - stripe_nr + 1);
3484 if (i < map->sub_stripes) {
3485 bbio->stripes[i].length -=
3487 if (i == map->sub_stripes - 1)
3490 if (stripe_index >= last_stripe &&
3491 stripe_index <= (last_stripe +
3492 map->sub_stripes - 1)) {
3493 bbio->stripes[i].length -=
3497 bbio->stripes[i].length = *length;
3500 if (stripe_index == map->num_stripes) {
3501 /* This could only happen for RAID0/10 */
3507 for (i = 0; i < num_stripes; i++) {
3508 bbio->stripes[i].physical =
3509 map->stripes[stripe_index].physical +
3511 stripe_nr * map->stripe_len;
3512 bbio->stripes[i].dev =
3513 map->stripes[stripe_index].dev;
3519 bbio->num_stripes = num_stripes;
3520 bbio->max_errors = max_errors;
3521 bbio->mirror_num = mirror_num;
3524 free_extent_map(em);
3528 int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
3529 u64 logical, u64 *length,
3530 struct btrfs_bio **bbio_ret, int mirror_num)
3532 return __btrfs_map_block(map_tree, rw, logical, length, bbio_ret,
3536 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
3537 u64 chunk_start, u64 physical, u64 devid,
3538 u64 **logical, int *naddrs, int *stripe_len)
3540 struct extent_map_tree *em_tree = &map_tree->map_tree;
3541 struct extent_map *em;
3542 struct map_lookup *map;
3549 read_lock(&em_tree->lock);
3550 em = lookup_extent_mapping(em_tree, chunk_start, 1);
3551 read_unlock(&em_tree->lock);
3553 BUG_ON(!em || em->start != chunk_start);
3554 map = (struct map_lookup *)em->bdev;
3557 if (map->type & BTRFS_BLOCK_GROUP_RAID10)
3558 do_div(length, map->num_stripes / map->sub_stripes);
3559 else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
3560 do_div(length, map->num_stripes);
3562 buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
3565 for (i = 0; i < map->num_stripes; i++) {
3566 if (devid && map->stripes[i].dev->devid != devid)
3568 if (map->stripes[i].physical > physical ||
3569 map->stripes[i].physical + length <= physical)
3572 stripe_nr = physical - map->stripes[i].physical;
3573 do_div(stripe_nr, map->stripe_len);
3575 if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3576 stripe_nr = stripe_nr * map->num_stripes + i;
3577 do_div(stripe_nr, map->sub_stripes);
3578 } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3579 stripe_nr = stripe_nr * map->num_stripes + i;
3581 bytenr = chunk_start + stripe_nr * map->stripe_len;
3582 WARN_ON(nr >= map->num_stripes);
3583 for (j = 0; j < nr; j++) {
3584 if (buf[j] == bytenr)
3588 WARN_ON(nr >= map->num_stripes);
3595 *stripe_len = map->stripe_len;
3597 free_extent_map(em);
3601 static void btrfs_end_bio(struct bio *bio, int err)
3603 struct btrfs_bio *bbio = bio->bi_private;
3604 int is_orig_bio = 0;
3607 atomic_inc(&bbio->error);
3609 if (bio == bbio->orig_bio)
3612 if (atomic_dec_and_test(&bbio->stripes_pending)) {
3615 bio = bbio->orig_bio;
3617 bio->bi_private = bbio->private;
3618 bio->bi_end_io = bbio->end_io;
3619 bio->bi_bdev = (struct block_device *)
3620 (unsigned long)bbio->mirror_num;
3621 /* only send an error to the higher layers if it is
3622 * beyond the tolerance of the multi-bio
3624 if (atomic_read(&bbio->error) > bbio->max_errors) {
3628 * this bio is actually up to date, we didn't
3629 * go over the max number of errors
3631 set_bit(BIO_UPTODATE, &bio->bi_flags);
3636 bio_endio(bio, err);
3637 } else if (!is_orig_bio) {
3642 struct async_sched {
3645 struct btrfs_fs_info *info;
3646 struct btrfs_work work;
3650 * see run_scheduled_bios for a description of why bios are collected for async submission.
3653 * This will add one bio to the pending list for a device and make sure
3654 * the work struct is scheduled.
3656 static noinline int schedule_bio(struct btrfs_root *root,
3657 struct btrfs_device *device,
3658 int rw, struct bio *bio)
3660 int should_queue = 1;
3661 struct btrfs_pending_bios *pending_bios;
3663 /* don't bother with additional async steps for reads, right now */
3664 if (!(rw & REQ_WRITE)) {
3666 submit_bio(rw, bio);
3672 * nr_async_bios allows us to reliably return congestion to the
3673 * higher layers. Otherwise, the async bio makes it appear we have
3674 * made progress against dirty pages when we've really just put it
3675 * on a queue for later
3677 atomic_inc(&root->fs_info->nr_async_bios);
3678 WARN_ON(bio->bi_next);
3679 bio->bi_next = NULL;
3682 spin_lock(&device->io_lock);
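/*
 * synchronous bios get their own pending list so the worker thread
 * can give them priority over bulk async writeback
 */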
3683 if (bio->bi_rw & REQ_SYNC)
3684 pending_bios = &device->pending_sync_bios;
3686 pending_bios = &device->pending_bios;
3688 if (pending_bios->tail)
3689 pending_bios->tail->bi_next = bio;
3691 pending_bios->tail = bio;
3692 if (!pending_bios->head)
3693 pending_bios->head = bio;
3694 if (device->running_pending)
3697 spin_unlock(&device->io_lock);
3700 btrfs_queue_worker(&root->fs_info->submit_workers,
3705 int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
3706 int mirror_num, int async_submit)
3708 struct btrfs_mapping_tree *map_tree;
3709 struct btrfs_device *dev;
3710 struct bio *first_bio = bio;
3711 u64 logical = (u64)bio->bi_sector << 9;
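/*
 * bi_sector counts 512-byte sectors; shifting by 9 converts it to a
 * byte offset in the btrfs logical address space
 */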
3717 struct btrfs_bio *bbio = NULL;
3719 length = bio->bi_size;
3720 map_tree = &root->fs_info->mapping_tree;
3721 map_length = length;
3723 ret = btrfs_map_block(map_tree, rw, logical, &map_length, &bbio,
3727 total_devs = bbio->num_stripes;
3728 if (map_length < length) {
3729 printk(KERN_CRIT "mapping failed logical %llu bio len %llu "
3730 "len %llu\n", (unsigned long long)logical,
3731 (unsigned long long)length,
3732 (unsigned long long)map_length);
3736 bbio->orig_bio = first_bio;
3737 bbio->private = first_bio->bi_private;
3738 bbio->end_io = first_bio->bi_end_io;
3739 atomic_set(&bbio->stripes_pending, bbio->num_stripes);
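/*
 * one bio is submitted per stripe; btrfs_end_bio() completes the
 * original bio once stripes_pending reaches zero and reports an
 * error only if more than max_errors copies failed
 */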
3741 while (dev_nr < total_devs) {
3742 if (dev_nr < total_devs - 1) {
3743 bio = bio_clone(first_bio, GFP_NOFS);
3748 bio->bi_private = bbio;
3749 bio->bi_end_io = btrfs_end_bio;
3750 bio->bi_sector = bbio->stripes[dev_nr].physical >> 9;
3751 dev = bbio->stripes[dev_nr].dev;
3752 if (dev && dev->bdev && (rw != WRITE || dev->writeable)) {
3753 pr_debug("btrfs_map_bio: rw %d, sector=%llu, dev=%lu "
3754 "(%s id %llu), size=%u\n", rw,
3755 (u64)bio->bi_sector, (u_long)dev->bdev->bd_dev,
3756 dev->name, dev->devid, bio->bi_size);
3757 bio->bi_bdev = dev->bdev;
3759 schedule_bio(root, dev, rw, bio);
3761 submit_bio(rw, bio);
3763 bio->bi_bdev = root->fs_info->fs_devices->latest_bdev;
3764 bio->bi_sector = logical >> 9;
3765 bio_endio(bio, -EIO);
3772 struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
3775 struct btrfs_device *device;
3776 struct btrfs_fs_devices *cur_devices;
3778 cur_devices = root->fs_info->fs_devices;
3779 while (cur_devices) {
3781 !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
3782 device = __find_device(&cur_devices->devices,
3787 cur_devices = cur_devices->seed;
3792 static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
3793 u64 devid, u8 *dev_uuid)
3795 struct btrfs_device *device;
3796 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
3798 device = kzalloc(sizeof(*device), GFP_NOFS);
3801 list_add(&device->dev_list,
3802 &fs_devices->devices);
3803 device->dev_root = root->fs_info->dev_root;
3804 device->devid = devid;
3805 device->work.func = pending_bios_fn;
3806 device->fs_devices = fs_devices;
3807 device->missing = 1;
3808 fs_devices->num_devices++;
3809 fs_devices->missing_devices++;
3810 spin_lock_init(&device->io_lock);
3811 INIT_LIST_HEAD(&device->dev_alloc_list);
3812 memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
3816 static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
3817 struct extent_buffer *leaf,
3818 struct btrfs_chunk *chunk)
3820 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
3821 struct map_lookup *map;
3822 struct extent_map *em;
3826 u8 uuid[BTRFS_UUID_SIZE];
3831 logical = key->offset;
3832 length = btrfs_chunk_length(leaf, chunk);
3834 read_lock(&map_tree->map_tree.lock);
3835 em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
3836 read_unlock(&map_tree->map_tree.lock);
3838 /* already mapped? */
3839 if (em && em->start <= logical && em->start + em->len > logical) {
3840 free_extent_map(em);
3843 free_extent_map(em);
3846 em = alloc_extent_map();
3849 num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3850 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
3852 free_extent_map(em);
3856 em->bdev = (struct block_device *)map;
3857 em->start = logical;
3859 em->block_start = 0;
3860 em->block_len = em->len;
3862 map->num_stripes = num_stripes;
3863 map->io_width = btrfs_chunk_io_width(leaf, chunk);
3864 map->io_align = btrfs_chunk_io_align(leaf, chunk);
3865 map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
3866 map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
3867 map->type = btrfs_chunk_type(leaf, chunk);
3868 map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
3869 for (i = 0; i < num_stripes; i++) {
3870 map->stripes[i].physical =
3871 btrfs_stripe_offset_nr(leaf, chunk, i);
3872 devid = btrfs_stripe_devid_nr(leaf, chunk, i);
3873 read_extent_buffer(leaf, uuid, (unsigned long)
3874 btrfs_stripe_dev_uuid_nr(chunk, i),
3876 map->stripes[i].dev = btrfs_find_device(root, devid, uuid,
3878 if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
3880 free_extent_map(em);
3883 if (!map->stripes[i].dev) {
3884 map->stripes[i].dev =
3885 add_missing_dev(root, devid, uuid);
3886 if (!map->stripes[i].dev) {
3888 free_extent_map(em);
3892 map->stripes[i].dev->in_fs_metadata = 1;
3895 write_lock(&map_tree->map_tree.lock);
3896 ret = add_extent_mapping(&map_tree->map_tree, em);
3897 write_unlock(&map_tree->map_tree.lock);
3899 free_extent_map(em);
3904 static int fill_device_from_item(struct extent_buffer *leaf,
3905 struct btrfs_dev_item *dev_item,
3906 struct btrfs_device *device)
3910 device->devid = btrfs_device_id(leaf, dev_item);
3911 device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
3912 device->total_bytes = device->disk_total_bytes;
3913 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
3914 device->type = btrfs_device_type(leaf, dev_item);
3915 device->io_align = btrfs_device_io_align(leaf, dev_item);
3916 device->io_width = btrfs_device_io_width(leaf, dev_item);
3917 device->sector_size = btrfs_device_sector_size(leaf, dev_item);
3919 ptr = (unsigned long)btrfs_device_uuid(dev_item);
3920 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
3925 static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
3927 struct btrfs_fs_devices *fs_devices;
3930 mutex_lock(&uuid_mutex);
3932 fs_devices = root->fs_info->fs_devices->seed;
3933 while (fs_devices) {
3934 if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
3938 fs_devices = fs_devices->seed;
3941 fs_devices = find_fsid(fsid);
3947 fs_devices = clone_fs_devices(fs_devices);
3948 if (IS_ERR(fs_devices)) {
3949 ret = PTR_ERR(fs_devices);
3953 ret = __btrfs_open_devices(fs_devices, FMODE_READ,
3954 root->fs_info->bdev_holder);
3958 if (!fs_devices->seeding) {
3959 __btrfs_close_devices(fs_devices);
3960 free_fs_devices(fs_devices);
3965 fs_devices->seed = root->fs_info->fs_devices->seed;
3966 root->fs_info->fs_devices->seed = fs_devices;
3968 mutex_unlock(&uuid_mutex);
3972 static int read_one_dev(struct btrfs_root *root,
3973 struct extent_buffer *leaf,
3974 struct btrfs_dev_item *dev_item)
3976 struct btrfs_device *device;
3979 u8 fs_uuid[BTRFS_UUID_SIZE];
3980 u8 dev_uuid[BTRFS_UUID_SIZE];
3982 devid = btrfs_device_id(leaf, dev_item);
3983 read_extent_buffer(leaf, dev_uuid,
3984 (unsigned long)btrfs_device_uuid(dev_item),
3986 read_extent_buffer(leaf, fs_uuid,
3987 (unsigned long)btrfs_device_fsid(dev_item),
3990 if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
3991 ret = open_seed_devices(root, fs_uuid);
3992 if (ret && !btrfs_test_opt(root, DEGRADED))
3996 device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
3997 if (!device || !device->bdev) {
3998 if (!btrfs_test_opt(root, DEGRADED))
4002 printk(KERN_WARNING "warning devid %llu missing\n",
4003 (unsigned long long)devid);
4004 device = add_missing_dev(root, devid, dev_uuid);
4007 } else if (!device->missing) {
4009 * this happens when a device that was properly set up
4010 * in the device info lists suddenly goes bad.
4011 * device->bdev is NULL, and so we have to set
4012 * device->missing to one here
4014 root->fs_info->fs_devices->missing_devices++;
4015 device->missing = 1;
4019 if (device->fs_devices != root->fs_info->fs_devices) {
4020 BUG_ON(device->writeable);
4021 if (device->generation !=
4022 btrfs_device_generation(leaf, dev_item))
4026 fill_device_from_item(leaf, dev_item, device);
4027 device->dev_root = root->fs_info->dev_root;
4028 device->in_fs_metadata = 1;
4029 if (device->writeable) {
4030 device->fs_devices->total_rw_bytes += device->total_bytes;
4031 spin_lock(&root->fs_info->free_chunk_lock);
4032 root->fs_info->free_chunk_space += device->total_bytes -
4034 spin_unlock(&root->fs_info->free_chunk_lock);
4040 int btrfs_read_sys_array(struct btrfs_root *root)
4042 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
4043 struct extent_buffer *sb;
4044 struct btrfs_disk_key *disk_key;
4045 struct btrfs_chunk *chunk;
4047 unsigned long sb_ptr;
4053 struct btrfs_key key;
4055 sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
4056 BTRFS_SUPER_INFO_SIZE);
4059 btrfs_set_buffer_uptodate(sb);
4060 btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
4062 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
4063 array_size = btrfs_super_sys_array_size(super_copy);
4065 ptr = super_copy->sys_chunk_array;
4066 sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
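/*
 * the array is a packed sequence of (disk key, chunk item) pairs;
 * ptr walks the private superblock copy while sb_ptr tracks the
 * matching offset inside the extent buffer
 */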
4069 while (cur < array_size) {
4070 disk_key = (struct btrfs_disk_key *)ptr;
4071 btrfs_disk_key_to_cpu(&key, disk_key);
4073 len = sizeof(*disk_key); ptr += len;
4077 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
4078 chunk = (struct btrfs_chunk *)sb_ptr;
4079 ret = read_one_chunk(root, &key, sb, chunk);
4082 num_stripes = btrfs_chunk_num_stripes(sb, chunk);
4083 len = btrfs_chunk_item_size(num_stripes);
4092 free_extent_buffer(sb);
4096 int btrfs_read_chunk_tree(struct btrfs_root *root)
4098 struct btrfs_path *path;
4099 struct extent_buffer *leaf;
4100 struct btrfs_key key;
4101 struct btrfs_key found_key;
4105 root = root->fs_info->chunk_root;
4107 path = btrfs_alloc_path();
4111 /* first we search for all of the device items, and then we
4112 * read in all of the chunk items. This way we can create chunk
4113 * mappings that reference all of the devices that are found
4115 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
4119 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4123 leaf = path->nodes[0];
4124 slot = path->slots[0];
4125 if (slot >= btrfs_header_nritems(leaf)) {
4126 ret = btrfs_next_leaf(root, path);
4133 btrfs_item_key_to_cpu(leaf, &found_key, slot);
4134 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
4135 if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
4137 if (found_key.type == BTRFS_DEV_ITEM_KEY) {
4138 struct btrfs_dev_item *dev_item;
4139 dev_item = btrfs_item_ptr(leaf, slot,
4140 struct btrfs_dev_item);
4141 ret = read_one_dev(root, leaf, dev_item);
4145 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
4146 struct btrfs_chunk *chunk;
4147 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
4148 ret = read_one_chunk(root, &found_key, leaf, chunk);
4154 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
4156 btrfs_release_path(path);
4161 btrfs_free_path(path);