/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <linux/kthread.h>
#include <asm/div64.h>
#include "extent_map.h"
#include "transaction.h"
#include "print-tree.h"
#include "async-thread.h"
#include "check-integrity.h"
static int init_first_rw_device(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_device *device);
static int btrfs_relocate_sys_chunks(struct btrfs_root *root);

static DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
static void lock_chunks(struct btrfs_root *root)
{
	mutex_lock(&root->fs_info->chunk_mutex);
}

static void unlock_chunks(struct btrfs_root *root)
{
	mutex_unlock(&root->fs_info->chunk_mutex);
}
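
/*
 * Free an fs_devices and every btrfs_device on it. Only meaningful for
 * a list that is no longer opened; the WARN_ON below catches misuse.
 */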
static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		kfree(device->name);
		kfree(device);
	}
	kfree(fs_devices);
}
int btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, list);
		list_del(&fs_devices->list);
		free_fs_devices(fs_devices);
	}
	return 0;
}
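
/*
 * Lookup helpers: __find_device matches a devid (and, when given, a
 * device uuid) within one device list; find_fsid scans the global
 * fs_uuids list for a filesystem with a matching fsid.
 */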
static noinline struct btrfs_device *__find_device(struct list_head *head,
						   u64 devid, u8 *uuid)
{
	struct btrfs_device *dev;

	list_for_each_entry(dev, head, dev_list) {
		if (dev->devid == devid &&
		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE)))
			return dev;
	}
	return NULL;
}
static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, list) {
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}
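
/*
 * Splice a chain of bios back onto the front of a pending list,
 * preserving order, so requeued bios are retried before newer ones.
 */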
static void requeue_list(struct btrfs_pending_bios *pending_bios,
			 struct bio *head, struct bio *tail)
{
	struct bio *old_head;

	old_head = pending_bios->head;
	pending_bios->head = head;
	if (pending_bios->tail)
		tail->bi_next = old_head;
	else
		pending_bios->tail = tail;
}
/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device. This greatly
 * improves the scheduler's ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block. The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested. This way, multiple devices
 * can make progress from a single worker thread.
 */
static noinline int run_scheduled_bios(struct btrfs_device *device)
{
	struct bio *pending;
	struct backing_dev_info *bdi;
	struct btrfs_fs_info *fs_info;
	struct btrfs_pending_bios *pending_bios;
	struct bio *tail;
	struct bio *cur;
	int again = 0;
	unsigned long num_run;
	unsigned long batch_run = 0;
	unsigned long limit;
	unsigned long last_waited = 0;
	int force_reg = 0;
	int sync_pending = 0;
	struct blk_plug plug;
	/*
	 * this function runs all the bios we've collected for
	 * a particular device. We don't want to wander off to
	 * another device without first sending all of these down.
	 * So, set up a plug here and finish it off before we return
	 */
	blk_start_plug(&plug);
	bdi = blk_get_backing_dev_info(device->bdev);
	fs_info = device->dev_root->fs_info;
	limit = btrfs_async_submit_limit(fs_info);
	limit = limit * 2 / 3;

loop:
	spin_lock(&device->io_lock);

loop_lock:
	num_run = 0;
	/* take all the bios off the list at once and process them
	 * later on (without the lock held). But, remember the
	 * tail and other pointers so the bios can be properly reinserted
	 * into the list if we hit congestion
	 */
	if (!force_reg && device->pending_sync_bios.head) {
		pending_bios = &device->pending_sync_bios;
		force_reg = 1;
	} else {
		pending_bios = &device->pending_bios;
		force_reg = 0;
	}

	pending = pending_bios->head;
	tail = pending_bios->tail;
	WARN_ON(pending && !tail);

	/*
	 * if pending was null this time around, no bios need processing
	 * at all and we can stop. Otherwise it'll loop back up again
	 * and do an additional check so no bios are missed.
	 *
	 * device->running_pending is used to synchronize with the
	 * schedule_bio code.
	 */
	if (device->pending_sync_bios.head == NULL &&
	    device->pending_bios.head == NULL) {
		again = 0;
		device->running_pending = 0;
	} else {
		again = 1;
		device->running_pending = 1;
	}

	pending_bios->head = NULL;
	pending_bios->tail = NULL;

	spin_unlock(&device->io_lock);
	while (pending) {
		/* we want to work on both lists, but do more bios on the
		 * sync list than the regular list
		 */
		if ((num_run > 32 &&
		    pending_bios != &device->pending_sync_bios &&
		    device->pending_sync_bios.head) ||
		   (num_run > 64 && pending_bios == &device->pending_sync_bios &&
		    device->pending_bios.head)) {
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			goto done;
		}

		cur = pending;
		pending = pending->bi_next;
		cur->bi_next = NULL;
		atomic_dec(&fs_info->nr_async_bios);

		if (atomic_read(&fs_info->nr_async_bios) < limit &&
		    waitqueue_active(&fs_info->async_submit_wait))
			wake_up(&fs_info->async_submit_wait);

		BUG_ON(atomic_read(&cur->bi_cnt) == 0);
		/*
		 * if we're doing the sync list, record that our
		 * plug has some sync requests on it
		 *
		 * If we're doing the regular list and there are
		 * sync requests sitting around, unplug before
		 * we add more
		 */
		if (pending_bios == &device->pending_sync_bios) {
			sync_pending = 1;
		} else if (sync_pending) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}

		btrfsic_submit_bio(cur->bi_rw, cur);
		num_run++;
		batch_run++;
		/*
		 * we made progress, there is more work to do and the bdi
		 * is now congested. Back off and let other work structs
		 * run instead
		 */
		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
		    fs_info->fs_devices->open_devices > 1) {
			struct io_context *ioc;

			ioc = current->io_context;

			/*
			 * the main goal here is that we don't want to
			 * block if we're going to be able to submit
			 * more requests without blocking.
			 *
			 * This code does two great things, it pokes into
			 * the elevator code from a filesystem _and_
			 * it makes assumptions about how batching works.
			 */
			if (ioc && ioc->nr_batch_requests > 0 &&
			    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
			    (last_waited == 0 ||
			     ioc->last_waited == last_waited)) {
				/*
				 * we want to go through our batch of
				 * requests and stop. So, we copy out
				 * the ioc->last_waited time and test
				 * against it before looping
				 */
				last_waited = ioc->last_waited;
				continue;
			}
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			device->running_pending = 1;

			spin_unlock(&device->io_lock);
			btrfs_requeue_work(&device->work);
			goto done;
		}
		/* unplug every 64 requests just for good measure */
		if (batch_run % 64 == 0) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}
	}
	cond_resched();
	if (again)
		goto loop;

	spin_lock(&device->io_lock);
	if (device->pending_bios.head || device->pending_sync_bios.head)
		goto loop_lock;
	spin_unlock(&device->io_lock);

done:
	blk_finish_plug(&plug);
	return 0;
}
static void pending_bios_fn(struct btrfs_work *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, work);
	run_scheduled_bios(device);
}
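
/*
 * Note a device discovered by scanning: find or create the fs_devices
 * for the super block's fsid, add the device if it is not already
 * known, and track the newest devid/transid seen for this filesystem.
 */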
static noinline int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	u64 found_transid = btrfs_super_generation(disk_super);
	char *name;

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
		if (!fs_devices)
			return -ENOMEM;
		INIT_LIST_HEAD(&fs_devices->devices);
		INIT_LIST_HEAD(&fs_devices->alloc_list);
		list_add(&fs_devices->list, &fs_uuids);
		memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
		mutex_init(&fs_devices->device_list_mutex);
		device = NULL;
	} else {
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
	}
	if (!device) {
		if (fs_devices->opened)
			return -EBUSY;

		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device) {
			/* we can safely leave the fs_devices entry around */
			return -ENOMEM;
		}
		device->devid = devid;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, disk_super->dev_item.uuid,
		       BTRFS_UUID_SIZE);
		spin_lock_init(&device->io_lock);
		device->name = kstrdup(path, GFP_NOFS);
		if (!device->name) {
			kfree(device);
			return -ENOMEM;
		}
		INIT_LIST_HEAD(&device->dev_alloc_list);

		/* init readahead state */
		spin_lock_init(&device->reada_lock);
		device->reada_curr_zone = NULL;
		atomic_set(&device->reada_in_flight, 0);
		device->reada_next = 0;
		INIT_RADIX_TREE(&device->reada_zones, GFP_NOFS & ~__GFP_WAIT);
		INIT_RADIX_TREE(&device->reada_extents, GFP_NOFS & ~__GFP_WAIT);

		mutex_lock(&fs_devices->device_list_mutex);
		list_add_rcu(&device->dev_list, &fs_devices->devices);
		mutex_unlock(&fs_devices->device_list_mutex);

		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	} else if (!device->name || strcmp(device->name, path)) {
		name = kstrdup(path, GFP_NOFS);
		if (!name)
			return -ENOMEM;
		kfree(device->name);
		device->name = name;
		if (device->missing) {
			fs_devices->missing_devices--;
			device->missing = 0;
		}
	}

	if (found_transid > fs_devices->latest_trans) {
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
	}
	*fs_devices_ret = fs_devices;
	return 0;
}
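
/*
 * Make a private copy of an fs_devices and all of its devices, used
 * when the on-disk filesystem UUID changes (seeding).
 */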
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;

	fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
	if (!fs_devices)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&fs_devices->devices);
	INIT_LIST_HEAD(&fs_devices->alloc_list);
	INIT_LIST_HEAD(&fs_devices->list);
	mutex_init(&fs_devices->device_list_mutex);
	fs_devices->latest_devid = orig->latest_devid;
	fs_devices->latest_trans = orig->latest_trans;
	memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));

	/* We have held the volume lock, it is safe to get the devices. */
	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device)
			goto error;

		device->name = kstrdup(orig_dev->name, GFP_NOFS);
		if (!device->name) {
			kfree(device);
			goto error;
		}

		device->devid = orig_dev->devid;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
		spin_lock_init(&device->io_lock);
		INIT_LIST_HEAD(&device->dev_list);
		INIT_LIST_HEAD(&device->dev_alloc_list);

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	return fs_devices;
error:
	free_fs_devices(fs_devices);
	return ERR_PTR(-ENOMEM);
}
int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *next;

	mutex_lock(&uuid_mutex);
again:
	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (device->in_fs_metadata)
			continue;

		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			device->writeable = 0;
			fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		kfree(device->name);
		kfree(device);
	}

	if (fs_devices->seed) {
		fs_devices = fs_devices->seed;
		goto again;
	}

	mutex_unlock(&uuid_mutex);
	return 0;
}
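
/*
 * Device teardown happens in two deferred stages: free_device() is the
 * RCU callback, and it hands the blkdev_put() and final kfree() off to
 * a workqueue via __free_device() so they run in process context.
 */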
static void __free_device(struct work_struct *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, rcu_work);

	if (device->bdev)
		blkdev_put(device->bdev, device->mode);

	kfree(device->name);
	kfree(device);
}

static void free_device(struct rcu_head *head)
{
	struct btrfs_device *device;

	device = container_of(head, struct btrfs_device, rcu);

	INIT_WORK(&device->rcu_work, __free_device);
	schedule_work(&device->rcu_work);
}
static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	if (--fs_devices->opened > 0)
		return 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		struct btrfs_device *new_device;

		if (device->bdev)
			fs_devices->open_devices--;

		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			fs_devices->rw_devices--;
		}

		if (device->can_discard)
			fs_devices->num_can_discard--;

		new_device = kmalloc(sizeof(*new_device), GFP_NOFS);
		BUG_ON(!new_device);
		memcpy(new_device, device, sizeof(*new_device));
		new_device->name = kstrdup(device->name, GFP_NOFS);
		BUG_ON(device->name && !new_device->name);
		new_device->bdev = NULL;
		new_device->writeable = 0;
		new_device->in_fs_metadata = 0;
		new_device->can_discard = 0;
		list_replace_rcu(&device->dev_list, &new_device->dev_list);

		call_rcu(&device->rcu, free_device);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = 0;

	return 0;
}
int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_fs_devices *seed_devices = NULL;
	int ret;

	mutex_lock(&uuid_mutex);
	ret = __btrfs_close_devices(fs_devices);
	if (!fs_devices->opened) {
		seed_devices = fs_devices->seed;
		fs_devices->seed = NULL;
	}
	mutex_unlock(&uuid_mutex);

	while (seed_devices) {
		fs_devices = seed_devices;
		seed_devices = fs_devices->seed;
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
	return ret;
}
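
/*
 * Open every device in the list, read and verify each super block, and
 * remember which device holds the newest generation so latest_bdev
 * points at the most up to date copy of the metadata.
 */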
static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
				fmode_t flags, void *holder)
{
	struct request_queue *q;
	struct block_device *bdev;
	struct list_head *head = &fs_devices->devices;
	struct btrfs_device *device;
	struct block_device *latest_bdev = NULL;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	u64 latest_devid = 0;
	u64 latest_transid = 0;
	u64 devid;
	int seeding = 1;
	int ret = 0;

	list_for_each_entry(device, head, dev_list) {
		if (device->bdev)
			continue;
		if (!device->name)
			continue;

		bdev = blkdev_get_by_path(device->name, flags, holder);
		if (IS_ERR(bdev)) {
			printk(KERN_INFO "open %s failed\n", device->name);
			goto error;
		}
		set_blocksize(bdev, 4096);

		bh = btrfs_read_dev_super(bdev);
		if (!bh)
			goto error_close;

		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		if (devid != device->devid)
			goto error_brelse;

		if (memcmp(device->uuid, disk_super->dev_item.uuid,
			   BTRFS_UUID_SIZE))
			goto error_brelse;

		device->generation = btrfs_super_generation(disk_super);
		if (!latest_transid || device->generation > latest_transid) {
			latest_devid = devid;
			latest_transid = device->generation;
			latest_bdev = bdev;
		}

		if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
			device->writeable = 0;
		} else {
			device->writeable = !bdev_read_only(bdev);
			seeding = 0;
		}

		q = bdev_get_queue(bdev);
		if (blk_queue_discard(q)) {
			device->can_discard = 1;
			fs_devices->num_can_discard++;
		}

		device->bdev = bdev;
		device->in_fs_metadata = 0;
		device->mode = flags;

		if (!blk_queue_nonrot(bdev_get_queue(bdev)))
			fs_devices->rotating = 1;

		fs_devices->open_devices++;
		if (device->writeable) {
			fs_devices->rw_devices++;
			list_add(&device->dev_alloc_list,
				 &fs_devices->alloc_list);
		}
		brelse(bh);
		continue;

error_brelse:
		brelse(bh);
error_close:
		blkdev_put(bdev, flags);
error:
		continue;
	}
	if (fs_devices->open_devices == 0) {
		ret = -EINVAL;
		goto out;
	}
	fs_devices->seeding = seeding;
	fs_devices->opened = 1;
	fs_devices->latest_bdev = latest_bdev;
	fs_devices->latest_devid = latest_devid;
	fs_devices->latest_trans = latest_transid;
	fs_devices->total_rw_bytes = 0;
out:
	return ret;
}
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	mutex_lock(&uuid_mutex);
	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		ret = __btrfs_open_devices(fs_devices, flags, holder);
	}
	mutex_unlock(&uuid_mutex);
	return ret;
}
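
/*
 * Probe a single device path for a btrfs super block; on success the
 * device is recorded in the global list via device_list_add() and its
 * label/fsid, devid and transid are logged.
 */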
int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
			  struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_super_block *disk_super;
	struct block_device *bdev;
	struct buffer_head *bh;
	int ret;
	u64 devid;
	u64 transid;

	mutex_lock(&uuid_mutex);

	bdev = blkdev_get_by_path(path, flags, holder);
	if (IS_ERR(bdev)) {
		ret = PTR_ERR(bdev);
		goto error;
	}

	ret = set_blocksize(bdev, 4096);
	if (ret)
		goto error_close;
	bh = btrfs_read_dev_super(bdev);
	if (!bh) {
		ret = -EINVAL;
		goto error_close;
	}
	disk_super = (struct btrfs_super_block *)bh->b_data;
	devid = btrfs_stack_device_id(&disk_super->dev_item);
	transid = btrfs_super_generation(disk_super);
	if (disk_super->label[0])
		printk(KERN_INFO "device label %s ", disk_super->label);
	else
		printk(KERN_INFO "device fsid %pU ", disk_super->fsid);
	printk(KERN_CONT "devid %llu transid %llu %s\n",
	       (unsigned long long)devid, (unsigned long long)transid, path);
	ret = device_list_add(path, disk_super, devid, fs_devices_ret);

	brelse(bh);
error_close:
	blkdev_put(bdev, flags);
error:
	mutex_unlock(&uuid_mutex);
	return ret;
}
/* helper to account the used device space in the range */
int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
				   u64 end, u64 *length)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 extent_end;
	int ret;
	int slot;
	struct extent_buffer *l;

	if (start >= device->total_bytes)
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;
			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
			goto next;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (key.offset <= start && extent_end > end) {
			*length = end - start + 1;
			break;
		} else if (key.offset <= start && extent_end > start)
			*length += extent_end - start;
		else if (key.offset > start && extent_end <= end)
			*length += extent_end - key.offset;
		else if (key.offset > start && key.offset <= end) {
			*length += end - key.offset + 1;
			break;
		} else if (key.offset > end)
			break;

next:
		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * find_free_dev_extent - find free space in the specified device
 * @device:	the device which we search the free space in
 * @num_bytes:	the size of the free space that we need
 * @start:	store the start of the free space.
 * @len:	the size of the free space that we find, or the size of the max
 *		free space if we don't find suitable free space
 *
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 *
 * @start is used to store the start of the free space if we find it. But if
 * we don't find suitable free space, it will be used to store the start
 * position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 */
int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_start;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;

	/* FIXME use last free of some kind */

	/* we don't want to overwrite the superblock on the drive,
	 * so we make sure to start at an offset of at least 1MB
	 */
	search_start = max(root->fs_info->alloc_start, 1024ull * 1024);

	max_hole_start = search_start;
	max_hole_size = 0;
	hole_size = 0;

	if (search_start >= search_end) {
		ret = -ENOSPC;
		goto error;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto error;
	}

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;
			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than what we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start)
		hole_size = search_end - search_start;

	if (hole_size > max_hole_size) {
		max_hole_start = search_start;
		max_hole_size = hole_size;
	}

	if (hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

out:
	btrfs_free_path(path);
error:
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}
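
/*
 * Remove the dev extent item that covers @start on @device and return
 * the space to the free_chunk accounting.
 */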
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_device *device,
				 u64 start)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		btrfs_release_path(path);
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	}

	if (device->bytes_used > 0) {
		u64 len = btrfs_dev_extent_length(leaf, extent);
		device->bytes_used -= len;
		spin_lock(&root->fs_info->free_chunk_lock);
		root->fs_info->free_chunk_space += len;
		spin_unlock(&root->fs_info->free_chunk_lock);
	}
	ret = btrfs_del_item(trans, root, path);

out:
	btrfs_free_path(path);
	return ret;
}
int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
			   struct btrfs_device *device,
			   u64 chunk_tree, u64 chunk_objectid,
			   u64 chunk_offset, u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	WARN_ON(!device->in_fs_metadata);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	BUG_ON(ret);

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
		    BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);
	return ret;
}
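
/*
 * find_next_chunk and find_next_devid walk the chunk tree backwards
 * from the largest possible key to compute, respectively, the offset
 * just past the last chunk and the next unused device id.
 */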
static noinline int find_next_chunk(struct btrfs_root *root,
				    u64 objectid, u64 *offset)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_key found_key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
	if (ret) {
		*offset = 0;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != objectid)
			*offset = 0;
		else {
			chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
					       struct btrfs_chunk);
			*offset = found_key.offset +
				btrfs_chunk_length(path->nodes[0], chunk);
		}
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*objectid = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*objectid = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = (unsigned long)btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
static int btrfs_rm_dev_item(struct btrfs_root *root,
			     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;
	lock_chunks(root);

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_free_path(path);
	unlock_chunks(root);
	btrfs_commit_transaction(trans, root);
	return ret;
}
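
/*
 * Remove a device from a mounted filesystem: check the minimum device
 * counts for the RAID profiles in use, migrate the data away with
 * btrfs_shrink_device(), delete the device items, and wipe the super
 * block magic so the device no longer scans as btrfs.
 */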
int btrfs_rm_device(struct btrfs_root *root, char *device_path)
{
	struct btrfs_device *device;
	struct btrfs_device *next_device;
	struct block_device *bdev;
	struct buffer_head *bh = NULL;
	struct btrfs_super_block *disk_super;
	struct btrfs_fs_devices *cur_devices;
	u64 all_avail;
	u64 devid;
	u64 num_devices;
	u8 *dev_uuid;
	int ret = 0;
	bool clear_super = false;

	mutex_lock(&uuid_mutex);

	all_avail = root->fs_info->avail_data_alloc_bits |
		    root->fs_info->avail_system_alloc_bits |
		    root->fs_info->avail_metadata_alloc_bits;

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
	    root->fs_info->fs_devices->num_devices <= 4) {
		printk(KERN_ERR "btrfs: unable to go below four devices "
		       "on raid10\n");
		ret = -EINVAL;
		goto out;
	}

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
	    root->fs_info->fs_devices->num_devices <= 2) {
		printk(KERN_ERR "btrfs: unable to go below two "
		       "devices on raid1\n");
		ret = -EINVAL;
		goto out;
	}

	if (strcmp(device_path, "missing") == 0) {
		struct list_head *devices;
		struct btrfs_device *tmp;

		device = NULL;
		devices = &root->fs_info->fs_devices->devices;
		/*
		 * It is safe to read the devices since the volume_mutex
		 * is held.
		 */
		list_for_each_entry(tmp, devices, dev_list) {
			if (tmp->in_fs_metadata && !tmp->bdev) {
				device = tmp;
				break;
			}
		}
		bdev = NULL;
		bh = NULL;
		disk_super = NULL;
		if (!device) {
			printk(KERN_ERR "btrfs: no missing devices found to "
			       "remove\n");
			goto out;
		}
	} else {
		bdev = blkdev_get_by_path(device_path, FMODE_READ | FMODE_EXCL,
					  root->fs_info->bdev_holder);
		if (IS_ERR(bdev)) {
			ret = PTR_ERR(bdev);
			goto out;
		}

		set_blocksize(bdev, 4096);
		bh = btrfs_read_dev_super(bdev);
		if (!bh) {
			ret = -EINVAL;
			goto error_close;
		}
		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		dev_uuid = disk_super->dev_item.uuid;
		device = btrfs_find_device(root, devid, dev_uuid,
					   disk_super->fsid);
		if (!device) {
			ret = -ENOENT;
			goto error_brelse;
		}
	}

	if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
		printk(KERN_ERR "btrfs: unable to remove the only writeable "
		       "device\n");
		ret = -EINVAL;
		goto error_brelse;
	}

	if (device->writeable) {
		lock_chunks(root);
		list_del_init(&device->dev_alloc_list);
		unlock_chunks(root);
		root->fs_info->fs_devices->rw_devices--;
		clear_super = true;
	}

	ret = btrfs_shrink_device(device, 0);
	if (ret)
		goto error_undo;

	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
	if (ret)
		goto error_undo;

	spin_lock(&root->fs_info->free_chunk_lock);
	root->fs_info->free_chunk_space = device->total_bytes -
		device->bytes_used;
	spin_unlock(&root->fs_info->free_chunk_lock);

	device->in_fs_metadata = 0;
	btrfs_scrub_cancel_dev(root, device);

	/*
	 * the device list mutex makes sure that we don't change
	 * the device list while someone else is writing out all
	 * the device supers.
	 */

	cur_devices = device->fs_devices;
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_del_rcu(&device->dev_list);

	device->fs_devices->num_devices--;

	if (device->missing)
		root->fs_info->fs_devices->missing_devices--;

	next_device = list_entry(root->fs_info->fs_devices->devices.next,
				 struct btrfs_device, dev_list);
	if (device->bdev == root->fs_info->sb->s_bdev)
		root->fs_info->sb->s_bdev = next_device->bdev;
	if (device->bdev == root->fs_info->fs_devices->latest_bdev)
		root->fs_info->fs_devices->latest_bdev = next_device->bdev;

	if (device->bdev)
		device->fs_devices->open_devices--;

	call_rcu(&device->rcu, free_device);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
	btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);

	if (cur_devices->open_devices == 0) {
		struct btrfs_fs_devices *fs_devices;
		fs_devices = root->fs_info->fs_devices;
		while (fs_devices) {
			if (fs_devices->seed == cur_devices)
				break;
			fs_devices = fs_devices->seed;
		}
		fs_devices->seed = cur_devices->seed;
		cur_devices->seed = NULL;
		lock_chunks(root);
		__btrfs_close_devices(cur_devices);
		unlock_chunks(root);
		free_fs_devices(cur_devices);
	}

	/*
	 * at this point, the device is zero sized. We want to
	 * remove it from the devices list and zero out the old super
	 */
	if (clear_super) {
		/* make sure this device isn't detected as part of
		 * the FS anymore
		 */
		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
		set_buffer_dirty(bh);
		sync_dirty_buffer(bh);
	}

	ret = 0;

error_brelse:
	brelse(bh);
error_close:
	if (bdev)
		blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
out:
	mutex_unlock(&uuid_mutex);
	return ret;
error_undo:
	if (device->writeable) {
		lock_chunks(root);
		list_add(&device->dev_alloc_list,
			 &root->fs_info->fs_devices->alloc_list);
		unlock_chunks(root);
		root->fs_info->fs_devices->rw_devices++;
	}
	goto error_brelse;
}
/*
 * does all the dirty work required for changing the file system's UUID.
 */
static int btrfs_prepare_sprout(struct btrfs_root *root)
{
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	struct btrfs_fs_devices *old_devices;
	struct btrfs_fs_devices *seed_devices;
	struct btrfs_super_block *disk_super = root->fs_info->super_copy;
	struct btrfs_device *device;
	u64 super_flags;

	BUG_ON(!mutex_is_locked(&uuid_mutex));
	if (!fs_devices->seeding)
		return -EINVAL;

	seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
	if (!seed_devices)
		return -ENOMEM;

	old_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(old_devices)) {
		kfree(seed_devices);
		return PTR_ERR(old_devices);
	}

	list_add(&old_devices->list, &fs_uuids);

	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
	seed_devices->opened = 1;
	INIT_LIST_HEAD(&seed_devices->devices);
	INIT_LIST_HEAD(&seed_devices->alloc_list);
	mutex_init(&seed_devices->device_list_mutex);

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
			     synchronize_rcu);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
	list_for_each_entry(device, &seed_devices->devices, dev_list) {
		device->fs_devices = seed_devices;
	}

	fs_devices->seeding = 0;
	fs_devices->num_devices = 0;
	fs_devices->open_devices = 0;
	fs_devices->seed = seed_devices;

	generate_random_uuid(fs_devices->fsid);
	memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	super_flags = btrfs_super_flags(disk_super) &
		      ~BTRFS_SUPER_FLAG_SEEDING;
	btrfs_set_super_flags(disk_super, super_flags);

	return 0;
}
/*
 * store the expected generation for seed devices in device items.
 */
static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dev_item *dev_item;
	struct btrfs_device *device;
	struct btrfs_key key;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];
	u64 devid;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	root = root->fs_info->chunk_root;
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = BTRFS_DEV_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
		if (ret < 0)
			goto error;

		leaf = path->nodes[0];
next_slot:
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret > 0)
				break;
			if (ret < 0)
				goto error;
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
			btrfs_release_path(path);
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
		    key.type != BTRFS_DEV_ITEM_KEY)
			break;

		dev_item = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_dev_item);
		devid = btrfs_device_id(leaf, dev_item);
		read_extent_buffer(leaf, dev_uuid,
				   (unsigned long)btrfs_device_uuid(dev_item),
				   BTRFS_UUID_SIZE);
		read_extent_buffer(leaf, fs_uuid,
				   (unsigned long)btrfs_device_fsid(dev_item),
				   BTRFS_UUID_SIZE);
		device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
		BUG_ON(!device);

		if (device->fs_devices->seeding) {
			btrfs_set_device_generation(leaf, dev_item,
						    device->generation);
			btrfs_mark_buffer_dirty(leaf);
		}

		path->slots[0]++;
		goto next_slot;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
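
/*
 * Add a new device to a mounted filesystem. If the filesystem is
 * currently a read-only seed, this sprouts a writable filesystem on
 * top of it via btrfs_prepare_sprout()/btrfs_finish_sprout().
 */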
int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
{
	struct request_queue *q;
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device;
	struct block_device *bdev;
	struct list_head *devices;
	struct super_block *sb = root->fs_info->sb;
	u64 total_bytes;
	int seeding_dev = 0;
	int ret = 0;

	if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
		return -EINVAL;

	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
				  root->fs_info->bdev_holder);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	if (root->fs_info->fs_devices->seeding) {
		seeding_dev = 1;
		down_write(&sb->s_umount);
		mutex_lock(&uuid_mutex);
	}

	filemap_write_and_wait(bdev->bd_inode->i_mapping);

	devices = &root->fs_info->fs_devices->devices;
	/*
	 * we have the volume lock, so we don't need the extra
	 * device list mutex while reading the list here.
	 */
	list_for_each_entry(device, devices, dev_list) {
		if (device->bdev == bdev) {
			ret = -EEXIST;
			goto error;
		}
	}

	device = kzalloc(sizeof(*device), GFP_NOFS);
	if (!device) {
		/* we can safely leave the fs_devices entry around */
		ret = -ENOMEM;
		goto error;
	}

	device->name = kstrdup(device_path, GFP_NOFS);
	if (!device->name) {
		kfree(device);
		ret = -ENOMEM;
		goto error;
	}

	ret = find_next_devid(root, &device->devid);
	if (ret) {
		kfree(device->name);
		kfree(device);
		goto error;
	}

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		kfree(device->name);
		kfree(device);
		ret = PTR_ERR(trans);
		goto error;
	}

	lock_chunks(root);

	q = bdev_get_queue(bdev);
	if (blk_queue_discard(q))
		device->can_discard = 1;
	device->writeable = 1;
	device->work.func = pending_bios_fn;
	generate_random_uuid(device->uuid);
	spin_lock_init(&device->io_lock);
	device->generation = trans->transid;
	device->io_width = root->sectorsize;
	device->io_align = root->sectorsize;
	device->sector_size = root->sectorsize;
	device->total_bytes = i_size_read(bdev->bd_inode);
	device->disk_total_bytes = device->total_bytes;
	device->dev_root = root->fs_info->dev_root;
	device->bdev = bdev;
	device->in_fs_metadata = 1;
	device->mode = FMODE_EXCL;
	set_blocksize(device->bdev, 4096);

	if (seeding_dev) {
		sb->s_flags &= ~MS_RDONLY;
		ret = btrfs_prepare_sprout(root);
		BUG_ON(ret);
	}

	device->fs_devices = root->fs_info->fs_devices;

	/*
	 * we don't want write_supers to jump in here with our device
	 * half setup
	 */
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
	list_add(&device->dev_alloc_list,
		 &root->fs_info->fs_devices->alloc_list);
	root->fs_info->fs_devices->num_devices++;
	root->fs_info->fs_devices->open_devices++;
	root->fs_info->fs_devices->rw_devices++;
	if (device->can_discard)
		root->fs_info->fs_devices->num_can_discard++;
	root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;

	spin_lock(&root->fs_info->free_chunk_lock);
	root->fs_info->free_chunk_space += device->total_bytes;
	spin_unlock(&root->fs_info->free_chunk_lock);

	if (!blk_queue_nonrot(bdev_get_queue(bdev)))
		root->fs_info->fs_devices->rotating = 1;

	total_bytes = btrfs_super_total_bytes(root->fs_info->super_copy);
	btrfs_set_super_total_bytes(root->fs_info->super_copy,
				    total_bytes + device->total_bytes);

	total_bytes = btrfs_super_num_devices(root->fs_info->super_copy);
	btrfs_set_super_num_devices(root->fs_info->super_copy,
				    total_bytes + 1);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	if (seeding_dev) {
		ret = init_first_rw_device(trans, root, device);
		BUG_ON(ret);
		ret = btrfs_finish_sprout(trans, root);
		BUG_ON(ret);
	} else {
		ret = btrfs_add_device(trans, root, device);
	}

	/*
	 * we've got more storage, clear any full flags on the space
	 * infos
	 */
	btrfs_clear_space_info_full(root->fs_info);

	unlock_chunks(root);
	btrfs_commit_transaction(trans, root);

	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);

		ret = btrfs_relocate_sys_chunks(root);
		BUG_ON(ret);
	}

	return ret;
error:
	blkdev_put(bdev, FMODE_EXCL);
	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);
	}
	return ret;
}
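
/*
 * Write the in-memory state of @device back into its dev item in the
 * chunk tree.
 */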
static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
					struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = device->dev_root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_mark_buffer_dirty(leaf);

out:
	btrfs_free_path(path);
	return ret;
}
static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
			       struct btrfs_device *device, u64 new_size)
{
	struct btrfs_super_block *super_copy =
		device->dev_root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 diff = new_size - device->total_bytes;

	if (!device->writeable)
		return -EACCES;
	if (new_size <= device->total_bytes)
		return -EINVAL;

	btrfs_set_super_total_bytes(super_copy, old_total + diff);
	device->fs_devices->total_rw_bytes += diff;

	device->total_bytes = new_size;
	device->disk_total_bytes = new_size;
	btrfs_clear_space_info_full(device->dev_root->fs_info);

	return btrfs_update_device(trans, device);
}

int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size)
{
	int ret;
	lock_chunks(device->dev_root);
	ret = __btrfs_grow_device(trans, device, new_size);
	unlock_chunks(device->dev_root);
	return ret;
}
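
/*
 * Delete the chunk item at @chunk_offset from the chunk tree.
 */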
static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    u64 chunk_tree, u64 chunk_objectid,
			    u64 chunk_offset)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	root = root->fs_info->chunk_root;
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = chunk_objectid;
	key.offset = chunk_offset;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	BUG_ON(ret);

	ret = btrfs_del_item(trans, root, path);

	btrfs_free_path(path);
	return ret;
}
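
/*
 * System chunks are duplicated in the super block's sys_chunk_array;
 * remove the matching entry by shifting the tail of the array down and
 * shrinking the recorded array size.
 */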
static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
			       chunk_offset)
{
	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)(ptr + len);
			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
			len += btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
		if (key.objectid == chunk_objectid &&
		    key.offset == chunk_offset) {
			memmove(ptr, ptr + len, array_size - (cur + len));
			array_size -= len;
			btrfs_set_super_sys_array_size(super_copy, array_size);
		} else {
			ptr += len;
			cur += len;
		}
	}
	return ret;
}
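
/*
 * Relocate one chunk: move all of its extents elsewhere, then delete
 * the dev extents, the chunk item, the sys_chunk_array copy (for
 * system chunks), the block group and the cached extent mapping.
 */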
static int btrfs_relocate_chunk(struct btrfs_root *root,
				u64 chunk_tree, u64 chunk_objectid,
				u64 chunk_offset)
{
	struct extent_map_tree *em_tree;
	struct btrfs_root *extent_root;
	struct btrfs_trans_handle *trans;
	struct extent_map *em;
	struct map_lookup *map;
	int ret;
	int i;

	root = root->fs_info->chunk_root;
	extent_root = root->fs_info->extent_root;
	em_tree = &root->fs_info->mapping_tree.map_tree;

	ret = btrfs_can_relocate(extent_root, chunk_offset);
	if (ret)
		return -ENOSPC;

	/* step one, relocate all the extents inside this chunk */
	ret = btrfs_relocate_block_group(extent_root, chunk_offset);
	if (ret)
		return ret;

	trans = btrfs_start_transaction(root, 0);
	BUG_ON(IS_ERR(trans));

	lock_chunks(root);

	/*
	 * step two, delete the device extents and the
	 * chunk tree entries
	 */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	read_unlock(&em_tree->lock);

	BUG_ON(em->start > chunk_offset ||
	       em->start + em->len < chunk_offset);
	map = (struct map_lookup *)em->bdev;

	for (i = 0; i < map->num_stripes; i++) {
		ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
					    map->stripes[i].physical);
		BUG_ON(ret);

		if (map->stripes[i].dev) {
			ret = btrfs_update_device(trans, map->stripes[i].dev);
			BUG_ON(ret);
		}
	}
	ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
			       chunk_offset);
	BUG_ON(ret);

	trace_btrfs_chunk_free(root, map, chunk_offset, em->len);

	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
		BUG_ON(ret);
	}

	ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
	BUG_ON(ret);

	write_lock(&em_tree->lock);
	remove_extent_mapping(em_tree, em);
	write_unlock(&em_tree->lock);

	kfree(map);
	em->bdev = NULL;

	/* once for the tree */
	free_extent_map(em);
	/* once for us */
	free_extent_map(em);

	unlock_chunks(root);
	btrfs_end_transaction(trans, root);
	return 0;
}
static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
{
	struct btrfs_root *chunk_root = root->fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_chunk *chunk;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u64 chunk_tree = chunk_root->root_key.objectid;
	u64 chunk_type;
	bool retried = false;
	int failed = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

again:
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0)
			goto error;

		ret = btrfs_previous_item(chunk_root, path, key.objectid,
					  key.type);
		if (ret)
			break;

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		chunk = btrfs_item_ptr(leaf, path->slots[0],
				       struct btrfs_chunk);
		chunk_type = btrfs_chunk_type(leaf, chunk);
		btrfs_release_path(path);

		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
			ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
						   found_key.objectid,
						   found_key.offset);
			if (ret == -ENOSPC)
				failed++;
			else if (ret)
				BUG();
		}

		if (found_key.offset == 0)
			break;
		key.offset = found_key.offset - 1;
	}
	ret = 0;
	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
	} else if (failed && retried) {
		ret = -ENOSPC;
	}
error:
	btrfs_free_path(path);
	return ret;
}
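
/*
 * The balance item persists the balance arguments in the tree root so
 * that an interrupted balance can be resumed or cancelled later.
 */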
static int insert_balance_item(struct btrfs_root *root,
			       struct btrfs_balance_control *bctl)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_balance_item *item;
	struct btrfs_disk_balance_args disk_bargs;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	int ret, err;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	key.objectid = BTRFS_BALANCE_OBJECTID;
	key.type = BTRFS_BALANCE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);

	memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));

	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
	btrfs_set_balance_data(leaf, item, &disk_bargs);
	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
	btrfs_set_balance_meta(leaf, item, &disk_bargs);
	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
	btrfs_set_balance_sys(leaf, item, &disk_bargs);

	btrfs_set_balance_flags(leaf, item, bctl->flags);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	err = btrfs_commit_transaction(trans, root);
	if (err && !ret)
		ret = err;
	return ret;
}
static int del_balance_item(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret, err;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	key.objectid = BTRFS_BALANCE_OBJECTID;
	key.type = BTRFS_BALANCE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}
	if (ret < 0)
		goto out;

	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_free_path(path);
	err = btrfs_commit_transaction(trans, root);
	if (err && !ret)
		ret = err;
	return ret;
}
/*
 * This is a heuristic used to reduce the number of chunks balanced on
 * resume after balance was interrupted.
 */
static void update_balance_args(struct btrfs_balance_control *bctl)
{
	/*
	 * Turn on soft mode for chunk types that were being converted.
	 */
	if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
	if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;

	/*
	 * Turn on usage filter if it is not already used. The idea is
	 * that chunks that we have already balanced should be
	 * reasonably full. Don't do it for chunks that are being
	 * converted - that will keep us from relocating unconverted
	 * (albeit full) chunks.
	 */
	if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->data.usage = 90;
	}
	if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->sys.usage = 90;
	}
	if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->meta.usage = 90;
	}
}
/*
 * Should be called with both balance and volume mutexes held to
 * serialize other volume operations (add_dev/rm_dev/resize) with
 * restriper. Same goes for unset_balance_control.
 */
static void set_balance_control(struct btrfs_balance_control *bctl)
{
	struct btrfs_fs_info *fs_info = bctl->fs_info;

	BUG_ON(fs_info->balance_ctl);

	spin_lock(&fs_info->balance_lock);
	fs_info->balance_ctl = bctl;
	spin_unlock(&fs_info->balance_lock);
}

static void unset_balance_control(struct btrfs_fs_info *fs_info)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;

	BUG_ON(!fs_info->balance_ctl);

	spin_lock(&fs_info->balance_lock);
	fs_info->balance_ctl = NULL;
	spin_unlock(&fs_info->balance_lock);

	kfree(bctl);
}
/*
 * Balance filters. Return 1 if chunk should be filtered out
 * (should not be balanced).
 */
static int chunk_profiles_filter(u64 chunk_profile,
				 struct btrfs_balance_args *bargs)
{
	chunk_profile &= BTRFS_BLOCK_GROUP_PROFILE_MASK;

	if (chunk_profile == 0)
		chunk_profile = BTRFS_AVAIL_ALLOC_BIT_SINGLE;

	if (bargs->profiles & chunk_profile)
		return 0;

	return 1;
}
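
/*
 * div_factor_fine scales @num by @factor/100; chunk_usage_filter uses
 * it below to turn the usage percentage into a byte threshold.
 */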
static u64 div_factor_fine(u64 num, int factor)
{
	if (factor == 100)
		return num;
	num *= factor;
	do_div(num, 100);
	return num;
}
static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
			      struct btrfs_balance_args *bargs)
{
	struct btrfs_block_group_cache *cache;
	u64 chunk_used, user_thresh;
	int ret = 1;

	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
	chunk_used = btrfs_block_group_used(&cache->item);

	user_thresh = div_factor_fine(cache->key.offset, bargs->usage);
	if (chunk_used < user_thresh)
		ret = 0;

	btrfs_put_block_group(cache);
	return ret;
}
static int chunk_devid_filter(struct extent_buffer *leaf,
			      struct btrfs_chunk *chunk,
			      struct btrfs_balance_args *bargs)
{
	struct btrfs_stripe *stripe;
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	int i;

	for (i = 0; i < num_stripes; i++) {
		stripe = btrfs_stripe_nr(chunk, i);
		if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
			return 0;
	}

	return 1;
}
/* [pstart, pend) */
static int chunk_drange_filter(struct extent_buffer *leaf,
			       struct btrfs_chunk *chunk,
			       u64 chunk_offset,
			       struct btrfs_balance_args *bargs)
{
	struct btrfs_stripe *stripe;
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	u64 stripe_offset;
	u64 stripe_length;
	int factor;
	int i;

	if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
		return 0;

	if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
	     BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10))
		factor = 2;
	else
		factor = 1;
	factor = num_stripes / factor;

	for (i = 0; i < num_stripes; i++) {
		stripe = btrfs_stripe_nr(chunk, i);
		if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
			continue;

		stripe_offset = btrfs_stripe_offset(leaf, stripe);
		stripe_length = btrfs_chunk_length(leaf, chunk);
		do_div(stripe_length, factor);

		if (stripe_offset < bargs->pend &&
		    stripe_offset + stripe_length > bargs->pstart)
			return 0;
	}

	return 1;
}
/* [vstart, vend) */
static int chunk_vrange_filter(struct extent_buffer *leaf,
			       struct btrfs_chunk *chunk,
			       u64 chunk_offset,
			       struct btrfs_balance_args *bargs)
{
	if (chunk_offset < bargs->vend &&
	    chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
		/* at least part of the chunk is inside this vrange */
		return 0;

	return 1;
}
static int chunk_soft_convert_filter(u64 chunk_profile,
				     struct btrfs_balance_args *bargs)
{
	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
		return 0;

	chunk_profile &= BTRFS_BLOCK_GROUP_PROFILE_MASK;

	if (chunk_profile == 0)
		chunk_profile = BTRFS_AVAIL_ALLOC_BIT_SINGLE;

	if (bargs->target & chunk_profile)
		return 1;

	return 0;
}
static int should_balance_chunk(struct btrfs_root *root,
				struct extent_buffer *leaf,
				struct btrfs_chunk *chunk, u64 chunk_offset)
{
	struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
	struct btrfs_balance_args *bargs = NULL;
	u64 chunk_type = btrfs_chunk_type(leaf, chunk);

	/* type filter */
	if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
	      (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
		return 0;
	}

	if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
		bargs = &bctl->data;
	else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
		bargs = &bctl->sys;
	else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
		bargs = &bctl->meta;

	/* profiles filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
	    chunk_profiles_filter(chunk_type, bargs)) {
		return 0;
	}

	/* usage filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) {
		return 0;
	}

	/* devid filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
	    chunk_devid_filter(leaf, chunk, bargs)) {
		return 0;
	}

	/* drange filter, makes sense only with devid filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
	    chunk_drange_filter(leaf, chunk, chunk_offset, bargs)) {
		return 0;
	}

	/* vrange filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
	    chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
		return 0;
	}

	/* soft profile changing mode */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
	    chunk_soft_convert_filter(chunk_type, bargs)) {
		return 0;
	}

	return 1;
}
static u64 div_factor(u64 num, int factor)
{
	if (factor == 10)
		return num;
	num *= factor;
	do_div(num, 10);
	return num;
}
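
/*
 * The main balance loop: free up some room on every device, then walk
 * the chunk tree from the highest key, first counting the chunks that
 * pass the filters and then relocating them one by one, tallying
 * ENOSPC failures as it goes.
 */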
static int __btrfs_balance(struct btrfs_fs_info *fs_info)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	struct btrfs_root *chunk_root = fs_info->chunk_root;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct list_head *devices;
	struct btrfs_device *device;
	u64 old_size;
	u64 size_to_free;
	struct btrfs_chunk *chunk;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_trans_handle *trans;
	struct extent_buffer *leaf;
	int slot;
	int ret;
	int enospc_errors = 0;
	bool counting = true;

	/* step one, make some room on all the devices */
	devices = &fs_info->fs_devices->devices;
	list_for_each_entry(device, devices, dev_list) {
		old_size = device->total_bytes;
		size_to_free = div_factor(old_size, 1);
		size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
		if (!device->writeable ||
		    device->total_bytes - device->bytes_used > size_to_free)
			continue;

		ret = btrfs_shrink_device(device, old_size - size_to_free);
		if (ret == -ENOSPC)
			break;
		BUG_ON(ret);

		trans = btrfs_start_transaction(dev_root, 0);
		BUG_ON(IS_ERR(trans));

		ret = btrfs_grow_device(trans, device, old_size);
		BUG_ON(ret);

		btrfs_end_transaction(trans, dev_root);
	}

	/* step two, relocate all the chunks */
	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto error;
	}

	/* zero out stat counters */
	spin_lock(&fs_info->balance_lock);
	memset(&bctl->stat, 0, sizeof(bctl->stat));
	spin_unlock(&fs_info->balance_lock);
again:
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
		    atomic_read(&fs_info->balance_cancel_req)) {
			ret = -ECANCELED;
			goto error;
		}

		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0)
			goto error;

		/*
		 * this shouldn't happen, it means the last relocate
		 * failed
		 */
		if (ret == 0)
			BUG(); /* FIXME break ? */

		ret = btrfs_previous_item(chunk_root, path, 0,
					  BTRFS_CHUNK_ITEM_KEY);
		if (ret) {
			ret = 0;
			break;
		}

		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.objectid != key.objectid)
			break;

		/* chunk zero is special */
		if (found_key.offset == 0)
			break;

		chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);

		if (!counting) {
			spin_lock(&fs_info->balance_lock);
			bctl->stat.considered++;
			spin_unlock(&fs_info->balance_lock);
		}

		ret = should_balance_chunk(chunk_root, leaf, chunk,
					   found_key.offset);
		btrfs_release_path(path);
		if (!ret)
			goto loop;

		if (counting) {
			spin_lock(&fs_info->balance_lock);
			bctl->stat.expected++;
			spin_unlock(&fs_info->balance_lock);
			goto loop;
		}

		ret = btrfs_relocate_chunk(chunk_root,
					   chunk_root->root_key.objectid,
					   found_key.objectid,
					   found_key.offset);
		if (ret && ret != -ENOSPC)
			goto error;
		if (ret == -ENOSPC) {
			enospc_errors++;
		} else {
			spin_lock(&fs_info->balance_lock);
			bctl->stat.completed++;
			spin_unlock(&fs_info->balance_lock);
		}
loop:
		key.offset = found_key.offset - 1;
	}

	if (counting) {
		btrfs_release_path(path);
		counting = false;
		goto again;
	}
error:
	btrfs_free_path(path);
	if (enospc_errors) {
		printk(KERN_INFO "btrfs: %d enospc errors during balance\n",
		       enospc_errors);
		if (!ret)
			ret = -ENOSPC;
	}

	return ret;
}
2591 static inline int balance_need_close(struct btrfs_fs_info *fs_info)
2593 /* cancel requested || normal exit path */
2594 return atomic_read(&fs_info->balance_cancel_req) ||
2595 (atomic_read(&fs_info->balance_pause_req) == 0 &&
2596 atomic_read(&fs_info->balance_cancel_req) == 0);
2599 static void __cancel_balance(struct btrfs_fs_info *fs_info)
2603 unset_balance_control(fs_info);
2604 ret = del_balance_item(fs_info->tree_root);
2608 void update_ioctl_balance_args(struct btrfs_fs_info *fs_info, int lock,
2609 struct btrfs_ioctl_balance_args *bargs);
2612 * Should be called with both balance and volume mutexes held
2614 int btrfs_balance(struct btrfs_balance_control *bctl,
2615 struct btrfs_ioctl_balance_args *bargs)
2617 struct btrfs_fs_info *fs_info = bctl->fs_info;
2621 if (btrfs_fs_closing(fs_info) ||
2622 atomic_read(&fs_info->balance_pause_req) ||
2623 atomic_read(&fs_info->balance_cancel_req)) {
2629 * In case of mixed groups both data and metadata should be picked,
2630 * and identical balance options should be given for both of them.
2632 allowed = btrfs_super_incompat_flags(fs_info->super_copy);
2633 if ((allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
2634 (bctl->flags & (BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA))) {
2635 if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
2636 !(bctl->flags & BTRFS_BALANCE_METADATA) ||
2637 memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
2638 printk(KERN_ERR "btrfs: with mixed groups data and "
2639 "metadata balance options must be the same\n");
2646 * Profile changing sanity checks. Skip them if a simple
2647 * balance is requested.
2649 if (!((bctl->data.flags | bctl->sys.flags | bctl->meta.flags) &
2650 BTRFS_BALANCE_ARGS_CONVERT))
2653 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
2654 if (fs_info->fs_devices->num_devices == 1)
2655 allowed |= BTRFS_BLOCK_GROUP_DUP;
2656 else if (fs_info->fs_devices->num_devices < 4)
2657 allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
2659 allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
2660 BTRFS_BLOCK_GROUP_RAID10);
2662 if (!profile_is_valid(bctl->data.target, 1) ||
2663 bctl->data.target & ~allowed) {
2664 printk(KERN_ERR "btrfs: unable to start balance with target "
2665 "data profile %llu\n",
2666 (unsigned long long)bctl->data.target);
2670 if (!profile_is_valid(bctl->meta.target, 1) ||
2671 bctl->meta.target & ~allowed) {
2672 printk(KERN_ERR "btrfs: unable to start balance with target "
2673 "metadata profile %llu\n",
2674 (unsigned long long)bctl->meta.target);
2678 if (!profile_is_valid(bctl->sys.target, 1) ||
2679 bctl->sys.target & ~allowed) {
2680 printk(KERN_ERR "btrfs: unable to start balance with target "
2681 "system profile %llu\n",
2682 (unsigned long long)bctl->sys.target);
2687 if (bctl->data.target & BTRFS_BLOCK_GROUP_DUP) {
2688 printk(KERN_ERR "btrfs: dup for data is not allowed\n");
2693 /* allow to reduce meta or sys integrity only if force set */
2694 allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
2695 BTRFS_BLOCK_GROUP_RAID10;
2696 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2697 (fs_info->avail_system_alloc_bits & allowed) &&
2698 !(bctl->sys.target & allowed)) ||
2699 ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2700 (fs_info->avail_metadata_alloc_bits & allowed) &&
2701 !(bctl->meta.target & allowed))) {
2702 if (bctl->flags & BTRFS_BALANCE_FORCE) {
2703 printk(KERN_INFO "btrfs: force reducing metadata "
2706 printk(KERN_ERR "btrfs: balance will reduce metadata "
2707 "integrity, use force if you want this\n");
2714 ret = insert_balance_item(fs_info->tree_root, bctl);
2715 if (ret && ret != -EEXIST)
2718 if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
2719 BUG_ON(ret == -EEXIST);
2720 set_balance_control(bctl);
2722 BUG_ON(ret != -EEXIST);
2723 spin_lock(&fs_info->balance_lock);
2724 update_balance_args(bctl);
2725 spin_unlock(&fs_info->balance_lock);
2728 atomic_inc(&fs_info->balance_running);
2729 mutex_unlock(&fs_info->balance_mutex);
2731 ret = __btrfs_balance(fs_info);
2733 mutex_lock(&fs_info->balance_mutex);
2734 atomic_dec(&fs_info->balance_running);
2737 memset(bargs, 0, sizeof(*bargs));
2738 update_ioctl_balance_args(fs_info, 0, bargs);
2741 if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
2742 balance_need_close(fs_info)) {
2743 __cancel_balance(fs_info);
2746 wake_up(&fs_info->balance_wait_q);
2750 if (bctl->flags & BTRFS_BALANCE_RESUME)
2751 __cancel_balance(fs_info);
2757 static int balance_kthread(void *data)
2759 struct btrfs_balance_control *bctl =
2760 (struct btrfs_balance_control *)data;
2761 struct btrfs_fs_info *fs_info = bctl->fs_info;
2764 mutex_lock(&fs_info->volume_mutex);
2765 mutex_lock(&fs_info->balance_mutex);
2767 set_balance_control(bctl);
2769 if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
2770 printk(KERN_INFO "btrfs: force skipping balance\n");
2772 printk(KERN_INFO "btrfs: continuing balance\n");
2773 ret = btrfs_balance(bctl, NULL);
2776 mutex_unlock(&fs_info->balance_mutex);
2777 mutex_unlock(&fs_info->volume_mutex);
2781 int btrfs_recover_balance(struct btrfs_root *tree_root)
2783 struct task_struct *tsk;
2784 struct btrfs_balance_control *bctl;
2785 struct btrfs_balance_item *item;
2786 struct btrfs_disk_balance_args disk_bargs;
2787 struct btrfs_path *path;
2788 struct extent_buffer *leaf;
2789 struct btrfs_key key;
2792 path = btrfs_alloc_path();
2796 bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
2802 key.objectid = BTRFS_BALANCE_OBJECTID;
2803 key.type = BTRFS_BALANCE_ITEM_KEY;
2806 ret = btrfs_search_slot(NULL, tree_root, &key, path, 0, 0);
2809 if (ret > 0) { /* ret = -ENOENT; */
2814 leaf = path->nodes[0];
2815 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
2817 bctl->fs_info = tree_root->fs_info;
2818 bctl->flags = btrfs_balance_flags(leaf, item) | BTRFS_BALANCE_RESUME;
2820 btrfs_balance_data(leaf, item, &disk_bargs);
2821 btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
2822 btrfs_balance_meta(leaf, item, &disk_bargs);
2823 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
2824 btrfs_balance_sys(leaf, item, &disk_bargs);
2825 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
2827 tsk = kthread_run(balance_kthread, bctl, "btrfs-balance");
2836 btrfs_free_path(path);
2840 int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
2844 mutex_lock(&fs_info->balance_mutex);
2845 if (!fs_info->balance_ctl) {
2846 mutex_unlock(&fs_info->balance_mutex);
2850 if (atomic_read(&fs_info->balance_running)) {
2851 atomic_inc(&fs_info->balance_pause_req);
2852 mutex_unlock(&fs_info->balance_mutex);
2854 wait_event(fs_info->balance_wait_q,
2855 atomic_read(&fs_info->balance_running) == 0);
2857 mutex_lock(&fs_info->balance_mutex);
2858 /* we are good with balance_ctl ripped off from under us */
2859 BUG_ON(atomic_read(&fs_info->balance_running));
2860 atomic_dec(&fs_info->balance_pause_req);
2865 mutex_unlock(&fs_info->balance_mutex);
2869 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
2871 mutex_lock(&fs_info->balance_mutex);
2872 if (!fs_info->balance_ctl) {
2873 mutex_unlock(&fs_info->balance_mutex);
2877 atomic_inc(&fs_info->balance_cancel_req);
2879 * if we are running, just wait and return; the balance item is
2880 * deleted in btrfs_balance() in this case
2882 if (atomic_read(&fs_info->balance_running)) {
2883 mutex_unlock(&fs_info->balance_mutex);
2884 wait_event(fs_info->balance_wait_q,
2885 atomic_read(&fs_info->balance_running) == 0);
2886 mutex_lock(&fs_info->balance_mutex);
2888 /* __cancel_balance needs volume_mutex */
2889 mutex_unlock(&fs_info->balance_mutex);
2890 mutex_lock(&fs_info->volume_mutex);
2891 mutex_lock(&fs_info->balance_mutex);
2893 if (fs_info->balance_ctl)
2894 __cancel_balance(fs_info);
2896 mutex_unlock(&fs_info->volume_mutex);
2899 BUG_ON(fs_info->balance_ctl || atomic_read(&fs_info->balance_running));
2900 atomic_dec(&fs_info->balance_cancel_req);
2901 mutex_unlock(&fs_info->balance_mutex);
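/*
 * Balance lifecycle, as implemented above (sketch):
 *
 *   btrfs_balance()          persists the balance item, sets the balance
 *                            control and runs __btrfs_balance()
 *   btrfs_pause_balance()    bumps balance_pause_req and waits for
 *                            balance_running to drop to zero; the item
 *                            stays on disk so the balance can resume
 *   btrfs_recover_balance()  rebuilds the balance control from the item
 *                            at mount time and resumes it in a kthread
 *   btrfs_cancel_balance()   bumps balance_cancel_req and makes sure the
 *                            item is deleted, either by btrfs_balance()
 *                            itself or by __cancel_balance() here
 */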
2906 * shrinking a device means finding all of the device extents past
2907 * the new size, and then following the back refs to the chunks.
2908 * The chunk relocation code actually frees the device extent for us.
2910 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
2912 struct btrfs_trans_handle *trans;
2913 struct btrfs_root *root = device->dev_root;
2914 struct btrfs_dev_extent *dev_extent = NULL;
2915 struct btrfs_path *path;
2923 bool retried = false;
2924 struct extent_buffer *l;
2925 struct btrfs_key key;
2926 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
2927 u64 old_total = btrfs_super_total_bytes(super_copy);
2928 u64 old_size = device->total_bytes;
2929 u64 diff = device->total_bytes - new_size;
2931 if (new_size >= device->total_bytes)
2934 path = btrfs_alloc_path();
2942 device->total_bytes = new_size;
2943 if (device->writeable) {
2944 device->fs_devices->total_rw_bytes -= diff;
2945 spin_lock(&root->fs_info->free_chunk_lock);
2946 root->fs_info->free_chunk_space -= diff;
2947 spin_unlock(&root->fs_info->free_chunk_lock);
2949 unlock_chunks(root);
2952 key.objectid = device->devid;
2953 key.offset = (u64)-1;
2954 key.type = BTRFS_DEV_EXTENT_KEY;
2957 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2961 ret = btrfs_previous_item(root, path, 0, key.type);
2966 btrfs_release_path(path);
2971 slot = path->slots[0];
2972 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
2974 if (key.objectid != device->devid) {
2975 btrfs_release_path(path);
2979 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
2980 length = btrfs_dev_extent_length(l, dev_extent);
2982 if (key.offset + length <= new_size) {
2983 btrfs_release_path(path);
2987 chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
2988 chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
2989 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
2990 btrfs_release_path(path);
2992 ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
2994 if (ret && ret != -ENOSPC)
3001 if (failed && !retried) {
3005 } else if (failed && retried) {
3009 device->total_bytes = old_size;
3010 if (device->writeable)
3011 device->fs_devices->total_rw_bytes += diff;
3012 spin_lock(&root->fs_info->free_chunk_lock);
3013 root->fs_info->free_chunk_space += diff;
3014 spin_unlock(&root->fs_info->free_chunk_lock);
3015 unlock_chunks(root);
3019 /* Shrinking succeeded, else we would be at "done". */
3020 trans = btrfs_start_transaction(root, 0);
3021 if (IS_ERR(trans)) {
3022 ret = PTR_ERR(trans);
3028 device->disk_total_bytes = new_size;
3029 /* Now btrfs_update_device() will change the on-disk size. */
3030 ret = btrfs_update_device(trans, device);
3032 unlock_chunks(root);
3033 btrfs_end_transaction(trans, root);
3036 WARN_ON(diff > old_total);
3037 btrfs_set_super_total_bytes(super_copy, old_total - diff);
3038 unlock_chunks(root);
3039 btrfs_end_transaction(trans, root);
3041 btrfs_free_path(path);
3045 static int btrfs_add_system_chunk(struct btrfs_root *root,
3046 struct btrfs_key *key,
3047 struct btrfs_chunk *chunk, int item_size)
3049 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3050 struct btrfs_disk_key disk_key;
3054 array_size = btrfs_super_sys_array_size(super_copy);
3055 if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
3058 ptr = super_copy->sys_chunk_array + array_size;
3059 btrfs_cpu_key_to_disk(&disk_key, key);
3060 memcpy(ptr, &disk_key, sizeof(disk_key));
3061 ptr += sizeof(disk_key);
3062 memcpy(ptr, chunk, item_size);
3063 item_size += sizeof(disk_key);
3064 btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
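/*
 * Layout of sys_chunk_array after the append above (sketch):
 *
 *   +----------+-------------------+----------+-------------------+--
 *   | disk_key | chunk (item_size) | disk_key | chunk (item_size) |
 *   +----------+-------------------+----------+-------------------+--
 *
 * btrfs_read_sys_array() below walks the array using exactly this
 * (key, chunk) pairing.
 */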
3069 * sort the devices in descending order by max_avail, total_avail
3071 static int btrfs_cmp_device_info(const void *a, const void *b)
3073 const struct btrfs_device_info *di_a = a;
3074 const struct btrfs_device_info *di_b = b;
3076 if (di_a->max_avail > di_b->max_avail)
3077 return -1;
3078 if (di_a->max_avail < di_b->max_avail)
3079 return 1;
3080 if (di_a->total_avail > di_b->total_avail)
3081 return -1;
3082 if (di_a->total_avail < di_b->total_avail)
3083 return 1;
3084 return 0;
3085 }
3087 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3088 struct btrfs_root *extent_root,
3089 struct map_lookup **map_ret,
3090 u64 *num_bytes_out, u64 *stripe_size_out,
3091 u64 start, u64 type)
3093 struct btrfs_fs_info *info = extent_root->fs_info;
3094 struct btrfs_fs_devices *fs_devices = info->fs_devices;
3095 struct list_head *cur;
3096 struct map_lookup *map = NULL;
3097 struct extent_map_tree *em_tree;
3098 struct extent_map *em;
3099 struct btrfs_device_info *devices_info = NULL;
3101 int num_stripes; /* total number of stripes to allocate */
3102 int sub_stripes; /* sub_stripes info for map */
3103 int dev_stripes; /* stripes per dev */
3104 int devs_max; /* max devs to use */
3105 int devs_min; /* min devs needed */
3106 int devs_increment; /* ndevs has to be a multiple of this */
3107 int ncopies; /* how many copies of the data there are */
3109 u64 max_stripe_size;
3117 if ((type & BTRFS_BLOCK_GROUP_RAID1) &&
3118 (type & BTRFS_BLOCK_GROUP_DUP)) {
3120 type &= ~BTRFS_BLOCK_GROUP_DUP;
3123 if (list_empty(&fs_devices->alloc_list))
3130 devs_max = 0; /* 0 == as many as possible */
3134 * define the properties of each RAID type.
3135 * FIXME: move this to a global table and use it in all RAID
3136 * code
3138 if (type & (BTRFS_BLOCK_GROUP_DUP)) {
3142 } else if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
3144 } else if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
3149 } else if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
3158 if (type & BTRFS_BLOCK_GROUP_DATA) {
3159 max_stripe_size = 1024 * 1024 * 1024;
3160 max_chunk_size = 10 * max_stripe_size;
3161 } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
3162 /* for larger filesystems, use larger metadata chunks */
3163 if (fs_devices->total_rw_bytes > 50ULL * 1024 * 1024 * 1024)
3164 max_stripe_size = 1024 * 1024 * 1024;
3166 max_stripe_size = 256 * 1024 * 1024;
3167 max_chunk_size = max_stripe_size;
3168 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
3169 max_stripe_size = 8 * 1024 * 1024;
3170 max_chunk_size = 2 * max_stripe_size;
3172 printk(KERN_ERR "btrfs: invalid chunk type 0x%llx requested\n",
3177 /* we don't want a chunk larger than 10% of writeable space */
3178 max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
3181 devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices,
3186 cur = fs_devices->alloc_list.next;
3189 * in the first pass through the devices list, we gather information
3190 * about the available holes on each device.
3193 while (cur != &fs_devices->alloc_list) {
3194 struct btrfs_device *device;
3198 device = list_entry(cur, struct btrfs_device, dev_alloc_list);
3202 if (!device->writeable) {
3204 "btrfs: read-only device in alloc_list\n");
3209 if (!device->in_fs_metadata)
3212 if (device->total_bytes > device->bytes_used)
3213 total_avail = device->total_bytes - device->bytes_used;
3217 /* If there is no space on this device, skip it. */
3218 if (total_avail == 0)
3221 ret = find_free_dev_extent(device,
3222 max_stripe_size * dev_stripes,
3223 &dev_offset, &max_avail);
3224 if (ret && ret != -ENOSPC)
3228 max_avail = max_stripe_size * dev_stripes;
3230 if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
3233 devices_info[ndevs].dev_offset = dev_offset;
3234 devices_info[ndevs].max_avail = max_avail;
3235 devices_info[ndevs].total_avail = total_avail;
3236 devices_info[ndevs].dev = device;
3241 * now sort the devices by hole size / available space
3243 sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
3244 btrfs_cmp_device_info, NULL);
3246 /* round down to number of usable stripes */
3247 ndevs -= ndevs % devs_increment;
3249 if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
3254 if (devs_max && ndevs > devs_max)
3257 * the primary goal is to maximize the number of stripes, so use as many
3258 * devices as possible, even if the stripes are not maximum sized.
3260 stripe_size = devices_info[ndevs-1].max_avail;
3261 num_stripes = ndevs * dev_stripes;
3263 if (stripe_size * num_stripes > max_chunk_size * ncopies) {
3264 stripe_size = max_chunk_size * ncopies;
3265 do_div(stripe_size, num_stripes);
3268 do_div(stripe_size, dev_stripes);
3269 do_div(stripe_size, BTRFS_STRIPE_LEN);
3270 stripe_size *= BTRFS_STRIPE_LEN;
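/*
 * Worked example (assuming the RAID10 parameters set above): four 1TiB
 * devices, data chunk, so max_chunk_size = 10GiB, ncopies = 2 and
 * dev_stripes = 1.  num_stripes = 4 and the smallest hole is ~1TiB, so
 * stripe_size is clamped to max_chunk_size * ncopies / num_stripes =
 * 20GiB / 4 = 5GiB, then rounded down to a multiple of BTRFS_STRIPE_LEN.
 * The resulting chunk is stripe_size * num_stripes / ncopies = 10GiB.
 */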
3272 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
3277 map->num_stripes = num_stripes;
3279 for (i = 0; i < ndevs; ++i) {
3280 for (j = 0; j < dev_stripes; ++j) {
3281 int s = i * dev_stripes + j;
3282 map->stripes[s].dev = devices_info[i].dev;
3283 map->stripes[s].physical = devices_info[i].dev_offset +
3287 map->sector_size = extent_root->sectorsize;
3288 map->stripe_len = BTRFS_STRIPE_LEN;
3289 map->io_align = BTRFS_STRIPE_LEN;
3290 map->io_width = BTRFS_STRIPE_LEN;
3292 map->sub_stripes = sub_stripes;
3295 num_bytes = stripe_size * (num_stripes / ncopies);
3297 *stripe_size_out = stripe_size;
3298 *num_bytes_out = num_bytes;
3300 trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);
3302 em = alloc_extent_map();
3307 em->bdev = (struct block_device *)map;
3309 em->len = num_bytes;
3310 em->block_start = 0;
3311 em->block_len = em->len;
3313 em_tree = &extent_root->fs_info->mapping_tree.map_tree;
3314 write_lock(&em_tree->lock);
3315 ret = add_extent_mapping(em_tree, em);
3316 write_unlock(&em_tree->lock);
3318 free_extent_map(em);
3320 ret = btrfs_make_block_group(trans, extent_root, 0, type,
3321 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3325 for (i = 0; i < map->num_stripes; ++i) {
3326 struct btrfs_device *device;
3329 device = map->stripes[i].dev;
3330 dev_offset = map->stripes[i].physical;
3332 ret = btrfs_alloc_dev_extent(trans, device,
3333 info->chunk_root->root_key.objectid,
3334 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3335 start, dev_offset, stripe_size);
3339 kfree(devices_info);
3344 kfree(devices_info);
3348 static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
3349 struct btrfs_root *extent_root,
3350 struct map_lookup *map, u64 chunk_offset,
3351 u64 chunk_size, u64 stripe_size)
3354 struct btrfs_key key;
3355 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
3356 struct btrfs_device *device;
3357 struct btrfs_chunk *chunk;
3358 struct btrfs_stripe *stripe;
3359 size_t item_size = btrfs_chunk_item_size(map->num_stripes);
3363 chunk = kzalloc(item_size, GFP_NOFS);
3368 while (index < map->num_stripes) {
3369 device = map->stripes[index].dev;
3370 device->bytes_used += stripe_size;
3371 ret = btrfs_update_device(trans, device);
3376 spin_lock(&extent_root->fs_info->free_chunk_lock);
3377 extent_root->fs_info->free_chunk_space -= (stripe_size *
3379 spin_unlock(&extent_root->fs_info->free_chunk_lock);
3382 stripe = &chunk->stripe;
3383 while (index < map->num_stripes) {
3384 device = map->stripes[index].dev;
3385 dev_offset = map->stripes[index].physical;
3387 btrfs_set_stack_stripe_devid(stripe, device->devid);
3388 btrfs_set_stack_stripe_offset(stripe, dev_offset);
3389 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
3394 btrfs_set_stack_chunk_length(chunk, chunk_size);
3395 btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
3396 btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
3397 btrfs_set_stack_chunk_type(chunk, map->type);
3398 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
3399 btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
3400 btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
3401 btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
3402 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
3404 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3405 key.type = BTRFS_CHUNK_ITEM_KEY;
3406 key.offset = chunk_offset;
3408 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
3411 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
3412 ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
3422 * Chunk allocation falls into two parts. The first part does the work
3423 * that makes the newly allocated chunk usable, but does not do any
3424 * operation that modifies the chunk tree. The second part does the work
3425 * that requires modifying the chunk tree. This division is important for
3426 * the bootstrap process of adding storage to a seed btrfs.
3428 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3429 struct btrfs_root *extent_root, u64 type)
3434 struct map_lookup *map;
3435 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
3438 ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3443 ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
3444 &stripe_size, chunk_offset, type);
3448 ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
3449 chunk_size, stripe_size);
3454 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
3455 struct btrfs_root *root,
3456 struct btrfs_device *device)
3459 u64 sys_chunk_offset;
3463 u64 sys_stripe_size;
3465 struct map_lookup *map;
3466 struct map_lookup *sys_map;
3467 struct btrfs_fs_info *fs_info = root->fs_info;
3468 struct btrfs_root *extent_root = fs_info->extent_root;
3471 ret = find_next_chunk(fs_info->chunk_root,
3472 BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset);
3476 alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
3477 fs_info->avail_metadata_alloc_bits;
3478 alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
3480 ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
3481 &stripe_size, chunk_offset, alloc_profile);
3484 sys_chunk_offset = chunk_offset + chunk_size;
3486 alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM |
3487 fs_info->avail_system_alloc_bits;
3488 alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
3490 ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
3491 &sys_chunk_size, &sys_stripe_size,
3492 sys_chunk_offset, alloc_profile);
3495 ret = btrfs_add_device(trans, fs_info->chunk_root, device);
3499 * Modifying the chunk tree requires allocating new blocks from both
3500 * the system block group and the metadata block group. So we can only
3501 * do operations that modify the chunk tree after both block groups
3502 * have been created.
3504 ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
3505 chunk_size, stripe_size);
3508 ret = __finish_chunk_alloc(trans, extent_root, sys_map,
3509 sys_chunk_offset, sys_chunk_size,
3515 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
3517 struct extent_map *em;
3518 struct map_lookup *map;
3519 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
3523 read_lock(&map_tree->map_tree.lock);
3524 em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
3525 read_unlock(&map_tree->map_tree.lock);
3529 if (btrfs_test_opt(root, DEGRADED)) {
3530 free_extent_map(em);
3534 map = (struct map_lookup *)em->bdev;
3535 for (i = 0; i < map->num_stripes; i++) {
3536 if (!map->stripes[i].dev->writeable) {
3541 free_extent_map(em);
3545 void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
3547 extent_map_tree_init(&tree->map_tree);
3550 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
3552 struct extent_map *em;
3555 write_lock(&tree->map_tree.lock);
3556 em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
3558 remove_extent_mapping(&tree->map_tree, em);
3559 write_unlock(&tree->map_tree.lock);
3564 free_extent_map(em);
3565 /* once for the tree */
3566 free_extent_map(em);
3570 int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
3572 struct extent_map *em;
3573 struct map_lookup *map;
3574 struct extent_map_tree *em_tree = &map_tree->map_tree;
3577 read_lock(&em_tree->lock);
3578 em = lookup_extent_mapping(em_tree, logical, len);
3579 read_unlock(&em_tree->lock);
3582 BUG_ON(em->start > logical || em->start + em->len < logical);
3583 map = (struct map_lookup *)em->bdev;
3584 if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
3585 ret = map->num_stripes;
3586 else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
3587 ret = map->sub_stripes;
3590 free_extent_map(em);
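/*
 * Example caller (sketch, not from this file): read paths can retry each
 * mirror in turn, using btrfs_num_copies() as the bound for mirror_num.
 * The real retry logic lives in the read endio handlers; the skeleton
 * below only shows the iteration shape.
 */
static int example_try_all_mirrors(struct btrfs_mapping_tree *map_tree,
				   u64 logical, u64 len)
{
	int num_copies = btrfs_num_copies(map_tree, logical, len);
	int mirror_num;
	int ret = -EIO;

	for (mirror_num = 1; mirror_num <= num_copies && ret; mirror_num++) {
		/*
		 * resubmit the read with this mirror_num here and set ret
		 * to 0 on a successful, checksum-verified completion
		 */
	}
	return ret;
}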
3594 static int find_live_mirror(struct map_lookup *map, int first, int num,
3598 if (map->stripes[optimal].dev->bdev)
3600 for (i = first; i < first + num; i++) {
3601 if (map->stripes[i].dev->bdev)
3604 /* we couldn't find one that doesn't fail. Just return something
3605 * and the io error handling code will clean up eventually
3610 static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
3611 u64 logical, u64 *length,
3612 struct btrfs_bio **bbio_ret,
3615 struct extent_map *em;
3616 struct map_lookup *map;
3617 struct extent_map_tree *em_tree = &map_tree->map_tree;
3620 u64 stripe_end_offset;
3629 struct btrfs_bio *bbio = NULL;
3631 read_lock(&em_tree->lock);
3632 em = lookup_extent_mapping(em_tree, logical, *length);
3633 read_unlock(&em_tree->lock);
3636 printk(KERN_CRIT "unable to find logical %llu len %llu\n",
3637 (unsigned long long)logical,
3638 (unsigned long long)*length);
3642 BUG_ON(em->start > logical || em->start + em->len < logical);
3643 map = (struct map_lookup *)em->bdev;
3644 offset = logical - em->start;
3646 if (mirror_num > map->num_stripes)
3651 * stripe_nr counts the total number of stripes we have to stride
3652 * to get to this block
3654 do_div(stripe_nr, map->stripe_len);
3656 stripe_offset = stripe_nr * map->stripe_len;
3657 BUG_ON(offset < stripe_offset);
3659 /* stripe_offset is the offset of this block in its stripe */
3660 stripe_offset = offset - stripe_offset;
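/*
 * e.g. with a 64KiB stripe_len, offset = 200KiB gives stripe_nr = 3
 * (do_div() leaves the quotient in stripe_nr) and
 * stripe_offset = 200KiB - 3 * 64KiB = 8KiB into that stripe.
 */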
3662 if (rw & REQ_DISCARD)
3663 *length = min_t(u64, em->len - offset, *length);
3664 else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
3665 /* we limit the length of each bio to what fits in a stripe */
3666 *length = min_t(u64, em->len - offset,
3667 map->stripe_len - stripe_offset);
3669 *length = em->len - offset;
3677 stripe_nr_orig = stripe_nr;
3678 stripe_nr_end = (offset + *length + map->stripe_len - 1) &
3679 (~(map->stripe_len - 1));
3680 do_div(stripe_nr_end, map->stripe_len);
3681 stripe_end_offset = stripe_nr_end * map->stripe_len -
3683 if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3684 if (rw & REQ_DISCARD)
3685 num_stripes = min_t(u64, map->num_stripes,
3686 stripe_nr_end - stripe_nr_orig);
3687 stripe_index = do_div(stripe_nr, map->num_stripes);
3688 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
3689 if (rw & (REQ_WRITE | REQ_DISCARD))
3690 num_stripes = map->num_stripes;
3691 else if (mirror_num)
3692 stripe_index = mirror_num - 1;
3694 stripe_index = find_live_mirror(map, 0,
3696 current->pid % map->num_stripes);
3697 mirror_num = stripe_index + 1;
3700 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
3701 if (rw & (REQ_WRITE | REQ_DISCARD)) {
3702 num_stripes = map->num_stripes;
3703 } else if (mirror_num) {
3704 stripe_index = mirror_num - 1;
3709 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3710 int factor = map->num_stripes / map->sub_stripes;
3712 stripe_index = do_div(stripe_nr, factor);
3713 stripe_index *= map->sub_stripes;
3716 num_stripes = map->sub_stripes;
3717 else if (rw & REQ_DISCARD)
3718 num_stripes = min_t(u64, map->sub_stripes *
3719 (stripe_nr_end - stripe_nr_orig),
3721 else if (mirror_num)
3722 stripe_index += mirror_num - 1;
3724 stripe_index = find_live_mirror(map, stripe_index,
3725 map->sub_stripes, stripe_index +
3726 current->pid % map->sub_stripes);
3727 mirror_num = stripe_index + 1;
3731 * after this do_div call, stripe_nr is the number of stripes
3732 * on this device we have to walk to find the data, and
3733 * stripe_index is the number of our device in the stripe array
3735 stripe_index = do_div(stripe_nr, map->num_stripes);
3736 mirror_num = stripe_index + 1;
3738 BUG_ON(stripe_index >= map->num_stripes);
3740 bbio = kzalloc(btrfs_bio_size(num_stripes), GFP_NOFS);
3745 atomic_set(&bbio->error, 0);
3747 if (rw & REQ_DISCARD) {
3749 int sub_stripes = 0;
3750 u64 stripes_per_dev = 0;
3751 u32 remaining_stripes = 0;
3754 (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
3755 if (map->type & BTRFS_BLOCK_GROUP_RAID0)
3758 sub_stripes = map->sub_stripes;
3760 factor = map->num_stripes / sub_stripes;
3761 stripes_per_dev = div_u64_rem(stripe_nr_end -
3764 &remaining_stripes);
3767 for (i = 0; i < num_stripes; i++) {
3768 bbio->stripes[i].physical =
3769 map->stripes[stripe_index].physical +
3770 stripe_offset + stripe_nr * map->stripe_len;
3771 bbio->stripes[i].dev = map->stripes[stripe_index].dev;
3773 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
3774 BTRFS_BLOCK_GROUP_RAID10)) {
3775 bbio->stripes[i].length = stripes_per_dev *
3777 if (i / sub_stripes < remaining_stripes)
3778 bbio->stripes[i].length +=
3780 if (i < sub_stripes)
3781 bbio->stripes[i].length -=
3783 if ((i / sub_stripes + 1) %
3784 sub_stripes == remaining_stripes)
3785 bbio->stripes[i].length -=
3787 if (i == sub_stripes - 1)
3790 bbio->stripes[i].length = *length;
3793 if (stripe_index == map->num_stripes) {
3794 /* This could only happen for RAID0/10 */
3800 for (i = 0; i < num_stripes; i++) {
3801 bbio->stripes[i].physical =
3802 map->stripes[stripe_index].physical +
3804 stripe_nr * map->stripe_len;
3805 bbio->stripes[i].dev =
3806 map->stripes[stripe_index].dev;
3811 if (rw & REQ_WRITE) {
3812 if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
3813 BTRFS_BLOCK_GROUP_RAID10 |
3814 BTRFS_BLOCK_GROUP_DUP)) {
3820 bbio->num_stripes = num_stripes;
3821 bbio->max_errors = max_errors;
3822 bbio->mirror_num = mirror_num;
3824 free_extent_map(em);
3828 int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
3829 u64 logical, u64 *length,
3830 struct btrfs_bio **bbio_ret, int mirror_num)
3832 return __btrfs_map_block(map_tree, rw, logical, length, bbio_ret,
3836 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
3837 u64 chunk_start, u64 physical, u64 devid,
3838 u64 **logical, int *naddrs, int *stripe_len)
3840 struct extent_map_tree *em_tree = &map_tree->map_tree;
3841 struct extent_map *em;
3842 struct map_lookup *map;
3849 read_lock(&em_tree->lock);
3850 em = lookup_extent_mapping(em_tree, chunk_start, 1);
3851 read_unlock(&em_tree->lock);
3853 BUG_ON(!em || em->start != chunk_start);
3854 map = (struct map_lookup *)em->bdev;
3857 if (map->type & BTRFS_BLOCK_GROUP_RAID10)
3858 do_div(length, map->num_stripes / map->sub_stripes);
3859 else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
3860 do_div(length, map->num_stripes);
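/*
 * e.g. a RAID0 chunk of logical length "length" striped across
 * num_stripes devices covers only length / num_stripes bytes of each
 * device, hence the divisions above before comparing against the
 * device extent offsets.
 */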
3862 buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
3865 for (i = 0; i < map->num_stripes; i++) {
3866 if (devid && map->stripes[i].dev->devid != devid)
3868 if (map->stripes[i].physical > physical ||
3869 map->stripes[i].physical + length <= physical)
3872 stripe_nr = physical - map->stripes[i].physical;
3873 do_div(stripe_nr, map->stripe_len);
3875 if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3876 stripe_nr = stripe_nr * map->num_stripes + i;
3877 do_div(stripe_nr, map->sub_stripes);
3878 } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3879 stripe_nr = stripe_nr * map->num_stripes + i;
3881 bytenr = chunk_start + stripe_nr * map->stripe_len;
3882 WARN_ON(nr >= map->num_stripes);
3883 for (j = 0; j < nr; j++) {
3884 if (buf[j] == bytenr)
3888 WARN_ON(nr >= map->num_stripes);
3895 *stripe_len = map->stripe_len;
3897 free_extent_map(em);
3901 static void btrfs_end_bio(struct bio *bio, int err)
3903 struct btrfs_bio *bbio = bio->bi_private;
3904 int is_orig_bio = 0;
3907 atomic_inc(&bbio->error);
3909 if (bio == bbio->orig_bio)
3912 if (atomic_dec_and_test(&bbio->stripes_pending)) {
3915 bio = bbio->orig_bio;
3917 bio->bi_private = bbio->private;
3918 bio->bi_end_io = bbio->end_io;
3919 bio->bi_bdev = (struct block_device *)
3920 (unsigned long)bbio->mirror_num;
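/*
 * the cast above repurposes bi_bdev to carry the mirror number back
 * to the original end_io callback once all stripes have completed
 */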
3921 /* only send an error to the higher layers if it is
3922 * beyond the tolerance of the multi-bio
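* (max_errors is 1 for RAID1/RAID10/DUP writes, set in
* __btrfs_map_block, so a single failed mirror still reports success)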
3924 if (atomic_read(&bbio->error) > bbio->max_errors) {
3928 * this bio is actually up to date, we didn't
3929 * go over the max number of errors
3931 set_bit(BIO_UPTODATE, &bio->bi_flags);
3936 bio_endio(bio, err);
3937 } else if (!is_orig_bio) {
3942 struct async_sched {
3945 struct btrfs_fs_info *info;
3946 struct btrfs_work work;
3950 * see run_scheduled_bios for a description of why bios are collected for
3951 * async submit.
3953 * This will add one bio to the pending list for a device and make sure
3954 * the work struct is scheduled.
3956 static noinline int schedule_bio(struct btrfs_root *root,
3957 struct btrfs_device *device,
3958 int rw, struct bio *bio)
3960 int should_queue = 1;
3961 struct btrfs_pending_bios *pending_bios;
3963 /* don't bother with additional async steps for reads, right now */
3964 if (!(rw & REQ_WRITE)) {
3966 btrfsic_submit_bio(rw, bio);
3972 * nr_async_bios allows us to reliably return congestion to the
3973 * higher layers. Otherwise, the async bio makes it appear we have
3974 * made progress against dirty pages when we've really just put it
3975 * on a queue for later
3977 atomic_inc(&root->fs_info->nr_async_bios);
3978 WARN_ON(bio->bi_next);
3979 bio->bi_next = NULL;
3982 spin_lock(&device->io_lock);
3983 if (bio->bi_rw & REQ_SYNC)
3984 pending_bios = &device->pending_sync_bios;
3986 pending_bios = &device->pending_bios;
3988 if (pending_bios->tail)
3989 pending_bios->tail->bi_next = bio;
3991 pending_bios->tail = bio;
3992 if (!pending_bios->head)
3993 pending_bios->head = bio;
3994 if (device->running_pending)
3997 spin_unlock(&device->io_lock);
4000 btrfs_queue_worker(&root->fs_info->submit_workers,
4005 int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
4006 int mirror_num, int async_submit)
4008 struct btrfs_mapping_tree *map_tree;
4009 struct btrfs_device *dev;
4010 struct bio *first_bio = bio;
4011 u64 logical = (u64)bio->bi_sector << 9;
4017 struct btrfs_bio *bbio = NULL;
4019 length = bio->bi_size;
4020 map_tree = &root->fs_info->mapping_tree;
4021 map_length = length;
4023 ret = btrfs_map_block(map_tree, rw, logical, &map_length, &bbio,
4027 total_devs = bbio->num_stripes;
4028 if (map_length < length) {
4029 printk(KERN_CRIT "mapping failed logical %llu bio len %llu "
4030 "len %llu\n", (unsigned long long)logical,
4031 (unsigned long long)length,
4032 (unsigned long long)map_length);
4036 bbio->orig_bio = first_bio;
4037 bbio->private = first_bio->bi_private;
4038 bbio->end_io = first_bio->bi_end_io;
4039 atomic_set(&bbio->stripes_pending, bbio->num_stripes);
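/*
 * every cloned bio submitted below drops stripes_pending once in
 * btrfs_end_bio(); the original bio is completed only after the last
 * stripe finishes
 */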
4041 while (dev_nr < total_devs) {
4042 if (dev_nr < total_devs - 1) {
4043 bio = bio_clone(first_bio, GFP_NOFS);
4048 bio->bi_private = bbio;
4049 bio->bi_end_io = btrfs_end_bio;
4050 bio->bi_sector = bbio->stripes[dev_nr].physical >> 9;
4051 dev = bbio->stripes[dev_nr].dev;
4052 if (dev && dev->bdev && (rw != WRITE || dev->writeable)) {
4053 pr_debug("btrfs_map_bio: rw %d, secor=%llu, dev=%lu "
4054 "(%s id %llu), size=%u\n", rw,
4055 (u64)bio->bi_sector, (u_long)dev->bdev->bd_dev,
4056 dev->name, dev->devid, bio->bi_size);
4057 bio->bi_bdev = dev->bdev;
4059 schedule_bio(root, dev, rw, bio);
4061 btrfsic_submit_bio(rw, bio);
4063 bio->bi_bdev = root->fs_info->fs_devices->latest_bdev;
4064 bio->bi_sector = logical >> 9;
4065 bio_endio(bio, -EIO);
4072 struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
4075 struct btrfs_device *device;
4076 struct btrfs_fs_devices *cur_devices;
4078 cur_devices = root->fs_info->fs_devices;
4079 while (cur_devices) {
4080 if (!fsid ||
4081 !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
4082 device = __find_device(&cur_devices->devices,
4087 cur_devices = cur_devices->seed;
4092 static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
4093 u64 devid, u8 *dev_uuid)
4095 struct btrfs_device *device;
4096 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
4098 device = kzalloc(sizeof(*device), GFP_NOFS);
4101 list_add(&device->dev_list,
4102 &fs_devices->devices);
4103 device->dev_root = root->fs_info->dev_root;
4104 device->devid = devid;
4105 device->work.func = pending_bios_fn;
4106 device->fs_devices = fs_devices;
4107 device->missing = 1;
4108 fs_devices->num_devices++;
4109 fs_devices->missing_devices++;
4110 spin_lock_init(&device->io_lock);
4111 INIT_LIST_HEAD(&device->dev_alloc_list);
4112 memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
4116 static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
4117 struct extent_buffer *leaf,
4118 struct btrfs_chunk *chunk)
4120 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
4121 struct map_lookup *map;
4122 struct extent_map *em;
4126 u8 uuid[BTRFS_UUID_SIZE];
4131 logical = key->offset;
4132 length = btrfs_chunk_length(leaf, chunk);
4134 read_lock(&map_tree->map_tree.lock);
4135 em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
4136 read_unlock(&map_tree->map_tree.lock);
4138 /* already mapped? */
4139 if (em && em->start <= logical && em->start + em->len > logical) {
4140 free_extent_map(em);
4143 free_extent_map(em);
4146 em = alloc_extent_map();
4149 num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
4150 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
4152 free_extent_map(em);
4156 em->bdev = (struct block_device *)map;
4157 em->start = logical;
4159 em->block_start = 0;
4160 em->block_len = em->len;
4162 map->num_stripes = num_stripes;
4163 map->io_width = btrfs_chunk_io_width(leaf, chunk);
4164 map->io_align = btrfs_chunk_io_align(leaf, chunk);
4165 map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
4166 map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
4167 map->type = btrfs_chunk_type(leaf, chunk);
4168 map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
4169 for (i = 0; i < num_stripes; i++) {
4170 map->stripes[i].physical =
4171 btrfs_stripe_offset_nr(leaf, chunk, i);
4172 devid = btrfs_stripe_devid_nr(leaf, chunk, i);
4173 read_extent_buffer(leaf, uuid, (unsigned long)
4174 btrfs_stripe_dev_uuid_nr(chunk, i),
4176 map->stripes[i].dev = btrfs_find_device(root, devid, uuid,
4178 if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
4180 free_extent_map(em);
4183 if (!map->stripes[i].dev) {
4184 map->stripes[i].dev =
4185 add_missing_dev(root, devid, uuid);
4186 if (!map->stripes[i].dev) {
4188 free_extent_map(em);
4192 map->stripes[i].dev->in_fs_metadata = 1;
4195 write_lock(&map_tree->map_tree.lock);
4196 ret = add_extent_mapping(&map_tree->map_tree, em);
4197 write_unlock(&map_tree->map_tree.lock);
4199 free_extent_map(em);
4204 static int fill_device_from_item(struct extent_buffer *leaf,
4205 struct btrfs_dev_item *dev_item,
4206 struct btrfs_device *device)
4210 device->devid = btrfs_device_id(leaf, dev_item);
4211 device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
4212 device->total_bytes = device->disk_total_bytes;
4213 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
4214 device->type = btrfs_device_type(leaf, dev_item);
4215 device->io_align = btrfs_device_io_align(leaf, dev_item);
4216 device->io_width = btrfs_device_io_width(leaf, dev_item);
4217 device->sector_size = btrfs_device_sector_size(leaf, dev_item);
4219 ptr = (unsigned long)btrfs_device_uuid(dev_item);
4220 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
4225 static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
4227 struct btrfs_fs_devices *fs_devices;
4230 BUG_ON(!mutex_is_locked(&uuid_mutex));
4232 fs_devices = root->fs_info->fs_devices->seed;
4233 while (fs_devices) {
4234 if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
4238 fs_devices = fs_devices->seed;
4241 fs_devices = find_fsid(fsid);
4247 fs_devices = clone_fs_devices(fs_devices);
4248 if (IS_ERR(fs_devices)) {
4249 ret = PTR_ERR(fs_devices);
4253 ret = __btrfs_open_devices(fs_devices, FMODE_READ,
4254 root->fs_info->bdev_holder);
4258 if (!fs_devices->seeding) {
4259 __btrfs_close_devices(fs_devices);
4260 free_fs_devices(fs_devices);
4265 fs_devices->seed = root->fs_info->fs_devices->seed;
4266 root->fs_info->fs_devices->seed = fs_devices;
4271 static int read_one_dev(struct btrfs_root *root,
4272 struct extent_buffer *leaf,
4273 struct btrfs_dev_item *dev_item)
4275 struct btrfs_device *device;
4278 u8 fs_uuid[BTRFS_UUID_SIZE];
4279 u8 dev_uuid[BTRFS_UUID_SIZE];
4281 devid = btrfs_device_id(leaf, dev_item);
4282 read_extent_buffer(leaf, dev_uuid,
4283 (unsigned long)btrfs_device_uuid(dev_item),
4285 read_extent_buffer(leaf, fs_uuid,
4286 (unsigned long)btrfs_device_fsid(dev_item),
4289 if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
4290 ret = open_seed_devices(root, fs_uuid);
4291 if (ret && !btrfs_test_opt(root, DEGRADED))
4295 device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
4296 if (!device || !device->bdev) {
4297 if (!btrfs_test_opt(root, DEGRADED))
4301 printk(KERN_WARNING "warning devid %llu missing\n",
4302 (unsigned long long)devid);
4303 device = add_missing_dev(root, devid, dev_uuid);
4306 } else if (!device->missing) {
4308 * this happens when a device that was properly set up
4309 * in the device info lists suddenly goes bad.
4310 * device->bdev is NULL, and so we have to set
4311 * device->missing to one here
4313 root->fs_info->fs_devices->missing_devices++;
4314 device->missing = 1;
4318 if (device->fs_devices != root->fs_info->fs_devices) {
4319 BUG_ON(device->writeable);
4320 if (device->generation !=
4321 btrfs_device_generation(leaf, dev_item))
4325 fill_device_from_item(leaf, dev_item, device);
4326 device->dev_root = root->fs_info->dev_root;
4327 device->in_fs_metadata = 1;
4328 if (device->writeable) {
4329 device->fs_devices->total_rw_bytes += device->total_bytes;
4330 spin_lock(&root->fs_info->free_chunk_lock);
4331 root->fs_info->free_chunk_space += device->total_bytes -
4333 spin_unlock(&root->fs_info->free_chunk_lock);
4339 int btrfs_read_sys_array(struct btrfs_root *root)
4341 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
4342 struct extent_buffer *sb;
4343 struct btrfs_disk_key *disk_key;
4344 struct btrfs_chunk *chunk;
4346 unsigned long sb_ptr;
4352 struct btrfs_key key;
4354 sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
4355 BTRFS_SUPER_INFO_SIZE);
4358 btrfs_set_buffer_uptodate(sb);
4359 btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
4361 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
4362 array_size = btrfs_super_sys_array_size(super_copy);
4364 ptr = super_copy->sys_chunk_array;
4365 sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
4368 while (cur < array_size) {
4369 disk_key = (struct btrfs_disk_key *)ptr;
4370 btrfs_disk_key_to_cpu(&key, disk_key);
4372 len = sizeof(*disk_key); ptr += len;
4376 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
4377 chunk = (struct btrfs_chunk *)sb_ptr;
4378 ret = read_one_chunk(root, &key, sb, chunk);
4381 num_stripes = btrfs_chunk_num_stripes(sb, chunk);
4382 len = btrfs_chunk_item_size(num_stripes);
4391 free_extent_buffer(sb);
4395 int btrfs_read_chunk_tree(struct btrfs_root *root)
4397 struct btrfs_path *path;
4398 struct extent_buffer *leaf;
4399 struct btrfs_key key;
4400 struct btrfs_key found_key;
4404 root = root->fs_info->chunk_root;
4406 path = btrfs_alloc_path();
4410 mutex_lock(&uuid_mutex);
4413 /* first we search for all of the device items, and then we
4414 * read in all of the chunk items. This way we can create chunk
4415 * mappings that reference all of the devices that are actually found
4417 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
4421 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4425 leaf = path->nodes[0];
4426 slot = path->slots[0];
4427 if (slot >= btrfs_header_nritems(leaf)) {
4428 ret = btrfs_next_leaf(root, path);
4435 btrfs_item_key_to_cpu(leaf, &found_key, slot);
4436 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
4437 if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
4439 if (found_key.type == BTRFS_DEV_ITEM_KEY) {
4440 struct btrfs_dev_item *dev_item;
4441 dev_item = btrfs_item_ptr(leaf, slot,
4442 struct btrfs_dev_item);
4443 ret = read_one_dev(root, leaf, dev_item);
4447 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
4448 struct btrfs_chunk *chunk;
4449 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
4450 ret = read_one_chunk(root, &found_key, leaf, chunk);
4456 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
4458 btrfs_release_path(path);
4463 unlock_chunks(root);
4464 mutex_unlock(&uuid_mutex);
4466 btrfs_free_path(path);