/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <asm/div64.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "async-thread.h"
#include "check-integrity.h"
static int init_first_rw_device(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_device *device);
static int btrfs_relocate_sys_chunks(struct btrfs_root *root);

static DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
static void lock_chunks(struct btrfs_root *root)
{
	mutex_lock(&root->fs_info->chunk_mutex);
}

static void unlock_chunks(struct btrfs_root *root)
{
	mutex_unlock(&root->fs_info->chunk_mutex);
}
static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		kfree(device->name);
		kfree(device);
	}
	kfree(fs_devices);
}
void btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, list);
		list_del(&fs_devices->list);
		free_fs_devices(fs_devices);
	}
}
static noinline struct btrfs_device *__find_device(struct list_head *head,
						   u64 devid, u8 *uuid)
{
	struct btrfs_device *dev;

	list_for_each_entry(dev, head, dev_list) {
		if (dev->devid == devid &&
		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
			return dev;
		}
	}
	return NULL;
}
static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, list) {
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}
static void requeue_list(struct btrfs_pending_bios *pending_bios,
			 struct bio *head, struct bio *tail)
{
	struct bio *old_head;

	old_head = pending_bios->head;
	pending_bios->head = head;
	if (pending_bios->tail)
		tail->bi_next = old_head;
	else
		pending_bios->tail = tail;
}
/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device.  This greatly
 * improves the scheduler's ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block.  The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested.  This way, multiple devices
 * can make progress from a single worker thread.
 */
static noinline void run_scheduled_bios(struct btrfs_device *device)
{
	struct bio *pending;
	struct backing_dev_info *bdi;
	struct btrfs_fs_info *fs_info;
	struct btrfs_pending_bios *pending_bios;
	struct bio *tail;
	struct bio *cur;
	int again = 0;
	unsigned long num_run;
	unsigned long batch_run = 0;
	unsigned long limit;
	unsigned long last_waited = 0;
	int force_reg = 0;
	int sync_pending = 0;
	struct blk_plug plug;

	/*
	 * this function runs all the bios we've collected for
	 * a particular device.  We don't want to wander off to
	 * another device without first sending all of these down.
	 * So, setup a plug here and finish it off before we return
	 */
	blk_start_plug(&plug);

	bdi = blk_get_backing_dev_info(device->bdev);
	fs_info = device->dev_root->fs_info;
	limit = btrfs_async_submit_limit(fs_info);
	limit = limit * 2 / 3;

loop:
	spin_lock(&device->io_lock);

loop_lock:
	num_run = 0;
	/* take all the bios off the list at once and process them
	 * later on (without the lock held).  But, remember the
	 * tail and other pointers so the bios can be properly reinserted
	 * into the list if we hit congestion
	 */
	if (!force_reg && device->pending_sync_bios.head) {
		pending_bios = &device->pending_sync_bios;
		force_reg = 1;
	} else {
		pending_bios = &device->pending_bios;
		force_reg = 0;
	}

	pending = pending_bios->head;
	tail = pending_bios->tail;
	WARN_ON(pending && !tail);

	/*
	 * if pending was null this time around, no bios need processing
	 * at all and we can stop.  Otherwise it'll loop back up again
	 * and do an additional check so no bios are missed.
	 *
	 * device->running_pending is used to synchronize with the
	 * schedule_bio code.
	 */
	if (device->pending_sync_bios.head == NULL &&
	    device->pending_bios.head == NULL) {
		again = 0;
		device->running_pending = 0;
	} else {
		again = 1;
		device->running_pending = 1;
	}

	pending_bios->head = NULL;
	pending_bios->tail = NULL;

	spin_unlock(&device->io_lock);
	while (pending) {

		rmb();
		/* we want to work on both lists, but do more bios on the
		 * sync list than the regular list
		 */
		if ((num_run > 32 &&
		     pending_bios != &device->pending_sync_bios &&
		     device->pending_sync_bios.head) ||
		    (num_run > 64 && pending_bios == &device->pending_sync_bios &&
		     device->pending_bios.head)) {
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			goto loop_lock;
		}

		cur = pending;
		pending = pending->bi_next;
		cur->bi_next = NULL;
		atomic_dec(&fs_info->nr_async_bios);

		if (atomic_read(&fs_info->nr_async_bios) < limit &&
		    waitqueue_active(&fs_info->async_submit_wait))
			wake_up(&fs_info->async_submit_wait);

		BUG_ON(atomic_read(&cur->bi_cnt) == 0);

		/*
		 * if we're doing the sync list, record that our
		 * plug has some sync requests on it
		 *
		 * If we're doing the regular list and there are
		 * sync requests sitting around, unplug before
		 * we add more
		 */
		if (pending_bios == &device->pending_sync_bios) {
			sync_pending = 1;
		} else if (sync_pending) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}

		btrfsic_submit_bio(cur->bi_rw, cur);
		num_run++;
		batch_run++;
		if (need_resched())
			cond_resched();
		/*
		 * we made progress, there is more work to do and the bdi
		 * is now congested.  Back off and let other work structs
		 * run instead
		 */
		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
		    fs_info->fs_devices->open_devices > 1) {
			struct io_context *ioc;

			ioc = current->io_context;

			/*
			 * the main goal here is that we don't want to
			 * block if we're going to be able to submit
			 * more requests without blocking.
			 *
			 * This code does two great things, it pokes into
			 * the elevator code from a filesystem _and_
			 * it makes assumptions about how batching works.
			 */
			if (ioc && ioc->nr_batch_requests > 0 &&
			    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
			    (last_waited == 0 ||
			     ioc->last_waited == last_waited)) {
				/*
				 * we want to go through our batch of
				 * requests and stop.  So, we copy out
				 * the ioc->last_waited time and test
				 * against it before looping
				 */
				last_waited = ioc->last_waited;
				if (need_resched())
					cond_resched();
				continue;
			}
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			device->running_pending = 1;

			spin_unlock(&device->io_lock);
			btrfs_requeue_work(&device->work);
			goto done;
		}
		/* unplug every 64 requests just for good measure */
		if (batch_run % 64 == 0) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}
	}

	cond_resched();
	if (again)
		goto loop;
	spin_lock(&device->io_lock);
	if (device->pending_bios.head || device->pending_sync_bios.head)
		goto loop_lock;
	spin_unlock(&device->io_lock);

done:
	blk_finish_plug(&plug);
}
static void pending_bios_fn(struct btrfs_work *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, work);
	run_scheduled_bios(device);
}
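/*
 * Register a scanned device in the global fs_uuids list: look up the
 * fs_devices entry for the superblock's fsid (allocating one on first
 * sight), then either record the device as new, refresh a stale path
 * for an already-known devid, or just update the latest transid/devid
 * hints.  Returns 0 on success, -ENOMEM on allocation failure, -EBUSY
 * if the filesystem is already opened.
 */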
static noinline int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	u64 found_transid = btrfs_super_generation(disk_super);
	char *name;

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
		if (!fs_devices)
			return -ENOMEM;
		INIT_LIST_HEAD(&fs_devices->devices);
		INIT_LIST_HEAD(&fs_devices->alloc_list);
		list_add(&fs_devices->list, &fs_uuids);
		memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
		mutex_init(&fs_devices->device_list_mutex);
		device = NULL;
	} else {
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
	}
	if (!device) {
		if (fs_devices->opened)
			return -EBUSY;

		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device) {
			/* we can safely leave the fs_devices entry around */
			return -ENOMEM;
		}

		device->devid = devid;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, disk_super->dev_item.uuid,
		       BTRFS_UUID_SIZE);
		spin_lock_init(&device->io_lock);
		device->name = kstrdup(path, GFP_NOFS);
		if (!device->name) {
			kfree(device);
			return -ENOMEM;
		}
		INIT_LIST_HEAD(&device->dev_alloc_list);

		/* init readahead state */
		spin_lock_init(&device->reada_lock);
		device->reada_curr_zone = NULL;
		atomic_set(&device->reada_in_flight, 0);
		device->reada_next = 0;
		INIT_RADIX_TREE(&device->reada_zones, GFP_NOFS & ~__GFP_WAIT);
		INIT_RADIX_TREE(&device->reada_extents, GFP_NOFS & ~__GFP_WAIT);

		mutex_lock(&fs_devices->device_list_mutex);
		list_add_rcu(&device->dev_list, &fs_devices->devices);
		mutex_unlock(&fs_devices->device_list_mutex);

		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	} else if (!device->name || strcmp(device->name, path)) {
		name = kstrdup(path, GFP_NOFS);
		if (!name)
			return -ENOMEM;
		kfree(device->name);
		device->name = name;
		if (device->missing) {
			fs_devices->missing_devices--;
			device->missing = 0;
		}
	}

	if (found_transid > fs_devices->latest_trans) {
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
	}
	*fs_devices_ret = fs_devices;
	return 0;
}
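/*
 * Duplicate the fs_devices list and every device on it.  Used when
 * sprouting from a seed filesystem, so the original list can keep
 * describing the seed while the mounted fs gets its own copy.
 */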
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;

	fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
	if (!fs_devices)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&fs_devices->devices);
	INIT_LIST_HEAD(&fs_devices->alloc_list);
	INIT_LIST_HEAD(&fs_devices->list);
	mutex_init(&fs_devices->device_list_mutex);
	fs_devices->latest_devid = orig->latest_devid;
	fs_devices->latest_trans = orig->latest_trans;
	memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));

	/* We hold the volume lock, so it is safe to get the devices. */
	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device)
			goto error;

		device->name = kstrdup(orig_dev->name, GFP_NOFS);
		if (!device->name) {
			kfree(device);
			goto error;
		}

		device->devid = orig_dev->devid;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
		spin_lock_init(&device->io_lock);
		INIT_LIST_HEAD(&device->dev_list);
		INIT_LIST_HEAD(&device->dev_alloc_list);

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	return fs_devices;
error:
	free_fs_devices(fs_devices);
	return ERR_PTR(-ENOMEM);
}
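/*
 * After a successful scan, close any devices that were registered but
 * are not part of the filesystem metadata, and recompute the latest
 * devid/transid/bdev hints from the devices that remain.
 */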
void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *next;
	struct block_device *latest_bdev = NULL;
	u64 latest_devid = 0;
	u64 latest_transid = 0;

	mutex_lock(&uuid_mutex);
again:
	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (device->in_fs_metadata) {
			if (!latest_transid ||
			    device->generation > latest_transid) {
				latest_devid = device->devid;
				latest_transid = device->generation;
				latest_bdev = device->bdev;
			}
			continue;
		}

		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			device->writeable = 0;
			fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		kfree(device->name);
		kfree(device);
	}

	if (fs_devices->seed) {
		fs_devices = fs_devices->seed;
		goto again;
	}

	fs_devices->latest_bdev = latest_bdev;
	fs_devices->latest_devid = latest_devid;
	fs_devices->latest_trans = latest_transid;

	mutex_unlock(&uuid_mutex);
}
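/*
 * Teardown of a btrfs_device is split in two: free_device() is the RCU
 * callback, which only queues __free_device() onto a workqueue, because
 * blkdev_put() may sleep and must not run in RCU callback context.
 */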
static void __free_device(struct work_struct *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, rcu_work);

	if (device->bdev)
		blkdev_put(device->bdev, device->mode);

	kfree(device->name);
	kfree(device);
}

static void free_device(struct rcu_head *head)
{
	struct btrfs_device *device;

	device = container_of(head, struct btrfs_device, rcu);

	INIT_WORK(&device->rcu_work, __free_device);
	schedule_work(&device->rcu_work);
}
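/*
 * Drop one open reference on @fs_devices; on the last close, replace
 * every device with a bdev-less clone (so RCU readers still see a valid
 * list entry) and free the originals via call_rcu().
 */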
static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	if (--fs_devices->opened > 0)
		return 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		struct btrfs_device *new_device;

		if (device->bdev)
			fs_devices->open_devices--;

		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			fs_devices->rw_devices--;
		}

		if (device->can_discard)
			fs_devices->num_can_discard--;

		new_device = kmalloc(sizeof(*new_device), GFP_NOFS);
		BUG_ON(!new_device); /* -ENOMEM */
		memcpy(new_device, device, sizeof(*new_device));
		new_device->name = kstrdup(device->name, GFP_NOFS);
		BUG_ON(device->name && !new_device->name); /* -ENOMEM */
		new_device->bdev = NULL;
		new_device->writeable = 0;
		new_device->in_fs_metadata = 0;
		new_device->can_discard = 0;
		list_replace_rcu(&device->dev_list, &new_device->dev_list);

		call_rcu(&device->rcu, free_device);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = 0;

	return 0;
}
int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_fs_devices *seed_devices = NULL;
	int ret;

	mutex_lock(&uuid_mutex);
	ret = __btrfs_close_devices(fs_devices);
	if (!fs_devices->opened) {
		seed_devices = fs_devices->seed;
		fs_devices->seed = NULL;
	}
	mutex_unlock(&uuid_mutex);

	while (seed_devices) {
		fs_devices = seed_devices;
		seed_devices = fs_devices->seed;
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
	return ret;
}
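/*
 * Open every registered device of @fs_devices, validate its super block
 * against the expected devid/uuid, and track the device with the highest
 * generation so it can serve as latest_bdev.  Fails only if no device
 * could be opened at all.
 */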
static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
				fmode_t flags, void *holder)
{
	struct request_queue *q;
	struct block_device *bdev;
	struct list_head *head = &fs_devices->devices;
	struct btrfs_device *device;
	struct block_device *latest_bdev = NULL;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	u64 latest_devid = 0;
	u64 latest_transid = 0;
	u64 devid;
	int seeding = 1;
	int ret = 0;

	flags |= FMODE_EXCL;

	list_for_each_entry(device, head, dev_list) {
		if (device->bdev)
			continue;
		if (!device->name)
			continue;

		bdev = blkdev_get_by_path(device->name, flags, holder);
		if (IS_ERR(bdev)) {
			printk(KERN_INFO "open %s failed\n", device->name);
			goto error;
		}
		filemap_write_and_wait(bdev->bd_inode->i_mapping);
		invalidate_bdev(bdev);
		set_blocksize(bdev, 4096);

		bh = btrfs_read_dev_super(bdev);
		if (!bh)
			goto error_close;

		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		if (devid != device->devid)
			goto error_brelse;

		if (memcmp(device->uuid, disk_super->dev_item.uuid,
			   BTRFS_UUID_SIZE))
			goto error_brelse;

		device->generation = btrfs_super_generation(disk_super);
		if (!latest_transid || device->generation > latest_transid) {
			latest_devid = devid;
			latest_transid = device->generation;
			latest_bdev = bdev;
		}

		if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
			device->writeable = 0;
		} else {
			device->writeable = !bdev_read_only(bdev);
			seeding = 0;
		}

		q = bdev_get_queue(bdev);
		if (blk_queue_discard(q)) {
			device->can_discard = 1;
			fs_devices->num_can_discard++;
		}

		device->bdev = bdev;
		device->in_fs_metadata = 0;
		device->mode = flags;

		if (!blk_queue_nonrot(bdev_get_queue(bdev)))
			fs_devices->rotating = 1;

		fs_devices->open_devices++;
		if (device->writeable) {
			fs_devices->rw_devices++;
			list_add(&device->dev_alloc_list,
				 &fs_devices->alloc_list);
		}
		brelse(bh);
		continue;

error_brelse:
		brelse(bh);
error_close:
		blkdev_put(bdev, flags);
error:
		continue;
	}
	if (fs_devices->open_devices == 0) {
		ret = -EINVAL;
		goto out;
	}
	fs_devices->seeding = seeding;
	fs_devices->opened = 1;
	fs_devices->latest_bdev = latest_bdev;
	fs_devices->latest_devid = latest_devid;
	fs_devices->latest_trans = latest_transid;
	fs_devices->total_rw_bytes = 0;
out:
	return ret;
}
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	mutex_lock(&uuid_mutex);
	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		ret = __btrfs_open_devices(fs_devices, flags, holder);
	}
	mutex_unlock(&uuid_mutex);
	return ret;
}
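/*
 * Read the super block of a single device (as done by "btrfs device
 * scan") and register it in the global fs_uuids list via
 * device_list_add().
 */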
int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
			  struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_super_block *disk_super;
	struct block_device *bdev;
	struct buffer_head *bh;
	int ret;
	u64 devid;
	u64 transid;

	flags |= FMODE_EXCL;
	bdev = blkdev_get_by_path(path, flags, holder);

	if (IS_ERR(bdev)) {
		ret = PTR_ERR(bdev);
		goto error;
	}

	mutex_lock(&uuid_mutex);
	ret = set_blocksize(bdev, 4096);
	if (ret)
		goto error_close;
	bh = btrfs_read_dev_super(bdev);
	if (!bh) {
		ret = -EINVAL;
		goto error_close;
	}
	disk_super = (struct btrfs_super_block *)bh->b_data;
	devid = btrfs_stack_device_id(&disk_super->dev_item);
	transid = btrfs_super_generation(disk_super);
	if (disk_super->label[0])
		printk(KERN_INFO "device label %s ", disk_super->label);
	else
		printk(KERN_INFO "device fsid %pU ", disk_super->fsid);
	printk(KERN_CONT "devid %llu transid %llu %s\n",
	       (unsigned long long)devid, (unsigned long long)transid, path);
	ret = device_list_add(path, disk_super, devid, fs_devices_ret);

	brelse(bh);
error_close:
	mutex_unlock(&uuid_mutex);
	blkdev_put(bdev, flags);
error:
	return ret;
}
/* helper to account the used device space in the range */
int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
				   u64 end, u64 *length)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 extent_end;
	int ret;
	int slot;
	struct extent_buffer *l;

	*length = 0;

	if (start >= device->total_bytes)
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = 2;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
			goto next;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (key.offset <= start && extent_end > end) {
			*length = end - start + 1;
			break;
		} else if (key.offset <= start && extent_end > start)
			*length += extent_end - start;
		else if (key.offset > start && extent_end <= end)
			*length += extent_end - key.offset;
		else if (key.offset > start && key.offset <= end) {
			*length += end - key.offset + 1;
			break;
		} else if (key.offset > end)
			break;

next:
		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * find_free_dev_extent - find free space in the specified device
 * @device:	the device which we search the free space in
 * @num_bytes:	the size of the free space that we need
 * @start:	store the start of the free space.
 * @len:	the size of the free space that we find, or the size of the
 *		max free space if we don't find suitable free space
 *
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 *
 * @start is used to store the start of the free space if we find suitable
 * free space.  But if we don't find suitable free space, it will be used to
 * store the start position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 */
int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_start;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;

	/* FIXME use last free of some kind */

	/* we don't want to overwrite the superblock on the drive,
	 * so we make sure to start at an offset of at least 1MB
	 */
	search_start = max(root->fs_info->alloc_start, 1024ull * 1024);

	max_hole_start = search_start;
	max_hole_size = 0;
	hole_size = 0;

	if (search_start >= search_end) {
		ret = -ENOSPC;
		goto error;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto error;
	}
	path->reada = 2;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than what we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size.  Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start)
		hole_size = search_end - search_start;

	if (hole_size > max_hole_size) {
		max_hole_start = search_start;
		max_hole_size = hole_size;
	}

	if (hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

out:
	btrfs_free_path(path);
error:
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}
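/*
 * Find the dev extent covering @start on @device, return its length to
 * the free_chunk accounting, and delete the item from the dev tree.
 */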
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_device *device,
				 u64 start)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	} else {
		btrfs_error(root->fs_info, ret, "Slot search failed");
		goto out;
	}

	if (device->bytes_used > 0) {
		u64 len = btrfs_dev_extent_length(leaf, extent);
		device->bytes_used -= len;
		spin_lock(&root->fs_info->free_chunk_lock);
		root->fs_info->free_chunk_space += len;
		spin_unlock(&root->fs_info->free_chunk_lock);
	}
	ret = btrfs_del_item(trans, root, path);
	if (ret) {
		btrfs_error(root->fs_info, ret,
			    "Failed to remove dev extent item");
	}
out:
	btrfs_free_path(path);
	return ret;
}
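/*
 * Insert a dev extent item mapping [start, start + num_bytes) of
 * @device to the chunk at @chunk_offset.
 */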
int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
			   struct btrfs_device *device,
			   u64 chunk_tree, u64 chunk_objectid,
			   u64 chunk_offset, u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	WARN_ON(!device->in_fs_metadata);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
		    BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}
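/*
 * Find the offset right after the highest chunk item for @objectid,
 * i.e. the logical address where the next allocated chunk may be
 * placed.
 */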
static noinline int find_next_chunk(struct btrfs_root *root,
				    u64 objectid, u64 *offset)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_key found_key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0); /* Corruption */

	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
	if (ret) {
		*offset = 0;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != objectid)
			*offset = 0;
		else {
			chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
					       struct btrfs_chunk);
			*offset = found_key.offset +
				btrfs_chunk_length(path->nodes[0], chunk);
		}
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
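/*
 * Look up the highest existing devid in the chunk tree and return the
 * next free one in @objectid.
 */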
static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0); /* Corruption */

	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*objectid = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*objectid = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = (unsigned long)btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
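/*
 * Delete the dev item for @device from the chunk tree in its own
 * transaction; the counterpart of btrfs_add_device().
 */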
static int btrfs_rm_dev_item(struct btrfs_root *root,
			     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;
	lock_chunks(root);

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_free_path(path);
	unlock_chunks(root);
	btrfs_commit_transaction(trans, root);
	return ret;
}
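/*
 * Handle device removal: enforce the minimum device count for the RAID
 * profiles in use, locate the victim (the string "missing" selects a
 * device without a bdev), shrink it to zero, drop its items and list
 * entries, and finally wipe the super block magic so the device is no
 * longer detected as btrfs.
 */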
int btrfs_rm_device(struct btrfs_root *root, char *device_path)
{
	struct btrfs_device *device;
	struct btrfs_device *next_device;
	struct block_device *bdev;
	struct buffer_head *bh = NULL;
	struct btrfs_super_block *disk_super;
	struct btrfs_fs_devices *cur_devices;
	u64 all_avail;
	u64 devid;
	u64 num_devices;
	u8 *dev_uuid;
	int ret = 0;
	bool clear_super = false;

	mutex_lock(&uuid_mutex);

	all_avail = root->fs_info->avail_data_alloc_bits |
		root->fs_info->avail_system_alloc_bits |
		root->fs_info->avail_metadata_alloc_bits;

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
	    root->fs_info->fs_devices->num_devices <= 4) {
		printk(KERN_ERR "btrfs: unable to go below four devices "
		       "on raid10\n");
		ret = -EINVAL;
		goto out;
	}

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
	    root->fs_info->fs_devices->num_devices <= 2) {
		printk(KERN_ERR "btrfs: unable to go below two "
		       "devices on raid1\n");
		ret = -EINVAL;
		goto out;
	}

	if (strcmp(device_path, "missing") == 0) {
		struct list_head *devices;
		struct btrfs_device *tmp;

		device = NULL;
		devices = &root->fs_info->fs_devices->devices;
		/*
		 * It is safe to read the devices since the volume_mutex
		 * is held.
		 */
		list_for_each_entry(tmp, devices, dev_list) {
			if (tmp->in_fs_metadata && !tmp->bdev) {
				device = tmp;
				break;
			}
		}
		bdev = NULL;
		bh = NULL;
		disk_super = NULL;
		if (!device) {
			printk(KERN_ERR "btrfs: no missing devices found to "
			       "remove\n");
			goto out;
		}
	} else {
		bdev = blkdev_get_by_path(device_path, FMODE_READ | FMODE_EXCL,
					  root->fs_info->bdev_holder);
		if (IS_ERR(bdev)) {
			ret = PTR_ERR(bdev);
			goto out;
		}

		set_blocksize(bdev, 4096);
		invalidate_bdev(bdev);
		bh = btrfs_read_dev_super(bdev);
		if (!bh) {
			ret = -EINVAL;
			goto error_close;
		}
		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		dev_uuid = disk_super->dev_item.uuid;
		device = btrfs_find_device(root, devid, dev_uuid,
					   disk_super->fsid);
		if (!device) {
			ret = -ENOENT;
			goto error_brelse;
		}
	}

	if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
		printk(KERN_ERR "btrfs: unable to remove the only writeable "
		       "device\n");
		ret = -EINVAL;
		goto error_brelse;
	}

	if (device->writeable) {
		lock_chunks(root);
		list_del_init(&device->dev_alloc_list);
		unlock_chunks(root);
		root->fs_info->fs_devices->rw_devices--;
		clear_super = true;
	}

	ret = btrfs_shrink_device(device, 0);
	if (ret)
		goto error_undo;

	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
	if (ret)
		goto error_undo;

	spin_lock(&root->fs_info->free_chunk_lock);
	root->fs_info->free_chunk_space = device->total_bytes -
		device->bytes_used;
	spin_unlock(&root->fs_info->free_chunk_lock);

	device->in_fs_metadata = 0;
	btrfs_scrub_cancel_dev(root, device);

	/*
	 * the device list mutex makes sure that we don't change
	 * the device list while someone else is writing out all
	 * the device supers.
	 */

	cur_devices = device->fs_devices;
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_del_rcu(&device->dev_list);

	device->fs_devices->num_devices--;

	if (device->missing)
		root->fs_info->fs_devices->missing_devices--;

	next_device = list_entry(root->fs_info->fs_devices->devices.next,
				 struct btrfs_device, dev_list);
	if (device->bdev == root->fs_info->sb->s_bdev)
		root->fs_info->sb->s_bdev = next_device->bdev;
	if (device->bdev == root->fs_info->fs_devices->latest_bdev)
		root->fs_info->fs_devices->latest_bdev = next_device->bdev;

	if (device->bdev)
		device->fs_devices->open_devices--;

	call_rcu(&device->rcu, free_device);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
	btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);

	if (cur_devices->open_devices == 0) {
		struct btrfs_fs_devices *fs_devices;
		fs_devices = root->fs_info->fs_devices;
		while (fs_devices) {
			if (fs_devices->seed == cur_devices)
				break;
			fs_devices = fs_devices->seed;
		}
		fs_devices->seed = cur_devices->seed;
		cur_devices->seed = NULL;
		lock_chunks(root);
		__btrfs_close_devices(cur_devices);
		unlock_chunks(root);
		free_fs_devices(cur_devices);
	}

	/*
	 * at this point, the device is zero sized.  We want to
	 * remove it from the devices list and zero out the old super
	 */
	if (clear_super) {
		/* make sure this device isn't detected as part of
		 * the FS anymore
		 */
		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
		set_buffer_dirty(bh);
		sync_dirty_buffer(bh);
	}

	ret = 0;

error_brelse:
	brelse(bh);
error_close:
	if (bdev)
		blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
out:
	mutex_unlock(&uuid_mutex);
	return ret;
error_undo:
	if (device->writeable) {
		lock_chunks(root);
		list_add(&device->dev_alloc_list,
			 &root->fs_info->fs_devices->alloc_list);
		unlock_chunks(root);
		root->fs_info->fs_devices->rw_devices++;
	}
	goto error_brelse;
}
/*
 * does all the dirty work required for changing the file system's UUID.
 */
static int btrfs_prepare_sprout(struct btrfs_root *root)
{
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	struct btrfs_fs_devices *old_devices;
	struct btrfs_fs_devices *seed_devices;
	struct btrfs_super_block *disk_super = root->fs_info->super_copy;
	struct btrfs_device *device;
	u64 super_flags;

	BUG_ON(!mutex_is_locked(&uuid_mutex));
	if (!fs_devices->seeding)
		return -EINVAL;

	seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
	if (!seed_devices)
		return -ENOMEM;

	old_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(old_devices)) {
		kfree(seed_devices);
		return PTR_ERR(old_devices);
	}

	list_add(&old_devices->list, &fs_uuids);

	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
	seed_devices->opened = 1;
	INIT_LIST_HEAD(&seed_devices->devices);
	INIT_LIST_HEAD(&seed_devices->alloc_list);
	mutex_init(&seed_devices->device_list_mutex);

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
			     synchronize_rcu);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
	list_for_each_entry(device, &seed_devices->devices, dev_list) {
		device->fs_devices = seed_devices;
	}

	fs_devices->seeding = 0;
	fs_devices->num_devices = 0;
	fs_devices->open_devices = 0;
	fs_devices->seed = seed_devices;

	generate_random_uuid(fs_devices->fsid);
	memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	super_flags = btrfs_super_flags(disk_super) &
		      ~BTRFS_SUPER_FLAG_SEEDING;
	btrfs_set_super_flags(disk_super, super_flags);

	return 0;
}
/*
 * store the expected generation for seed devices in device items.
 */
static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dev_item *dev_item;
	struct btrfs_device *device;
	struct btrfs_key key;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];
	u64 devid;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	root = root->fs_info->chunk_root;
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = BTRFS_DEV_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
		if (ret < 0)
			goto error;

		leaf = path->nodes[0];
next_slot:
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret > 0)
				break;
			if (ret < 0)
				goto error;
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
			btrfs_release_path(path);
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
		    key.type != BTRFS_DEV_ITEM_KEY)
			break;

		dev_item = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_dev_item);
		devid = btrfs_device_id(leaf, dev_item);
		read_extent_buffer(leaf, dev_uuid,
				   (unsigned long)btrfs_device_uuid(dev_item),
				   BTRFS_UUID_SIZE);
		read_extent_buffer(leaf, fs_uuid,
				   (unsigned long)btrfs_device_fsid(dev_item),
				   BTRFS_UUID_SIZE);
		device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
		BUG_ON(!device); /* Logic error */

		if (device->fs_devices->seeding) {
			btrfs_set_device_generation(leaf, dev_item,
						    device->generation);
			btrfs_mark_buffer_dirty(leaf);
		}

		path->slots[0]++;
		goto next_slot;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
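/*
 * Add a new device to a mounted filesystem ("btrfs device add").  If the
 * fs is a seed, sprout a new writable fs on top of it first.  All the
 * in-memory and on-disk bookkeeping (dev item, super block totals, space
 * accounting) is done inside one transaction.
 */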
int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
{
	struct request_queue *q;
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device;
	struct block_device *bdev;
	struct list_head *devices;
	struct super_block *sb = root->fs_info->sb;
	u64 total_bytes;
	int seeding_dev = 0;
	int ret = 0;

	if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
		return -EINVAL;

	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
				  root->fs_info->bdev_holder);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	if (root->fs_info->fs_devices->seeding) {
		seeding_dev = 1;
		down_write(&sb->s_umount);
		mutex_lock(&uuid_mutex);
	}

	filemap_write_and_wait(bdev->bd_inode->i_mapping);

	devices = &root->fs_info->fs_devices->devices;
	/*
	 * we have the volume lock, so we don't need the extra
	 * device list mutex while reading the list here.
	 */
	list_for_each_entry(device, devices, dev_list) {
		if (device->bdev == bdev) {
			ret = -EEXIST;
			goto error;
		}
	}

	device = kzalloc(sizeof(*device), GFP_NOFS);
	if (!device) {
		/* we can safely leave the fs_devices entry around */
		ret = -ENOMEM;
		goto error;
	}

	device->name = kstrdup(device_path, GFP_NOFS);
	if (!device->name) {
		kfree(device);
		ret = -ENOMEM;
		goto error;
	}

	ret = find_next_devid(root, &device->devid);
	if (ret) {
		kfree(device->name);
		kfree(device);
		goto error;
	}

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		kfree(device->name);
		kfree(device);
		ret = PTR_ERR(trans);
		goto error;
	}

	lock_chunks(root);

	q = bdev_get_queue(bdev);
	if (blk_queue_discard(q))
		device->can_discard = 1;
	device->writeable = 1;
	device->work.func = pending_bios_fn;
	generate_random_uuid(device->uuid);
	spin_lock_init(&device->io_lock);
	device->generation = trans->transid;
	device->io_width = root->sectorsize;
	device->io_align = root->sectorsize;
	device->sector_size = root->sectorsize;
	device->total_bytes = i_size_read(bdev->bd_inode);
	device->disk_total_bytes = device->total_bytes;
	device->dev_root = root->fs_info->dev_root;
	device->bdev = bdev;
	device->in_fs_metadata = 1;
	device->mode = FMODE_EXCL;
	set_blocksize(device->bdev, 4096);

	if (seeding_dev) {
		sb->s_flags &= ~MS_RDONLY;
		ret = btrfs_prepare_sprout(root);
		BUG_ON(ret); /* -ENOMEM */
	}

	device->fs_devices = root->fs_info->fs_devices;

	/*
	 * we don't want write_supers to jump in here with our device
	 * half added
	 */
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
	list_add(&device->dev_alloc_list,
		 &root->fs_info->fs_devices->alloc_list);
	root->fs_info->fs_devices->num_devices++;
	root->fs_info->fs_devices->open_devices++;
	root->fs_info->fs_devices->rw_devices++;
	if (device->can_discard)
		root->fs_info->fs_devices->num_can_discard++;
	root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;

	spin_lock(&root->fs_info->free_chunk_lock);
	root->fs_info->free_chunk_space += device->total_bytes;
	spin_unlock(&root->fs_info->free_chunk_lock);

	if (!blk_queue_nonrot(bdev_get_queue(bdev)))
		root->fs_info->fs_devices->rotating = 1;

	total_bytes = btrfs_super_total_bytes(root->fs_info->super_copy);
	btrfs_set_super_total_bytes(root->fs_info->super_copy,
				    total_bytes + device->total_bytes);

	total_bytes = btrfs_super_num_devices(root->fs_info->super_copy);
	btrfs_set_super_num_devices(root->fs_info->super_copy,
				    total_bytes + 1);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	if (seeding_dev) {
		ret = init_first_rw_device(trans, root, device);
		if (ret)
			goto error_trans;
		ret = btrfs_finish_sprout(trans, root);
		if (ret)
			goto error_trans;
	} else {
		ret = btrfs_add_device(trans, root, device);
		if (ret)
			goto error_trans;
	}

	/*
	 * we've got more storage, clear any full flags on the space
	 * infos
	 */
	btrfs_clear_space_info_full(root->fs_info);

	unlock_chunks(root);
	ret = btrfs_commit_transaction(trans, root);

	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);

		if (ret) /* transaction commit */
			return ret;

		ret = btrfs_relocate_sys_chunks(root);
		if (ret < 0)
			btrfs_error(root->fs_info, ret,
				    "Failed to relocate sys chunks after "
				    "device initialization. This can be fixed "
				    "using the \"btrfs balance\" command.");
	}

	return ret;

error_trans:
	unlock_chunks(root);
	btrfs_abort_transaction(trans, root, ret);
	btrfs_end_transaction(trans, root);
	kfree(device->name);
	kfree(device);
error:
	blkdev_put(bdev, FMODE_EXCL);
	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);
	}
	return ret;
}
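/*
 * Write the current in-memory state of @device back into its dev item
 * in the chunk tree.
 */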
static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
					struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = device->dev_root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_mark_buffer_dirty(leaf);

out:
	btrfs_free_path(path);
	return ret;
}
static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
			       struct btrfs_device *device, u64 new_size)
{
	struct btrfs_super_block *super_copy =
		device->dev_root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 diff = new_size - device->total_bytes;

	if (!device->writeable)
		return -EACCES;
	if (new_size <= device->total_bytes)
		return -EINVAL;

	btrfs_set_super_total_bytes(super_copy, old_total + diff);
	device->fs_devices->total_rw_bytes += diff;

	device->total_bytes = new_size;
	device->disk_total_bytes = new_size;
	btrfs_clear_space_info_full(device->dev_root->fs_info);

	return btrfs_update_device(trans, device);
}

int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size)
{
	int ret;
	lock_chunks(device->dev_root);
	ret = __btrfs_grow_device(trans, device, new_size);
	unlock_chunks(device->dev_root);
	return ret;
}
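/*
 * Delete the chunk item for @chunk_offset from the chunk tree.
 */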
static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    u64 chunk_tree, u64 chunk_objectid,
			    u64 chunk_offset)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	root = root->fs_info->chunk_root;
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = chunk_objectid;
	key.offset = chunk_offset;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	else if (ret > 0) { /* Logic error or corruption */
		btrfs_error(root->fs_info, -ENOENT,
			    "Failed lookup while freeing chunk.");
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret < 0)
		btrfs_error(root->fs_info, ret,
			    "Failed to delete chunk item.");
out:
	btrfs_free_path(path);
	return ret;
}
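/*
 * Remove the copy of a system chunk from the sys_chunk_array in the
 * super block, shifting the remaining entries down.
 */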
static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
			chunk_offset)
{
	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)(ptr + len);
			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
			len += btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
		if (key.objectid == chunk_objectid &&
		    key.offset == chunk_offset) {
			memmove(ptr, ptr + len, array_size - (cur + len));
			array_size -= len;
			btrfs_set_super_sys_array_size(super_copy, array_size);
		} else {
			ptr += len;
			cur += len;
		}
	}
	return ret;
}
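/*
 * Relocate all extents of a chunk elsewhere, then delete its device
 * extents, chunk item, block group and extent mapping.
 */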
static int btrfs_relocate_chunk(struct btrfs_root *root,
				u64 chunk_tree, u64 chunk_objectid,
				u64 chunk_offset)
{
	struct extent_map_tree *em_tree;
	struct btrfs_root *extent_root;
	struct btrfs_trans_handle *trans;
	struct extent_map *em;
	struct map_lookup *map;
	int ret;
	int i;

	root = root->fs_info->chunk_root;
	extent_root = root->fs_info->extent_root;
	em_tree = &root->fs_info->mapping_tree.map_tree;

	ret = btrfs_can_relocate(extent_root, chunk_offset);
	if (ret)
		return -ENOSPC;

	/* step one, relocate all the extents inside this chunk */
	ret = btrfs_relocate_block_group(extent_root, chunk_offset);
	if (ret)
		return ret;

	trans = btrfs_start_transaction(root, 0);
	BUG_ON(IS_ERR(trans));

	lock_chunks(root);

	/*
	 * step two, delete the device extents and the
	 * chunk tree entries
	 */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	read_unlock(&em_tree->lock);

	BUG_ON(!em || em->start > chunk_offset ||
	       em->start + em->len < chunk_offset);
	map = (struct map_lookup *)em->bdev;

	for (i = 0; i < map->num_stripes; i++) {
		ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
					    map->stripes[i].physical);
		BUG_ON(ret);

		if (map->stripes[i].dev) {
			ret = btrfs_update_device(trans, map->stripes[i].dev);
			BUG_ON(ret);
		}
	}
	ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
			       chunk_offset);
	BUG_ON(ret);

	trace_btrfs_chunk_free(root, map, chunk_offset, em->len);

	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
		BUG_ON(ret);
	}

	ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
	BUG_ON(ret);

	write_lock(&em_tree->lock);
	remove_extent_mapping(em_tree, em);
	write_unlock(&em_tree->lock);

	kfree(map);
	em->bdev = NULL;

	/* once for the tree */
	free_extent_map(em);
	/* once for us */
	free_extent_map(em);

	unlock_chunks(root);
	btrfs_end_transaction(trans, root);
	return 0;
}
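/*
 * Walk the chunk tree backwards and relocate every SYSTEM chunk; chunks
 * that fail with -ENOSPC are retried once after the first pass has made
 * room.
 */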
static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
{
	struct btrfs_root *chunk_root = root->fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_chunk *chunk;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u64 chunk_tree = chunk_root->root_key.objectid;
	u64 chunk_type;
	bool retried = false;
	int failed = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

again:
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0)
			goto error;
		BUG_ON(ret == 0); /* Corruption */

		ret = btrfs_previous_item(chunk_root, path, key.objectid,
					  key.type);
		if (ret < 0)
			goto error;
		if (ret > 0)
			break;

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		chunk = btrfs_item_ptr(leaf, path->slots[0],
				       struct btrfs_chunk);
		chunk_type = btrfs_chunk_type(leaf, chunk);
		btrfs_release_path(path);

		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
			ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
						   found_key.objectid,
						   found_key.offset);
			if (ret == -ENOSPC)
				failed++;
			else if (ret)
				BUG();
		}

		if (found_key.offset == 0)
			break;
		key.offset = found_key.offset - 1;
	}
	ret = 0;
	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
	} else if (failed && retried) {
		WARN_ON(1);
		ret = -ENOSPC;
	}
error:
	btrfs_free_path(path);
	return ret;
}
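/*
 * The balance item persists the restriper state in the tree root so an
 * interrupted balance can be resumed after a remount;
 * insert_balance_item() and del_balance_item() each run in their own
 * transaction.
 */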
static int insert_balance_item(struct btrfs_root *root,
			       struct btrfs_balance_control *bctl)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_balance_item *item;
	struct btrfs_disk_balance_args disk_bargs;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	int ret, err;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	key.objectid = BTRFS_BALANCE_OBJECTID;
	key.type = BTRFS_BALANCE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);

	memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));

	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
	btrfs_set_balance_data(leaf, item, &disk_bargs);
	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
	btrfs_set_balance_meta(leaf, item, &disk_bargs);
	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
	btrfs_set_balance_sys(leaf, item, &disk_bargs);

	btrfs_set_balance_flags(leaf, item, bctl->flags);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	err = btrfs_commit_transaction(trans, root);
	if (err && !ret)
		ret = err;
	return ret;
}
static int del_balance_item(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret, err;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	key.objectid = BTRFS_BALANCE_OBJECTID;
	key.type = BTRFS_BALANCE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_free_path(path);
	err = btrfs_commit_transaction(trans, root);
	if (err && !ret)
		ret = err;
	return ret;
}
/*
 * This is a heuristic used to reduce the number of chunks balanced on
 * resume after balance was interrupted.
 */
static void update_balance_args(struct btrfs_balance_control *bctl)
{
	/*
	 * Turn on soft mode for chunk types that were being converted.
	 */
	if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
	if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;

	/*
	 * Turn on usage filter if it is not already used.  The idea is
	 * that chunks that we have already balanced should be
	 * reasonably full.  Don't do it for chunks that are being
	 * converted - that will keep us from relocating unconverted
	 * (albeit full) chunks.
	 */
	if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->data.usage = 90;
	}
	if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->sys.usage = 90;
	}
	if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->meta.usage = 90;
	}
}
/*
 * Should be called with both balance and volume mutexes held to
 * serialize other volume operations (add_dev/rm_dev/resize) with
 * restriper.  Same goes for unset_balance_control.
 */
static void set_balance_control(struct btrfs_balance_control *bctl)
{
	struct btrfs_fs_info *fs_info = bctl->fs_info;

	BUG_ON(fs_info->balance_ctl);

	spin_lock(&fs_info->balance_lock);
	fs_info->balance_ctl = bctl;
	spin_unlock(&fs_info->balance_lock);
}

static void unset_balance_control(struct btrfs_fs_info *fs_info)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;

	BUG_ON(!fs_info->balance_ctl);

	spin_lock(&fs_info->balance_lock);
	fs_info->balance_ctl = NULL;
	spin_unlock(&fs_info->balance_lock);

	kfree(bctl);
}
/*
 * Balance filters.  Return 1 if chunk should be filtered out
 * (should not be balanced).
 */
static int chunk_profiles_filter(u64 chunk_type,
				 struct btrfs_balance_args *bargs)
{
	chunk_type = chunk_to_extended(chunk_type) &
				BTRFS_EXTENDED_PROFILE_MASK;

	if (bargs->profiles & chunk_type)
		return 0;

	return 1;
}

static u64 div_factor_fine(u64 num, int factor)
{
	if (factor <= 0)
		return 0;
	if (factor >= 100)
		return num;

	num *= factor;
	do_div(num, 100);
	return num;
}
static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
			      struct btrfs_balance_args *bargs)
{
	struct btrfs_block_group_cache *cache;
	u64 chunk_used, user_thresh;
	int ret = 1;

	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
	chunk_used = btrfs_block_group_used(&cache->item);

	user_thresh = div_factor_fine(cache->key.offset, bargs->usage);
	if (chunk_used < user_thresh)
		ret = 0;

	btrfs_put_block_group(cache);
	return ret;
}
static int chunk_devid_filter(struct extent_buffer *leaf,
			      struct btrfs_chunk *chunk,
			      struct btrfs_balance_args *bargs)
{
	struct btrfs_stripe *stripe;
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	int i;

	for (i = 0; i < num_stripes; i++) {
		stripe = btrfs_stripe_nr(chunk, i);
		if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
			return 0;
	}

	return 1;
}
/* [pstart, pend) */
static int chunk_drange_filter(struct extent_buffer *leaf,
			       struct btrfs_chunk *chunk,
			       u64 chunk_offset,
			       struct btrfs_balance_args *bargs)
{
	struct btrfs_stripe *stripe;
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	u64 stripe_offset;
	u64 stripe_length;
	int factor;
	int i;

	if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
		return 0;

	if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
	     BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10))
		factor = 2;
	else
		factor = 1;
	factor = num_stripes / factor;

	for (i = 0; i < num_stripes; i++) {
		stripe = btrfs_stripe_nr(chunk, i);
		if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
			continue;

		stripe_offset = btrfs_stripe_offset(leaf, stripe);
		stripe_length = btrfs_chunk_length(leaf, chunk);
		do_div(stripe_length, factor);

		if (stripe_offset < bargs->pend &&
		    stripe_offset + stripe_length > bargs->pstart)
			return 0;
	}

	return 1;
}
/* [vstart, vend) */
static int chunk_vrange_filter(struct extent_buffer *leaf,
			       struct btrfs_chunk *chunk,
			       u64 chunk_offset,
			       struct btrfs_balance_args *bargs)
{
	if (chunk_offset < bargs->vend &&
	    chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
		/* at least part of the chunk is inside this vrange */
		return 0;

	return 1;
}

static int chunk_soft_convert_filter(u64 chunk_type,
				     struct btrfs_balance_args *bargs)
{
	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
		return 0;

	chunk_type = chunk_to_extended(chunk_type) &
				BTRFS_EXTENDED_PROFILE_MASK;

	if (bargs->target == chunk_type)
		return 1;

	return 0;
}
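/*
 * Apply all configured balance filters to one chunk.  Returns 1 if the
 * chunk should be relocated, 0 if some filter excluded it.
 */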
static int should_balance_chunk(struct btrfs_root *root,
				struct extent_buffer *leaf,
				struct btrfs_chunk *chunk, u64 chunk_offset)
{
	struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
	struct btrfs_balance_args *bargs = NULL;
	u64 chunk_type = btrfs_chunk_type(leaf, chunk);

	/* type filter */
	if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
	      (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
		return 0;
	}

	if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
		bargs = &bctl->data;
	else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
		bargs = &bctl->sys;
	else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
		bargs = &bctl->meta;

	/* profiles filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
	    chunk_profiles_filter(chunk_type, bargs)) {
		return 0;
	}

	/* usage filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) {
		return 0;
	}

	/* devid filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
	    chunk_devid_filter(leaf, chunk, bargs)) {
		return 0;
	}

	/* drange filter, makes sense only with devid filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
	    chunk_drange_filter(leaf, chunk, chunk_offset, bargs)) {
		return 0;
	}

	/* vrange filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
	    chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
		return 0;
	}

	/* soft profile changing mode */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
	    chunk_soft_convert_filter(chunk_type, bargs)) {
		return 0;
	}

	return 1;
}
static u64 div_factor(u64 num, int factor)
{
	if (factor == 10)
		return num;
	num *= factor;
	do_div(num, 10);
	return num;
}
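/*
 * Main balance loop: shrink and regrow each device a little to free
 * some room, then walk the chunk tree backwards twice - once only
 * counting the chunks that pass the filters, then actually relocating
 * them.
 */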
static int __btrfs_balance(struct btrfs_fs_info *fs_info)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	struct btrfs_root *chunk_root = fs_info->chunk_root;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct list_head *devices;
	struct btrfs_device *device;
	u64 old_size;
	u64 size_to_free;
	struct btrfs_chunk *chunk;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_trans_handle *trans;
	struct extent_buffer *leaf;
	int slot;
	int ret;
	int enospc_errors = 0;
	bool counting = true;

	/* step one, make some room on all the devices */
	devices = &fs_info->fs_devices->devices;
	list_for_each_entry(device, devices, dev_list) {
		old_size = device->total_bytes;
		size_to_free = div_factor(old_size, 1);
		size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
		if (!device->writeable ||
		    device->total_bytes - device->bytes_used > size_to_free)
			continue;

		ret = btrfs_shrink_device(device, old_size - size_to_free);
		if (ret == -ENOSPC)
			break;
		BUG_ON(ret);

		trans = btrfs_start_transaction(dev_root, 0);
		BUG_ON(IS_ERR(trans));

		ret = btrfs_grow_device(trans, device, old_size);
		BUG_ON(ret);

		btrfs_end_transaction(trans, dev_root);
	}

	/* step two, relocate all the chunks */
	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto error;
	}

	/* zero out stat counters */
	spin_lock(&fs_info->balance_lock);
	memset(&bctl->stat, 0, sizeof(bctl->stat));
	spin_unlock(&fs_info->balance_lock);
again:
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
		    atomic_read(&fs_info->balance_cancel_req)) {
			ret = -ECANCELED;
			goto error;
		}

		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0)
			goto error;

		/*
		 * this shouldn't happen, it means the last relocate
		 * failed
		 */
		if (ret == 0)
			BUG(); /* FIXME break ? */

		ret = btrfs_previous_item(chunk_root, path, 0,
					  BTRFS_CHUNK_ITEM_KEY);
		if (ret) {
			ret = 0;
			break;
		}

		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.objectid != key.objectid)
			break;

		/* chunk zero is special */
		if (found_key.offset == 0)
			break;

		chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);

		if (!counting) {
			spin_lock(&fs_info->balance_lock);
			bctl->stat.considered++;
			spin_unlock(&fs_info->balance_lock);
		}

		ret = should_balance_chunk(chunk_root, leaf, chunk,
					   found_key.offset);
		btrfs_release_path(path);
		if (!ret)
			goto loop;

		if (counting) {
			spin_lock(&fs_info->balance_lock);
			bctl->stat.expected++;
			spin_unlock(&fs_info->balance_lock);
			goto loop;
		}

		ret = btrfs_relocate_chunk(chunk_root,
					   chunk_root->root_key.objectid,
					   found_key.objectid,
					   found_key.offset);
		if (ret && ret != -ENOSPC)
			goto error;
		if (ret == -ENOSPC) {
			enospc_errors++;
		} else {
			spin_lock(&fs_info->balance_lock);
			bctl->stat.completed++;
			spin_unlock(&fs_info->balance_lock);
		}
loop:
		key.offset = found_key.offset - 1;
	}

	if (counting) {
		btrfs_release_path(path);
		counting = false;
		goto again;
	}
error:
	btrfs_free_path(path);
	if (enospc_errors) {
		printk(KERN_INFO "btrfs: %d enospc errors during balance\n",
		       enospc_errors);
		if (!ret)
			ret = -ENOSPC;
	}

	return ret;
}
2638 * alloc_profile_is_valid - see if a given profile is valid and reduced
2639 * @flags: profile to validate
2640 * @extended: if true @flags is treated as an extended profile
2642 static int alloc_profile_is_valid(u64 flags, int extended)
2644 u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
2645 BTRFS_BLOCK_GROUP_PROFILE_MASK);
2647 flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
2649 /* 1) check that all other bits are zeroed */
2653 /* 2) see if profile is reduced */
2655 return !extended; /* "0" is valid for usual profiles */
2657 /* true if exactly one bit set */
2658 return (flags & (flags - 1)) == 0;
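/*
 * Editor's worked example (made-up bit values): "flags & (flags - 1)"
 * clears the lowest set bit, so it is zero iff at most one bit is set:
 *
 *	0b0100 & 0b0011 == 0      -> single profile bit, reduced
 *	0b0110 & 0b0101 == 0b0100 -> two profile bits, not reduced
 */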
2661 static inline int balance_need_close(struct btrfs_fs_info *fs_info)
2663 /* cancel requested || normal exit path */
2664 return atomic_read(&fs_info->balance_cancel_req) ||
2665 (atomic_read(&fs_info->balance_pause_req) == 0 &&
2666 atomic_read(&fs_info->balance_cancel_req) == 0);
2669 static void __cancel_balance(struct btrfs_fs_info *fs_info)
2673 unset_balance_control(fs_info);
2674 ret = del_balance_item(fs_info->tree_root);
2678 void update_ioctl_balance_args(struct btrfs_fs_info *fs_info, int lock,
2679 struct btrfs_ioctl_balance_args *bargs);
2682 * Should be called with both balance and volume mutexes held
2684 int btrfs_balance(struct btrfs_balance_control *bctl,
2685 struct btrfs_ioctl_balance_args *bargs)
2687 struct btrfs_fs_info *fs_info = bctl->fs_info;
2692 if (btrfs_fs_closing(fs_info) ||
2693 atomic_read(&fs_info->balance_pause_req) ||
2694 atomic_read(&fs_info->balance_cancel_req)) {
2699 allowed = btrfs_super_incompat_flags(fs_info->super_copy);
2700 if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
2704 * In case of mixed groups, both data and meta should be picked,
2705 * and identical options should be given for both of them.
2707 allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
2708 if (mixed && (bctl->flags & allowed)) {
2709 if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
2710 !(bctl->flags & BTRFS_BALANCE_METADATA) ||
2711 memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
2712 printk(KERN_ERR "btrfs: with mixed groups data and "
2713 "metadata balance options must be the same\n");
2719 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
2720 if (fs_info->fs_devices->num_devices == 1)
2721 allowed |= BTRFS_BLOCK_GROUP_DUP;
2722 else if (fs_info->fs_devices->num_devices < 4)
2723 allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
2725 allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
2726 BTRFS_BLOCK_GROUP_RAID10);
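/*
 * Editor's summary of the checks above: a convert target may only use
 * profiles the device count can satisfy:
 *
 *	1 device      single, DUP
 *	2-3 devices   single, RAID0, RAID1
 *	4+ devices    single, RAID0, RAID1, RAID10
 */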
2728 if ((bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2729 (!alloc_profile_is_valid(bctl->data.target, 1) ||
2730 (bctl->data.target & ~allowed))) {
2731 printk(KERN_ERR "btrfs: unable to start balance with target "
2732 "data profile %llu\n",
2733 (unsigned long long)bctl->data.target);
2737 if ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2738 (!alloc_profile_is_valid(bctl->meta.target, 1) ||
2739 (bctl->meta.target & ~allowed))) {
2740 printk(KERN_ERR "btrfs: unable to start balance with target "
2741 "metadata profile %llu\n",
2742 (unsigned long long)bctl->meta.target);
2746 if ((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2747 (!alloc_profile_is_valid(bctl->sys.target, 1) ||
2748 (bctl->sys.target & ~allowed))) {
2749 printk(KERN_ERR "btrfs: unable to start balance with target "
2750 "system profile %llu\n",
2751 (unsigned long long)bctl->sys.target);
2756 /* allow dup'ed data chunks only in mixed mode */
2757 if (!mixed && (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2758 (bctl->data.target & BTRFS_BLOCK_GROUP_DUP)) {
2759 printk(KERN_ERR "btrfs: dup for data is not allowed\n");
2764 /* allow reducing meta or sys integrity only if force is set */
2765 allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
2766 BTRFS_BLOCK_GROUP_RAID10;
2767 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2768 (fs_info->avail_system_alloc_bits & allowed) &&
2769 !(bctl->sys.target & allowed)) ||
2770 ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2771 (fs_info->avail_metadata_alloc_bits & allowed) &&
2772 !(bctl->meta.target & allowed))) {
2773 if (bctl->flags & BTRFS_BALANCE_FORCE) {
2774 printk(KERN_INFO "btrfs: force reducing metadata "
2777 printk(KERN_ERR "btrfs: balance will reduce metadata "
2778 "integrity, use force if you want this\n");
2784 ret = insert_balance_item(fs_info->tree_root, bctl);
2785 if (ret && ret != -EEXIST)
2788 if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
2789 BUG_ON(ret == -EEXIST);
2790 set_balance_control(bctl);
2792 BUG_ON(ret != -EEXIST);
2793 spin_lock(&fs_info->balance_lock);
2794 update_balance_args(bctl);
2795 spin_unlock(&fs_info->balance_lock);
2798 atomic_inc(&fs_info->balance_running);
2799 mutex_unlock(&fs_info->balance_mutex);
2801 ret = __btrfs_balance(fs_info);
2803 mutex_lock(&fs_info->balance_mutex);
2804 atomic_dec(&fs_info->balance_running);
2807 memset(bargs, 0, sizeof(*bargs));
2808 update_ioctl_balance_args(fs_info, 0, bargs);
2811 if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
2812 balance_need_close(fs_info)) {
2813 __cancel_balance(fs_info);
2816 wake_up(&fs_info->balance_wait_q);
2820 if (bctl->flags & BTRFS_BALANCE_RESUME)
2821 __cancel_balance(fs_info);
2827 static int balance_kthread(void *data)
2829 struct btrfs_balance_control *bctl =
2830 (struct btrfs_balance_control *)data;
2831 struct btrfs_fs_info *fs_info = bctl->fs_info;
2834 mutex_lock(&fs_info->volume_mutex);
2835 mutex_lock(&fs_info->balance_mutex);
2837 set_balance_control(bctl);
2839 if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
2840 printk(KERN_INFO "btrfs: force skipping balance\n");
2842 printk(KERN_INFO "btrfs: continuing balance\n");
2843 ret = btrfs_balance(bctl, NULL);
2846 mutex_unlock(&fs_info->balance_mutex);
2847 mutex_unlock(&fs_info->volume_mutex);
2851 int btrfs_recover_balance(struct btrfs_root *tree_root)
2853 struct task_struct *tsk;
2854 struct btrfs_balance_control *bctl;
2855 struct btrfs_balance_item *item;
2856 struct btrfs_disk_balance_args disk_bargs;
2857 struct btrfs_path *path;
2858 struct extent_buffer *leaf;
2859 struct btrfs_key key;
2862 path = btrfs_alloc_path();
2866 bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
2872 key.objectid = BTRFS_BALANCE_OBJECTID;
2873 key.type = BTRFS_BALANCE_ITEM_KEY;
2876 ret = btrfs_search_slot(NULL, tree_root, &key, path, 0, 0);
2879 if (ret > 0) { /* ret = -ENOENT; */
2884 leaf = path->nodes[0];
2885 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
2887 bctl->fs_info = tree_root->fs_info;
2888 bctl->flags = btrfs_balance_flags(leaf, item) | BTRFS_BALANCE_RESUME;
2890 btrfs_balance_data(leaf, item, &disk_bargs);
2891 btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
2892 btrfs_balance_meta(leaf, item, &disk_bargs);
2893 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
2894 btrfs_balance_sys(leaf, item, &disk_bargs);
2895 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
2897 tsk = kthread_run(balance_kthread, bctl, "btrfs-balance");
2906 btrfs_free_path(path);
2910 int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
2914 mutex_lock(&fs_info->balance_mutex);
2915 if (!fs_info->balance_ctl) {
2916 mutex_unlock(&fs_info->balance_mutex);
2920 if (atomic_read(&fs_info->balance_running)) {
2921 atomic_inc(&fs_info->balance_pause_req);
2922 mutex_unlock(&fs_info->balance_mutex);
2924 wait_event(fs_info->balance_wait_q,
2925 atomic_read(&fs_info->balance_running) == 0);
2927 mutex_lock(&fs_info->balance_mutex);
2928 /* we are good with balance_ctl ripped off from under us */
2929 BUG_ON(atomic_read(&fs_info->balance_running));
2930 atomic_dec(&fs_info->balance_pause_req);
2935 mutex_unlock(&fs_info->balance_mutex);
2939 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
2941 mutex_lock(&fs_info->balance_mutex);
2942 if (!fs_info->balance_ctl) {
2943 mutex_unlock(&fs_info->balance_mutex);
2947 atomic_inc(&fs_info->balance_cancel_req);
2949 * if we are running, just wait and return; the balance item is
2950 * deleted in btrfs_balance() in this case
2952 if (atomic_read(&fs_info->balance_running)) {
2953 mutex_unlock(&fs_info->balance_mutex);
2954 wait_event(fs_info->balance_wait_q,
2955 atomic_read(&fs_info->balance_running) == 0);
2956 mutex_lock(&fs_info->balance_mutex);
2958 /* __cancel_balance needs volume_mutex */
2959 mutex_unlock(&fs_info->balance_mutex);
2960 mutex_lock(&fs_info->volume_mutex);
2961 mutex_lock(&fs_info->balance_mutex);
2963 if (fs_info->balance_ctl)
2964 __cancel_balance(fs_info);
2966 mutex_unlock(&fs_info->volume_mutex);
2969 BUG_ON(fs_info->balance_ctl || atomic_read(&fs_info->balance_running));
2970 atomic_dec(&fs_info->balance_cancel_req);
2971 mutex_unlock(&fs_info->balance_mutex);
2976 * shrinking a device means finding all of the device extents past
2977 * the new size, and then following the back refs to the chunks.
2978 * The chunk relocation code actually frees the device extent
2980 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
2982 struct btrfs_trans_handle *trans;
2983 struct btrfs_root *root = device->dev_root;
2984 struct btrfs_dev_extent *dev_extent = NULL;
2985 struct btrfs_path *path;
2993 bool retried = false;
2994 struct extent_buffer *l;
2995 struct btrfs_key key;
2996 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
2997 u64 old_total = btrfs_super_total_bytes(super_copy);
2998 u64 old_size = device->total_bytes;
2999 u64 diff = device->total_bytes - new_size;
3001 if (new_size >= device->total_bytes)
3004 path = btrfs_alloc_path();
3012 device->total_bytes = new_size;
3013 if (device->writeable) {
3014 device->fs_devices->total_rw_bytes -= diff;
3015 spin_lock(&root->fs_info->free_chunk_lock);
3016 root->fs_info->free_chunk_space -= diff;
3017 spin_unlock(&root->fs_info->free_chunk_lock);
3019 unlock_chunks(root);
3022 key.objectid = device->devid;
3023 key.offset = (u64)-1;
3024 key.type = BTRFS_DEV_EXTENT_KEY;
3027 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3031 ret = btrfs_previous_item(root, path, 0, key.type);
3036 btrfs_release_path(path);
3041 slot = path->slots[0];
3042 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
3044 if (key.objectid != device->devid) {
3045 btrfs_release_path(path);
3049 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
3050 length = btrfs_dev_extent_length(l, dev_extent);
3052 if (key.offset + length <= new_size) {
3053 btrfs_release_path(path);
3057 chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
3058 chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
3059 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3060 btrfs_release_path(path);
3062 ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
3064 if (ret && ret != -ENOSPC)
3068 } while (key.offset-- > 0);
3070 if (failed && !retried) {
3074 } else if (failed && retried) {
3078 device->total_bytes = old_size;
3079 if (device->writeable)
3080 device->fs_devices->total_rw_bytes += diff;
3081 spin_lock(&root->fs_info->free_chunk_lock);
3082 root->fs_info->free_chunk_space += diff;
3083 spin_unlock(&root->fs_info->free_chunk_lock);
3084 unlock_chunks(root);
3088 /* Shrinking succeeded, else we would be at "done". */
3089 trans = btrfs_start_transaction(root, 0);
3090 if (IS_ERR(trans)) {
3091 ret = PTR_ERR(trans);
3097 device->disk_total_bytes = new_size;
3098 /* Now btrfs_update_device() will change the on-disk size. */
3099 ret = btrfs_update_device(trans, device);
3101 unlock_chunks(root);
3102 btrfs_end_transaction(trans, root);
3105 WARN_ON(diff > old_total);
3106 btrfs_set_super_total_bytes(super_copy, old_total - diff);
3107 unlock_chunks(root);
3108 btrfs_end_transaction(trans, root);
3110 btrfs_free_path(path);
3114 static int btrfs_add_system_chunk(struct btrfs_root *root,
3115 struct btrfs_key *key,
3116 struct btrfs_chunk *chunk, int item_size)
3118 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3119 struct btrfs_disk_key disk_key;
3123 array_size = btrfs_super_sys_array_size(super_copy);
3124 if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
3127 ptr = super_copy->sys_chunk_array + array_size;
3128 btrfs_cpu_key_to_disk(&disk_key, key);
3129 memcpy(ptr, &disk_key, sizeof(disk_key));
3130 ptr += sizeof(disk_key);
3131 memcpy(ptr, chunk, item_size);
3132 item_size += sizeof(disk_key);
3133 btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
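/*
 * Editor's sketch of the resulting superblock array layout: (key,
 * chunk) pairs are packed back to back, with chunk items of varying
 * size:
 *
 *	sys_chunk_array: | disk_key | chunk | disk_key | chunk | ...
 *
 * This is why item_size is bumped by sizeof(disk_key) before the new
 * array size is stored.
 */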
3138 * sort the devices in descending order by max_avail, total_avail
3140 static int btrfs_cmp_device_info(const void *a, const void *b)
3142 const struct btrfs_device_info *di_a = a;
3143 const struct btrfs_device_info *di_b = b;
3145 if (di_a->max_avail > di_b->max_avail)
3147 if (di_a->max_avail < di_b->max_avail)
3149 if (di_a->total_avail > di_b->total_avail)
3151 if (di_a->total_avail < di_b->total_avail)
3156 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3157 struct btrfs_root *extent_root,
3158 struct map_lookup **map_ret,
3159 u64 *num_bytes_out, u64 *stripe_size_out,
3160 u64 start, u64 type)
3162 struct btrfs_fs_info *info = extent_root->fs_info;
3163 struct btrfs_fs_devices *fs_devices = info->fs_devices;
3164 struct list_head *cur;
3165 struct map_lookup *map = NULL;
3166 struct extent_map_tree *em_tree;
3167 struct extent_map *em;
3168 struct btrfs_device_info *devices_info = NULL;
3170 int num_stripes; /* total number of stripes to allocate */
3171 int sub_stripes; /* sub_stripes info for map */
3172 int dev_stripes; /* stripes per dev */
3173 int devs_max; /* max devs to use */
3174 int devs_min; /* min devs needed */
3175 int devs_increment; /* ndevs has to be a multiple of this */
3176 int ncopies; /* how many copies of the data we have */
3178 u64 max_stripe_size;
3186 BUG_ON(!alloc_profile_is_valid(type, 0));
3188 if (list_empty(&fs_devices->alloc_list))
3195 devs_max = 0; /* 0 == as many as possible */
3199 * define the properties of each RAID type.
3200 * FIXME: move this to a global table and use it in all RAID
3203 if (type & (BTRFS_BLOCK_GROUP_DUP)) {
3207 } else if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
3209 } else if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
3214 } else if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
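/*
 * Editor's summary (assumption -- most of the assignments are elided
 * above; values follow the mainline table of this era, with defaults
 * dev_stripes = devs_increment = sub_stripes = ncopies = 1 and
 * devs_max = 0 meaning "as many as possible"):
 *
 *	type    dev_stripes  devs_min  devs_max  devs_increment  ncopies
 *	SINGLE       1           1         0            1            1
 *	DUP          2           1         1            1            2
 *	RAID0        1           2         0            1            1
 *	RAID1        1           2         2            2            2
 *	RAID10       1           4         0            2            2
 *
 * RAID10 additionally sets sub_stripes = 2.
 */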
3223 if (type & BTRFS_BLOCK_GROUP_DATA) {
3224 max_stripe_size = 1024 * 1024 * 1024;
3225 max_chunk_size = 10 * max_stripe_size;
3226 } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
3227 /* for larger filesystems, use larger metadata chunks */
3228 if (fs_devices->total_rw_bytes > 50ULL * 1024 * 1024 * 1024)
3229 max_stripe_size = 1024 * 1024 * 1024;
3231 max_stripe_size = 256 * 1024 * 1024;
3232 max_chunk_size = max_stripe_size;
3233 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
3234 max_stripe_size = 32 * 1024 * 1024;
3235 max_chunk_size = 2 * max_stripe_size;
3237 printk(KERN_ERR "btrfs: invalid chunk type 0x%llx requested\n",
3242 /* we don't want a chunk larger than 10% of writeable space */
3243 max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
3246 devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices,
3251 cur = fs_devices->alloc_list.next;
3254 * in the first pass through the devices list, we gather information
3255 * about the available holes on each device.
3258 while (cur != &fs_devices->alloc_list) {
3259 struct btrfs_device *device;
3263 device = list_entry(cur, struct btrfs_device, dev_alloc_list);
3267 if (!device->writeable) {
3269 "btrfs: read-only device in alloc_list\n");
3274 if (!device->in_fs_metadata)
3277 if (device->total_bytes > device->bytes_used)
3278 total_avail = device->total_bytes - device->bytes_used;
3282 /* If there is no space on this device, skip it. */
3283 if (total_avail == 0)
3286 ret = find_free_dev_extent(device,
3287 max_stripe_size * dev_stripes,
3288 &dev_offset, &max_avail);
3289 if (ret && ret != -ENOSPC)
3293 max_avail = max_stripe_size * dev_stripes;
3295 if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
3298 devices_info[ndevs].dev_offset = dev_offset;
3299 devices_info[ndevs].max_avail = max_avail;
3300 devices_info[ndevs].total_avail = total_avail;
3301 devices_info[ndevs].dev = device;
3306 * now sort the devices by hole size / available space
3308 sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
3309 btrfs_cmp_device_info, NULL);
3311 /* round down to number of usable stripes */
3312 ndevs -= ndevs % devs_increment;
3314 if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
3319 if (devs_max && ndevs > devs_max)
3322 * the primary goal is to maximize the number of stripes, so use as many
3323 * devices as possible, even if the stripes are not maximum sized.
3325 stripe_size = devices_info[ndevs-1].max_avail;
3326 num_stripes = ndevs * dev_stripes;
3328 if (stripe_size * ndevs > max_chunk_size * ncopies) {
3329 stripe_size = max_chunk_size * ncopies;
3330 do_div(stripe_size, ndevs);
3333 do_div(stripe_size, dev_stripes);
3335 /* align to BTRFS_STRIPE_LEN */
3336 do_div(stripe_size, BTRFS_STRIPE_LEN);
3337 stripe_size *= BTRFS_STRIPE_LEN;
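/*
 * Editor's worked example (made-up sizes, assuming BTRFS_STRIPE_LEN is
 * 64 KiB): RAID0 data chunk, ndevs = 4, dev_stripes = 1, ncopies = 1,
 * smallest hole 1.5 GiB, max_chunk_size = 1 GiB:
 *
 *	stripe_size = 1.5 GiB                    (smallest max_avail)
 *	1.5 GiB * 4 > 1 GiB * 1, so
 *	stripe_size = 1 GiB / 4 = 256 MiB        (capped by chunk size)
 *	256 MiB is already a multiple of 64 KiB  (alignment is a no-op)
 */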
3339 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
3344 map->num_stripes = num_stripes;
3346 for (i = 0; i < ndevs; ++i) {
3347 for (j = 0; j < dev_stripes; ++j) {
3348 int s = i * dev_stripes + j;
3349 map->stripes[s].dev = devices_info[i].dev;
3350 map->stripes[s].physical = devices_info[i].dev_offset +
3354 map->sector_size = extent_root->sectorsize;
3355 map->stripe_len = BTRFS_STRIPE_LEN;
3356 map->io_align = BTRFS_STRIPE_LEN;
3357 map->io_width = BTRFS_STRIPE_LEN;
3359 map->sub_stripes = sub_stripes;
3362 num_bytes = stripe_size * (num_stripes / ncopies);
3364 *stripe_size_out = stripe_size;
3365 *num_bytes_out = num_bytes;
3367 trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);
3369 em = alloc_extent_map();
3374 em->bdev = (struct block_device *)map;
3376 em->len = num_bytes;
3377 em->block_start = 0;
3378 em->block_len = em->len;
3380 em_tree = &extent_root->fs_info->mapping_tree.map_tree;
3381 write_lock(&em_tree->lock);
3382 ret = add_extent_mapping(em_tree, em);
3383 write_unlock(&em_tree->lock);
3384 free_extent_map(em);
3388 ret = btrfs_make_block_group(trans, extent_root, 0, type,
3389 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3394 for (i = 0; i < map->num_stripes; ++i) {
3395 struct btrfs_device *device;
3398 device = map->stripes[i].dev;
3399 dev_offset = map->stripes[i].physical;
3401 ret = btrfs_alloc_dev_extent(trans, device,
3402 info->chunk_root->root_key.objectid,
3403 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3404 start, dev_offset, stripe_size);
3406 btrfs_abort_transaction(trans, extent_root, ret);
3411 kfree(devices_info);
3416 kfree(devices_info);
3420 static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
3421 struct btrfs_root *extent_root,
3422 struct map_lookup *map, u64 chunk_offset,
3423 u64 chunk_size, u64 stripe_size)
3426 struct btrfs_key key;
3427 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
3428 struct btrfs_device *device;
3429 struct btrfs_chunk *chunk;
3430 struct btrfs_stripe *stripe;
3431 size_t item_size = btrfs_chunk_item_size(map->num_stripes);
3435 chunk = kzalloc(item_size, GFP_NOFS);
3440 while (index < map->num_stripes) {
3441 device = map->stripes[index].dev;
3442 device->bytes_used += stripe_size;
3443 ret = btrfs_update_device(trans, device);
3449 spin_lock(&extent_root->fs_info->free_chunk_lock);
3450 extent_root->fs_info->free_chunk_space -= (stripe_size *
3452 spin_unlock(&extent_root->fs_info->free_chunk_lock);
3455 stripe = &chunk->stripe;
3456 while (index < map->num_stripes) {
3457 device = map->stripes[index].dev;
3458 dev_offset = map->stripes[index].physical;
3460 btrfs_set_stack_stripe_devid(stripe, device->devid);
3461 btrfs_set_stack_stripe_offset(stripe, dev_offset);
3462 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
3467 btrfs_set_stack_chunk_length(chunk, chunk_size);
3468 btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
3469 btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
3470 btrfs_set_stack_chunk_type(chunk, map->type);
3471 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
3472 btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
3473 btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
3474 btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
3475 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
3477 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3478 key.type = BTRFS_CHUNK_ITEM_KEY;
3479 key.offset = chunk_offset;
3481 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
3483 if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
3485 * TODO: Cleanup of inserted chunk root in case of
3488 ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
3498 * Chunk allocation falls into two parts. The first part does the work
3499 * that makes the newly allocated chunk usable, but does not do any operation
3500 * that modifies the chunk tree. The second part does the work that
3501 * requires modifying the chunk tree. This division is important for the
3502 * bootstrap process of adding storage to a seed btrfs.
3504 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3505 struct btrfs_root *extent_root, u64 type)
3510 struct map_lookup *map;
3511 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
3514 ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3519 ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
3520 &stripe_size, chunk_offset, type);
3524 ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
3525 chunk_size, stripe_size);
3531 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
3532 struct btrfs_root *root,
3533 struct btrfs_device *device)
3536 u64 sys_chunk_offset;
3540 u64 sys_stripe_size;
3542 struct map_lookup *map;
3543 struct map_lookup *sys_map;
3544 struct btrfs_fs_info *fs_info = root->fs_info;
3545 struct btrfs_root *extent_root = fs_info->extent_root;
3548 ret = find_next_chunk(fs_info->chunk_root,
3549 BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset);
3553 alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
3554 fs_info->avail_metadata_alloc_bits;
3555 alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
3557 ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
3558 &stripe_size, chunk_offset, alloc_profile);
3562 sys_chunk_offset = chunk_offset + chunk_size;
3564 alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM |
3565 fs_info->avail_system_alloc_bits;
3566 alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
3568 ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
3569 &sys_chunk_size, &sys_stripe_size,
3570 sys_chunk_offset, alloc_profile);
3574 ret = btrfs_add_device(trans, fs_info->chunk_root, device);
3579 * Modifying the chunk tree requires allocating new blocks from both
3580 * the system block group and the metadata block group. So we can only
3581 * do the operations that require modifying the chunk tree after both
3582 * block groups have been created.
3584 ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
3585 chunk_size, stripe_size);
3589 ret = __finish_chunk_alloc(trans, extent_root, sys_map,
3590 sys_chunk_offset, sys_chunk_size,
3598 btrfs_abort_transaction(trans, root, ret);
3602 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
3604 struct extent_map *em;
3605 struct map_lookup *map;
3606 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
3610 read_lock(&map_tree->map_tree.lock);
3611 em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
3612 read_unlock(&map_tree->map_tree.lock);
3616 if (btrfs_test_opt(root, DEGRADED)) {
3617 free_extent_map(em);
3621 map = (struct map_lookup *)em->bdev;
3622 for (i = 0; i < map->num_stripes; i++) {
3623 if (!map->stripes[i].dev->writeable) {
3628 free_extent_map(em);
3632 void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
3634 extent_map_tree_init(&tree->map_tree);
3637 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
3639 struct extent_map *em;
3642 write_lock(&tree->map_tree.lock);
3643 em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
3645 remove_extent_mapping(&tree->map_tree, em);
3646 write_unlock(&tree->map_tree.lock);
3651 free_extent_map(em);
3652 /* once for the tree */
3653 free_extent_map(em);
3657 int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
3659 struct extent_map *em;
3660 struct map_lookup *map;
3661 struct extent_map_tree *em_tree = &map_tree->map_tree;
3664 read_lock(&em_tree->lock);
3665 em = lookup_extent_mapping(em_tree, logical, len);
3666 read_unlock(&em_tree->lock);
3669 BUG_ON(em->start > logical || em->start + em->len < logical);
3670 map = (struct map_lookup *)em->bdev;
3671 if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
3672 ret = map->num_stripes;
3673 else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
3674 ret = map->sub_stripes;
3677 free_extent_map(em);
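/*
 * Editor's note: in this era the copy count works out to num_stripes
 * (typically 2) for DUP/RAID1, sub_stripes (2) for RAID10, and 1 for
 * single and RAID0 (the default branch, elided above).
 */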
3681 static int find_live_mirror(struct map_lookup *map, int first, int num,
3685 if (map->stripes[optimal].dev->bdev)
3687 for (i = first; i < first + num; i++) {
3688 if (map->stripes[i].dev->bdev)
3691 /* we couldn't find one that doesn't fail. Just return something
3692 * and the io error handling code will clean up eventually
3697 static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
3698 u64 logical, u64 *length,
3699 struct btrfs_bio **bbio_ret,
3702 struct extent_map *em;
3703 struct map_lookup *map;
3704 struct extent_map_tree *em_tree = &map_tree->map_tree;
3707 u64 stripe_end_offset;
3716 struct btrfs_bio *bbio = NULL;
3718 read_lock(&em_tree->lock);
3719 em = lookup_extent_mapping(em_tree, logical, *length);
3720 read_unlock(&em_tree->lock);
3723 printk(KERN_CRIT "unable to find logical %llu len %llu\n",
3724 (unsigned long long)logical,
3725 (unsigned long long)*length);
3729 BUG_ON(em->start > logical || em->start + em->len < logical);
3730 map = (struct map_lookup *)em->bdev;
3731 offset = logical - em->start;
3733 if (mirror_num > map->num_stripes)
3738 * stripe_nr counts the total number of stripes we have to stride
3739 * to get to this block
3741 do_div(stripe_nr, map->stripe_len);
3743 stripe_offset = stripe_nr * map->stripe_len;
3744 BUG_ON(offset < stripe_offset);
3746 /* stripe_offset is the offset of this block in its stripe */
3747 stripe_offset = offset - stripe_offset;
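/*
 * Editor's worked example (made-up offset, stripe_len = 64 KiB):
 * offset = 200 KiB into the chunk gives
 *
 *	stripe_nr     = 200 KiB / 64 KiB = 3          (stripes to stride)
 *	stripe_offset = 200 KiB - 3 * 64 KiB = 8 KiB  (offset in stripe 3)
 */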
3749 if (rw & REQ_DISCARD)
3750 *length = min_t(u64, em->len - offset, *length);
3751 else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
3752 /* we limit the length of each bio to what fits in a stripe */
3753 *length = min_t(u64, em->len - offset,
3754 map->stripe_len - stripe_offset);
3756 *length = em->len - offset;
3764 stripe_nr_orig = stripe_nr;
3765 stripe_nr_end = (offset + *length + map->stripe_len - 1) &
3766 (~(map->stripe_len - 1));
3767 do_div(stripe_nr_end, map->stripe_len);
3768 stripe_end_offset = stripe_nr_end * map->stripe_len -
3770 if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3771 if (rw & REQ_DISCARD)
3772 num_stripes = min_t(u64, map->num_stripes,
3773 stripe_nr_end - stripe_nr_orig);
3774 stripe_index = do_div(stripe_nr, map->num_stripes);
3775 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
3776 if (rw & (REQ_WRITE | REQ_DISCARD))
3777 num_stripes = map->num_stripes;
3778 else if (mirror_num)
3779 stripe_index = mirror_num - 1;
3781 stripe_index = find_live_mirror(map, 0,
3783 current->pid % map->num_stripes);
3784 mirror_num = stripe_index + 1;
3787 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
3788 if (rw & (REQ_WRITE | REQ_DISCARD)) {
3789 num_stripes = map->num_stripes;
3790 } else if (mirror_num) {
3791 stripe_index = mirror_num - 1;
3796 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3797 int factor = map->num_stripes / map->sub_stripes;
3799 stripe_index = do_div(stripe_nr, factor);
3800 stripe_index *= map->sub_stripes;
3803 num_stripes = map->sub_stripes;
3804 else if (rw & REQ_DISCARD)
3805 num_stripes = min_t(u64, map->sub_stripes *
3806 (stripe_nr_end - stripe_nr_orig),
3808 else if (mirror_num)
3809 stripe_index += mirror_num - 1;
3811 int old_stripe_index = stripe_index;
3812 stripe_index = find_live_mirror(map, stripe_index,
3813 map->sub_stripes, stripe_index +
3814 current->pid % map->sub_stripes);
3815 mirror_num = stripe_index - old_stripe_index + 1;
3819 * after this do_div call, stripe_nr is the number of stripes
3820 * on this device we have to walk to find the data, and
3821 * stripe_index is the number of our device in the stripe array
3823 stripe_index = do_div(stripe_nr, map->num_stripes);
3824 mirror_num = stripe_index + 1;
3826 BUG_ON(stripe_index >= map->num_stripes);
3828 bbio = kzalloc(btrfs_bio_size(num_stripes), GFP_NOFS);
3833 atomic_set(&bbio->error, 0);
3835 if (rw & REQ_DISCARD) {
3837 int sub_stripes = 0;
3838 u64 stripes_per_dev = 0;
3839 u32 remaining_stripes = 0;
3840 u32 last_stripe = 0;
3843 (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
3844 if (map->type & BTRFS_BLOCK_GROUP_RAID0)
3847 sub_stripes = map->sub_stripes;
3849 factor = map->num_stripes / sub_stripes;
3850 stripes_per_dev = div_u64_rem(stripe_nr_end -
3853 &remaining_stripes);
3854 div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
3855 last_stripe *= sub_stripes;
3858 for (i = 0; i < num_stripes; i++) {
3859 bbio->stripes[i].physical =
3860 map->stripes[stripe_index].physical +
3861 stripe_offset + stripe_nr * map->stripe_len;
3862 bbio->stripes[i].dev = map->stripes[stripe_index].dev;
3864 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
3865 BTRFS_BLOCK_GROUP_RAID10)) {
3866 bbio->stripes[i].length = stripes_per_dev *
3869 if (i / sub_stripes < remaining_stripes)
3870 bbio->stripes[i].length +=
3874 * Special for the first stripe and
3877 * |-------|...|-------|
3881 if (i < sub_stripes)
3882 bbio->stripes[i].length -=
3885 if (stripe_index >= last_stripe &&
3886 stripe_index <= (last_stripe +
3888 bbio->stripes[i].length -=
3891 if (i == sub_stripes - 1)
3894 bbio->stripes[i].length = *length;
3897 if (stripe_index == map->num_stripes) {
3898 /* This could only happen for RAID0/10 */
3904 for (i = 0; i < num_stripes; i++) {
3905 bbio->stripes[i].physical =
3906 map->stripes[stripe_index].physical +
3908 stripe_nr * map->stripe_len;
3909 bbio->stripes[i].dev =
3910 map->stripes[stripe_index].dev;
3915 if (rw & REQ_WRITE) {
3916 if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
3917 BTRFS_BLOCK_GROUP_RAID10 |
3918 BTRFS_BLOCK_GROUP_DUP)) {
3924 bbio->num_stripes = num_stripes;
3925 bbio->max_errors = max_errors;
3926 bbio->mirror_num = mirror_num;
3928 free_extent_map(em);
3932 int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
3933 u64 logical, u64 *length,
3934 struct btrfs_bio **bbio_ret, int mirror_num)
3936 return __btrfs_map_block(map_tree, rw, logical, length, bbio_ret,
3940 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
3941 u64 chunk_start, u64 physical, u64 devid,
3942 u64 **logical, int *naddrs, int *stripe_len)
3944 struct extent_map_tree *em_tree = &map_tree->map_tree;
3945 struct extent_map *em;
3946 struct map_lookup *map;
3953 read_lock(&em_tree->lock);
3954 em = lookup_extent_mapping(em_tree, chunk_start, 1);
3955 read_unlock(&em_tree->lock);
3957 BUG_ON(!em || em->start != chunk_start);
3958 map = (struct map_lookup *)em->bdev;
3961 if (map->type & BTRFS_BLOCK_GROUP_RAID10)
3962 do_div(length, map->num_stripes / map->sub_stripes);
3963 else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
3964 do_div(length, map->num_stripes);
3966 buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
3967 BUG_ON(!buf); /* -ENOMEM */
3969 for (i = 0; i < map->num_stripes; i++) {
3970 if (devid && map->stripes[i].dev->devid != devid)
3972 if (map->stripes[i].physical > physical ||
3973 map->stripes[i].physical + length <= physical)
3976 stripe_nr = physical - map->stripes[i].physical;
3977 do_div(stripe_nr, map->stripe_len);
3979 if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3980 stripe_nr = stripe_nr * map->num_stripes + i;
3981 do_div(stripe_nr, map->sub_stripes);
3982 } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3983 stripe_nr = stripe_nr * map->num_stripes + i;
3985 bytenr = chunk_start + stripe_nr * map->stripe_len;
3986 WARN_ON(nr >= map->num_stripes);
3987 for (j = 0; j < nr; j++) {
3988 if (buf[j] == bytenr)
3992 WARN_ON(nr >= map->num_stripes);
3999 *stripe_len = map->stripe_len;
4001 free_extent_map(em);
4005 static void *merge_stripe_index_into_bio_private(void *bi_private,
4006 unsigned int stripe_index)
4009 * with single, dup, RAID0, RAID1 and RAID10, stripe_index is
4011 * The alternative solution (instead of stealing bits from the
4012 * pointer) would be to allocate an intermediate structure
4013 * that contains the old private pointer plus the stripe_index.
4015 BUG_ON((((uintptr_t)bi_private) & 3) != 0);
4016 BUG_ON(stripe_index > 3);
4017 return (void *)(((uintptr_t)bi_private) | stripe_index);
4020 static struct btrfs_bio *extract_bbio_from_bio_private(void *bi_private)
4022 return (struct btrfs_bio *)(((uintptr_t)bi_private) & ~((uintptr_t)3));
4025 static unsigned int extract_stripe_index_from_bio_private(void *bi_private)
4027 return (unsigned int)((uintptr_t)bi_private) & 3;
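/*
 * Editor's sketch of the round trip (stripe index made up): kmalloc'ed
 * bbio pointers are at least 4-byte aligned, so the two low bits of
 * bi_private are free to carry a stripe index 0..3:
 *
 *	bio->bi_private = merge_stripe_index_into_bio_private(bbio, 2);
 *	extract_bbio_from_bio_private(bio->bi_private)         == bbio
 *	extract_stripe_index_from_bio_private(bio->bi_private) == 2
 */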
4030 static void btrfs_end_bio(struct bio *bio, int err)
4032 struct btrfs_bio *bbio = extract_bbio_from_bio_private(bio->bi_private);
4033 int is_orig_bio = 0;
4036 atomic_inc(&bbio->error);
4037 if (err == -EIO || err == -EREMOTEIO) {
4038 unsigned int stripe_index =
4039 extract_stripe_index_from_bio_private(
4041 struct btrfs_device *dev;
4043 BUG_ON(stripe_index >= bbio->num_stripes);
4044 dev = bbio->stripes[stripe_index].dev;
4045 if (bio->bi_rw & WRITE)
4046 btrfs_dev_stat_inc(dev,
4047 BTRFS_DEV_STAT_WRITE_ERRS);
4049 btrfs_dev_stat_inc(dev,
4050 BTRFS_DEV_STAT_READ_ERRS);
4051 if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
4052 btrfs_dev_stat_inc(dev,
4053 BTRFS_DEV_STAT_FLUSH_ERRS);
4054 btrfs_dev_stat_print_on_error(dev);
4058 if (bio == bbio->orig_bio)
4061 if (atomic_dec_and_test(&bbio->stripes_pending)) {
4064 bio = bbio->orig_bio;
4066 bio->bi_private = bbio->private;
4067 bio->bi_end_io = bbio->end_io;
4068 bio->bi_bdev = (struct block_device *)
4069 (unsigned long)bbio->mirror_num;
4070 /* only send an error to the higher layers if it is
4071 * beyond the tolerance of the multi-bio
4073 if (atomic_read(&bbio->error) > bbio->max_errors) {
4077 * this bio is actually up to date; we didn't
4078 * go over the max number of errors
4080 set_bit(BIO_UPTODATE, &bio->bi_flags);
4085 bio_endio(bio, err);
4086 } else if (!is_orig_bio) {
4091 struct async_sched {
4094 struct btrfs_fs_info *info;
4095 struct btrfs_work work;
4099 * see run_scheduled_bios for a description of why bios are collected for
4102 * This will add one bio to the pending list for a device and make sure
4103 * the work struct is scheduled.
4105 static noinline void schedule_bio(struct btrfs_root *root,
4106 struct btrfs_device *device,
4107 int rw, struct bio *bio)
4109 int should_queue = 1;
4110 struct btrfs_pending_bios *pending_bios;
4112 /* don't bother with additional async steps for reads, right now */
4113 if (!(rw & REQ_WRITE)) {
4115 btrfsic_submit_bio(rw, bio);
4121 * nr_async_bios allows us to reliably return congestion to the
4122 * higher layers. Otherwise, the async bio makes it appear we have
4123 * made progress against dirty pages when we've really just put it
4124 * on a queue for later
4126 atomic_inc(&root->fs_info->nr_async_bios);
4127 WARN_ON(bio->bi_next);
4128 bio->bi_next = NULL;
4131 spin_lock(&device->io_lock);
4132 if (bio->bi_rw & REQ_SYNC)
4133 pending_bios = &device->pending_sync_bios;
4135 pending_bios = &device->pending_bios;
4137 if (pending_bios->tail)
4138 pending_bios->tail->bi_next = bio;
4140 pending_bios->tail = bio;
4141 if (!pending_bios->head)
4142 pending_bios->head = bio;
4143 if (device->running_pending)
4146 spin_unlock(&device->io_lock);
4149 btrfs_queue_worker(&root->fs_info->submit_workers,
4153 int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
4154 int mirror_num, int async_submit)
4156 struct btrfs_mapping_tree *map_tree;
4157 struct btrfs_device *dev;
4158 struct bio *first_bio = bio;
4159 u64 logical = (u64)bio->bi_sector << 9;
4165 struct btrfs_bio *bbio = NULL;
4167 length = bio->bi_size;
4168 map_tree = &root->fs_info->mapping_tree;
4169 map_length = length;
4171 ret = btrfs_map_block(map_tree, rw, logical, &map_length, &bbio,
4173 if (ret) /* -ENOMEM */
4176 total_devs = bbio->num_stripes;
4177 if (map_length < length) {
4178 printk(KERN_CRIT "mapping failed logical %llu bio len %llu "
4179 "len %llu\n", (unsigned long long)logical,
4180 (unsigned long long)length,
4181 (unsigned long long)map_length);
4185 bbio->orig_bio = first_bio;
4186 bbio->private = first_bio->bi_private;
4187 bbio->end_io = first_bio->bi_end_io;
4188 atomic_set(&bbio->stripes_pending, bbio->num_stripes);
4190 while (dev_nr < total_devs) {
4191 if (dev_nr < total_devs - 1) {
4192 bio = bio_clone(first_bio, GFP_NOFS);
4193 BUG_ON(!bio); /* -ENOMEM */
4197 bio->bi_private = bbio;
4198 bio->bi_private = merge_stripe_index_into_bio_private(
4199 bio->bi_private, (unsigned int)dev_nr);
4200 bio->bi_end_io = btrfs_end_bio;
4201 bio->bi_sector = bbio->stripes[dev_nr].physical >> 9;
4202 dev = bbio->stripes[dev_nr].dev;
4203 if (dev && dev->bdev && (rw != WRITE || dev->writeable)) {
4204 pr_debug("btrfs_map_bio: rw %d, sector=%llu, dev=%lu "
4205 "(%s id %llu), size=%u\n", rw,
4206 (u64)bio->bi_sector, (u_long)dev->bdev->bd_dev,
4207 dev->name, dev->devid, bio->bi_size);
4208 bio->bi_bdev = dev->bdev;
4210 schedule_bio(root, dev, rw, bio);
4212 btrfsic_submit_bio(rw, bio);
4214 bio->bi_bdev = root->fs_info->fs_devices->latest_bdev;
4215 bio->bi_sector = logical >> 9;
4216 bio_endio(bio, -EIO);
4223 struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
4226 struct btrfs_device *device;
4227 struct btrfs_fs_devices *cur_devices;
4229 cur_devices = root->fs_info->fs_devices;
4230 while (cur_devices) {
4232 !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
4233 device = __find_device(&cur_devices->devices,
4238 cur_devices = cur_devices->seed;
4243 static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
4244 u64 devid, u8 *dev_uuid)
4246 struct btrfs_device *device;
4247 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
4249 device = kzalloc(sizeof(*device), GFP_NOFS);
4252 list_add(&device->dev_list,
4253 &fs_devices->devices);
4254 device->dev_root = root->fs_info->dev_root;
4255 device->devid = devid;
4256 device->work.func = pending_bios_fn;
4257 device->fs_devices = fs_devices;
4258 device->missing = 1;
4259 fs_devices->num_devices++;
4260 fs_devices->missing_devices++;
4261 spin_lock_init(&device->io_lock);
4262 INIT_LIST_HEAD(&device->dev_alloc_list);
4263 memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
4267 static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
4268 struct extent_buffer *leaf,
4269 struct btrfs_chunk *chunk)
4271 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
4272 struct map_lookup *map;
4273 struct extent_map *em;
4277 u8 uuid[BTRFS_UUID_SIZE];
4282 logical = key->offset;
4283 length = btrfs_chunk_length(leaf, chunk);
4285 read_lock(&map_tree->map_tree.lock);
4286 em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
4287 read_unlock(&map_tree->map_tree.lock);
4289 /* already mapped? */
4290 if (em && em->start <= logical && em->start + em->len > logical) {
4291 free_extent_map(em);
4294 free_extent_map(em);
4297 em = alloc_extent_map();
4300 num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
4301 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
4303 free_extent_map(em);
4307 em->bdev = (struct block_device *)map;
4308 em->start = logical;
4310 em->block_start = 0;
4311 em->block_len = em->len;
4313 map->num_stripes = num_stripes;
4314 map->io_width = btrfs_chunk_io_width(leaf, chunk);
4315 map->io_align = btrfs_chunk_io_align(leaf, chunk);
4316 map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
4317 map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
4318 map->type = btrfs_chunk_type(leaf, chunk);
4319 map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
4320 for (i = 0; i < num_stripes; i++) {
4321 map->stripes[i].physical =
4322 btrfs_stripe_offset_nr(leaf, chunk, i);
4323 devid = btrfs_stripe_devid_nr(leaf, chunk, i);
4324 read_extent_buffer(leaf, uuid, (unsigned long)
4325 btrfs_stripe_dev_uuid_nr(chunk, i),
4327 map->stripes[i].dev = btrfs_find_device(root, devid, uuid,
4329 if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
4331 free_extent_map(em);
4334 if (!map->stripes[i].dev) {
4335 map->stripes[i].dev =
4336 add_missing_dev(root, devid, uuid);
4337 if (!map->stripes[i].dev) {
4339 free_extent_map(em);
4343 map->stripes[i].dev->in_fs_metadata = 1;
4346 write_lock(&map_tree->map_tree.lock);
4347 ret = add_extent_mapping(&map_tree->map_tree, em);
4348 write_unlock(&map_tree->map_tree.lock);
4349 BUG_ON(ret); /* Tree corruption */
4350 free_extent_map(em);
4355 static void fill_device_from_item(struct extent_buffer *leaf,
4356 struct btrfs_dev_item *dev_item,
4357 struct btrfs_device *device)
4361 device->devid = btrfs_device_id(leaf, dev_item);
4362 device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
4363 device->total_bytes = device->disk_total_bytes;
4364 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
4365 device->type = btrfs_device_type(leaf, dev_item);
4366 device->io_align = btrfs_device_io_align(leaf, dev_item);
4367 device->io_width = btrfs_device_io_width(leaf, dev_item);
4368 device->sector_size = btrfs_device_sector_size(leaf, dev_item);
4370 ptr = (unsigned long)btrfs_device_uuid(dev_item);
4371 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
4374 static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
4376 struct btrfs_fs_devices *fs_devices;
4379 BUG_ON(!mutex_is_locked(&uuid_mutex));
4381 fs_devices = root->fs_info->fs_devices->seed;
4382 while (fs_devices) {
4383 if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
4387 fs_devices = fs_devices->seed;
4390 fs_devices = find_fsid(fsid);
4396 fs_devices = clone_fs_devices(fs_devices);
4397 if (IS_ERR(fs_devices)) {
4398 ret = PTR_ERR(fs_devices);
4402 ret = __btrfs_open_devices(fs_devices, FMODE_READ,
4403 root->fs_info->bdev_holder);
4405 free_fs_devices(fs_devices);
4409 if (!fs_devices->seeding) {
4410 __btrfs_close_devices(fs_devices);
4411 free_fs_devices(fs_devices);
4416 fs_devices->seed = root->fs_info->fs_devices->seed;
4417 root->fs_info->fs_devices->seed = fs_devices;
4422 static int read_one_dev(struct btrfs_root *root,
4423 struct extent_buffer *leaf,
4424 struct btrfs_dev_item *dev_item)
4426 struct btrfs_device *device;
4429 u8 fs_uuid[BTRFS_UUID_SIZE];
4430 u8 dev_uuid[BTRFS_UUID_SIZE];
4432 devid = btrfs_device_id(leaf, dev_item);
4433 read_extent_buffer(leaf, dev_uuid,
4434 (unsigned long)btrfs_device_uuid(dev_item),
4436 read_extent_buffer(leaf, fs_uuid,
4437 (unsigned long)btrfs_device_fsid(dev_item),
4440 if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
4441 ret = open_seed_devices(root, fs_uuid);
4442 if (ret && !btrfs_test_opt(root, DEGRADED))
4446 device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
4447 if (!device || !device->bdev) {
4448 if (!btrfs_test_opt(root, DEGRADED))
4452 printk(KERN_WARNING "warning devid %llu missing\n",
4453 (unsigned long long)devid);
4454 device = add_missing_dev(root, devid, dev_uuid);
4457 } else if (!device->missing) {
4459 * this happens when a device that was properly set up
4460 * in the device info lists suddenly goes bad.
4461 * device->bdev is NULL, and so we have to set
4462 * device->missing to one here
4464 root->fs_info->fs_devices->missing_devices++;
4465 device->missing = 1;
4469 if (device->fs_devices != root->fs_info->fs_devices) {
4470 BUG_ON(device->writeable);
4471 if (device->generation !=
4472 btrfs_device_generation(leaf, dev_item))
4476 fill_device_from_item(leaf, dev_item, device);
4477 device->dev_root = root->fs_info->dev_root;
4478 device->in_fs_metadata = 1;
4479 if (device->writeable) {
4480 device->fs_devices->total_rw_bytes += device->total_bytes;
4481 spin_lock(&root->fs_info->free_chunk_lock);
4482 root->fs_info->free_chunk_space += device->total_bytes -
4484 spin_unlock(&root->fs_info->free_chunk_lock);
4490 int btrfs_read_sys_array(struct btrfs_root *root)
4492 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
4493 struct extent_buffer *sb;
4494 struct btrfs_disk_key *disk_key;
4495 struct btrfs_chunk *chunk;
4497 unsigned long sb_ptr;
4503 struct btrfs_key key;
4505 sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
4506 BTRFS_SUPER_INFO_SIZE);
4509 btrfs_set_buffer_uptodate(sb);
4510 btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
4512 * The sb extent buffer is artificial and just used to read the system array.
4513 * The btrfs_set_buffer_uptodate() call does not properly mark all its
4514 * pages up-to-date when the page is larger: extent does not cover the
4515 * whole page and consequently check_page_uptodate does not find all
4516 * the page's extents up-to-date (the hole beyond sb),
4517 * write_extent_buffer then triggers a WARN_ON.
4519 * Regular short extents go through mark_extent_buffer_dirty/writeback cycle,
4520 * but sb spans only this function. Add an explicit SetPageUptodate call
4521 * to silence the warning eg. on PowerPC 64.
4523 if (PAGE_CACHE_SIZE > BTRFS_SUPER_INFO_SIZE)
4524 SetPageUptodate(sb->pages[0]);
4526 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
4527 array_size = btrfs_super_sys_array_size(super_copy);
4529 ptr = super_copy->sys_chunk_array;
4530 sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
4533 while (cur < array_size) {
4534 disk_key = (struct btrfs_disk_key *)ptr;
4535 btrfs_disk_key_to_cpu(&key, disk_key);
4537 len = sizeof(*disk_key); ptr += len;
4541 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
4542 chunk = (struct btrfs_chunk *)sb_ptr;
4543 ret = read_one_chunk(root, &key, sb, chunk);
4546 num_stripes = btrfs_chunk_num_stripes(sb, chunk);
4547 len = btrfs_chunk_item_size(num_stripes);
4556 free_extent_buffer(sb);
4560 struct btrfs_device *btrfs_find_device_for_logical(struct btrfs_root *root,
4561 u64 logical, int mirror_num)
4563 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
4566 struct btrfs_bio *bbio = NULL;
4567 struct btrfs_device *device;
4569 BUG_ON(mirror_num == 0);
4570 ret = btrfs_map_block(map_tree, WRITE, logical, &map_length, &bbio,
4573 BUG_ON(bbio != NULL);
4576 BUG_ON(mirror_num != bbio->mirror_num);
4577 device = bbio->stripes[mirror_num - 1].dev;
4582 int btrfs_read_chunk_tree(struct btrfs_root *root)
4584 struct btrfs_path *path;
4585 struct extent_buffer *leaf;
4586 struct btrfs_key key;
4587 struct btrfs_key found_key;
4591 root = root->fs_info->chunk_root;
4593 path = btrfs_alloc_path();
4597 mutex_lock(&uuid_mutex);
4600 /* first we search for all of the device items, and then we
4601 * read in all of the chunk items. This way we can create chunk
4602 * mappings that reference all of the devices that are found
4604 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
4608 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4612 leaf = path->nodes[0];
4613 slot = path->slots[0];
4614 if (slot >= btrfs_header_nritems(leaf)) {
4615 ret = btrfs_next_leaf(root, path);
4622 btrfs_item_key_to_cpu(leaf, &found_key, slot);
4623 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
4624 if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
4626 if (found_key.type == BTRFS_DEV_ITEM_KEY) {
4627 struct btrfs_dev_item *dev_item;
4628 dev_item = btrfs_item_ptr(leaf, slot,
4629 struct btrfs_dev_item);
4630 ret = read_one_dev(root, leaf, dev_item);
4634 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
4635 struct btrfs_chunk *chunk;
4636 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
4637 ret = read_one_chunk(root, &found_key, leaf, chunk);
4643 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
4645 btrfs_release_path(path);
4650 unlock_chunks(root);
4651 mutex_unlock(&uuid_mutex);
4653 btrfs_free_path(path);
4657 void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
4659 btrfs_dev_stat_inc(dev, index);
4660 btrfs_dev_stat_print_on_error(dev);
4663 void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
4665 printk_ratelimited(KERN_ERR
4666 "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
4668 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
4669 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
4670 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
4671 btrfs_dev_stat_read(dev,
4672 BTRFS_DEV_STAT_CORRUPTION_ERRS),
4673 btrfs_dev_stat_read(dev,
4674 BTRFS_DEV_STAT_GENERATION_ERRS));
4677 int btrfs_get_dev_stats(struct btrfs_root *root,
4678 struct btrfs_ioctl_get_dev_stats *stats,
4679 int reset_after_read)
4681 struct btrfs_device *dev;
4682 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
4685 mutex_lock(&fs_devices->device_list_mutex);
4686 dev = btrfs_find_device(root, stats->devid, NULL, NULL);
4687 mutex_unlock(&fs_devices->device_list_mutex);
4691 "btrfs: get dev_stats failed, device not found\n");
4693 } else if (reset_after_read) {
4694 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
4695 if (stats->nr_items > i)
4697 btrfs_dev_stat_read_and_reset(dev, i);
4699 btrfs_dev_stat_reset(dev, i);
4702 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
4703 if (stats->nr_items > i)
4704 stats->values[i] = btrfs_dev_stat_read(dev, i);
4706 if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
4707 stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;