/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <linux/kthread.h>
#include <asm/div64.h>
#include "extent_map.h"
#include "transaction.h"
#include "print-tree.h"
#include "async-thread.h"
#include "check-integrity.h"
static int init_first_rw_device(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_device *device);
static int btrfs_relocate_sys_chunks(struct btrfs_root *root);

static DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);

static void lock_chunks(struct btrfs_root *root)
	mutex_lock(&root->fs_info->chunk_mutex);

static void unlock_chunks(struct btrfs_root *root)
	mutex_unlock(&root->fs_info->chunk_mutex);
static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
	struct btrfs_device *device;

	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
int btrfs_cleanup_fs_uuids(void)
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, list);
		list_del(&fs_devices->list);
		free_fs_devices(fs_devices);
static noinline struct btrfs_device *__find_device(struct list_head *head,
						   u64 devid, u8 *uuid)
	struct btrfs_device *dev;

	list_for_each_entry(dev, head, dev_list) {
		if (dev->devid == devid &&
		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {

static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, list) {
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
static void requeue_list(struct btrfs_pending_bios *pending_bios,
			 struct bio *head, struct bio *tail)
	struct bio *old_head;

	old_head = pending_bios->head;
	pending_bios->head = head;
	if (pending_bios->tail)
		tail->bi_next = old_head;
	else
		pending_bios->tail = tail;
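
/*
 * Illustrative note (added for explanation, not in the original source):
 * requeue_list() splices a partially processed bio chain back onto the
 * *front* of the pending list. If the list held A->B and we requeue the
 * unfinished chain X->Y, the result is X->Y->A->B, so requeued bios are
 * retried before any newer submissions.
 */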
/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device. This greatly
 * improves the scheduler's ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block. The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested. This way, multiple devices
 * can make progress from a single worker thread.
 */
static noinline int run_scheduled_bios(struct btrfs_device *device)
	struct backing_dev_info *bdi;
	struct btrfs_fs_info *fs_info;
	struct btrfs_pending_bios *pending_bios;
	unsigned long num_run;
	unsigned long batch_run = 0;
	unsigned long last_waited = 0;
	int sync_pending = 0;
	struct blk_plug plug;

	/*
	 * this function runs all the bios we've collected for
	 * a particular device. We don't want to wander off to
	 * another device without first sending all of these down.
	 * So, set up a plug here and finish it off before we return
	 */
	blk_start_plug(&plug);

	bdi = blk_get_backing_dev_info(device->bdev);
	fs_info = device->dev_root->fs_info;
	limit = btrfs_async_submit_limit(fs_info);
	limit = limit * 2 / 3;

	spin_lock(&device->io_lock);

	/* take all the bios off the list at once and process them
	 * later on (without the lock held). But, remember the
	 * tail and other pointers so the bios can be properly reinserted
	 * into the list if we hit congestion
	 */
	if (!force_reg && device->pending_sync_bios.head) {
		pending_bios = &device->pending_sync_bios;
	} else {
		pending_bios = &device->pending_bios;
	}

	pending = pending_bios->head;
	tail = pending_bios->tail;
	WARN_ON(pending && !tail);

	/*
	 * if pending was null this time around, no bios need processing
	 * at all and we can stop. Otherwise it'll loop back up again
	 * and do an additional check so no bios are missed.
	 *
	 * device->running_pending is used to synchronize with the
	 * schedule_bio code.
	 */
	if (device->pending_sync_bios.head == NULL &&
	    device->pending_bios.head == NULL) {
		device->running_pending = 0;

	device->running_pending = 1;

	pending_bios->head = NULL;
	pending_bios->tail = NULL;

	spin_unlock(&device->io_lock);
		/* we want to work on both lists, but do more bios on the
		 * sync list than the regular list
		 */
		    pending_bios != &device->pending_sync_bios &&
		    device->pending_sync_bios.head) ||
		   (num_run > 64 && pending_bios == &device->pending_sync_bios &&
		    device->pending_bios.head)) {
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);

		pending = pending->bi_next;

		atomic_dec(&fs_info->nr_async_bios);

		if (atomic_read(&fs_info->nr_async_bios) < limit &&
		    waitqueue_active(&fs_info->async_submit_wait))
			wake_up(&fs_info->async_submit_wait);

		BUG_ON(atomic_read(&cur->bi_cnt) == 0);

		/*
		 * if we're doing the sync list, record that our
		 * plug has some sync requests on it
		 *
		 * If we're doing the regular list and there are
		 * sync requests sitting around, unplug before
		 * we go
		 */
		if (pending_bios == &device->pending_sync_bios) {
			sync_pending = 1;
		} else if (sync_pending) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;

		btrfsic_submit_bio(cur->bi_rw, cur);
		/*
		 * we made progress, there is more work to do and the bdi
		 * is now congested. Back off and let other work structs
		 * run for a while
		 */
		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
		    fs_info->fs_devices->open_devices > 1) {
			struct io_context *ioc;

			ioc = current->io_context;

			/*
			 * the main goal here is that we don't want to
			 * block if we're going to be able to submit
			 * more requests without blocking.
			 *
			 * This code does two great things: it pokes into
			 * the elevator code from a filesystem _and_
			 * it makes assumptions about how batching works.
			 */
			if (ioc && ioc->nr_batch_requests > 0 &&
			    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
			     ioc->last_waited == last_waited)) {
				/*
				 * we want to go through our batch of
				 * requests and stop. So, we copy out
				 * the ioc->last_waited time and test
				 * against it before looping
				 */
				last_waited = ioc->last_waited;

			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			device->running_pending = 1;

			spin_unlock(&device->io_lock);
			btrfs_requeue_work(&device->work);

		/* unplug every 64 requests just for good measure */
		if (batch_run % 64 == 0) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);

	spin_lock(&device->io_lock);
	if (device->pending_bios.head || device->pending_sync_bios.head)
	spin_unlock(&device->io_lock);

	blk_finish_plug(&plug);
static void pending_bios_fn(struct btrfs_work *work)
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, work);
	run_scheduled_bios(device);
static noinline int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	u64 found_transid = btrfs_super_generation(disk_super);

	fs_devices = find_fsid(disk_super->fsid);
		fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
		INIT_LIST_HEAD(&fs_devices->devices);
		INIT_LIST_HEAD(&fs_devices->alloc_list);
		list_add(&fs_devices->list, &fs_uuids);
		memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
		mutex_init(&fs_devices->device_list_mutex);
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
		if (fs_devices->opened)
		device = kzalloc(sizeof(*device), GFP_NOFS);
			/* we can safely leave the fs_devices entry around */
		device->devid = devid;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, disk_super->dev_item.uuid,
		spin_lock_init(&device->io_lock);
		device->name = kstrdup(path, GFP_NOFS);
		INIT_LIST_HEAD(&device->dev_alloc_list);

		/* init readahead state */
		spin_lock_init(&device->reada_lock);
		device->reada_curr_zone = NULL;
		atomic_set(&device->reada_in_flight, 0);
		device->reada_next = 0;
		INIT_RADIX_TREE(&device->reada_zones, GFP_NOFS & ~__GFP_WAIT);
		INIT_RADIX_TREE(&device->reada_extents, GFP_NOFS & ~__GFP_WAIT);

		mutex_lock(&fs_devices->device_list_mutex);
		list_add_rcu(&device->dev_list, &fs_devices->devices);
		mutex_unlock(&fs_devices->device_list_mutex);

		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	} else if (!device->name || strcmp(device->name, path)) {
		name = kstrdup(path, GFP_NOFS);

	if (device->missing) {
		fs_devices->missing_devices--;

	if (found_transid > fs_devices->latest_trans) {
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
	*fs_devices_ret = fs_devices;
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;

	fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&fs_devices->devices);
	INIT_LIST_HEAD(&fs_devices->alloc_list);
	INIT_LIST_HEAD(&fs_devices->list);
	mutex_init(&fs_devices->device_list_mutex);
	fs_devices->latest_devid = orig->latest_devid;
	fs_devices->latest_trans = orig->latest_trans;
	memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));

	/* We have held the volume lock; it is safe to get the devices. */
	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		device = kzalloc(sizeof(*device), GFP_NOFS);
		device->name = kstrdup(orig_dev->name, GFP_NOFS);
		device->devid = orig_dev->devid;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
		spin_lock_init(&device->io_lock);
		INIT_LIST_HEAD(&device->dev_list);
		INIT_LIST_HEAD(&device->dev_alloc_list);

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	free_fs_devices(fs_devices);
	return ERR_PTR(-ENOMEM);
int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
	struct btrfs_device *device, *next;
	struct block_device *latest_bdev = NULL;
	u64 latest_devid = 0;
	u64 latest_transid = 0;

	mutex_lock(&uuid_mutex);

	/* This is the initialized path; it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (device->in_fs_metadata) {
			if (!latest_transid ||
			    device->generation > latest_transid) {
				latest_devid = device->devid;
				latest_transid = device->generation;
				latest_bdev = device->bdev;
			blkdev_put(device->bdev, device->mode);
		fs_devices->open_devices--;

		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			device->writeable = 0;
			fs_devices->rw_devices--;
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;

	if (fs_devices->seed) {
		fs_devices = fs_devices->seed;

	fs_devices->latest_bdev = latest_bdev;
	fs_devices->latest_devid = latest_devid;
	fs_devices->latest_trans = latest_transid;

	mutex_unlock(&uuid_mutex);
static void __free_device(struct work_struct *work)
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, rcu_work);
		blkdev_put(device->bdev, device->mode);

static void free_device(struct rcu_head *head)
	struct btrfs_device *device;

	device = container_of(head, struct btrfs_device, rcu);

	INIT_WORK(&device->rcu_work, __free_device);
	schedule_work(&device->rcu_work);
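
/*
 * Note on the two-stage teardown above (explanatory comment, not in the
 * original): free_device() runs as an RCU callback, in a context that must
 * not sleep, while blkdev_put() can sleep. The callback therefore only
 * queues __free_device() on a workqueue; the blocking blkdev_put() and the
 * final frees then happen later in process context.
 */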
static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
	struct btrfs_device *device;

	if (--fs_devices->opened > 0)

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		struct btrfs_device *new_device;
			fs_devices->open_devices--;

		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			fs_devices->rw_devices--;

		if (device->can_discard)
			fs_devices->num_can_discard--;

		new_device = kmalloc(sizeof(*new_device), GFP_NOFS);
		memcpy(new_device, device, sizeof(*new_device));
		new_device->name = kstrdup(device->name, GFP_NOFS);
		BUG_ON(device->name && !new_device->name);
		new_device->bdev = NULL;
		new_device->writeable = 0;
		new_device->in_fs_metadata = 0;
		new_device->can_discard = 0;
		list_replace_rcu(&device->dev_list, &new_device->dev_list);

		call_rcu(&device->rcu, free_device);
	mutex_unlock(&fs_devices->device_list_mutex);

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = 0;
int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
	struct btrfs_fs_devices *seed_devices = NULL;

	mutex_lock(&uuid_mutex);
	ret = __btrfs_close_devices(fs_devices);
	if (!fs_devices->opened) {
		seed_devices = fs_devices->seed;
		fs_devices->seed = NULL;
	mutex_unlock(&uuid_mutex);

	while (seed_devices) {
		fs_devices = seed_devices;
		seed_devices = fs_devices->seed;
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
				fmode_t flags, void *holder)
	struct request_queue *q;
	struct block_device *bdev;
	struct list_head *head = &fs_devices->devices;
	struct btrfs_device *device;
	struct block_device *latest_bdev = NULL;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	u64 latest_devid = 0;
	u64 latest_transid = 0;

	list_for_each_entry(device, head, dev_list) {
		bdev = blkdev_get_by_path(device->name, flags, holder);
			printk(KERN_INFO "open %s failed\n", device->name);
		set_blocksize(bdev, 4096);

		bh = btrfs_read_dev_super(bdev);
		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		if (devid != device->devid)

		if (memcmp(device->uuid, disk_super->dev_item.uuid,

		device->generation = btrfs_super_generation(disk_super);
		if (!latest_transid || device->generation > latest_transid) {
			latest_devid = devid;
			latest_transid = device->generation;

		if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
			device->writeable = 0;
			device->writeable = !bdev_read_only(bdev);

		q = bdev_get_queue(bdev);
		if (blk_queue_discard(q)) {
			device->can_discard = 1;
			fs_devices->num_can_discard++;

		device->in_fs_metadata = 0;
		device->mode = flags;

		if (!blk_queue_nonrot(bdev_get_queue(bdev)))
			fs_devices->rotating = 1;

		fs_devices->open_devices++;
		if (device->writeable) {
			fs_devices->rw_devices++;
			list_add(&device->dev_alloc_list,
				 &fs_devices->alloc_list);
		blkdev_put(bdev, flags);

	if (fs_devices->open_devices == 0) {

	fs_devices->seeding = seeding;
	fs_devices->opened = 1;
	fs_devices->latest_bdev = latest_bdev;
	fs_devices->latest_devid = latest_devid;
	fs_devices->latest_trans = latest_transid;
	fs_devices->total_rw_bytes = 0;
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)

	mutex_lock(&uuid_mutex);
	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = __btrfs_open_devices(fs_devices, flags, holder);
	mutex_unlock(&uuid_mutex);
int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
			  struct btrfs_fs_devices **fs_devices_ret)
	struct btrfs_super_block *disk_super;
	struct block_device *bdev;
	struct buffer_head *bh;

	mutex_lock(&uuid_mutex);

	bdev = blkdev_get_by_path(path, flags, holder);

	ret = set_blocksize(bdev, 4096);

	bh = btrfs_read_dev_super(bdev);

	disk_super = (struct btrfs_super_block *)bh->b_data;
	devid = btrfs_stack_device_id(&disk_super->dev_item);
	transid = btrfs_super_generation(disk_super);
	if (disk_super->label[0])
		printk(KERN_INFO "device label %s ", disk_super->label);
		printk(KERN_INFO "device fsid %pU ", disk_super->fsid);
	printk(KERN_CONT "devid %llu transid %llu %s\n",
	       (unsigned long long)devid, (unsigned long long)transid, path);
	ret = device_list_add(path, disk_super, devid, fs_devices_ret);

	blkdev_put(bdev, flags);

	mutex_unlock(&uuid_mutex);
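
/*
 * Usage sketch (illustrative, not from the original file): a typical caller
 * probes a path and then opens the whole set of devices it belongs to; the
 * FMODE_* flags and error handling are assumptions here:
 *
 *	struct btrfs_fs_devices *fs_devices;
 *
 *	btrfs_scan_one_device("/dev/sdb", FMODE_READ, holder, &fs_devices);
 *	btrfs_open_devices(fs_devices, FMODE_READ | FMODE_EXCL, holder);
 */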
/* helper to account the used device space in the range */
int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
				   u64 end, u64 *length)
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	struct extent_buffer *l;

	if (start >= device->total_bytes)

	path = btrfs_alloc_path();

	key.objectid = device->devid;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		ret = btrfs_previous_item(root, path, key.objectid, key.type);

		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);

		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)

		if (key.objectid > device->devid)

		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
		if (key.offset <= start && extent_end > end) {
			*length = end - start + 1;
		} else if (key.offset <= start && extent_end > start)
			*length += extent_end - start;
		else if (key.offset > start && extent_end <= end)
			*length += extent_end - key.offset;
		else if (key.offset > start && key.offset <= end) {
			*length += end - key.offset + 1;
		} else if (key.offset > end)

	btrfs_free_path(path);
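
/*
 * Worked example for the overlap accounting above (illustrative numbers,
 * not from the original): with a query range [start, end] = [100, 199] and
 * a dev extent at key.offset = 150 of length 100 (so extent_end = 250), the
 * "key.offset > start && key.offset <= end" case applies and the extent
 * contributes end - key.offset + 1 = 199 - 150 + 1 = 50 bytes to *length.
 */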
/**
 * find_free_dev_extent - find free space in the specified device
 * @device:	the device which we search the free space in
 * @num_bytes:	the size of the free space that we need
 * @start:	store the start of the free space.
 * @len:	the size of the free space that we find, or the size of the
 *		max free space if we don't find suitable free space
 *
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 *
 * @start is used to store the start of the free space if we find suitable
 * free space. But if we don't find suitable free space, it will be used to
 * store the start position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 */
int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 search_end = device->total_bytes;
	struct extent_buffer *l;

	/* FIXME use last free of some kind */

	/* we don't want to overwrite the superblock on the drive,
	 * so we make sure to start at an offset of at least 1MB
	 */
	search_start = max(root->fs_info->alloc_start, 1024ull * 1024);

	max_hole_start = search_start;

	if (search_start >= search_end) {

	path = btrfs_alloc_path();

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		ret = btrfs_previous_item(root, path, key.objectid, key.type);

		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);

		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)

		if (key.objectid > device->devid)

		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;

			/*
			 * If this free space is greater than what we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
		if (extent_end > search_start)
			search_start = extent_end;

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start)
		hole_size = search_end - search_start;

	if (hole_size > max_hole_size) {
		max_hole_start = search_start;
		max_hole_size = hole_size;

	if (hole_size < num_bytes)

	btrfs_free_path(path);

	*start = max_hole_start;
	*len = max_hole_size;
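
/*
 * Worked example (illustrative, not from the original): suppose dev extents
 * occupy [1M, 5M) and [9M, 12M) and num_bytes is 3M. After walking past the
 * first extent, search_start is 5M; the second extent's key.offset of 9M
 * yields hole_size = 9M - 5M = 4M >= 3M, so the search stops with
 * *start = 5M and *len = 4M. Had num_bytes been 5M, the loop would have run
 * to the end and only the largest hole found would be reported back.
 */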
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_device *device,
				 u64 start)
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();

	key.objectid = device->devid;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		btrfs_release_path(path);

	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);

	if (device->bytes_used > 0) {
		u64 len = btrfs_dev_extent_length(leaf, extent);
		device->bytes_used -= len;
		spin_lock(&root->fs_info->free_chunk_lock);
		root->fs_info->free_chunk_space += len;
		spin_unlock(&root->fs_info->free_chunk_lock);
	ret = btrfs_del_item(trans, root, path);

	btrfs_free_path(path);
int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
			   struct btrfs_device *device,
			   u64 chunk_tree, u64 chunk_objectid,
			   u64 chunk_offset, u64 start, u64 num_bytes)
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	WARN_ON(!device->in_fs_metadata);
	path = btrfs_alloc_path();

	key.objectid = device->devid;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);
static noinline int find_next_chunk(struct btrfs_root *root,
				    u64 objectid, u64 *offset)
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_key found_key;

	path = btrfs_alloc_path();

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);

	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
		if (found_key.objectid != objectid)

			chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
					       struct btrfs_chunk);
			*offset = found_key.offset +
				btrfs_chunk_length(path->nodes[0], chunk);

	btrfs_free_path(path);
static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid)
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);

	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
		*objectid = found_key.offset + 1;

	btrfs_free_path(path);
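
/*
 * Note (explanatory, not in the original): find_next_chunk() and
 * find_next_devid() share the same idiom - search for the largest possible
 * key.offset, (u64)-1, which lands just past the last matching item, then
 * step back with btrfs_previous_item() to read the highest existing offset
 * and derive the next free one from it.
 */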
/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_device *device)
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = (unsigned long)btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_free_path(path);
static int btrfs_rm_dev_item(struct btrfs_root *root,
			     struct btrfs_device *device)
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);

	ret = btrfs_del_item(trans, root, path);

	btrfs_free_path(path);
	unlock_chunks(root);
	btrfs_commit_transaction(trans, root);
int btrfs_rm_device(struct btrfs_root *root, char *device_path)
	struct btrfs_device *device;
	struct btrfs_device *next_device;
	struct block_device *bdev;
	struct buffer_head *bh = NULL;
	struct btrfs_super_block *disk_super;
	struct btrfs_fs_devices *cur_devices;
	bool clear_super = false;

	mutex_lock(&uuid_mutex);

	all_avail = root->fs_info->avail_data_alloc_bits |
		    root->fs_info->avail_system_alloc_bits |
		    root->fs_info->avail_metadata_alloc_bits;

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
	    root->fs_info->fs_devices->num_devices <= 4) {
		printk(KERN_ERR "btrfs: unable to go below four devices "

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
	    root->fs_info->fs_devices->num_devices <= 2) {
		printk(KERN_ERR "btrfs: unable to go below two "
		       "devices on raid1\n");

	if (strcmp(device_path, "missing") == 0) {
		struct list_head *devices;
		struct btrfs_device *tmp;

		devices = &root->fs_info->fs_devices->devices;
		/*
		 * It is safe to read the devices since the volume_mutex
		 * is held.
		 */
		list_for_each_entry(tmp, devices, dev_list) {
			if (tmp->in_fs_metadata && !tmp->bdev) {

			printk(KERN_ERR "btrfs: no missing devices found to "

		bdev = blkdev_get_by_path(device_path, FMODE_READ | FMODE_EXCL,
					  root->fs_info->bdev_holder);
			ret = PTR_ERR(bdev);

		set_blocksize(bdev, 4096);
		bh = btrfs_read_dev_super(bdev);

		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		dev_uuid = disk_super->dev_item.uuid;
		device = btrfs_find_device(root, devid, dev_uuid,

	if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
		printk(KERN_ERR "btrfs: unable to remove the only writeable "

	if (device->writeable) {
		list_del_init(&device->dev_alloc_list);
		unlock_chunks(root);
		root->fs_info->fs_devices->rw_devices--;

	ret = btrfs_shrink_device(device, 0);

	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);

	spin_lock(&root->fs_info->free_chunk_lock);
	root->fs_info->free_chunk_space = device->total_bytes -
	spin_unlock(&root->fs_info->free_chunk_lock);

	device->in_fs_metadata = 0;
	btrfs_scrub_cancel_dev(root, device);

	/*
	 * the device list mutex makes sure that we don't change
	 * the device list while someone else is writing out all
	 * the device supers.
	 */
	cur_devices = device->fs_devices;
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_del_rcu(&device->dev_list);

	device->fs_devices->num_devices--;

	if (device->missing)
		root->fs_info->fs_devices->missing_devices--;

	next_device = list_entry(root->fs_info->fs_devices->devices.next,
				 struct btrfs_device, dev_list);
	if (device->bdev == root->fs_info->sb->s_bdev)
		root->fs_info->sb->s_bdev = next_device->bdev;
	if (device->bdev == root->fs_info->fs_devices->latest_bdev)
		root->fs_info->fs_devices->latest_bdev = next_device->bdev;

		device->fs_devices->open_devices--;

	call_rcu(&device->rcu, free_device);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
	btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);

	if (cur_devices->open_devices == 0) {
		struct btrfs_fs_devices *fs_devices;
		fs_devices = root->fs_info->fs_devices;
		while (fs_devices) {
			if (fs_devices->seed == cur_devices)
			fs_devices = fs_devices->seed;
		fs_devices->seed = cur_devices->seed;
		cur_devices->seed = NULL;
		__btrfs_close_devices(cur_devices);
		unlock_chunks(root);
		free_fs_devices(cur_devices);

	/*
	 * at this point, the device is zero sized. We want to
	 * remove it from the devices list and zero out the old super
	 */
		/* make sure this device isn't detected as part of
		 * the FS anymore
		 */
		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
		set_buffer_dirty(bh);
		sync_dirty_buffer(bh);

	blkdev_put(bdev, FMODE_READ | FMODE_EXCL);

	mutex_unlock(&uuid_mutex);

	if (device->writeable) {
		list_add(&device->dev_alloc_list,
			 &root->fs_info->fs_devices->alloc_list);
		unlock_chunks(root);
		root->fs_info->fs_devices->rw_devices++;
/*
 * does all the dirty work required for changing the file system's UUID.
 */
static int btrfs_prepare_sprout(struct btrfs_root *root)
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	struct btrfs_fs_devices *old_devices;
	struct btrfs_fs_devices *seed_devices;
	struct btrfs_super_block *disk_super = root->fs_info->super_copy;
	struct btrfs_device *device;

	BUG_ON(!mutex_is_locked(&uuid_mutex));
	if (!fs_devices->seeding)

	seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);

	old_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(old_devices)) {
		kfree(seed_devices);
		return PTR_ERR(old_devices);

	list_add(&old_devices->list, &fs_uuids);

	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
	seed_devices->opened = 1;
	INIT_LIST_HEAD(&seed_devices->devices);
	INIT_LIST_HEAD(&seed_devices->alloc_list);
	mutex_init(&seed_devices->device_list_mutex);

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
	list_for_each_entry(device, &seed_devices->devices, dev_list) {
		device->fs_devices = seed_devices;

	fs_devices->seeding = 0;
	fs_devices->num_devices = 0;
	fs_devices->open_devices = 0;
	fs_devices->seed = seed_devices;

	generate_random_uuid(fs_devices->fsid);
	memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	super_flags = btrfs_super_flags(disk_super) &
		      ~BTRFS_SUPER_FLAG_SEEDING;
	btrfs_set_super_flags(disk_super, super_flags);
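
/*
 * Sketch of the sprout transition above (illustrative summary, not in the
 * original): the existing devices are moved onto a new "seed" fs_devices,
 * which keeps the old fsid and is marked opened but holds no rw devices,
 * while the mounted fs_devices gets a freshly generated fsid and continues
 * as the writable filesystem, chained to the seed via fs_devices->seed.
 */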
/*
 * store the expected generation for seed devices in device items.
 */
static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dev_item *dev_item;
	struct btrfs_device *device;
	struct btrfs_key key;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];

	path = btrfs_alloc_path();

	root = root->fs_info->chunk_root;
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);

	leaf = path->nodes[0];

	if (path->slots[0] >= btrfs_header_nritems(leaf)) {
		ret = btrfs_next_leaf(root, path);
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		btrfs_release_path(path);

	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
	    key.type != BTRFS_DEV_ITEM_KEY)

	dev_item = btrfs_item_ptr(leaf, path->slots[0],
				  struct btrfs_dev_item);
	devid = btrfs_device_id(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid,
			   (unsigned long)btrfs_device_uuid(dev_item),
	read_extent_buffer(leaf, fs_uuid,
			   (unsigned long)btrfs_device_fsid(dev_item),
	device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);

	if (device->fs_devices->seeding) {
		btrfs_set_device_generation(leaf, dev_item,
					    device->generation);
		btrfs_mark_buffer_dirty(leaf);

	btrfs_free_path(path);
int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
	struct request_queue *q;
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device;
	struct block_device *bdev;
	struct list_head *devices;
	struct super_block *sb = root->fs_info->sb;
	int seeding_dev = 0;

	if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)

	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
				  root->fs_info->bdev_holder);
		return PTR_ERR(bdev);

	if (root->fs_info->fs_devices->seeding) {
		down_write(&sb->s_umount);
		mutex_lock(&uuid_mutex);

	filemap_write_and_wait(bdev->bd_inode->i_mapping);

	devices = &root->fs_info->fs_devices->devices;
	/*
	 * we have the volume lock, so we don't need the extra
	 * device list mutex while reading the list here.
	 */
	list_for_each_entry(device, devices, dev_list) {
		if (device->bdev == bdev) {

	device = kzalloc(sizeof(*device), GFP_NOFS);
		/* we can safely leave the fs_devices entry around */

	device->name = kstrdup(device_path, GFP_NOFS);
	if (!device->name) {

	ret = find_next_devid(root, &device->devid);
		kfree(device->name);

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		kfree(device->name);
		ret = PTR_ERR(trans);

	q = bdev_get_queue(bdev);
	if (blk_queue_discard(q))
		device->can_discard = 1;
	device->writeable = 1;
	device->work.func = pending_bios_fn;
	generate_random_uuid(device->uuid);
	spin_lock_init(&device->io_lock);
	device->generation = trans->transid;
	device->io_width = root->sectorsize;
	device->io_align = root->sectorsize;
	device->sector_size = root->sectorsize;
	device->total_bytes = i_size_read(bdev->bd_inode);
	device->disk_total_bytes = device->total_bytes;
	device->dev_root = root->fs_info->dev_root;
	device->bdev = bdev;
	device->in_fs_metadata = 1;
	device->mode = FMODE_EXCL;
	set_blocksize(device->bdev, 4096);

		sb->s_flags &= ~MS_RDONLY;
		ret = btrfs_prepare_sprout(root);

	device->fs_devices = root->fs_info->fs_devices;

	/*
	 * we don't want write_supers to jump in here with our device
	 * half setup
	 */
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
	list_add(&device->dev_alloc_list,
		 &root->fs_info->fs_devices->alloc_list);
	root->fs_info->fs_devices->num_devices++;
	root->fs_info->fs_devices->open_devices++;
	root->fs_info->fs_devices->rw_devices++;
	if (device->can_discard)
		root->fs_info->fs_devices->num_can_discard++;
	root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;

	spin_lock(&root->fs_info->free_chunk_lock);
	root->fs_info->free_chunk_space += device->total_bytes;
	spin_unlock(&root->fs_info->free_chunk_lock);

	if (!blk_queue_nonrot(bdev_get_queue(bdev)))
		root->fs_info->fs_devices->rotating = 1;

	total_bytes = btrfs_super_total_bytes(root->fs_info->super_copy);
	btrfs_set_super_total_bytes(root->fs_info->super_copy,
				    total_bytes + device->total_bytes);

	total_bytes = btrfs_super_num_devices(root->fs_info->super_copy);
	btrfs_set_super_num_devices(root->fs_info->super_copy,
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

		ret = init_first_rw_device(trans, root, device);
		ret = btrfs_finish_sprout(trans, root);

		ret = btrfs_add_device(trans, root, device);

	/*
	 * we've got more storage, clear any full flags on the space
	 * infos
	 */
	btrfs_clear_space_info_full(root->fs_info);

	unlock_chunks(root);
	btrfs_commit_transaction(trans, root);

	mutex_unlock(&uuid_mutex);
	up_write(&sb->s_umount);

	ret = btrfs_relocate_sys_chunks(root);

	blkdev_put(bdev, FMODE_EXCL);

	mutex_unlock(&uuid_mutex);
	up_write(&sb->s_umount);
static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
					struct btrfs_device *device)
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = device->dev_root->fs_info->chunk_root;

	path = btrfs_alloc_path();

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_free_path(path);
static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
			       struct btrfs_device *device, u64 new_size)
	struct btrfs_super_block *super_copy =
		device->dev_root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 diff = new_size - device->total_bytes;

	if (!device->writeable)
	if (new_size <= device->total_bytes)

	btrfs_set_super_total_bytes(super_copy, old_total + diff);
	device->fs_devices->total_rw_bytes += diff;

	device->total_bytes = new_size;
	device->disk_total_bytes = new_size;
	btrfs_clear_space_info_full(device->dev_root->fs_info);

	return btrfs_update_device(trans, device);

int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size)
	lock_chunks(device->dev_root);
	ret = __btrfs_grow_device(trans, device, new_size);
	unlock_chunks(device->dev_root);
static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    u64 chunk_tree, u64 chunk_objectid,
			    u64 chunk_offset)
	struct btrfs_path *path;
	struct btrfs_key key;

	root = root->fs_info->chunk_root;
	path = btrfs_alloc_path();

	key.objectid = chunk_objectid;
	key.offset = chunk_offset;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);

	ret = btrfs_del_item(trans, root, path);

	btrfs_free_path(path);
static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid,
			       u64 chunk_offset)
	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	struct btrfs_key key;

	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)(ptr + len);
			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
			len += btrfs_chunk_item_size(num_stripes);

		if (key.objectid == chunk_objectid &&
		    key.offset == chunk_offset) {
			memmove(ptr, ptr + len, array_size - (cur + len));
			btrfs_set_super_sys_array_size(super_copy, array_size);
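
/*
 * Layout example for the removal above (illustrative, not from the
 * original): sys_chunk_array is a packed sequence of
 * (struct btrfs_disk_key, struct btrfs_chunk + stripes) entries. Deleting
 * entry B from [A][B][C] memmove()s C down over B and shrinks array_size
 * by len = sizeof(*disk_key) + btrfs_chunk_item_size(num_stripes).
 */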
static int btrfs_relocate_chunk(struct btrfs_root *root,
				u64 chunk_tree, u64 chunk_objectid,
				u64 chunk_offset)
	struct extent_map_tree *em_tree;
	struct btrfs_root *extent_root;
	struct btrfs_trans_handle *trans;
	struct extent_map *em;
	struct map_lookup *map;

	root = root->fs_info->chunk_root;
	extent_root = root->fs_info->extent_root;
	em_tree = &root->fs_info->mapping_tree.map_tree;

	ret = btrfs_can_relocate(extent_root, chunk_offset);

	/* step one, relocate all the extents inside this chunk */
	ret = btrfs_relocate_block_group(extent_root, chunk_offset);

	trans = btrfs_start_transaction(root, 0);
	BUG_ON(IS_ERR(trans));

	/*
	 * step two, delete the device extents and the
	 * chunk tree entries
	 */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	read_unlock(&em_tree->lock);

	BUG_ON(!em || em->start > chunk_offset ||
	       em->start + em->len < chunk_offset);
	map = (struct map_lookup *)em->bdev;

	for (i = 0; i < map->num_stripes; i++) {
		ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
					    map->stripes[i].physical);
		if (map->stripes[i].dev) {
			ret = btrfs_update_device(trans, map->stripes[i].dev);

	ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,

	trace_btrfs_chunk_free(root, map, chunk_offset, em->len);

	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);

	ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);

	write_lock(&em_tree->lock);
	remove_extent_mapping(em_tree, em);
	write_unlock(&em_tree->lock);

	/* once for the tree */
	free_extent_map(em);
	free_extent_map(em);

	unlock_chunks(root);
	btrfs_end_transaction(trans, root);
static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
	struct btrfs_root *chunk_root = root->fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_chunk *chunk;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u64 chunk_tree = chunk_root->root_key.objectid;
	bool retried = false;

	path = btrfs_alloc_path();

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);

		ret = btrfs_previous_item(chunk_root, path, key.objectid,

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		chunk = btrfs_item_ptr(leaf, path->slots[0],
				       struct btrfs_chunk);
		chunk_type = btrfs_chunk_type(leaf, chunk);
		btrfs_release_path(path);

		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
			ret = btrfs_relocate_chunk(chunk_root, chunk_tree,

		if (found_key.offset == 0)
		key.offset = found_key.offset - 1;

	if (failed && !retried) {
	} else if (failed && retried) {

	btrfs_free_path(path);
static int insert_balance_item(struct btrfs_root *root,
			       struct btrfs_balance_control *bctl)
	struct btrfs_trans_handle *trans;
	struct btrfs_balance_item *item;
	struct btrfs_disk_balance_args disk_bargs;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	path = btrfs_alloc_path();

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);

	key.objectid = BTRFS_BALANCE_OBJECTID;
	key.type = BTRFS_BALANCE_ITEM_KEY;

	ret = btrfs_insert_empty_item(trans, root, path, &key,

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);

	memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));

	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
	btrfs_set_balance_data(leaf, item, &disk_bargs);
	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
	btrfs_set_balance_meta(leaf, item, &disk_bargs);
	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
	btrfs_set_balance_sys(leaf, item, &disk_bargs);

	btrfs_set_balance_flags(leaf, item, bctl->flags);

	btrfs_mark_buffer_dirty(leaf);

	btrfs_free_path(path);
	err = btrfs_commit_transaction(trans, root);
static int del_balance_item(struct btrfs_root *root)
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);

	key.objectid = BTRFS_BALANCE_OBJECTID;
	key.type = BTRFS_BALANCE_ITEM_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);

	ret = btrfs_del_item(trans, root, path);

	btrfs_free_path(path);
	err = btrfs_commit_transaction(trans, root);
/*
 * This is a heuristic used to reduce the number of chunks balanced on
 * resume after balance was interrupted.
 */
static void update_balance_args(struct btrfs_balance_control *bctl)
	/*
	 * Turn on soft mode for chunk types that were being converted.
	 */
	if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
	if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;

	/*
	 * Turn on usage filter if it is not already used. The idea is
	 * that chunks that we have already balanced should be
	 * reasonably full. Don't do it for chunks that are being
	 * converted - that will keep us from relocating unconverted
	 * (albeit full) chunks.
	 */
	if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->data.usage = 90;
	if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->sys.usage = 90;
	if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->meta.usage = 90;
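
/*
 * Example of the resume heuristic above (illustrative, not from the
 * original): a plain "balance everything" run that is interrupted resumes
 * roughly as if it had been started with usage=90 filters on data, metadata
 * and system chunks, so chunks that were already rewritten (and are
 * therefore close to full) get skipped the second time around.
 */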
/*
 * Should be called with both balance and volume mutexes held to
 * serialize other volume operations (add_dev/rm_dev/resize) with
 * restriper. Same goes for unset_balance_control.
 */
static void set_balance_control(struct btrfs_balance_control *bctl)
	struct btrfs_fs_info *fs_info = bctl->fs_info;

	BUG_ON(fs_info->balance_ctl);

	spin_lock(&fs_info->balance_lock);
	fs_info->balance_ctl = bctl;
	spin_unlock(&fs_info->balance_lock);

static void unset_balance_control(struct btrfs_fs_info *fs_info)
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;

	BUG_ON(!fs_info->balance_ctl);

	spin_lock(&fs_info->balance_lock);
	fs_info->balance_ctl = NULL;
	spin_unlock(&fs_info->balance_lock);
/*
 * Balance filters. Return 1 if chunk should be filtered out
 * (should not be balanced).
 */
static int chunk_profiles_filter(u64 chunk_profile,
				 struct btrfs_balance_args *bargs)
	chunk_profile &= BTRFS_BLOCK_GROUP_PROFILE_MASK;

	if (chunk_profile == 0)
		chunk_profile = BTRFS_AVAIL_ALLOC_BIT_SINGLE;

	if (bargs->profiles & chunk_profile)

static u64 div_factor_fine(u64 num, int factor)
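/*
 * (Body elided in this listing; in the kernel this helper computes
 * num * factor / 100, e.g. div_factor_fine(1000, 90) == 900, which is how
 * the usage threshold below is derived from bargs->usage. Explanatory
 * note, not part of the original.)
 */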
static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
			      struct btrfs_balance_args *bargs)
	struct btrfs_block_group_cache *cache;
	u64 chunk_used, user_thresh;

	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
	chunk_used = btrfs_block_group_used(&cache->item);

	user_thresh = div_factor_fine(cache->key.offset, bargs->usage);
	if (chunk_used < user_thresh)

	btrfs_put_block_group(cache);

static int chunk_devid_filter(struct extent_buffer *leaf,
			      struct btrfs_chunk *chunk,
			      struct btrfs_balance_args *bargs)
	struct btrfs_stripe *stripe;
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);

	for (i = 0; i < num_stripes; i++) {
		stripe = btrfs_stripe_nr(chunk, i);
		if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
/* [pstart, pend) */
static int chunk_drange_filter(struct extent_buffer *leaf,
			       struct btrfs_chunk *chunk,
			       u64 chunk_offset,
			       struct btrfs_balance_args *bargs)
	struct btrfs_stripe *stripe;
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);

	if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))

	if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
	    BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10))

	factor = num_stripes / factor;

	for (i = 0; i < num_stripes; i++) {
		stripe = btrfs_stripe_nr(chunk, i);
		if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)

		stripe_offset = btrfs_stripe_offset(leaf, stripe);
		stripe_length = btrfs_chunk_length(leaf, chunk);
		do_div(stripe_length, factor);

		if (stripe_offset < bargs->pend &&
		    stripe_offset + stripe_length > bargs->pstart)
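
/*
 * Worked example (illustrative, not from the original; it assumes the
 * elided branch leaves factor at 1 for non-mirrored profiles): a 1G RAID0
 * chunk striped over 4 devices stores chunk_length / factor = 1G / 4 =
 * 256M per device, so a stripe at physical offset P overlaps the requested
 * [pstart, pend) iff P < pend && P + 256M > pstart.
 */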
/* [vstart, vend) */
static int chunk_vrange_filter(struct extent_buffer *leaf,
			       struct btrfs_chunk *chunk,
			       u64 chunk_offset,
			       struct btrfs_balance_args *bargs)
	if (chunk_offset < bargs->vend &&
	    chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
		/* at least part of the chunk is inside this vrange */

static int chunk_soft_convert_filter(u64 chunk_profile,
				     struct btrfs_balance_args *bargs)
	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))

	chunk_profile &= BTRFS_BLOCK_GROUP_PROFILE_MASK;

	if (chunk_profile == 0)
		chunk_profile = BTRFS_AVAIL_ALLOC_BIT_SINGLE;

	if (bargs->target & chunk_profile)
static int should_balance_chunk(struct btrfs_root *root,
				struct extent_buffer *leaf,
				struct btrfs_chunk *chunk, u64 chunk_offset)
	struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
	struct btrfs_balance_args *bargs = NULL;
	u64 chunk_type = btrfs_chunk_type(leaf, chunk);

	if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
	      (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {

	if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
		bargs = &bctl->data;
	else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
	else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
		bargs = &bctl->meta;

	/* profiles filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
	    chunk_profiles_filter(chunk_type, bargs)) {

	if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) {

	if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
	    chunk_devid_filter(leaf, chunk, bargs)) {

	/* drange filter, makes sense only with devid filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
	    chunk_drange_filter(leaf, chunk, chunk_offset, bargs)) {

	if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
	    chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {

	/* soft profile changing mode */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
	    chunk_soft_convert_filter(chunk_type, bargs)) {

static u64 div_factor(u64 num, int factor)
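/*
 * (Body elided in this listing; in the kernel div_factor() computes
 * num * factor / 10, so div_factor(old_size, 1) in the loop below is 10%
 * of the device size. Explanatory note, not part of the original.)
 */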
static int __btrfs_balance(struct btrfs_fs_info *fs_info)
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	struct btrfs_root *chunk_root = fs_info->chunk_root;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct list_head *devices;
	struct btrfs_device *device;
	struct btrfs_chunk *chunk;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_trans_handle *trans;
	struct extent_buffer *leaf;
	int enospc_errors = 0;
	bool counting = true;

	/* step one, make some room on all the devices */
	devices = &fs_info->fs_devices->devices;
	list_for_each_entry(device, devices, dev_list) {
		old_size = device->total_bytes;
		size_to_free = div_factor(old_size, 1);
		size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
		if (!device->writeable ||
		    device->total_bytes - device->bytes_used > size_to_free)

		ret = btrfs_shrink_device(device, old_size - size_to_free);

		trans = btrfs_start_transaction(dev_root, 0);
		BUG_ON(IS_ERR(trans));

		ret = btrfs_grow_device(trans, device, old_size);

		btrfs_end_transaction(trans, dev_root);

	/* step two, relocate all the chunks */
	path = btrfs_alloc_path();

	/* zero out stat counters */
	spin_lock(&fs_info->balance_lock);
	memset(&bctl->stat, 0, sizeof(bctl->stat));
	spin_unlock(&fs_info->balance_lock);

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

		if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
		    atomic_read(&fs_info->balance_cancel_req)) {

		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);

		/*
		 * this shouldn't happen, it means the last relocate
		 * failed
		 */
			BUG(); /* FIXME break ? */

		ret = btrfs_previous_item(chunk_root, path, 0,
					  BTRFS_CHUNK_ITEM_KEY);

		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.objectid != key.objectid)

		/* chunk zero is special */
		if (found_key.offset == 0)

		chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);

			spin_lock(&fs_info->balance_lock);
			bctl->stat.considered++;
			spin_unlock(&fs_info->balance_lock);

		ret = should_balance_chunk(chunk_root, leaf, chunk,
		btrfs_release_path(path);

			spin_lock(&fs_info->balance_lock);
			bctl->stat.expected++;
			spin_unlock(&fs_info->balance_lock);

		ret = btrfs_relocate_chunk(chunk_root,
					   chunk_root->root_key.objectid,
		if (ret && ret != -ENOSPC)
		if (ret == -ENOSPC) {
			spin_lock(&fs_info->balance_lock);
			bctl->stat.completed++;
			spin_unlock(&fs_info->balance_lock);

		key.offset = found_key.offset - 1;

	btrfs_release_path(path);

	btrfs_free_path(path);
	if (enospc_errors) {
		printk(KERN_INFO "btrfs: %d enospc errors during balance\n",
2606 static inline int balance_need_close(struct btrfs_fs_info *fs_info)
2608 /* cancel requested || normal exit path */
2609 return atomic_read(&fs_info->balance_cancel_req) ||
2610 (atomic_read(&fs_info->balance_pause_req) == 0 &&
2611 atomic_read(&fs_info->balance_cancel_req) == 0);
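/*
 * In other words: tear the balance control down either when a cancel was
 * requested, or on the normal exit path where neither a pause nor a
 * cancel is pending.  A pending pause is the one case that keeps
 * balance_ctl alive so the operation can be resumed later.
 */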
2614 static void __cancel_balance(struct btrfs_fs_info *fs_info)
2618 unset_balance_control(fs_info);
2619 ret = del_balance_item(fs_info->tree_root);
2623 void update_ioctl_balance_args(struct btrfs_fs_info *fs_info, int lock,
2624 struct btrfs_ioctl_balance_args *bargs);
2627 * Should be called with both balance and volume mutexes held
2629 int btrfs_balance(struct btrfs_balance_control *bctl,
2630 struct btrfs_ioctl_balance_args *bargs)
2632 struct btrfs_fs_info *fs_info = bctl->fs_info;
2636 if (btrfs_fs_closing(fs_info) ||
2637 atomic_read(&fs_info->balance_pause_req) ||
2638 atomic_read(&fs_info->balance_cancel_req)) {
2644 * In case of mixed groups both data and meta should be picked,
2645 * and identical options should be given for both of them.
2647 allowed = btrfs_super_incompat_flags(fs_info->super_copy);
2648 if ((allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
2649 (bctl->flags & (BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA))) {
2650 if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
2651 !(bctl->flags & BTRFS_BALANCE_METADATA) ||
2652 memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
2653 printk(KERN_ERR "btrfs: with mixed groups data and "
2654 "metadata balance options must be the same\n");
2661 * Profile changing sanity checks. Skip them if a simple
2662 * balance is requested.
2664 if (!((bctl->data.flags | bctl->sys.flags | bctl->meta.flags) &
2665 BTRFS_BALANCE_ARGS_CONVERT))
2668 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
2669 if (fs_info->fs_devices->num_devices == 1)
2670 allowed |= BTRFS_BLOCK_GROUP_DUP;
2671 else if (fs_info->fs_devices->num_devices < 4)
2672 allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
2674 allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
2675 BTRFS_BLOCK_GROUP_RAID10);
2677 if (!profile_is_valid(bctl->data.target, 1) ||
2678 bctl->data.target & ~allowed) {
2679 printk(KERN_ERR "btrfs: unable to start balance with target "
2680 "data profile %llu\n",
2681 (unsigned long long)bctl->data.target);
2685 if (!profile_is_valid(bctl->meta.target, 1) ||
2686 bctl->meta.target & ~allowed) {
2687 printk(KERN_ERR "btrfs: unable to start balance with target "
2688 "metadata profile %llu\n",
2689 (unsigned long long)bctl->meta.target);
2693 if (!profile_is_valid(bctl->sys.target, 1) ||
2694 bctl->sys.target & ~allowed) {
2695 printk(KERN_ERR "btrfs: unable to start balance with target "
2696 "system profile %llu\n",
2697 (unsigned long long)bctl->sys.target);
2702 if (bctl->data.target & BTRFS_BLOCK_GROUP_DUP) {
2703 printk(KERN_ERR "btrfs: dup for data is not allowed\n");
2708 /* allow reducing meta or sys integrity only if force is set */
2709 allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
2710 BTRFS_BLOCK_GROUP_RAID10;
2711 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2712 (fs_info->avail_system_alloc_bits & allowed) &&
2713 !(bctl->sys.target & allowed)) ||
2714 ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2715 (fs_info->avail_metadata_alloc_bits & allowed) &&
2716 !(bctl->meta.target & allowed))) {
2717 if (bctl->flags & BTRFS_BALANCE_FORCE) {
2718 printk(KERN_INFO "btrfs: force reducing metadata "
2721 printk(KERN_ERR "btrfs: balance will reduce metadata "
2722 "integrity, use force if you want this\n");
2729 ret = insert_balance_item(fs_info->tree_root, bctl);
2730 if (ret && ret != -EEXIST)
2733 if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
2734 BUG_ON(ret == -EEXIST);
2735 set_balance_control(bctl);
2737 BUG_ON(ret != -EEXIST);
2738 spin_lock(&fs_info->balance_lock);
2739 update_balance_args(bctl);
2740 spin_unlock(&fs_info->balance_lock);
2743 atomic_inc(&fs_info->balance_running);
2744 mutex_unlock(&fs_info->balance_mutex);
2746 ret = __btrfs_balance(fs_info);
2748 mutex_lock(&fs_info->balance_mutex);
2749 atomic_dec(&fs_info->balance_running);
2752 memset(bargs, 0, sizeof(*bargs));
2753 update_ioctl_balance_args(fs_info, 0, bargs);
2756 if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
2757 balance_need_close(fs_info)) {
2758 __cancel_balance(fs_info);
2761 wake_up(&fs_info->balance_wait_q);
2765 if (bctl->flags & BTRFS_BALANCE_RESUME)
2766 __cancel_balance(fs_info);
2772 static int balance_kthread(void *data)
2774 struct btrfs_balance_control *bctl =
2775 (struct btrfs_balance_control *)data;
2776 struct btrfs_fs_info *fs_info = bctl->fs_info;
2779 mutex_lock(&fs_info->volume_mutex);
2780 mutex_lock(&fs_info->balance_mutex);
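/*
 * volume_mutex is taken before balance_mutex here; btrfs_cancel_balance()
 * below re-acquires the pair in the same order, which is what keeps the
 * lock ordering consistent between the resume and cancel paths.
 */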
2782 set_balance_control(bctl);
2784 if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
2785 printk(KERN_INFO "btrfs: force skipping balance\n");
2787 printk(KERN_INFO "btrfs: continuing balance\n");
2788 ret = btrfs_balance(bctl, NULL);
2791 mutex_unlock(&fs_info->balance_mutex);
2792 mutex_unlock(&fs_info->volume_mutex);
2796 int btrfs_recover_balance(struct btrfs_root *tree_root)
2798 struct task_struct *tsk;
2799 struct btrfs_balance_control *bctl;
2800 struct btrfs_balance_item *item;
2801 struct btrfs_disk_balance_args disk_bargs;
2802 struct btrfs_path *path;
2803 struct extent_buffer *leaf;
2804 struct btrfs_key key;
2807 path = btrfs_alloc_path();
2811 bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
2817 key.objectid = BTRFS_BALANCE_OBJECTID;
2818 key.type = BTRFS_BALANCE_ITEM_KEY;
2821 ret = btrfs_search_slot(NULL, tree_root, &key, path, 0, 0);
2824 if (ret > 0) { /* ret = -ENOENT; */
2829 leaf = path->nodes[0];
2830 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
2832 bctl->fs_info = tree_root->fs_info;
2833 bctl->flags = btrfs_balance_flags(leaf, item) | BTRFS_BALANCE_RESUME;
2835 btrfs_balance_data(leaf, item, &disk_bargs);
2836 btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
2837 btrfs_balance_meta(leaf, item, &disk_bargs);
2838 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
2839 btrfs_balance_sys(leaf, item, &disk_bargs);
2840 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
2842 tsk = kthread_run(balance_kthread, bctl, "btrfs-balance");
2851 btrfs_free_path(path);
2855 int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
2859 mutex_lock(&fs_info->balance_mutex);
2860 if (!fs_info->balance_ctl) {
2861 mutex_unlock(&fs_info->balance_mutex);
2865 if (atomic_read(&fs_info->balance_running)) {
2866 atomic_inc(&fs_info->balance_pause_req);
2867 mutex_unlock(&fs_info->balance_mutex);
2869 wait_event(fs_info->balance_wait_q,
2870 atomic_read(&fs_info->balance_running) == 0);
2872 mutex_lock(&fs_info->balance_mutex);
2873 /* we are good with balance_ctl ripped off from under us */
2874 BUG_ON(atomic_read(&fs_info->balance_running));
2875 atomic_dec(&fs_info->balance_pause_req);
2880 mutex_unlock(&fs_info->balance_mutex);
2884 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
2886 mutex_lock(&fs_info->balance_mutex);
2887 if (!fs_info->balance_ctl) {
2888 mutex_unlock(&fs_info->balance_mutex);
2892 atomic_inc(&fs_info->balance_cancel_req);
2894 * if balance is running, just wait and return; the balance item is
2895 * deleted in btrfs_balance() in that case
2897 if (atomic_read(&fs_info->balance_running)) {
2898 mutex_unlock(&fs_info->balance_mutex);
2899 wait_event(fs_info->balance_wait_q,
2900 atomic_read(&fs_info->balance_running) == 0);
2901 mutex_lock(&fs_info->balance_mutex);
2903 /* __cancel_balance needs volume_mutex */
2904 mutex_unlock(&fs_info->balance_mutex);
2905 mutex_lock(&fs_info->volume_mutex);
2906 mutex_lock(&fs_info->balance_mutex);
2908 if (fs_info->balance_ctl)
2909 __cancel_balance(fs_info);
2911 mutex_unlock(&fs_info->volume_mutex);
2914 BUG_ON(fs_info->balance_ctl || atomic_read(&fs_info->balance_running));
2915 atomic_dec(&fs_info->balance_cancel_req);
2916 mutex_unlock(&fs_info->balance_mutex);
2921 * shrinking a device means finding all of the device extents past
2922 * the new size, and then following the back refs to the chunks.
2923 * The chunk relocation code actually frees the device extent
2925 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
2927 struct btrfs_trans_handle *trans;
2928 struct btrfs_root *root = device->dev_root;
2929 struct btrfs_dev_extent *dev_extent = NULL;
2930 struct btrfs_path *path;
2938 bool retried = false;
2939 struct extent_buffer *l;
2940 struct btrfs_key key;
2941 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
2942 u64 old_total = btrfs_super_total_bytes(super_copy);
2943 u64 old_size = device->total_bytes;
2944 u64 diff = device->total_bytes - new_size;
2946 if (new_size >= device->total_bytes)
2949 path = btrfs_alloc_path();
2957 device->total_bytes = new_size;
2958 if (device->writeable) {
2959 device->fs_devices->total_rw_bytes -= diff;
2960 spin_lock(&root->fs_info->free_chunk_lock);
2961 root->fs_info->free_chunk_space -= diff;
2962 spin_unlock(&root->fs_info->free_chunk_lock);
2964 unlock_chunks(root);
2967 key.objectid = device->devid;
2968 key.offset = (u64)-1;
2969 key.type = BTRFS_DEV_EXTENT_KEY;
2972 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2976 ret = btrfs_previous_item(root, path, 0, key.type);
2981 btrfs_release_path(path);
2986 slot = path->slots[0];
2987 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
2989 if (key.objectid != device->devid) {
2990 btrfs_release_path(path);
2994 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
2995 length = btrfs_dev_extent_length(l, dev_extent);
2997 if (key.offset + length <= new_size) {
2998 btrfs_release_path(path);
3002 chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
3003 chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
3004 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3005 btrfs_release_path(path);
3007 ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
3009 if (ret && ret != -ENOSPC)
3016 if (failed && !retried) {
3020 } else if (failed && retried) {
3024 device->total_bytes = old_size;
3025 if (device->writeable)
3026 device->fs_devices->total_rw_bytes += diff;
3027 spin_lock(&root->fs_info->free_chunk_lock);
3028 root->fs_info->free_chunk_space += diff;
3029 spin_unlock(&root->fs_info->free_chunk_lock);
3030 unlock_chunks(root);
3034 /* Shrinking succeeded, else we would be at "done". */
3035 trans = btrfs_start_transaction(root, 0);
3036 if (IS_ERR(trans)) {
3037 ret = PTR_ERR(trans);
3043 device->disk_total_bytes = new_size;
3044 /* Now btrfs_update_device() will change the on-disk size. */
3045 ret = btrfs_update_device(trans, device);
3047 unlock_chunks(root);
3048 btrfs_end_transaction(trans, root);
3051 WARN_ON(diff > old_total);
3052 btrfs_set_super_total_bytes(super_copy, old_total - diff);
3053 unlock_chunks(root);
3054 btrfs_end_transaction(trans, root);
3056 btrfs_free_path(path);
3060 static int btrfs_add_system_chunk(struct btrfs_root *root,
3061 struct btrfs_key *key,
3062 struct btrfs_chunk *chunk, int item_size)
3064 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3065 struct btrfs_disk_key disk_key;
3069 array_size = btrfs_super_sys_array_size(super_copy);
3070 if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
3073 ptr = super_copy->sys_chunk_array + array_size;
3074 btrfs_cpu_key_to_disk(&disk_key, key);
3075 memcpy(ptr, &disk_key, sizeof(disk_key));
3076 ptr += sizeof(disk_key);
3077 memcpy(ptr, chunk, item_size);
3078 item_size += sizeof(disk_key);
3079 btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
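/*
 * The superblock's sys_chunk_array is a packed sequence of
 * (btrfs_disk_key, btrfs_chunk) pairs:
 *
 *	[disk_key][chunk + stripes][disk_key][chunk + stripes]...
 *
 * which is why item_size is bumped by sizeof(disk_key) above before the
 * array size is updated.  btrfs_read_sys_array() below walks the array
 * using the same layout.
 */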
3084 * sort the devices in descending order by max_avail, total_avail
3086 static int btrfs_cmp_device_info(const void *a, const void *b)
3088 const struct btrfs_device_info *di_a = a;
3089 const struct btrfs_device_info *di_b = b;
3091 if (di_a->max_avail > di_b->max_avail)
3093 if (di_a->max_avail < di_b->max_avail)
3095 if (di_a->total_avail > di_b->total_avail)
3097 if (di_a->total_avail < di_b->total_avail)
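/*
 * For the descending order described above, each comparison returns -1
 * when the first argument is larger and 1 when it is smaller, falling
 * through to 0 when both fields are equal; sort() then places the
 * device with the largest hole first.
 */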
3102 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3103 struct btrfs_root *extent_root,
3104 struct map_lookup **map_ret,
3105 u64 *num_bytes_out, u64 *stripe_size_out,
3106 u64 start, u64 type)
3108 struct btrfs_fs_info *info = extent_root->fs_info;
3109 struct btrfs_fs_devices *fs_devices = info->fs_devices;
3110 struct list_head *cur;
3111 struct map_lookup *map = NULL;
3112 struct extent_map_tree *em_tree;
3113 struct extent_map *em;
3114 struct btrfs_device_info *devices_info = NULL;
3116 int num_stripes; /* total number of stripes to allocate */
3117 int sub_stripes; /* sub_stripes info for map */
3118 int dev_stripes; /* stripes per dev */
3119 int devs_max; /* max devs to use */
3120 int devs_min; /* min devs needed */
3121 int devs_increment; /* ndevs has to be a multiple of this */
3122 int ncopies; /* how many copies the data has */
3124 u64 max_stripe_size;
3132 if ((type & BTRFS_BLOCK_GROUP_RAID1) &&
3133 (type & BTRFS_BLOCK_GROUP_DUP)) {
3135 type &= ~BTRFS_BLOCK_GROUP_DUP;
3138 if (list_empty(&fs_devices->alloc_list))
3145 devs_max = 0; /* 0 == as many as possible */
3149 * define the properties of each RAID type.
3150 * FIXME: move this to a global table and use it in all RAID code
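/*
 * Such a table might look like the following (hypothetical sketch of
 * what the FIXME asks for; index names are made up, and the values
 * mirror the per-profile branches of this function):
 *
 *	static const struct {
 *		int sub_stripes;
 *		int dev_stripes;
 *		int devs_max;
 *		int devs_min;
 *		int devs_increment;
 *		int ncopies;
 *	} raid_attr[] = {
 *		[SINGLE] = { 1, 1, 1, 1, 1, 1 },
 *		[DUP]    = { 1, 2, 1, 1, 1, 2 },
 *		[RAID0]  = { 1, 1, 0, 2, 1, 1 },
 *		[RAID1]  = { 1, 1, 2, 2, 1, 2 },
 *		[RAID10] = { 2, 1, 0, 4, 2, 2 },
 *	};
 */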
3153 if (type & (BTRFS_BLOCK_GROUP_DUP)) {
3157 } else if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
3159 } else if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
3164 } else if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
3173 if (type & BTRFS_BLOCK_GROUP_DATA) {
3174 max_stripe_size = 1024 * 1024 * 1024;
3175 max_chunk_size = 10 * max_stripe_size;
3176 } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
3177 /* for larger filesystems, use larger metadata chunks */
3178 if (fs_devices->total_rw_bytes > 50ULL * 1024 * 1024 * 1024)
3179 max_stripe_size = 1024 * 1024 * 1024;
3181 max_stripe_size = 256 * 1024 * 1024;
3182 max_chunk_size = max_stripe_size;
3183 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
3184 max_stripe_size = 32 * 1024 * 1024;
3185 max_chunk_size = 2 * max_stripe_size;
3187 printk(KERN_ERR "btrfs: invalid chunk type 0x%llx requested\n",
3192 /* we don't want a chunk larger than 10% of writeable space */
3193 max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
3196 devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices,
3201 cur = fs_devices->alloc_list.next;
3204 * in the first pass through the devices list, we gather information
3205 * about the available holes on each device.
3208 while (cur != &fs_devices->alloc_list) {
3209 struct btrfs_device *device;
3213 device = list_entry(cur, struct btrfs_device, dev_alloc_list);
3217 if (!device->writeable) {
3219 "btrfs: read-only device in alloc_list\n");
3224 if (!device->in_fs_metadata)
3227 if (device->total_bytes > device->bytes_used)
3228 total_avail = device->total_bytes - device->bytes_used;
3232 /* If there is no space on this device, skip it. */
3233 if (total_avail == 0)
3236 ret = find_free_dev_extent(device,
3237 max_stripe_size * dev_stripes,
3238 &dev_offset, &max_avail);
3239 if (ret && ret != -ENOSPC)
3243 max_avail = max_stripe_size * dev_stripes;
3245 if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
3248 devices_info[ndevs].dev_offset = dev_offset;
3249 devices_info[ndevs].max_avail = max_avail;
3250 devices_info[ndevs].total_avail = total_avail;
3251 devices_info[ndevs].dev = device;
3256 * now sort the devices by hole size / available space
3258 sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
3259 btrfs_cmp_device_info, NULL);
3261 /* round down to number of usable stripes */
3262 ndevs -= ndevs % devs_increment;
3264 if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
3269 if (devs_max && ndevs > devs_max)
3272 * the primary goal is to maximize the number of stripes, so use as many
3273 * devices as possible, even if the stripes are not maximum sized.
3275 stripe_size = devices_info[ndevs-1].max_avail;
3276 num_stripes = ndevs * dev_stripes;
3278 if (stripe_size * num_stripes > max_chunk_size * ncopies) {
3279 stripe_size = max_chunk_size * ncopies;
3280 do_div(stripe_size, num_stripes);
3283 do_div(stripe_size, dev_stripes);
3284 do_div(stripe_size, BTRFS_STRIPE_LEN);
3285 stripe_size *= BTRFS_STRIPE_LEN;
3287 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
3292 map->num_stripes = num_stripes;
3294 for (i = 0; i < ndevs; ++i) {
3295 for (j = 0; j < dev_stripes; ++j) {
3296 int s = i * dev_stripes + j;
3297 map->stripes[s].dev = devices_info[i].dev;
3298 map->stripes[s].physical = devices_info[i].dev_offset +
3302 map->sector_size = extent_root->sectorsize;
3303 map->stripe_len = BTRFS_STRIPE_LEN;
3304 map->io_align = BTRFS_STRIPE_LEN;
3305 map->io_width = BTRFS_STRIPE_LEN;
3307 map->sub_stripes = sub_stripes;
3310 num_bytes = stripe_size * (num_stripes / ncopies);
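/*
 * Worked example with hypothetical numbers: four writeable devices,
 * RAID10 data (dev_stripes = 1, sub_stripes = 2, ncopies = 2) and a
 * 5GiB hole on the most constrained device.  Then num_stripes = 4,
 * stripe_size starts at 5GiB, the clamp against max_chunk_size * ncopies
 * (10GiB * 2) does not trigger, and after rounding down to a multiple of
 * BTRFS_STRIPE_LEN the chunk maps
 * num_bytes = 5GiB * (4 / 2) = 10GiB of logical space.
 */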
3312 *stripe_size_out = stripe_size;
3313 *num_bytes_out = num_bytes;
3315 trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);
3317 em = alloc_extent_map();
3322 em->bdev = (struct block_device *)map;
3324 em->len = num_bytes;
3325 em->block_start = 0;
3326 em->block_len = em->len;
3328 em_tree = &extent_root->fs_info->mapping_tree.map_tree;
3329 write_lock(&em_tree->lock);
3330 ret = add_extent_mapping(em_tree, em);
3331 write_unlock(&em_tree->lock);
3333 free_extent_map(em);
3335 ret = btrfs_make_block_group(trans, extent_root, 0, type,
3336 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3340 for (i = 0; i < map->num_stripes; ++i) {
3341 struct btrfs_device *device;
3344 device = map->stripes[i].dev;
3345 dev_offset = map->stripes[i].physical;
3347 ret = btrfs_alloc_dev_extent(trans, device,
3348 info->chunk_root->root_key.objectid,
3349 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3350 start, dev_offset, stripe_size);
3354 kfree(devices_info);
3359 kfree(devices_info);
3363 static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
3364 struct btrfs_root *extent_root,
3365 struct map_lookup *map, u64 chunk_offset,
3366 u64 chunk_size, u64 stripe_size)
3369 struct btrfs_key key;
3370 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
3371 struct btrfs_device *device;
3372 struct btrfs_chunk *chunk;
3373 struct btrfs_stripe *stripe;
3374 size_t item_size = btrfs_chunk_item_size(map->num_stripes);
3378 chunk = kzalloc(item_size, GFP_NOFS);
3383 while (index < map->num_stripes) {
3384 device = map->stripes[index].dev;
3385 device->bytes_used += stripe_size;
3386 ret = btrfs_update_device(trans, device);
3391 spin_lock(&extent_root->fs_info->free_chunk_lock);
3392 extent_root->fs_info->free_chunk_space -= (stripe_size *
3394 spin_unlock(&extent_root->fs_info->free_chunk_lock);
3397 stripe = &chunk->stripe;
3398 while (index < map->num_stripes) {
3399 device = map->stripes[index].dev;
3400 dev_offset = map->stripes[index].physical;
3402 btrfs_set_stack_stripe_devid(stripe, device->devid);
3403 btrfs_set_stack_stripe_offset(stripe, dev_offset);
3404 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
3409 btrfs_set_stack_chunk_length(chunk, chunk_size);
3410 btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
3411 btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
3412 btrfs_set_stack_chunk_type(chunk, map->type);
3413 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
3414 btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
3415 btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
3416 btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
3417 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
3419 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3420 key.type = BTRFS_CHUNK_ITEM_KEY;
3421 key.offset = chunk_offset;
3423 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
3426 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
3427 ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
3437 * Chunk allocation falls into two parts. The first part does the work
3438 * that makes the newly allocated chunk usable, but does not perform any
3439 * operation that modifies the chunk tree. The second part does the work
3440 * that requires modifying the chunk tree. This division is important for
3441 * the bootstrap process of adding storage to a seed btrfs.
3443 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3444 struct btrfs_root *extent_root, u64 type)
3449 struct map_lookup *map;
3450 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
3453 ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3458 ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
3459 &stripe_size, chunk_offset, type);
3463 ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
3464 chunk_size, stripe_size);
3469 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
3470 struct btrfs_root *root,
3471 struct btrfs_device *device)
3474 u64 sys_chunk_offset;
3478 u64 sys_stripe_size;
3480 struct map_lookup *map;
3481 struct map_lookup *sys_map;
3482 struct btrfs_fs_info *fs_info = root->fs_info;
3483 struct btrfs_root *extent_root = fs_info->extent_root;
3486 ret = find_next_chunk(fs_info->chunk_root,
3487 BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset);
3491 alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
3492 fs_info->avail_metadata_alloc_bits;
3493 alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
3495 ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
3496 &stripe_size, chunk_offset, alloc_profile);
3499 sys_chunk_offset = chunk_offset + chunk_size;
3501 alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM |
3502 fs_info->avail_system_alloc_bits;
3503 alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
3505 ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
3506 &sys_chunk_size, &sys_stripe_size,
3507 sys_chunk_offset, alloc_profile);
3510 ret = btrfs_add_device(trans, fs_info->chunk_root, device);
3514 * Modifying the chunk tree requires allocating new blocks from both
3515 * the system block group and the metadata block group, so operations
3516 * that modify the chunk tree can only be performed after both block
3517 * groups have been created.
3519 ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
3520 chunk_size, stripe_size);
3523 ret = __finish_chunk_alloc(trans, extent_root, sys_map,
3524 sys_chunk_offset, sys_chunk_size,
3530 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
3532 struct extent_map *em;
3533 struct map_lookup *map;
3534 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
3538 read_lock(&map_tree->map_tree.lock);
3539 em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
3540 read_unlock(&map_tree->map_tree.lock);
3544 if (btrfs_test_opt(root, DEGRADED)) {
3545 free_extent_map(em);
3549 map = (struct map_lookup *)em->bdev;
3550 for (i = 0; i < map->num_stripes; i++) {
3551 if (!map->stripes[i].dev->writeable) {
3556 free_extent_map(em);
3560 void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
3562 extent_map_tree_init(&tree->map_tree);
3565 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
3567 struct extent_map *em;
3570 write_lock(&tree->map_tree.lock);
3571 em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
3573 remove_extent_mapping(&tree->map_tree, em);
3574 write_unlock(&tree->map_tree.lock);
3579 free_extent_map(em);
3580 /* once for the tree */
3581 free_extent_map(em);
3585 int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
3587 struct extent_map *em;
3588 struct map_lookup *map;
3589 struct extent_map_tree *em_tree = &map_tree->map_tree;
3592 read_lock(&em_tree->lock);
3593 em = lookup_extent_mapping(em_tree, logical, len);
3594 read_unlock(&em_tree->lock);
3597 BUG_ON(em->start > logical || em->start + em->len < logical);
3598 map = (struct map_lookup *)em->bdev;
3599 if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
3600 ret = map->num_stripes;
3601 else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
3602 ret = map->sub_stripes;
3605 free_extent_map(em);
3609 static int find_live_mirror(struct map_lookup *map, int first, int num,
3613 if (map->stripes[optimal].dev->bdev)
3615 for (i = first; i < first + num; i++) {
3616 if (map->stripes[i].dev->bdev)
3619 /* we couldn't find one that doesn't fail. Just return something
3620 * and the io error handling code will clean up eventually
3625 static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
3626 u64 logical, u64 *length,
3627 struct btrfs_bio **bbio_ret,
3630 struct extent_map *em;
3631 struct map_lookup *map;
3632 struct extent_map_tree *em_tree = &map_tree->map_tree;
3635 u64 stripe_end_offset;
3644 struct btrfs_bio *bbio = NULL;
3646 read_lock(&em_tree->lock);
3647 em = lookup_extent_mapping(em_tree, logical, *length);
3648 read_unlock(&em_tree->lock);
3651 printk(KERN_CRIT "unable to find logical %llu len %llu\n",
3652 (unsigned long long)logical,
3653 (unsigned long long)*length);
3657 BUG_ON(em->start > logical || em->start + em->len < logical);
3658 map = (struct map_lookup *)em->bdev;
3659 offset = logical - em->start;
3661 if (mirror_num > map->num_stripes)
3666 * stripe_nr counts the total number of stripes we have to stride
3667 * to get to this block
3669 do_div(stripe_nr, map->stripe_len);
3671 stripe_offset = stripe_nr * map->stripe_len;
3672 BUG_ON(offset < stripe_offset);
3674 /* stripe_offset is the offset of this block in its stripe */
3675 stripe_offset = offset - stripe_offset;
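/*
 * Worked example with hypothetical numbers: stripe_len = 64KiB and
 * offset = 200KiB into the chunk.  do_div() leaves stripe_nr = 3
 * (three whole stripes precede this block) and stripe_offset ends up
 * as 200KiB - 3 * 64KiB = 8KiB into the block's own stripe.
 */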
3677 if (rw & REQ_DISCARD)
3678 *length = min_t(u64, em->len - offset, *length);
3679 else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
3680 /* we limit the length of each bio to what fits in a stripe */
3681 *length = min_t(u64, em->len - offset,
3682 map->stripe_len - stripe_offset);
3684 *length = em->len - offset;
3692 stripe_nr_orig = stripe_nr;
3693 stripe_nr_end = (offset + *length + map->stripe_len - 1) &
3694 (~(map->stripe_len - 1));
3695 do_div(stripe_nr_end, map->stripe_len);
3696 stripe_end_offset = stripe_nr_end * map->stripe_len -
3698 if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3699 if (rw & REQ_DISCARD)
3700 num_stripes = min_t(u64, map->num_stripes,
3701 stripe_nr_end - stripe_nr_orig);
3702 stripe_index = do_div(stripe_nr, map->num_stripes);
3703 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
3704 if (rw & (REQ_WRITE | REQ_DISCARD))
3705 num_stripes = map->num_stripes;
3706 else if (mirror_num)
3707 stripe_index = mirror_num - 1;
3709 stripe_index = find_live_mirror(map, 0,
3711 current->pid % map->num_stripes);
3712 mirror_num = stripe_index + 1;
3715 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
3716 if (rw & (REQ_WRITE | REQ_DISCARD)) {
3717 num_stripes = map->num_stripes;
3718 } else if (mirror_num) {
3719 stripe_index = mirror_num - 1;
3724 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3725 int factor = map->num_stripes / map->sub_stripes;
3727 stripe_index = do_div(stripe_nr, factor);
3728 stripe_index *= map->sub_stripes;
3731 num_stripes = map->sub_stripes;
3732 else if (rw & REQ_DISCARD)
3733 num_stripes = min_t(u64, map->sub_stripes *
3734 (stripe_nr_end - stripe_nr_orig),
3736 else if (mirror_num)
3737 stripe_index += mirror_num - 1;
3739 stripe_index = find_live_mirror(map, stripe_index,
3740 map->sub_stripes, stripe_index +
3741 current->pid % map->sub_stripes);
3742 mirror_num = stripe_index + 1;
3746 * after this do_div call, stripe_nr is the number of stripes
3747 * on this device we have to walk to find the data, and
3748 * stripe_index is the number of our device in the stripe array
3750 stripe_index = do_div(stripe_nr, map->num_stripes);
3751 mirror_num = stripe_index + 1;
3753 BUG_ON(stripe_index >= map->num_stripes);
3755 bbio = kzalloc(btrfs_bio_size(num_stripes), GFP_NOFS);
3760 atomic_set(&bbio->error, 0);
3762 if (rw & REQ_DISCARD) {
3764 int sub_stripes = 0;
3765 u64 stripes_per_dev = 0;
3766 u32 remaining_stripes = 0;
3769 (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
3770 if (map->type & BTRFS_BLOCK_GROUP_RAID0)
3773 sub_stripes = map->sub_stripes;
3775 factor = map->num_stripes / sub_stripes;
3776 stripes_per_dev = div_u64_rem(stripe_nr_end -
3779 &remaining_stripes);
3782 for (i = 0; i < num_stripes; i++) {
3783 bbio->stripes[i].physical =
3784 map->stripes[stripe_index].physical +
3785 stripe_offset + stripe_nr * map->stripe_len;
3786 bbio->stripes[i].dev = map->stripes[stripe_index].dev;
3788 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
3789 BTRFS_BLOCK_GROUP_RAID10)) {
3790 bbio->stripes[i].length = stripes_per_dev *
3792 if (i / sub_stripes < remaining_stripes)
3793 bbio->stripes[i].length +=
3795 if (i < sub_stripes)
3796 bbio->stripes[i].length -=
3798 if ((i / sub_stripes + 1) %
3799 sub_stripes == remaining_stripes)
3800 bbio->stripes[i].length -=
3802 if (i == sub_stripes - 1)
3805 bbio->stripes[i].length = *length;
3808 if (stripe_index == map->num_stripes) {
3809 /* This could only happen for RAID0/10 */
3815 for (i = 0; i < num_stripes; i++) {
3816 bbio->stripes[i].physical =
3817 map->stripes[stripe_index].physical +
3819 stripe_nr * map->stripe_len;
3820 bbio->stripes[i].dev =
3821 map->stripes[stripe_index].dev;
3826 if (rw & REQ_WRITE) {
3827 if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
3828 BTRFS_BLOCK_GROUP_RAID10 |
3829 BTRFS_BLOCK_GROUP_DUP)) {
3835 bbio->num_stripes = num_stripes;
3836 bbio->max_errors = max_errors;
3837 bbio->mirror_num = mirror_num;
3839 free_extent_map(em);
3843 int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
3844 u64 logical, u64 *length,
3845 struct btrfs_bio **bbio_ret, int mirror_num)
3847 return __btrfs_map_block(map_tree, rw, logical, length, bbio_ret,
3851 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
3852 u64 chunk_start, u64 physical, u64 devid,
3853 u64 **logical, int *naddrs, int *stripe_len)
3855 struct extent_map_tree *em_tree = &map_tree->map_tree;
3856 struct extent_map *em;
3857 struct map_lookup *map;
3864 read_lock(&em_tree->lock);
3865 em = lookup_extent_mapping(em_tree, chunk_start, 1);
3866 read_unlock(&em_tree->lock);
3868 BUG_ON(!em || em->start != chunk_start);
3869 map = (struct map_lookup *)em->bdev;
3872 if (map->type & BTRFS_BLOCK_GROUP_RAID10)
3873 do_div(length, map->num_stripes / map->sub_stripes);
3874 else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
3875 do_div(length, map->num_stripes);
3877 buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
3880 for (i = 0; i < map->num_stripes; i++) {
3881 if (devid && map->stripes[i].dev->devid != devid)
3883 if (map->stripes[i].physical > physical ||
3884 map->stripes[i].physical + length <= physical)
3887 stripe_nr = physical - map->stripes[i].physical;
3888 do_div(stripe_nr, map->stripe_len);
3890 if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3891 stripe_nr = stripe_nr * map->num_stripes + i;
3892 do_div(stripe_nr, map->sub_stripes);
3893 } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3894 stripe_nr = stripe_nr * map->num_stripes + i;
3896 bytenr = chunk_start + stripe_nr * map->stripe_len;
3897 WARN_ON(nr >= map->num_stripes);
3898 for (j = 0; j < nr; j++) {
3899 if (buf[j] == bytenr)
3903 WARN_ON(nr >= map->num_stripes);
3910 *stripe_len = map->stripe_len;
3912 free_extent_map(em);
3916 static void btrfs_end_bio(struct bio *bio, int err)
3918 struct btrfs_bio *bbio = bio->bi_private;
3919 int is_orig_bio = 0;
3922 atomic_inc(&bbio->error);
3924 if (bio == bbio->orig_bio)
3927 if (atomic_dec_and_test(&bbio->stripes_pending)) {
3930 bio = bbio->orig_bio;
3932 bio->bi_private = bbio->private;
3933 bio->bi_end_io = bbio->end_io;
3934 bio->bi_bdev = (struct block_device *)
3935 (unsigned long)bbio->mirror_num;
3936 /* only send an error to the higher layers if it is
3937 * beyond the tolerance of the multi-bio
3939 if (atomic_read(&bbio->error) > bbio->max_errors) {
3943 * this bio is actually up to date, we didn't
3944 * go over the max number of errors
3946 set_bit(BIO_UPTODATE, &bio->bi_flags);
3951 bio_endio(bio, err);
3952 } else if (!is_orig_bio) {
3957 struct async_sched {
3960 struct btrfs_fs_info *info;
3961 struct btrfs_work work;
3965 * see run_scheduled_bios for a description of why bios are collected for a device.
3968 * This will add one bio to the pending list for a device and make sure
3969 * the work struct is scheduled.
3971 static noinline int schedule_bio(struct btrfs_root *root,
3972 struct btrfs_device *device,
3973 int rw, struct bio *bio)
3975 int should_queue = 1;
3976 struct btrfs_pending_bios *pending_bios;
3978 /* don't bother with additional async steps for reads, right now */
3979 if (!(rw & REQ_WRITE)) {
3981 btrfsic_submit_bio(rw, bio);
3987 * nr_async_bios allows us to reliably return congestion to the
3988 * higher layers. Otherwise, the async bio makes it appear we have
3989 * made progress against dirty pages when we've really just put it
3990 * on a queue for later
3992 atomic_inc(&root->fs_info->nr_async_bios);
3993 WARN_ON(bio->bi_next);
3994 bio->bi_next = NULL;
3997 spin_lock(&device->io_lock);
3998 if (bio->bi_rw & REQ_SYNC)
3999 pending_bios = &device->pending_sync_bios;
4001 pending_bios = &device->pending_bios;
4003 if (pending_bios->tail)
4004 pending_bios->tail->bi_next = bio;
4006 pending_bios->tail = bio;
4007 if (!pending_bios->head)
4008 pending_bios->head = bio;
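/*
 * The branches above are a plain tail append on the singly linked bio
 * list: link the new bio after the current tail, advance the tail
 * pointer, and make the bio the head as well when the list was empty.
 */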
4009 if (device->running_pending)
4012 spin_unlock(&device->io_lock);
4015 btrfs_queue_worker(&root->fs_info->submit_workers,
4020 int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
4021 int mirror_num, int async_submit)
4023 struct btrfs_mapping_tree *map_tree;
4024 struct btrfs_device *dev;
4025 struct bio *first_bio = bio;
4026 u64 logical = (u64)bio->bi_sector << 9;
4032 struct btrfs_bio *bbio = NULL;
4034 length = bio->bi_size;
4035 map_tree = &root->fs_info->mapping_tree;
4036 map_length = length;
4038 ret = btrfs_map_block(map_tree, rw, logical, &map_length, &bbio,
4042 total_devs = bbio->num_stripes;
4043 if (map_length < length) {
4044 printk(KERN_CRIT "mapping failed logical %llu bio len %llu "
4045 "len %llu\n", (unsigned long long)logical,
4046 (unsigned long long)length,
4047 (unsigned long long)map_length);
4051 bbio->orig_bio = first_bio;
4052 bbio->private = first_bio->bi_private;
4053 bbio->end_io = first_bio->bi_end_io;
4054 atomic_set(&bbio->stripes_pending, bbio->num_stripes);
4056 while (dev_nr < total_devs) {
4057 if (dev_nr < total_devs - 1) {
4058 bio = bio_clone(first_bio, GFP_NOFS);
4063 bio->bi_private = bbio;
4064 bio->bi_end_io = btrfs_end_bio;
4065 bio->bi_sector = bbio->stripes[dev_nr].physical >> 9;
4066 dev = bbio->stripes[dev_nr].dev;
4067 if (dev && dev->bdev && (rw != WRITE || dev->writeable)) {
4068 pr_debug("btrfs_map_bio: rw %d, sector=%llu, dev=%lu "
4069 "(%s id %llu), size=%u\n", rw,
4070 (u64)bio->bi_sector, (u_long)dev->bdev->bd_dev,
4071 dev->name, dev->devid, bio->bi_size);
4072 bio->bi_bdev = dev->bdev;
4074 schedule_bio(root, dev, rw, bio);
4076 btrfsic_submit_bio(rw, bio);
4078 bio->bi_bdev = root->fs_info->fs_devices->latest_bdev;
4079 bio->bi_sector = logical >> 9;
4080 bio_endio(bio, -EIO);
4087 struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
4090 struct btrfs_device *device;
4091 struct btrfs_fs_devices *cur_devices;
4093 cur_devices = root->fs_info->fs_devices;
4094 while (cur_devices) {
4096 !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
4097 device = __find_device(&cur_devices->devices,
4102 cur_devices = cur_devices->seed;
4107 static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
4108 u64 devid, u8 *dev_uuid)
4110 struct btrfs_device *device;
4111 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
4113 device = kzalloc(sizeof(*device), GFP_NOFS);
4116 list_add(&device->dev_list,
4117 &fs_devices->devices);
4118 device->dev_root = root->fs_info->dev_root;
4119 device->devid = devid;
4120 device->work.func = pending_bios_fn;
4121 device->fs_devices = fs_devices;
4122 device->missing = 1;
4123 fs_devices->num_devices++;
4124 fs_devices->missing_devices++;
4125 spin_lock_init(&device->io_lock);
4126 INIT_LIST_HEAD(&device->dev_alloc_list);
4127 memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
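/*
 * The placeholder created here has no bdev, so btrfs_map_bio() fails any
 * bio that lands on it with -EIO; on profiles with redundancy the read
 * is expected to be satisfied by one of the surviving mirrors instead.
 */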
4131 static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
4132 struct extent_buffer *leaf,
4133 struct btrfs_chunk *chunk)
4135 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
4136 struct map_lookup *map;
4137 struct extent_map *em;
4141 u8 uuid[BTRFS_UUID_SIZE];
4146 logical = key->offset;
4147 length = btrfs_chunk_length(leaf, chunk);
4149 read_lock(&map_tree->map_tree.lock);
4150 em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
4151 read_unlock(&map_tree->map_tree.lock);
4153 /* already mapped? */
4154 if (em && em->start <= logical && em->start + em->len > logical) {
4155 free_extent_map(em);
4158 free_extent_map(em);
4161 em = alloc_extent_map();
4164 num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
4165 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
4167 free_extent_map(em);
4171 em->bdev = (struct block_device *)map;
4172 em->start = logical;
4174 em->block_start = 0;
4175 em->block_len = em->len;
4177 map->num_stripes = num_stripes;
4178 map->io_width = btrfs_chunk_io_width(leaf, chunk);
4179 map->io_align = btrfs_chunk_io_align(leaf, chunk);
4180 map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
4181 map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
4182 map->type = btrfs_chunk_type(leaf, chunk);
4183 map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
4184 for (i = 0; i < num_stripes; i++) {
4185 map->stripes[i].physical =
4186 btrfs_stripe_offset_nr(leaf, chunk, i);
4187 devid = btrfs_stripe_devid_nr(leaf, chunk, i);
4188 read_extent_buffer(leaf, uuid, (unsigned long)
4189 btrfs_stripe_dev_uuid_nr(chunk, i),
4191 map->stripes[i].dev = btrfs_find_device(root, devid, uuid,
4193 if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
4195 free_extent_map(em);
4198 if (!map->stripes[i].dev) {
4199 map->stripes[i].dev =
4200 add_missing_dev(root, devid, uuid);
4201 if (!map->stripes[i].dev) {
4203 free_extent_map(em);
4207 map->stripes[i].dev->in_fs_metadata = 1;
4210 write_lock(&map_tree->map_tree.lock);
4211 ret = add_extent_mapping(&map_tree->map_tree, em);
4212 write_unlock(&map_tree->map_tree.lock);
4214 free_extent_map(em);
4219 static int fill_device_from_item(struct extent_buffer *leaf,
4220 struct btrfs_dev_item *dev_item,
4221 struct btrfs_device *device)
4225 device->devid = btrfs_device_id(leaf, dev_item);
4226 device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
4227 device->total_bytes = device->disk_total_bytes;
4228 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
4229 device->type = btrfs_device_type(leaf, dev_item);
4230 device->io_align = btrfs_device_io_align(leaf, dev_item);
4231 device->io_width = btrfs_device_io_width(leaf, dev_item);
4232 device->sector_size = btrfs_device_sector_size(leaf, dev_item);
4234 ptr = (unsigned long)btrfs_device_uuid(dev_item);
4235 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
4240 static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
4242 struct btrfs_fs_devices *fs_devices;
4245 BUG_ON(!mutex_is_locked(&uuid_mutex));
4247 fs_devices = root->fs_info->fs_devices->seed;
4248 while (fs_devices) {
4249 if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
4253 fs_devices = fs_devices->seed;
4256 fs_devices = find_fsid(fsid);
4262 fs_devices = clone_fs_devices(fs_devices);
4263 if (IS_ERR(fs_devices)) {
4264 ret = PTR_ERR(fs_devices);
4268 ret = __btrfs_open_devices(fs_devices, FMODE_READ,
4269 root->fs_info->bdev_holder);
4273 if (!fs_devices->seeding) {
4274 __btrfs_close_devices(fs_devices);
4275 free_fs_devices(fs_devices);
4280 fs_devices->seed = root->fs_info->fs_devices->seed;
4281 root->fs_info->fs_devices->seed = fs_devices;
4286 static int read_one_dev(struct btrfs_root *root,
4287 struct extent_buffer *leaf,
4288 struct btrfs_dev_item *dev_item)
4290 struct btrfs_device *device;
4293 u8 fs_uuid[BTRFS_UUID_SIZE];
4294 u8 dev_uuid[BTRFS_UUID_SIZE];
4296 devid = btrfs_device_id(leaf, dev_item);
4297 read_extent_buffer(leaf, dev_uuid,
4298 (unsigned long)btrfs_device_uuid(dev_item),
4300 read_extent_buffer(leaf, fs_uuid,
4301 (unsigned long)btrfs_device_fsid(dev_item),
4304 if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
4305 ret = open_seed_devices(root, fs_uuid);
4306 if (ret && !btrfs_test_opt(root, DEGRADED))
4310 device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
4311 if (!device || !device->bdev) {
4312 if (!btrfs_test_opt(root, DEGRADED))
4316 printk(KERN_WARNING "btrfs: devid %llu missing\n",
4317 (unsigned long long)devid);
4318 device = add_missing_dev(root, devid, dev_uuid);
4321 } else if (!device->missing) {
4323 * this happens when a device that was properly set up
4324 * in the device info lists suddenly goes bad.
4325 * device->bdev is NULL, so we have to set
4326 * device->missing to one here
4328 root->fs_info->fs_devices->missing_devices++;
4329 device->missing = 1;
4333 if (device->fs_devices != root->fs_info->fs_devices) {
4334 BUG_ON(device->writeable);
4335 if (device->generation !=
4336 btrfs_device_generation(leaf, dev_item))
4340 fill_device_from_item(leaf, dev_item, device);
4341 device->dev_root = root->fs_info->dev_root;
4342 device->in_fs_metadata = 1;
4343 if (device->writeable) {
4344 device->fs_devices->total_rw_bytes += device->total_bytes;
4345 spin_lock(&root->fs_info->free_chunk_lock);
4346 root->fs_info->free_chunk_space += device->total_bytes -
4348 spin_unlock(&root->fs_info->free_chunk_lock);
4354 int btrfs_read_sys_array(struct btrfs_root *root)
4356 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
4357 struct extent_buffer *sb;
4358 struct btrfs_disk_key *disk_key;
4359 struct btrfs_chunk *chunk;
4361 unsigned long sb_ptr;
4367 struct btrfs_key key;
4369 sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
4370 BTRFS_SUPER_INFO_SIZE);
4373 btrfs_set_buffer_uptodate(sb);
4374 btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
4376 * The sb extent buffer is artificial and just used to read the system array.
4377 * The btrfs_set_buffer_uptodate() call does not properly mark all of its
4378 * pages up-to-date when the page is larger: the extent does not cover the
4379 * whole page and consequently check_page_uptodate does not find all
4380 * the page's extents up-to-date (the hole beyond sb),
4381 * write_extent_buffer then triggers a WARN_ON.
4383 * Regular short extents go through the mark_extent_buffer_dirty/writeback
4384 * cycle, but sb spans only this function. Add an explicit SetPageUptodate
4385 * call to silence the warning, e.g. on PowerPC 64.
4387 if (PAGE_CACHE_SIZE > BTRFS_SUPER_INFO_SIZE)
4388 SetPageUptodate(sb->first_page);
4390 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
4391 array_size = btrfs_super_sys_array_size(super_copy);
4393 ptr = super_copy->sys_chunk_array;
4394 sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
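/*
 * ptr walks the in-memory copy of the array while sb_ptr tracks the
 * matching offset inside the sb extent buffer; the loop below advances
 * both in lock step as it consumes a disk key followed by the chunk
 * item it describes.
 */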
4397 while (cur < array_size) {
4398 disk_key = (struct btrfs_disk_key *)ptr;
4399 btrfs_disk_key_to_cpu(&key, disk_key);
4401 len = sizeof(*disk_key); ptr += len;
4405 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
4406 chunk = (struct btrfs_chunk *)sb_ptr;
4407 ret = read_one_chunk(root, &key, sb, chunk);
4410 num_stripes = btrfs_chunk_num_stripes(sb, chunk);
4411 len = btrfs_chunk_item_size(num_stripes);
4420 free_extent_buffer(sb);
4424 int btrfs_read_chunk_tree(struct btrfs_root *root)
4426 struct btrfs_path *path;
4427 struct extent_buffer *leaf;
4428 struct btrfs_key key;
4429 struct btrfs_key found_key;
4433 root = root->fs_info->chunk_root;
4435 path = btrfs_alloc_path();
4439 mutex_lock(&uuid_mutex);
4442 /* first we search for all of the device items, and then we
4443 * read in all of the chunk items. This way we can create chunk
4444 * mappings that reference all of the devices that are found
4446 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
4450 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4454 leaf = path->nodes[0];
4455 slot = path->slots[0];
4456 if (slot >= btrfs_header_nritems(leaf)) {
4457 ret = btrfs_next_leaf(root, path);
4464 btrfs_item_key_to_cpu(leaf, &found_key, slot);
4465 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
4466 if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
4468 if (found_key.type == BTRFS_DEV_ITEM_KEY) {
4469 struct btrfs_dev_item *dev_item;
4470 dev_item = btrfs_item_ptr(leaf, slot,
4471 struct btrfs_dev_item);
4472 ret = read_one_dev(root, leaf, dev_item);
4476 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
4477 struct btrfs_chunk *chunk;
4478 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
4479 ret = read_one_chunk(root, &found_key, leaf, chunk);
4485 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
4487 btrfs_release_path(path);
4492 unlock_chunks(root);
4493 mutex_unlock(&uuid_mutex);
4495 btrfs_free_path(path);