/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <asm/div64.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
static int init_first_rw_device(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_device *device);
static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
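
/*
 * uuid_mutex protects fs_uuids below: the global list of all
 * btrfs_fs_devices ever seen by device scanning, keyed by fsid.
 */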
static DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
static void lock_chunks(struct btrfs_root *root)
{
	mutex_lock(&root->fs_info->chunk_mutex);
}

static void unlock_chunks(struct btrfs_root *root)
{
	mutex_unlock(&root->fs_info->chunk_mutex);
}
static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		rcu_string_free(device->name);
		kfree(device);
	}
	kfree(fs_devices);
}
void btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, list);
		list_del(&fs_devices->list);
		free_fs_devices(fs_devices);
	}
}
static noinline struct btrfs_device *__find_device(struct list_head *head,
						   u64 devid, u8 *uuid)
{
	struct btrfs_device *dev;

	list_for_each_entry(dev, head, dev_list) {
		if (dev->devid == devid &&
		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
			return dev;
		}
	}
	return NULL;
}
static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, list) {
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}
static void requeue_list(struct btrfs_pending_bios *pending_bios,
			 struct bio *head, struct bio *tail)
{
	struct bio *old_head;

	old_head = pending_bios->head;
	pending_bios->head = head;
	if (pending_bios->tail)
		tail->bi_next = old_head;
	else
		pending_bios->tail = tail;
}
/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device.  This greatly
 * improves the scheduler's ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block.  The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested.  This way, multiple devices
 * can make progress from a single worker thread.
 */
static noinline void run_scheduled_bios(struct btrfs_device *device)
{
	struct backing_dev_info *bdi;
	struct btrfs_fs_info *fs_info;
	struct btrfs_pending_bios *pending_bios;
	struct bio *pending;
	struct bio *tail;
	struct bio *cur;
	unsigned long num_run;
	unsigned long batch_run = 0;
	unsigned long limit;
	unsigned long last_waited = 0;
	int force_reg = 0;
	int sync_pending = 0;
	struct blk_plug plug;

	/*
	 * this function runs all the bios we've collected for
	 * a particular device.  We don't want to wander off to
	 * another device without first sending all of these down.
	 * So, set up a plug here and finish it off before we return
	 */
	blk_start_plug(&plug);

	bdi = blk_get_backing_dev_info(device->bdev);
	fs_info = device->dev_root->fs_info;
	limit = btrfs_async_submit_limit(fs_info);
	limit = limit * 2 / 3;

	spin_lock(&device->io_lock);
loop_lock:
	num_run = 0;

	/* take all the bios off the list at once and process them
	 * later on (without the lock held).  But, remember the
	 * tail and other pointers so the bios can be properly reinserted
	 * into the list if we hit congestion
	 */
	if (!force_reg && device->pending_sync_bios.head) {
		pending_bios = &device->pending_sync_bios;
		force_reg = 1;
	} else {
		pending_bios = &device->pending_bios;
		force_reg = 0;
	}

	pending = pending_bios->head;
	tail = pending_bios->tail;
	WARN_ON(pending && !tail);

	/* if pending was null this time around, no bios need processing
	 * at all and we can stop.  Otherwise it'll loop back up again
	 * and do an additional check so no bios are missed.
	 *
	 * device->running_pending is used to synchronize with the
	 * schedule_bio code.
	 */
	if (device->pending_sync_bios.head == NULL &&
	    device->pending_bios.head == NULL) {
		device->running_pending = 0;
	} else {
		device->running_pending = 1;
	}

	pending_bios->head = NULL;
	pending_bios->tail = NULL;

	spin_unlock(&device->io_lock);
	while (pending) {
		/* we want to work on both lists, but do more bios on the
		 * sync list than the regular list
		 */
		if ((num_run > 32 &&
		     pending_bios != &device->pending_sync_bios &&
		     device->pending_sync_bios.head) ||
		    (num_run > 64 && pending_bios == &device->pending_sync_bios &&
		     device->pending_bios.head)) {
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			goto loop_lock;
		}

		cur = pending;
		pending = pending->bi_next;
		cur->bi_next = NULL;

		if (atomic_dec_return(&fs_info->nr_async_bios) < limit &&
		    waitqueue_active(&fs_info->async_submit_wait))
			wake_up(&fs_info->async_submit_wait);

		BUG_ON(atomic_read(&cur->bi_cnt) == 0);

		/*
		 * if we're doing the sync list, record that our
		 * plug has some sync requests on it
		 *
		 * If we're doing the regular list and there are
		 * sync requests sitting around, unplug before
		 * we add more
		 */
		if (pending_bios == &device->pending_sync_bios) {
			sync_pending = 1;
		} else if (sync_pending) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}

		btrfsic_submit_bio(cur->bi_rw, cur);
		num_run++;
		batch_run++;

		/*
		 * we made progress, there is more work to do and the bdi
		 * is now congested.  Back off and let other work structs
		 * run instead
		 */
		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
		    fs_info->fs_devices->open_devices > 1) {
			struct io_context *ioc;

			ioc = current->io_context;

			/*
			 * the main goal here is that we don't want to
			 * block if we're going to be able to submit
			 * more requests without blocking.
			 *
			 * This code does two great things, it pokes into
			 * the elevator code from a filesystem _and_
			 * it makes assumptions about how batching works.
			 */
			if (ioc && ioc->nr_batch_requests > 0 &&
			    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
			    (last_waited == 0 ||
			     ioc->last_waited == last_waited)) {
				/*
				 * we want to go through our batch of
				 * requests and stop.  So, we copy out
				 * the ioc->last_waited time and test
				 * against it before looping
				 */
				last_waited = ioc->last_waited;
				continue;
			}
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			device->running_pending = 1;

			spin_unlock(&device->io_lock);
			btrfs_requeue_work(&device->work);
			goto done;
		}
		/* unplug every 64 requests just for good measure */
		if (batch_run % 64 == 0) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}
	}
	spin_lock(&device->io_lock);
	if (device->pending_bios.head || device->pending_sync_bios.head)
		goto loop_lock;
	spin_unlock(&device->io_lock);

done:
	blk_finish_plug(&plug);
}
static void pending_bios_fn(struct btrfs_work *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, work);
	run_scheduled_bios(device);
}
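
/*
 * Called during a device scan: find the filesystem this superblock
 * belongs to (creating a new btrfs_fs_devices if this is the first
 * device seen for the fsid) and add or update the device entry.
 */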
static noinline int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	struct rcu_string *name;
	u64 found_transid = btrfs_super_generation(disk_super);

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
		if (!fs_devices)
			return -ENOMEM;
		INIT_LIST_HEAD(&fs_devices->devices);
		INIT_LIST_HEAD(&fs_devices->alloc_list);
		list_add(&fs_devices->list, &fs_uuids);
		memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
		mutex_init(&fs_devices->device_list_mutex);
		device = NULL;
	} else {
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
	}
	if (!device) {
		if (fs_devices->opened)
			return -EBUSY;

		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device) {
			/* we can safely leave the fs_devices entry around */
			return -ENOMEM;
		}
		device->devid = devid;
		device->dev_stats_valid = 0;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, disk_super->dev_item.uuid,
		       BTRFS_UUID_SIZE);
		spin_lock_init(&device->io_lock);

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			kfree(device);
			return -ENOMEM;
		}
		rcu_assign_pointer(device->name, name);
		INIT_LIST_HEAD(&device->dev_alloc_list);

		/* init readahead state */
		spin_lock_init(&device->reada_lock);
		device->reada_curr_zone = NULL;
		atomic_set(&device->reada_in_flight, 0);
		device->reada_next = 0;
		INIT_RADIX_TREE(&device->reada_zones, GFP_NOFS & ~__GFP_WAIT);
		INIT_RADIX_TREE(&device->reada_extents, GFP_NOFS & ~__GFP_WAIT);

		mutex_lock(&fs_devices->device_list_mutex);
		list_add_rcu(&device->dev_list, &fs_devices->devices);
		mutex_unlock(&fs_devices->device_list_mutex);

		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	} else if (!device->name || strcmp(device->name->str, path)) {
		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name)
			return -ENOMEM;
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
		if (device->missing) {
			fs_devices->missing_devices--;
			device->missing = 0;
		}
	}

	if (found_transid > fs_devices->latest_trans) {
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
	}
	*fs_devices_ret = fs_devices;
	return 0;
}
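
/*
 * Duplicate an fs_devices and its device entries.  Used by
 * btrfs_prepare_sprout() so the seed filesystem keeps an independent
 * copy of its device list.
 */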
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;

	fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
	if (!fs_devices)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&fs_devices->devices);
	INIT_LIST_HEAD(&fs_devices->alloc_list);
	INIT_LIST_HEAD(&fs_devices->list);
	mutex_init(&fs_devices->device_list_mutex);
	fs_devices->latest_devid = orig->latest_devid;
	fs_devices->latest_trans = orig->latest_trans;
	fs_devices->total_devices = orig->total_devices;
	memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));

	/* We have held the volume lock, it is safe to get the devices. */
	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		struct rcu_string *name;

		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device)
			goto error;

		/*
		 * This is ok to do without rcu read locked because we hold the
		 * uuid mutex so nothing we touch in here is going to disappear.
		 */
		name = rcu_string_strdup(orig_dev->name->str, GFP_NOFS);
		if (!name) {
			kfree(device);
			goto error;
		}
		rcu_assign_pointer(device->name, name);

		device->devid = orig_dev->devid;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
		spin_lock_init(&device->io_lock);
		INIT_LIST_HEAD(&device->dev_list);
		INIT_LIST_HEAD(&device->dev_alloc_list);

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	return fs_devices;
error:
	free_fs_devices(fs_devices);
	return ERR_PTR(-ENOMEM);
}
void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *next;

	struct block_device *latest_bdev = NULL;
	u64 latest_devid = 0;
	u64 latest_transid = 0;

	mutex_lock(&uuid_mutex);
again:
	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (device->in_fs_metadata) {
			if (!latest_transid ||
			    device->generation > latest_transid) {
				latest_devid = device->devid;
				latest_transid = device->generation;
				latest_bdev = device->bdev;
			}
			continue;
		}

		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			device->writeable = 0;
			fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		rcu_string_free(device->name);
		kfree(device);
	}

	if (fs_devices->seed) {
		fs_devices = fs_devices->seed;
		goto again;
	}

	fs_devices->latest_bdev = latest_bdev;
	fs_devices->latest_devid = latest_devid;
	fs_devices->latest_trans = latest_transid;

	mutex_unlock(&uuid_mutex);
}
static void __free_device(struct work_struct *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, rcu_work);

	if (device->bdev)
		blkdev_put(device->bdev, device->mode);

	rcu_string_free(device->name);
	kfree(device);
}
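
/*
 * Device teardown is split in two stages: free_device() runs as an RCU
 * callback once all readers of the device list are done, and it punts
 * the real work to __free_device() on a workqueue, because blkdev_put()
 * may sleep while RCU callbacks must not.
 */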
static void free_device(struct rcu_head *head)
{
	struct btrfs_device *device;

	device = container_of(head, struct btrfs_device, rcu);

	INIT_WORK(&device->rcu_work, __free_device);
	schedule_work(&device->rcu_work);
}
static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	if (--fs_devices->opened > 0)
		return 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		struct btrfs_device *new_device;
		struct rcu_string *name;

		if (device->bdev)
			fs_devices->open_devices--;

		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			fs_devices->rw_devices--;
		}

		if (device->can_discard)
			fs_devices->num_can_discard--;

		new_device = kmalloc(sizeof(*new_device), GFP_NOFS);
		BUG_ON(!new_device); /* -ENOMEM */
		memcpy(new_device, device, sizeof(*new_device));

		/* Safe because we are under uuid_mutex */
		if (device->name) {
			name = rcu_string_strdup(device->name->str, GFP_NOFS);
			BUG_ON(device->name && !name); /* -ENOMEM */
			rcu_assign_pointer(new_device->name, name);
		}
		new_device->bdev = NULL;
		new_device->writeable = 0;
		new_device->in_fs_metadata = 0;
		new_device->can_discard = 0;
		list_replace_rcu(&device->dev_list, &new_device->dev_list);

		call_rcu(&device->rcu, free_device);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = 0;

	return 0;
}
int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_fs_devices *seed_devices = NULL;
	int ret;

	mutex_lock(&uuid_mutex);
	ret = __btrfs_close_devices(fs_devices);
	if (!fs_devices->opened) {
		seed_devices = fs_devices->seed;
		fs_devices->seed = NULL;
	}
	mutex_unlock(&uuid_mutex);

	while (seed_devices) {
		fs_devices = seed_devices;
		seed_devices = fs_devices->seed;
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
	return ret;
}
static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
				fmode_t flags, void *holder)
{
	struct request_queue *q;
	struct block_device *bdev;
	struct list_head *head = &fs_devices->devices;
	struct btrfs_device *device;
	struct block_device *latest_bdev = NULL;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	u64 latest_devid = 0;
	u64 latest_transid = 0;
	u64 devid;
	int seeding = 1;
	int ret = 0;

	flags |= FMODE_EXCL;

	list_for_each_entry(device, head, dev_list) {
		if (device->bdev)
			continue;
		if (!device->name)
			continue;

		bdev = blkdev_get_by_path(device->name->str, flags, holder);
		if (IS_ERR(bdev)) {
			printk(KERN_INFO "btrfs: open %s failed\n", device->name->str);
			goto error;
		}
		filemap_write_and_wait(bdev->bd_inode->i_mapping);
		invalidate_bdev(bdev);
		set_blocksize(bdev, 4096);

		bh = btrfs_read_dev_super(bdev);
		if (!bh)
			goto error_close;

		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		if (devid != device->devid)
			goto error_brelse;

		if (memcmp(device->uuid, disk_super->dev_item.uuid,
			   BTRFS_UUID_SIZE))
			goto error_brelse;

		device->generation = btrfs_super_generation(disk_super);
		if (!latest_transid || device->generation > latest_transid) {
			latest_devid = devid;
			latest_transid = device->generation;
			latest_bdev = bdev;
		}

		if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
			device->writeable = 0;
		} else {
			device->writeable = !bdev_read_only(bdev);
			seeding = 0;
		}

		q = bdev_get_queue(bdev);
		if (blk_queue_discard(q)) {
			device->can_discard = 1;
			fs_devices->num_can_discard++;
		}

		device->bdev = bdev;
		device->in_fs_metadata = 0;
		device->mode = flags;

		if (!blk_queue_nonrot(bdev_get_queue(bdev)))
			fs_devices->rotating = 1;

		fs_devices->open_devices++;
		if (device->writeable) {
			fs_devices->rw_devices++;
			list_add(&device->dev_alloc_list,
				 &fs_devices->alloc_list);
		}
		brelse(bh);
		continue;

error_brelse:
		brelse(bh);
error_close:
		blkdev_put(bdev, flags);
error:
		continue;
	}
	if (fs_devices->open_devices == 0) {
		ret = -EINVAL;
		goto out;
	}
	fs_devices->seeding = seeding;
	fs_devices->opened = 1;
	fs_devices->latest_bdev = latest_bdev;
	fs_devices->latest_devid = latest_devid;
	fs_devices->latest_trans = latest_transid;
	fs_devices->total_rw_bytes = 0;
out:
	return ret;
}
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	mutex_lock(&uuid_mutex);
	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		ret = __btrfs_open_devices(fs_devices, flags, holder);
	}
	mutex_unlock(&uuid_mutex);
	return ret;
}
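
/*
 * Probe one device for a btrfs superblock: read it, report what was
 * found and register the device via device_list_add() so that a later
 * mount can assemble the filesystem from all scanned devices.
 */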
int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
			  struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_super_block *disk_super;
	struct block_device *bdev;
	struct buffer_head *bh;
	int ret;
	u64 devid;
	u64 transid;
	u64 total_devices;

	flags |= FMODE_EXCL;
	bdev = blkdev_get_by_path(path, flags, holder);

	if (IS_ERR(bdev)) {
		ret = PTR_ERR(bdev);
		goto error;
	}

	mutex_lock(&uuid_mutex);
	ret = set_blocksize(bdev, 4096);
	if (ret)
		goto error_close;
	bh = btrfs_read_dev_super(bdev);
	if (!bh) {
		ret = -EINVAL;
		goto error_close;
	}
	disk_super = (struct btrfs_super_block *)bh->b_data;
	devid = btrfs_stack_device_id(&disk_super->dev_item);
	transid = btrfs_super_generation(disk_super);
	total_devices = btrfs_super_num_devices(disk_super);
	if (disk_super->label[0]) {
		if (disk_super->label[BTRFS_LABEL_SIZE - 1])
			disk_super->label[BTRFS_LABEL_SIZE - 1] = '\0';
		printk(KERN_INFO "device label %s ", disk_super->label);
	} else {
		printk(KERN_INFO "device fsid %pU ", disk_super->fsid);
	}
	printk(KERN_CONT "devid %llu transid %llu %s\n",
	       (unsigned long long)devid, (unsigned long long)transid, path);
	ret = device_list_add(path, disk_super, devid, fs_devices_ret);
	if (!ret && fs_devices_ret)
		(*fs_devices_ret)->total_devices = total_devices;

	brelse(bh);
error_close:
	mutex_unlock(&uuid_mutex);
	blkdev_put(bdev, flags);
error:
	return ret;
}
/* helper to account the used device space in the range */
int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
				   u64 end, u64 *length)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 extent_end;
	int ret;
	int slot;
	struct extent_buffer *l;

	*length = 0;

	if (start >= device->total_bytes)
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = 2;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
			goto next;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (key.offset <= start && extent_end > end) {
			*length = end - start + 1;
			break;
		} else if (key.offset <= start && extent_end > start)
			*length += extent_end - start;
		else if (key.offset > start && extent_end <= end)
			*length += extent_end - key.offset;
		else if (key.offset > start && key.offset <= end) {
			*length += end - key.offset + 1;
			break;
		} else if (key.offset > end)
			break;

next:
		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * find_free_dev_extent - find free space in the specified device
 * @device:	the device which we search the free space in
 * @num_bytes:	the size of the free space that we need
 * @start:	store the start of the free space.
 * @len:	the size of the free space that we find, or the size of the
 *		max free space if we don't find suitable free space
 *
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 *
 * @start is used to store the start of the free space if we find it. But if
 * we don't find suitable free space, it will be used to store the start
 * position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 */
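/*
 * A sketch of a hypothetical caller (the names below are illustrative,
 * not from this file): a chunk allocator that wants 1GiB of contiguous
 * space on @device might do
 *
 *	u64 start, len;
 *	int ret = find_free_dev_extent(device, 1024ULL * 1024 * 1024,
 *				       &start, &len);
 *	if (ret == 0)
 *		carve a dev extent out of [start, start + 1GiB)
 *	else if (ret == -ENOSPC)
 *		only len bytes at start are free; try another device
 */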
int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_start;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;

	/* FIXME use last free of some kind */

	/* we don't want to overwrite the superblock on the drive,
	 * so we make sure to start at an offset of at least 1MB
	 */
	search_start = max(root->fs_info->alloc_start, 1024ull * 1024);

	max_hole_start = search_start;
	max_hole_size = 0;
	hole_size = 0;

	if (search_start >= search_end) {
		ret = -ENOSPC;
		goto error;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto error;
	}
	path->reada = 2;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than what we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size.  Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start)
		hole_size = search_end - search_start;

	if (hole_size > max_hole_size) {
		max_hole_start = search_start;
		max_hole_size = hole_size;
	}

	if (hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

out:
	btrfs_free_path(path);
error:
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}
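
/*
 * Remove the dev extent item covering @start on @device and return the
 * space to the device's free-chunk accounting.
 */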
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_device *device,
				 u64 start)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	} else {
		btrfs_error(root->fs_info, ret, "Slot search failed");
		goto out;
	}

	if (device->bytes_used > 0) {
		u64 len = btrfs_dev_extent_length(leaf, extent);
		device->bytes_used -= len;
		spin_lock(&root->fs_info->free_chunk_lock);
		root->fs_info->free_chunk_space += len;
		spin_unlock(&root->fs_info->free_chunk_lock);
	}
	ret = btrfs_del_item(trans, root, path);
	if (ret) {
		btrfs_error(root->fs_info, ret,
			    "Failed to remove dev extent item");
	}
out:
	btrfs_free_path(path);
	return ret;
}
int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
			   struct btrfs_device *device,
			   u64 chunk_tree, u64 chunk_objectid,
			   u64 chunk_offset, u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	WARN_ON(!device->in_fs_metadata);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
		    BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}
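
/*
 * Find where the next chunk can be placed in the chunk tree's logical
 * address space: locate the highest existing chunk item for @objectid
 * and return its end offset through @offset.
 */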
static noinline int find_next_chunk(struct btrfs_root *root,
				    u64 objectid, u64 *offset)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_key found_key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0); /* Corruption */

	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
	if (ret) {
		*offset = 0;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != objectid)
			*offset = 0;
		else {
			chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
					       struct btrfs_chunk);
			*offset = found_key.offset +
				btrfs_chunk_length(path->nodes[0], chunk);
		}
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0); /* Corruption */

	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*objectid = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*objectid = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = (unsigned long)btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
static int btrfs_rm_dev_item(struct btrfs_root *root,
			     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;
	lock_chunks(root);

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret)
		goto out;
out:
	btrfs_free_path(path);
	unlock_chunks(root);
	btrfs_commit_transaction(trans, root);
	return ret;
}
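
/*
 * Device removal, in order: shrink the device to zero (relocating
 * everything on it), delete its dev item, drop it from the in-memory
 * lists and superblock counts, and finally wipe the btrfs magic from
 * its superblock so it is no longer detected as part of the filesystem.
 */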
int btrfs_rm_device(struct btrfs_root *root, char *device_path)
{
	struct btrfs_device *device;
	struct btrfs_device *next_device;
	struct block_device *bdev;
	struct buffer_head *bh = NULL;
	struct btrfs_super_block *disk_super;
	struct btrfs_fs_devices *cur_devices;
	u64 all_avail;
	u64 devid;
	u64 num_devices;
	u8 *dev_uuid;
	int ret = 0;
	bool clear_super = false;

	mutex_lock(&uuid_mutex);

	all_avail = root->fs_info->avail_data_alloc_bits |
		root->fs_info->avail_system_alloc_bits |
		root->fs_info->avail_metadata_alloc_bits;

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
	    root->fs_info->fs_devices->num_devices <= 4) {
		printk(KERN_ERR "btrfs: unable to go below four devices "
		       "on raid10\n");
		ret = -EINVAL;
		goto out;
	}

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
	    root->fs_info->fs_devices->num_devices <= 2) {
		printk(KERN_ERR "btrfs: unable to go below two "
		       "devices on raid1\n");
		ret = -EINVAL;
		goto out;
	}

	if (strcmp(device_path, "missing") == 0) {
		struct list_head *devices;
		struct btrfs_device *tmp;

		device = NULL;
		devices = &root->fs_info->fs_devices->devices;
		/*
		 * It is safe to read the devices since the volume_mutex
		 * is held.
		 */
		list_for_each_entry(tmp, devices, dev_list) {
			if (tmp->in_fs_metadata && !tmp->bdev) {
				device = tmp;
				break;
			}
		}
		bdev = NULL;
		bh = NULL;
		disk_super = NULL;
		if (!device) {
			printk(KERN_ERR "btrfs: no missing devices found to "
			       "remove\n");
			goto out;
		}
	} else {
		bdev = blkdev_get_by_path(device_path, FMODE_READ | FMODE_EXCL,
					  root->fs_info->bdev_holder);
		if (IS_ERR(bdev)) {
			ret = PTR_ERR(bdev);
			goto out;
		}

		set_blocksize(bdev, 4096);
		invalidate_bdev(bdev);
		bh = btrfs_read_dev_super(bdev);
		if (!bh) {
			ret = -EINVAL;
			goto error_close;
		}
		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		dev_uuid = disk_super->dev_item.uuid;
		device = btrfs_find_device(root, devid, dev_uuid,
					   disk_super->fsid);
		if (!device) {
			ret = -ENOENT;
			goto error_brelse;
		}
	}

	if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
		printk(KERN_ERR "btrfs: unable to remove the only writeable "
		       "device\n");
		ret = -EINVAL;
		goto error_brelse;
	}

	if (device->writeable) {
		lock_chunks(root);
		list_del_init(&device->dev_alloc_list);
		unlock_chunks(root);
		root->fs_info->fs_devices->rw_devices--;
		clear_super = true;
	}

	ret = btrfs_shrink_device(device, 0);
	if (ret)
		goto error_undo;

	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
	if (ret)
		goto error_undo;

	spin_lock(&root->fs_info->free_chunk_lock);
	root->fs_info->free_chunk_space = device->total_bytes -
		device->bytes_used;
	spin_unlock(&root->fs_info->free_chunk_lock);

	device->in_fs_metadata = 0;
	btrfs_scrub_cancel_dev(root, device);

	/*
	 * the device list mutex makes sure that we don't change
	 * the device list while someone else is writing out all
	 * the device supers.
	 */

	cur_devices = device->fs_devices;
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_del_rcu(&device->dev_list);

	device->fs_devices->num_devices--;
	device->fs_devices->total_devices--;

	if (device->missing)
		root->fs_info->fs_devices->missing_devices--;

	next_device = list_entry(root->fs_info->fs_devices->devices.next,
				 struct btrfs_device, dev_list);
	if (device->bdev == root->fs_info->sb->s_bdev)
		root->fs_info->sb->s_bdev = next_device->bdev;
	if (device->bdev == root->fs_info->fs_devices->latest_bdev)
		root->fs_info->fs_devices->latest_bdev = next_device->bdev;

	if (device->bdev)
		device->fs_devices->open_devices--;

	call_rcu(&device->rcu, free_device);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
	btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);

	if (cur_devices->open_devices == 0) {
		struct btrfs_fs_devices *fs_devices;
		fs_devices = root->fs_info->fs_devices;
		while (fs_devices) {
			if (fs_devices->seed == cur_devices)
				break;
			fs_devices = fs_devices->seed;
		}
		fs_devices->seed = cur_devices->seed;
		cur_devices->seed = NULL;
		lock_chunks(root);
		__btrfs_close_devices(cur_devices);
		unlock_chunks(root);
		free_fs_devices(cur_devices);
	}

	root->fs_info->num_tolerated_disk_barrier_failures =
		btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);

	/*
	 * at this point, the device is zero sized.  We want to
	 * remove it from the devices list and zero out the old super
	 */
	if (clear_super) {
		/* make sure this device isn't detected as part of
		 * the FS anymore
		 */
		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
		set_buffer_dirty(bh);
		sync_dirty_buffer(bh);
	}

	ret = 0;

error_brelse:
	brelse(bh);
error_close:
	if (bdev)
		blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
out:
	mutex_unlock(&uuid_mutex);
	return ret;
error_undo:
	if (device->writeable) {
		lock_chunks(root);
		list_add(&device->dev_alloc_list,
			 &root->fs_info->fs_devices->alloc_list);
		unlock_chunks(root);
		root->fs_info->fs_devices->rw_devices++;
	}
	goto error_brelse;
}
/*
 * does all the dirty work required for changing a filesystem's UUID.
 */
static int btrfs_prepare_sprout(struct btrfs_root *root)
{
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	struct btrfs_fs_devices *old_devices;
	struct btrfs_fs_devices *seed_devices;
	struct btrfs_super_block *disk_super = root->fs_info->super_copy;
	struct btrfs_device *device;
	u64 super_flags;

	BUG_ON(!mutex_is_locked(&uuid_mutex));
	if (!fs_devices->seeding)
		return -EINVAL;

	seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
	if (!seed_devices)
		return -ENOMEM;

	old_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(old_devices)) {
		kfree(seed_devices);
		return PTR_ERR(old_devices);
	}

	list_add(&old_devices->list, &fs_uuids);

	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
	seed_devices->opened = 1;
	INIT_LIST_HEAD(&seed_devices->devices);
	INIT_LIST_HEAD(&seed_devices->alloc_list);
	mutex_init(&seed_devices->device_list_mutex);

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
			     synchronize_rcu);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
	list_for_each_entry(device, &seed_devices->devices, dev_list) {
		device->fs_devices = seed_devices;
	}

	fs_devices->seeding = 0;
	fs_devices->num_devices = 0;
	fs_devices->open_devices = 0;
	fs_devices->total_devices = 0;
	fs_devices->seed = seed_devices;

	generate_random_uuid(fs_devices->fsid);
	memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	super_flags = btrfs_super_flags(disk_super) &
		      ~BTRFS_SUPER_FLAG_SEEDING;
	btrfs_set_super_flags(disk_super, super_flags);

	return 0;
}
/*
 * store the expected generation for seed devices in device items.
 */
static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dev_item *dev_item;
	struct btrfs_device *device;
	struct btrfs_key key;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];
	u64 devid;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	root = root->fs_info->chunk_root;
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = BTRFS_DEV_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
		if (ret < 0)
			goto error;

		leaf = path->nodes[0];
next_slot:
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret > 0)
				break;
			if (ret < 0)
				goto error;
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
			btrfs_release_path(path);
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
		    key.type != BTRFS_DEV_ITEM_KEY)
			break;

		dev_item = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_dev_item);
		devid = btrfs_device_id(leaf, dev_item);
		read_extent_buffer(leaf, dev_uuid,
				   (unsigned long)btrfs_device_uuid(dev_item),
				   BTRFS_UUID_SIZE);
		read_extent_buffer(leaf, fs_uuid,
				   (unsigned long)btrfs_device_fsid(dev_item),
				   BTRFS_UUID_SIZE);
		device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
		BUG_ON(!device); /* Logic error */

		if (device->fs_devices->seeding) {
			btrfs_set_device_generation(leaf, dev_item,
						    device->generation);
			btrfs_mark_buffer_dirty(leaf);
		}

		path->slots[0]++;
		goto next_slot;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
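
/*
 * Add a new device to a mounted filesystem.  If the filesystem is a
 * seed, this first sprouts a fresh fsid via btrfs_prepare_sprout() and
 * grafts the new writable device onto it.
 */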
int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
{
	struct request_queue *q;
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device;
	struct block_device *bdev;
	struct list_head *devices;
	struct super_block *sb = root->fs_info->sb;
	struct rcu_string *name;
	u64 total_bytes;
	int seeding_dev = 0;
	int ret = 0;

	if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
		return -EROFS;

	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
				  root->fs_info->bdev_holder);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	if (root->fs_info->fs_devices->seeding) {
		seeding_dev = 1;
		down_write(&sb->s_umount);
		mutex_lock(&uuid_mutex);
	}

	filemap_write_and_wait(bdev->bd_inode->i_mapping);

	devices = &root->fs_info->fs_devices->devices;
	/*
	 * we have the volume lock, so we don't need the extra
	 * device list mutex while reading the list here.
	 */
	list_for_each_entry(device, devices, dev_list) {
		if (device->bdev == bdev) {
			ret = -EEXIST;
			goto error;
		}
	}

	device = kzalloc(sizeof(*device), GFP_NOFS);
	if (!device) {
		/* we can safely leave the fs_devices entry around */
		ret = -ENOMEM;
		goto error;
	}

	name = rcu_string_strdup(device_path, GFP_NOFS);
	if (!name) {
		kfree(device);
		ret = -ENOMEM;
		goto error;
	}
	rcu_assign_pointer(device->name, name);

	ret = find_next_devid(root, &device->devid);
	if (ret) {
		rcu_string_free(device->name);
		kfree(device);
		goto error;
	}

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		rcu_string_free(device->name);
		kfree(device);
		ret = PTR_ERR(trans);
		goto error;
	}

	lock_chunks(root);

	q = bdev_get_queue(bdev);
	if (blk_queue_discard(q))
		device->can_discard = 1;
	device->writeable = 1;
	device->work.func = pending_bios_fn;
	generate_random_uuid(device->uuid);
	spin_lock_init(&device->io_lock);
	device->generation = trans->transid;
	device->io_width = root->sectorsize;
	device->io_align = root->sectorsize;
	device->sector_size = root->sectorsize;
	device->total_bytes = i_size_read(bdev->bd_inode);
	device->disk_total_bytes = device->total_bytes;
	device->dev_root = root->fs_info->dev_root;
	device->bdev = bdev;
	device->in_fs_metadata = 1;
	device->mode = FMODE_EXCL;
	set_blocksize(device->bdev, 4096);

	if (seeding_dev) {
		sb->s_flags &= ~MS_RDONLY;
		ret = btrfs_prepare_sprout(root);
		BUG_ON(ret); /* -ENOMEM */
	}

	device->fs_devices = root->fs_info->fs_devices;

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
	list_add(&device->dev_alloc_list,
		 &root->fs_info->fs_devices->alloc_list);
	root->fs_info->fs_devices->num_devices++;
	root->fs_info->fs_devices->open_devices++;
	root->fs_info->fs_devices->rw_devices++;
	root->fs_info->fs_devices->total_devices++;
	if (device->can_discard)
		root->fs_info->fs_devices->num_can_discard++;
	root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;

	spin_lock(&root->fs_info->free_chunk_lock);
	root->fs_info->free_chunk_space += device->total_bytes;
	spin_unlock(&root->fs_info->free_chunk_lock);

	if (!blk_queue_nonrot(bdev_get_queue(bdev)))
		root->fs_info->fs_devices->rotating = 1;

	total_bytes = btrfs_super_total_bytes(root->fs_info->super_copy);
	btrfs_set_super_total_bytes(root->fs_info->super_copy,
				    total_bytes + device->total_bytes);

	total_bytes = btrfs_super_num_devices(root->fs_info->super_copy);
	btrfs_set_super_num_devices(root->fs_info->super_copy,
				    total_bytes + 1);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	if (seeding_dev) {
		ret = init_first_rw_device(trans, root, device);
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			goto error_trans;
		}
		ret = btrfs_finish_sprout(trans, root);
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			goto error_trans;
		}
	} else {
		ret = btrfs_add_device(trans, root, device);
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			goto error_trans;
		}
	}

	/*
	 * we've got more storage, clear any full flags on the space
	 * infos
	 */
	btrfs_clear_space_info_full(root->fs_info);

	unlock_chunks(root);
	root->fs_info->num_tolerated_disk_barrier_failures =
		btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
	ret = btrfs_commit_transaction(trans, root);

	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);

		if (ret) /* transaction commit */
			return ret;

		ret = btrfs_relocate_sys_chunks(root);
		if (ret < 0)
			btrfs_error(root->fs_info, ret,
				    "Failed to relocate sys chunks after "
				    "device initialization. This can be fixed "
				    "using the \"btrfs balance\" command.");
		trans = btrfs_attach_transaction(root);
		if (IS_ERR(trans)) {
			if (PTR_ERR(trans) == -ENOENT)
				return 0;
			return PTR_ERR(trans);
		}
		ret = btrfs_commit_transaction(trans, root);
	}

	return ret;

error_trans:
	unlock_chunks(root);
	btrfs_end_transaction(trans, root);
	rcu_string_free(device->name);
	kfree(device);
error:
	blkdev_put(bdev, FMODE_EXCL);
	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);
	}
	return ret;
}
static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
					struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = device->dev_root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_mark_buffer_dirty(leaf);

out:
	btrfs_free_path(path);
	return ret;
}
static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
			       struct btrfs_device *device, u64 new_size)
{
	struct btrfs_super_block *super_copy =
		device->dev_root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 diff = new_size - device->total_bytes;

	if (!device->writeable)
		return -EACCES;
	if (new_size <= device->total_bytes)
		return -EINVAL;

	btrfs_set_super_total_bytes(super_copy, old_total + diff);
	device->fs_devices->total_rw_bytes += diff;

	device->total_bytes = new_size;
	device->disk_total_bytes = new_size;
	btrfs_clear_space_info_full(device->dev_root->fs_info);

	return btrfs_update_device(trans, device);
}

int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size)
{
	int ret;

	lock_chunks(device->dev_root);
	ret = __btrfs_grow_device(trans, device, new_size);
	unlock_chunks(device->dev_root);
	return ret;
}
static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    u64 chunk_tree, u64 chunk_objectid,
			    u64 chunk_offset)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	root = root->fs_info->chunk_root;
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = chunk_objectid;
	key.offset = chunk_offset;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	else if (ret > 0) { /* Logic error or corruption */
		btrfs_error(root->fs_info, -ENOENT,
			    "Failed lookup while freeing chunk.");
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret < 0)
		btrfs_error(root->fs_info, ret,
			    "Failed to delete chunk item.");
out:
	btrfs_free_path(path);
	return ret;
}
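
/*
 * System chunks are duplicated in the superblock's sys_chunk_array so
 * the chunk tree itself can be located at mount time.  Deleting one
 * means sliding the tail of the array over the removed entry.
 */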
static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
			       chunk_offset)
{
	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)(ptr + len);
			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
			len += btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
		if (key.objectid == chunk_objectid &&
		    key.offset == chunk_offset) {
			memmove(ptr, ptr + len, array_size - (cur + len));
			array_size -= len;
			btrfs_set_super_sys_array_size(super_copy, array_size);
		} else {
			ptr += len;
			cur += len;
		}
	}
	return ret;
}
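
/*
 * Relocate a chunk in two steps: first move every extent it holds into
 * other chunks, then delete its device extents, its chunk item (plus
 * the sys_chunk_array copy for system chunks) and its block group.
 */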
static int btrfs_relocate_chunk(struct btrfs_root *root,
				u64 chunk_tree, u64 chunk_objectid,
				u64 chunk_offset)
{
	struct extent_map_tree *em_tree;
	struct btrfs_root *extent_root;
	struct btrfs_trans_handle *trans;
	struct extent_map *em;
	struct map_lookup *map;
	int ret;
	int i;

	root = root->fs_info->chunk_root;
	extent_root = root->fs_info->extent_root;
	em_tree = &root->fs_info->mapping_tree.map_tree;

	ret = btrfs_can_relocate(extent_root, chunk_offset);
	if (ret)
		return -ENOSPC;

	/* step one, relocate all the extents inside this chunk */
	ret = btrfs_relocate_block_group(extent_root, chunk_offset);
	if (ret)
		return ret;

	trans = btrfs_start_transaction(root, 0);
	BUG_ON(IS_ERR(trans));

	lock_chunks(root);

	/*
	 * step two, delete the device extents and the
	 * chunk tree entries
	 */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	read_unlock(&em_tree->lock);

	BUG_ON(!em || em->start > chunk_offset ||
	       em->start + em->len < chunk_offset);
	map = (struct map_lookup *)em->bdev;

	for (i = 0; i < map->num_stripes; i++) {
		ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
					    map->stripes[i].physical);
		BUG_ON(ret);

		if (map->stripes[i].dev) {
			ret = btrfs_update_device(trans, map->stripes[i].dev);
			BUG_ON(ret);
		}
	}
	ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
			       chunk_offset);
	BUG_ON(ret);

	trace_btrfs_chunk_free(root, map, chunk_offset, em->len);

	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
		BUG_ON(ret);
	}

	ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
	BUG_ON(ret);

	write_lock(&em_tree->lock);
	remove_extent_mapping(em_tree, em);
	write_unlock(&em_tree->lock);

	kfree(map);
	em->bdev = NULL;

	/* once for the tree */
	free_extent_map(em);
	/* once for us */
	free_extent_map(em);

	unlock_chunks(root);
	btrfs_end_transaction(trans, root);
	return 0;
}
static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
{
	struct btrfs_root *chunk_root = root->fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_chunk *chunk;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u64 chunk_tree = chunk_root->root_key.objectid;
	u64 chunk_type;
	bool retried = false;
	int failed = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

again:
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0)
			goto error;
		BUG_ON(ret == 0); /* Corruption */

		ret = btrfs_previous_item(chunk_root, path, key.objectid,
					  key.type);
		if (ret < 0)
			goto error;
		if (ret > 0)
			break;

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		chunk = btrfs_item_ptr(leaf, path->slots[0],
				       struct btrfs_chunk);
		chunk_type = btrfs_chunk_type(leaf, chunk);
		btrfs_release_path(path);

		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
			ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
						   found_key.objectid,
						   found_key.offset);
			if (ret == -ENOSPC)
				failed++;
			else if (ret)
				BUG();
		}

		if (found_key.offset == 0)
			break;
		key.offset = found_key.offset - 1;
	}
	ret = 0;
	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
	} else if (failed && retried) {
		WARN_ON(1);
		ret = -ENOSPC;
	}
error:
	btrfs_free_path(path);
	return ret;
}
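
/*
 * The balance item records the balance arguments on disk so that an
 * interrupted balance can be resumed after a crash or remount.
 */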
static int insert_balance_item(struct btrfs_root *root,
			       struct btrfs_balance_control *bctl)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_balance_item *item;
	struct btrfs_disk_balance_args disk_bargs;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	int ret, err;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	key.objectid = BTRFS_BALANCE_OBJECTID;
	key.type = BTRFS_BALANCE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);

	memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));

	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
	btrfs_set_balance_data(leaf, item, &disk_bargs);
	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
	btrfs_set_balance_meta(leaf, item, &disk_bargs);
	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
	btrfs_set_balance_sys(leaf, item, &disk_bargs);

	btrfs_set_balance_flags(leaf, item, bctl->flags);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	err = btrfs_commit_transaction(trans, root);
	if (err && !ret)
		ret = err;
	return ret;
}
static int del_balance_item(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret, err;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	key.objectid = BTRFS_BALANCE_OBJECTID;
	key.type = BTRFS_BALANCE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_free_path(path);
	err = btrfs_commit_transaction(trans, root);
	if (err && !ret)
		ret = err;
	return ret;
}
/*
 * This is a heuristic used to reduce the number of chunks balanced on
 * resume after balance was interrupted.
 */
static void update_balance_args(struct btrfs_balance_control *bctl)
{
	/*
	 * Turn on soft mode for chunk types that were being converted.
	 */
	if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
	if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;

	/*
	 * Turn on usage filter if it is not already used.  The idea is
	 * that chunks that we have already balanced should be
	 * reasonably full.  Don't do it for chunks that are being
	 * converted - that will keep us from relocating unconverted
	 * (albeit full) chunks.
	 */
	if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->data.usage = 90;
	}
	if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->sys.usage = 90;
	}
	if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->meta.usage = 90;
	}
}
/*
 * Should be called with both balance and volume mutexes held to
 * serialize other volume operations (add_dev/rm_dev/resize) with
 * restriper.  Same goes for unset_balance_control.
 */
static void set_balance_control(struct btrfs_balance_control *bctl)
{
	struct btrfs_fs_info *fs_info = bctl->fs_info;

	BUG_ON(fs_info->balance_ctl);

	spin_lock(&fs_info->balance_lock);
	fs_info->balance_ctl = bctl;
	spin_unlock(&fs_info->balance_lock);
}

static void unset_balance_control(struct btrfs_fs_info *fs_info)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;

	BUG_ON(!fs_info->balance_ctl);

	spin_lock(&fs_info->balance_lock);
	fs_info->balance_ctl = NULL;
	spin_unlock(&fs_info->balance_lock);

	kfree(bctl);
}
/*
 * Balance filters.  Return 1 if chunk should be filtered out
 * (should not be balanced).
 */
static int chunk_profiles_filter(u64 chunk_type,
				 struct btrfs_balance_args *bargs)
{
	chunk_type = chunk_to_extended(chunk_type) &
				BTRFS_EXTENDED_PROFILE_MASK;

	if (bargs->profiles & chunk_type)
		return 0;

	return 1;
}

static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
			      struct btrfs_balance_args *bargs)
{
	struct btrfs_block_group_cache *cache;
	u64 chunk_used, user_thresh;
	int ret = 1;

	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
	chunk_used = btrfs_block_group_used(&cache->item);

	user_thresh = div_factor_fine(cache->key.offset, bargs->usage);
	if (chunk_used < user_thresh)
		ret = 0;

	btrfs_put_block_group(cache);
	return ret;
}
static int chunk_devid_filter(struct extent_buffer *leaf,
			      struct btrfs_chunk *chunk,
			      struct btrfs_balance_args *bargs)
{
	struct btrfs_stripe *stripe;
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	int i;

	for (i = 0; i < num_stripes; i++) {
		stripe = btrfs_stripe_nr(chunk, i);
		if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
			return 0;
	}

	return 1;
}
/* [pstart, pend) */
static int chunk_drange_filter(struct extent_buffer *leaf,
			       struct btrfs_chunk *chunk,
			       u64 chunk_offset,
			       struct btrfs_balance_args *bargs)
{
	struct btrfs_stripe *stripe;
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	u64 stripe_offset;
	u64 stripe_length;
	int factor;
	int i;

	if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
		return 0;

	if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
	     BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10))
		factor = 2;
	else
		factor = 1;
	factor = num_stripes / factor;

	for (i = 0; i < num_stripes; i++) {
		stripe = btrfs_stripe_nr(chunk, i);
		if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
			continue;

		stripe_offset = btrfs_stripe_offset(leaf, stripe);
		stripe_length = btrfs_chunk_length(leaf, chunk);
		do_div(stripe_length, factor);

		if (stripe_offset < bargs->pend &&
		    stripe_offset + stripe_length > bargs->pstart)
			return 0;
	}

	return 1;
}
/* [vstart, vend) */
static int chunk_vrange_filter(struct extent_buffer *leaf,
			       struct btrfs_chunk *chunk,
			       u64 chunk_offset,
			       struct btrfs_balance_args *bargs)
{
	if (chunk_offset < bargs->vend &&
	    chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
		/* at least part of the chunk is inside this vrange */
		return 0;

	return 1;
}

static int chunk_soft_convert_filter(u64 chunk_type,
				     struct btrfs_balance_args *bargs)
{
	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
		return 0;

	chunk_type = chunk_to_extended(chunk_type) &
				BTRFS_EXTENDED_PROFILE_MASK;

	if (bargs->target == chunk_type)
		return 1;

	return 0;
}
static int should_balance_chunk(struct btrfs_root *root,
				struct extent_buffer *leaf,
				struct btrfs_chunk *chunk, u64 chunk_offset)
{
	struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
	struct btrfs_balance_args *bargs = NULL;
	u64 chunk_type = btrfs_chunk_type(leaf, chunk);

	/* type filter */
	if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
	      (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
		return 0;
	}

	if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
		bargs = &bctl->data;
	else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
		bargs = &bctl->sys;
	else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
		bargs = &bctl->meta;

	/* profiles filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
	    chunk_profiles_filter(chunk_type, bargs)) {
		return 0;
	}

	/* usage filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) {
		return 0;
	}

	/* devid filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
	    chunk_devid_filter(leaf, chunk, bargs)) {
		return 0;
	}

	/* drange filter, makes sense only with devid filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
	    chunk_drange_filter(leaf, chunk, chunk_offset, bargs)) {
		return 0;
	}

	/* vrange filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
	    chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
		return 0;
	}

	/* soft profile changing mode */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
	    chunk_soft_convert_filter(chunk_type, bargs)) {
		return 0;
	}

	return 1;
}
2508 static int __btrfs_balance(struct btrfs_fs_info *fs_info)
2510 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
2511 struct btrfs_root *chunk_root = fs_info->chunk_root;
2512 struct btrfs_root *dev_root = fs_info->dev_root;
2513 struct list_head *devices;
2514 struct btrfs_device *device;
2517 struct btrfs_chunk *chunk;
2518 struct btrfs_path *path;
2519 struct btrfs_key key;
2520 struct btrfs_key found_key;
2521 struct btrfs_trans_handle *trans;
2522 struct extent_buffer *leaf;
2525 int enospc_errors = 0;
2526 bool counting = true;
2528 /* step one, make some room on all the devices */
2529 devices = &fs_info->fs_devices->devices;
2530 list_for_each_entry(device, devices, dev_list) {
2531 old_size = device->total_bytes;
2532 size_to_free = div_factor(old_size, 1);
2533 size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
2534 if (!device->writeable ||
2535 device->total_bytes - device->bytes_used > size_to_free)
2538 ret = btrfs_shrink_device(device, old_size - size_to_free);
2543 trans = btrfs_start_transaction(dev_root, 0);
2544 BUG_ON(IS_ERR(trans));
2546 ret = btrfs_grow_device(trans, device, old_size);
2549 btrfs_end_transaction(trans, dev_root);
2552 /* step two, relocate all the chunks */
2553 path = btrfs_alloc_path();
2559 /* zero out stat counters */
2560 spin_lock(&fs_info->balance_lock);
2561 memset(&bctl->stat, 0, sizeof(bctl->stat));
2562 spin_unlock(&fs_info->balance_lock);
2564 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2565 key.offset = (u64)-1;
2566 key.type = BTRFS_CHUNK_ITEM_KEY;
2569 if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
2570 atomic_read(&fs_info->balance_cancel_req)) {
2575 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2580 * this shouldn't happen, it means the last relocate failed
2584 BUG(); /* FIXME break ? */
2586 ret = btrfs_previous_item(chunk_root, path, 0,
2587 BTRFS_CHUNK_ITEM_KEY);
2593 leaf = path->nodes[0];
2594 slot = path->slots[0];
2595 btrfs_item_key_to_cpu(leaf, &found_key, slot);
2597 if (found_key.objectid != key.objectid)
2600 /* chunk zero is special */
2601 if (found_key.offset == 0)
2604 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
2607 spin_lock(&fs_info->balance_lock);
2608 bctl->stat.considered++;
2609 spin_unlock(&fs_info->balance_lock);
2612 ret = should_balance_chunk(chunk_root, leaf, chunk,
2614 btrfs_release_path(path);
2619 spin_lock(&fs_info->balance_lock);
2620 bctl->stat.expected++;
2621 spin_unlock(&fs_info->balance_lock);
2625 ret = btrfs_relocate_chunk(chunk_root,
2626 chunk_root->root_key.objectid,
2629 if (ret && ret != -ENOSPC)
2631 if (ret == -ENOSPC) {
2634 spin_lock(&fs_info->balance_lock);
2635 bctl->stat.completed++;
2636 spin_unlock(&fs_info->balance_lock);
2639 key.offset = found_key.offset - 1;
2643 btrfs_release_path(path);
2648 btrfs_free_path(path);
2649 if (enospc_errors) {
2650 printk(KERN_INFO "btrfs: %d enospc errors during balance\n",
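/*
 * Illustrative summary of the relocation loop above (not from the
 * original source): the chunk tree is walked twice, backwards from
 * key.offset = (u64)-1.  The first pass (counting == true) only
 * counts chunks that pass should_balance_chunk() in
 * bctl->stat.expected; the second pass relocates them and bumps
 * bctl->stat.completed.  -ENOSPC from btrfs_relocate_chunk() is
 * tolerated and tallied in enospc_errors so the balance makes as
 * much progress as it can.  Stepping backwards looks roughly like:
 *
 *	btrfs_item_key_to_cpu(leaf, &found_key, slot);
 *	...
 *	key.offset = found_key.offset - 1;	// next search lands on
 *						// the previous chunk
 */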
2660 * alloc_profile_is_valid - see if a given profile is valid and reduced
2661 * @flags: profile to validate
2662 * @extended: if true @flags is treated as an extended profile
2664 static int alloc_profile_is_valid(u64 flags, int extended)
2666 u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
2667 BTRFS_BLOCK_GROUP_PROFILE_MASK);
2669 flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
2671 /* 1) check that all other bits are zeroed */
2675 /* 2) see if profile is reduced */
2677 return !extended; /* "0" is valid for usual profiles */
2679 /* true if exactly one bit set */
2680 return (flags & (flags - 1)) == 0;
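/*
 * The final check above is the classic power-of-two test: for a
 * non-zero value, clearing the lowest set bit yields zero exactly
 * when that was the only bit set.  Illustrative values (hypothetical):
 *
 *	flags = BTRFS_BLOCK_GROUP_RAID1;
 *	(flags & (flags - 1)) == 0;	// true, profile is reduced
 *
 *	flags = BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP;
 *	(flags & (flags - 1)) == 0;	// false, not reduced
 */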
2683 static inline int balance_need_close(struct btrfs_fs_info *fs_info)
2685 /* cancel requested || normal exit path */
2686 return atomic_read(&fs_info->balance_cancel_req) ||
2687 (atomic_read(&fs_info->balance_pause_req) == 0 &&
2688 atomic_read(&fs_info->balance_cancel_req) == 0);
2691 static void __cancel_balance(struct btrfs_fs_info *fs_info)
2695 unset_balance_control(fs_info);
2696 ret = del_balance_item(fs_info->tree_root);
2700 void update_ioctl_balance_args(struct btrfs_fs_info *fs_info, int lock,
2701 struct btrfs_ioctl_balance_args *bargs);
2704 * Should be called with both balance and volume mutexes held
2706 int btrfs_balance(struct btrfs_balance_control *bctl,
2707 struct btrfs_ioctl_balance_args *bargs)
2709 struct btrfs_fs_info *fs_info = bctl->fs_info;
2714 if (btrfs_fs_closing(fs_info) ||
2715 atomic_read(&fs_info->balance_pause_req) ||
2716 atomic_read(&fs_info->balance_cancel_req)) {
2721 allowed = btrfs_super_incompat_flags(fs_info->super_copy);
2722 if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
2726 * In case of mixed groups both data and meta should be picked,
2727 * and identical options should be given for both of them.
2729 allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
2730 if (mixed && (bctl->flags & allowed)) {
2731 if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
2732 !(bctl->flags & BTRFS_BALANCE_METADATA) ||
2733 memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
2734 printk(KERN_ERR "btrfs: with mixed groups data and "
2735 "metadata balance options must be the same\n");
2741 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
2742 if (fs_info->fs_devices->num_devices == 1)
2743 allowed |= BTRFS_BLOCK_GROUP_DUP;
2744 else if (fs_info->fs_devices->num_devices < 4)
2745 allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
2747 allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
2748 BTRFS_BLOCK_GROUP_RAID10);
2750 if ((bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2751 (!alloc_profile_is_valid(bctl->data.target, 1) ||
2752 (bctl->data.target & ~allowed))) {
2753 printk(KERN_ERR "btrfs: unable to start balance with target "
2754 "data profile %llu\n",
2755 (unsigned long long)bctl->data.target);
2759 if ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2760 (!alloc_profile_is_valid(bctl->meta.target, 1) ||
2761 (bctl->meta.target & ~allowed))) {
2762 printk(KERN_ERR "btrfs: unable to start balance with target "
2763 "metadata profile %llu\n",
2764 (unsigned long long)bctl->meta.target);
2768 if ((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2769 (!alloc_profile_is_valid(bctl->sys.target, 1) ||
2770 (bctl->sys.target & ~allowed))) {
2771 printk(KERN_ERR "btrfs: unable to start balance with target "
2772 "system profile %llu\n",
2773 (unsigned long long)bctl->sys.target);
2778 /* allow dup'ed data chunks only in mixed mode */
2779 if (!mixed && (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2780 (bctl->data.target & BTRFS_BLOCK_GROUP_DUP)) {
2781 printk(KERN_ERR "btrfs: dup for data is not allowed\n");
2786 /* allow reducing meta or sys integrity only if force is set */
2787 allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
2788 BTRFS_BLOCK_GROUP_RAID10;
2789 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2790 (fs_info->avail_system_alloc_bits & allowed) &&
2791 !(bctl->sys.target & allowed)) ||
2792 ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2793 (fs_info->avail_metadata_alloc_bits & allowed) &&
2794 !(bctl->meta.target & allowed))) {
2795 if (bctl->flags & BTRFS_BALANCE_FORCE) {
2796 printk(KERN_INFO "btrfs: force reducing metadata "
2799 printk(KERN_ERR "btrfs: balance will reduce metadata "
2800 "integrity, use force if you want this\n");
2806 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
2807 int num_tolerated_disk_barrier_failures;
2808 u64 target = bctl->sys.target;
2810 num_tolerated_disk_barrier_failures =
2811 btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
2812 if (num_tolerated_disk_barrier_failures > 0 &&
2814 (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
2815 BTRFS_AVAIL_ALLOC_BIT_SINGLE)))
2816 num_tolerated_disk_barrier_failures = 0;
2817 else if (num_tolerated_disk_barrier_failures > 1 &&
2819 (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)))
2820 num_tolerated_disk_barrier_failures = 1;
2822 fs_info->num_tolerated_disk_barrier_failures =
2823 num_tolerated_disk_barrier_failures;
2826 ret = insert_balance_item(fs_info->tree_root, bctl);
2827 if (ret && ret != -EEXIST)
2830 if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
2831 BUG_ON(ret == -EEXIST);
2832 set_balance_control(bctl);
2834 BUG_ON(ret != -EEXIST);
2835 spin_lock(&fs_info->balance_lock);
2836 update_balance_args(bctl);
2837 spin_unlock(&fs_info->balance_lock);
2840 atomic_inc(&fs_info->balance_running);
2841 mutex_unlock(&fs_info->balance_mutex);
2843 ret = __btrfs_balance(fs_info);
2845 mutex_lock(&fs_info->balance_mutex);
2846 atomic_dec(&fs_info->balance_running);
2849 memset(bargs, 0, sizeof(*bargs));
2850 update_ioctl_balance_args(fs_info, 0, bargs);
2853 if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
2854 balance_need_close(fs_info)) {
2855 __cancel_balance(fs_info);
2858 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
2859 fs_info->num_tolerated_disk_barrier_failures =
2860 btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
2863 wake_up(&fs_info->balance_wait_q);
2867 if (bctl->flags & BTRFS_BALANCE_RESUME)
2868 __cancel_balance(fs_info);
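/*
 * Illustrative note (not from the original source) on the -EEXIST
 * handling above: a fresh balance inserts a new balance item and must
 * not find one already on disk, while a resumed balance
 * (BTRFS_BALANCE_RESUME) expects the item to exist and only refreshes
 * the in-memory arguments:
 *
 *	ret = insert_balance_item(fs_info->tree_root, bctl);
 *	if (!(bctl->flags & BTRFS_BALANCE_RESUME))
 *		set_balance_control(bctl);	// BUG if ret == -EEXIST
 *	else
 *		update_balance_args(bctl);	// BUG if ret != -EEXIST
 */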
2874 static int balance_kthread(void *data)
2876 struct btrfs_fs_info *fs_info = data;
2879 mutex_lock(&fs_info->volume_mutex);
2880 mutex_lock(&fs_info->balance_mutex);
2882 if (fs_info->balance_ctl) {
2883 printk(KERN_INFO "btrfs: continuing balance\n");
2884 ret = btrfs_balance(fs_info->balance_ctl, NULL);
2887 mutex_unlock(&fs_info->balance_mutex);
2888 mutex_unlock(&fs_info->volume_mutex);
2893 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
2895 struct task_struct *tsk;
2897 spin_lock(&fs_info->balance_lock);
2898 if (!fs_info->balance_ctl) {
2899 spin_unlock(&fs_info->balance_lock);
2902 spin_unlock(&fs_info->balance_lock);
2904 if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
2905 printk(KERN_INFO "btrfs: force skipping balance\n");
2909 tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
2911 return PTR_ERR(tsk);
2916 int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
2918 struct btrfs_balance_control *bctl;
2919 struct btrfs_balance_item *item;
2920 struct btrfs_disk_balance_args disk_bargs;
2921 struct btrfs_path *path;
2922 struct extent_buffer *leaf;
2923 struct btrfs_key key;
2926 path = btrfs_alloc_path();
2930 key.objectid = BTRFS_BALANCE_OBJECTID;
2931 key.type = BTRFS_BALANCE_ITEM_KEY;
2934 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
2937 if (ret > 0) { /* ret = -ENOENT; */
2942 bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
2948 leaf = path->nodes[0];
2949 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
2951 bctl->fs_info = fs_info;
2952 bctl->flags = btrfs_balance_flags(leaf, item);
2953 bctl->flags |= BTRFS_BALANCE_RESUME;
2955 btrfs_balance_data(leaf, item, &disk_bargs);
2956 btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
2957 btrfs_balance_meta(leaf, item, &disk_bargs);
2958 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
2959 btrfs_balance_sys(leaf, item, &disk_bargs);
2960 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
2962 mutex_lock(&fs_info->volume_mutex);
2963 mutex_lock(&fs_info->balance_mutex);
2965 set_balance_control(bctl);
2967 mutex_unlock(&fs_info->balance_mutex);
2968 mutex_unlock(&fs_info->volume_mutex);
2970 btrfs_free_path(path);
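/*
 * Illustrative sketch (not from the original source) of the on-disk
 * conversion above: the balance item carries three little-endian
 * btrfs_disk_balance_args blocks (data, meta, sys), each expanded by
 * btrfs_disk_balance_args_to_cpu() along the lines of
 *
 *	cpu->profiles = le64_to_cpu(disk->profiles);
 *	cpu->usage    = le64_to_cpu(disk->usage);
 *	// ... and so on for the remaining fields
 *
 * BTRFS_BALANCE_RESUME is OR'ed into the recovered flags so the
 * restarted balance is treated as a continuation rather than a new
 * request.
 */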
2974 int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
2978 mutex_lock(&fs_info->balance_mutex);
2979 if (!fs_info->balance_ctl) {
2980 mutex_unlock(&fs_info->balance_mutex);
2984 if (atomic_read(&fs_info->balance_running)) {
2985 atomic_inc(&fs_info->balance_pause_req);
2986 mutex_unlock(&fs_info->balance_mutex);
2988 wait_event(fs_info->balance_wait_q,
2989 atomic_read(&fs_info->balance_running) == 0);
2991 mutex_lock(&fs_info->balance_mutex);
2992 /* we are good with balance_ctl ripped off from under us */
2993 BUG_ON(atomic_read(&fs_info->balance_running));
2994 atomic_dec(&fs_info->balance_pause_req);
2999 mutex_unlock(&fs_info->balance_mutex);
3003 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
3005 mutex_lock(&fs_info->balance_mutex);
3006 if (!fs_info->balance_ctl) {
3007 mutex_unlock(&fs_info->balance_mutex);
3011 atomic_inc(&fs_info->balance_cancel_req);
3013 * if we are running, just wait and return; the balance item is
3014 * deleted in btrfs_balance() in this case
3016 if (atomic_read(&fs_info->balance_running)) {
3017 mutex_unlock(&fs_info->balance_mutex);
3018 wait_event(fs_info->balance_wait_q,
3019 atomic_read(&fs_info->balance_running) == 0);
3020 mutex_lock(&fs_info->balance_mutex);
3022 /* __cancel_balance needs volume_mutex */
3023 mutex_unlock(&fs_info->balance_mutex);
3024 mutex_lock(&fs_info->volume_mutex);
3025 mutex_lock(&fs_info->balance_mutex);
3027 if (fs_info->balance_ctl)
3028 __cancel_balance(fs_info);
3030 mutex_unlock(&fs_info->volume_mutex);
3033 BUG_ON(fs_info->balance_ctl || atomic_read(&fs_info->balance_running));
3034 atomic_dec(&fs_info->balance_cancel_req);
3035 mutex_unlock(&fs_info->balance_mutex);
3040 * shrinking a device means finding all of the device extents past
3041 * the new size, and then following the back refs to the chunks.
3042 * The chunk relocation code actually frees the device extent
3044 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
3046 struct btrfs_trans_handle *trans;
3047 struct btrfs_root *root = device->dev_root;
3048 struct btrfs_dev_extent *dev_extent = NULL;
3049 struct btrfs_path *path;
3057 bool retried = false;
3058 struct extent_buffer *l;
3059 struct btrfs_key key;
3060 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3061 u64 old_total = btrfs_super_total_bytes(super_copy);
3062 u64 old_size = device->total_bytes;
3063 u64 diff = device->total_bytes - new_size;
3065 path = btrfs_alloc_path();
3073 device->total_bytes = new_size;
3074 if (device->writeable) {
3075 device->fs_devices->total_rw_bytes -= diff;
3076 spin_lock(&root->fs_info->free_chunk_lock);
3077 root->fs_info->free_chunk_space -= diff;
3078 spin_unlock(&root->fs_info->free_chunk_lock);
3080 unlock_chunks(root);
3083 key.objectid = device->devid;
3084 key.offset = (u64)-1;
3085 key.type = BTRFS_DEV_EXTENT_KEY;
3088 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3092 ret = btrfs_previous_item(root, path, 0, key.type);
3097 btrfs_release_path(path);
3102 slot = path->slots[0];
3103 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
3105 if (key.objectid != device->devid) {
3106 btrfs_release_path(path);
3110 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
3111 length = btrfs_dev_extent_length(l, dev_extent);
3113 if (key.offset + length <= new_size) {
3114 btrfs_release_path(path);
3118 chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
3119 chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
3120 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3121 btrfs_release_path(path);
3123 ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
3125 if (ret && ret != -ENOSPC)
3129 } while (key.offset-- > 0);
3131 if (failed && !retried) {
3135 } else if (failed && retried) {
3139 device->total_bytes = old_size;
3140 if (device->writeable)
3141 device->fs_devices->total_rw_bytes += diff;
3142 spin_lock(&root->fs_info->free_chunk_lock);
3143 root->fs_info->free_chunk_space += diff;
3144 spin_unlock(&root->fs_info->free_chunk_lock);
3145 unlock_chunks(root);
3149 /* Shrinking succeeded, else we would be at "done". */
3150 trans = btrfs_start_transaction(root, 0);
3151 if (IS_ERR(trans)) {
3152 ret = PTR_ERR(trans);
3158 device->disk_total_bytes = new_size;
3159 /* Now btrfs_update_device() will change the on-disk size. */
3160 ret = btrfs_update_device(trans, device);
3162 unlock_chunks(root);
3163 btrfs_end_transaction(trans, root);
3166 WARN_ON(diff > old_total);
3167 btrfs_set_super_total_bytes(super_copy, old_total - diff);
3168 unlock_chunks(root);
3169 btrfs_end_transaction(trans, root);
3171 btrfs_free_path(path);
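/*
 * Illustrative outline (not from the original source) of the shrink
 * algorithm above:
 *
 *	1. tentatively drop device->total_bytes to new_size (and the
 *	   free-space accounting with it);
 *	2. walk the DEV_EXTENT items backwards from key.offset = -1 and
 *	   relocate every chunk owning an extent that ends past
 *	   new_size; chunks that fail with -ENOSPC get one extra pass;
 *	3. if extents still would not move, restore total_bytes and the
 *	   counters and fail;
 *	4. otherwise commit disk_total_bytes and the superblock's
 *	   total_bytes inside a transaction.
 */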
3175 static int btrfs_add_system_chunk(struct btrfs_root *root,
3176 struct btrfs_key *key,
3177 struct btrfs_chunk *chunk, int item_size)
3179 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3180 struct btrfs_disk_key disk_key;
3184 array_size = btrfs_super_sys_array_size(super_copy);
3185 if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
3188 ptr = super_copy->sys_chunk_array + array_size;
3189 btrfs_cpu_key_to_disk(&disk_key, key);
3190 memcpy(ptr, &disk_key, sizeof(disk_key));
3191 ptr += sizeof(disk_key);
3192 memcpy(ptr, chunk, item_size);
3193 item_size += sizeof(disk_key);
3194 btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
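/*
 * Layout sketch (illustrative, not from the original source) of
 * super_copy->sys_chunk_array as appended to above: entries are packed
 * back to back, each a disk key immediately followed by the chunk item
 * it describes (including its stripes):
 *
 *	[btrfs_disk_key][btrfs_chunk+stripes][btrfs_disk_key][...]
 *
 * which is why item_size is grown by sizeof(disk_key) before
 * sys_array_size is bumped.
 */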
3199 * sort the devices in descending order by max_avail, total_avail
3201 static int btrfs_cmp_device_info(const void *a, const void *b)
3203 const struct btrfs_device_info *di_a = a;
3204 const struct btrfs_device_info *di_b = b;
3206 if (di_a->max_avail > di_b->max_avail)
3208 if (di_a->max_avail < di_b->max_avail)
3210 if (di_a->total_avail > di_b->total_avail)
3212 if (di_a->total_avail < di_b->total_avail)
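/*
 * Usage note (illustrative, not from the original source): sort()
 * from lib/sort.c orders ascending by the comparator, and this
 * comparator returns negative for the *larger* max_avail, which is
 * what produces the descending order:
 *
 *	sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
 *	     btrfs_cmp_device_info, NULL);
 */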
3217 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3218 struct btrfs_root *extent_root,
3219 struct map_lookup **map_ret,
3220 u64 *num_bytes_out, u64 *stripe_size_out,
3221 u64 start, u64 type)
3223 struct btrfs_fs_info *info = extent_root->fs_info;
3224 struct btrfs_fs_devices *fs_devices = info->fs_devices;
3225 struct list_head *cur;
3226 struct map_lookup *map = NULL;
3227 struct extent_map_tree *em_tree;
3228 struct extent_map *em;
3229 struct btrfs_device_info *devices_info = NULL;
3231 int num_stripes; /* total number of stripes to allocate */
3232 int sub_stripes; /* sub_stripes info for map */
3233 int dev_stripes; /* stripes per dev */
3234 int devs_max; /* max devs to use */
3235 int devs_min; /* min devs needed */
3236 int devs_increment; /* ndevs has to be a multiple of this */
3237 int ncopies; /* how many copies the data has */
3239 u64 max_stripe_size;
3247 BUG_ON(!alloc_profile_is_valid(type, 0));
3249 if (list_empty(&fs_devices->alloc_list))
3256 devs_max = 0; /* 0 == as many as possible */
3260 * define the properties of each RAID type.
3261 * FIXME: move this to a global table and use it in all RAID code
3264 if (type & (BTRFS_BLOCK_GROUP_DUP)) {
3268 } else if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
3270 } else if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
3275 } else if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
3284 if (type & BTRFS_BLOCK_GROUP_DATA) {
3285 max_stripe_size = 1024 * 1024 * 1024;
3286 max_chunk_size = 10 * max_stripe_size;
3287 } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
3288 /* for larger filesystems, use larger metadata chunks */
3289 if (fs_devices->total_rw_bytes > 50ULL * 1024 * 1024 * 1024)
3290 max_stripe_size = 1024 * 1024 * 1024;
3292 max_stripe_size = 256 * 1024 * 1024;
3293 max_chunk_size = max_stripe_size;
3294 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
3295 max_stripe_size = 32 * 1024 * 1024;
3296 max_chunk_size = 2 * max_stripe_size;
3298 printk(KERN_ERR "btrfs: invalid chunk type 0x%llx requested\n",
3303 /* we don't want a chunk larger than 10% of writeable space */
3304 max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
3307 devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices,
3312 cur = fs_devices->alloc_list.next;
3315 * in the first pass through the devices list, we gather information
3316 * about the available holes on each device.
3319 while (cur != &fs_devices->alloc_list) {
3320 struct btrfs_device *device;
3324 device = list_entry(cur, struct btrfs_device, dev_alloc_list);
3328 if (!device->writeable) {
3330 "btrfs: read-only device in alloc_list\n");
3334 if (!device->in_fs_metadata)
3337 if (device->total_bytes > device->bytes_used)
3338 total_avail = device->total_bytes - device->bytes_used;
3342 /* If there is no space on this device, skip it. */
3343 if (total_avail == 0)
3346 ret = find_free_dev_extent(device,
3347 max_stripe_size * dev_stripes,
3348 &dev_offset, &max_avail);
3349 if (ret && ret != -ENOSPC)
3353 max_avail = max_stripe_size * dev_stripes;
3355 if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
3358 devices_info[ndevs].dev_offset = dev_offset;
3359 devices_info[ndevs].max_avail = max_avail;
3360 devices_info[ndevs].total_avail = total_avail;
3361 devices_info[ndevs].dev = device;
3366 * now sort the devices by hole size / available space
3368 sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
3369 btrfs_cmp_device_info, NULL);
3371 /* round down to number of usable stripes */
3372 ndevs -= ndevs % devs_increment;
3374 if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
3379 if (devs_max && ndevs > devs_max)
3382 * the primary goal is to maximize the number of stripes, so use as many
3383 * devices as possible, even if the stripes are not maximum sized.
3385 stripe_size = devices_info[ndevs-1].max_avail;
3386 num_stripes = ndevs * dev_stripes;
3388 if (stripe_size * ndevs > max_chunk_size * ncopies) {
3389 stripe_size = max_chunk_size * ncopies;
3390 do_div(stripe_size, ndevs);
3393 do_div(stripe_size, dev_stripes);
3395 /* align to BTRFS_STRIPE_LEN */
3396 do_div(stripe_size, BTRFS_STRIPE_LEN);
3397 stripe_size *= BTRFS_STRIPE_LEN;
3399 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
3404 map->num_stripes = num_stripes;
3406 for (i = 0; i < ndevs; ++i) {
3407 for (j = 0; j < dev_stripes; ++j) {
3408 int s = i * dev_stripes + j;
3409 map->stripes[s].dev = devices_info[i].dev;
3410 map->stripes[s].physical = devices_info[i].dev_offset +
3414 map->sector_size = extent_root->sectorsize;
3415 map->stripe_len = BTRFS_STRIPE_LEN;
3416 map->io_align = BTRFS_STRIPE_LEN;
3417 map->io_width = BTRFS_STRIPE_LEN;
3419 map->sub_stripes = sub_stripes;
3422 num_bytes = stripe_size * (num_stripes / ncopies);
3424 *stripe_size_out = stripe_size;
3425 *num_bytes_out = num_bytes;
3427 trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);
3429 em = alloc_extent_map();
3434 em->bdev = (struct block_device *)map;
3436 em->len = num_bytes;
3437 em->block_start = 0;
3438 em->block_len = em->len;
3440 em_tree = &extent_root->fs_info->mapping_tree.map_tree;
3441 write_lock(&em_tree->lock);
3442 ret = add_extent_mapping(em_tree, em);
3443 write_unlock(&em_tree->lock);
3444 free_extent_map(em);
3448 ret = btrfs_make_block_group(trans, extent_root, 0, type,
3449 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3454 for (i = 0; i < map->num_stripes; ++i) {
3455 struct btrfs_device *device;
3458 device = map->stripes[i].dev;
3459 dev_offset = map->stripes[i].physical;
3461 ret = btrfs_alloc_dev_extent(trans, device,
3462 info->chunk_root->root_key.objectid,
3463 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3464 start, dev_offset, stripe_size);
3466 btrfs_abort_transaction(trans, extent_root, ret);
3471 kfree(devices_info);
3476 kfree(devices_info);
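/*
 * Worked example of the sizing above (illustrative numbers, not from
 * the original source): a RAID10 chunk with ndevs = 4, dev_stripes = 1,
 * sub_stripes = 2 and ncopies = 2 gives
 *
 *	num_stripes = ndevs * dev_stripes = 4;
 *	stripe_size = devices_info[ndevs - 1].max_avail; // smallest hole
 *	if (stripe_size * ndevs > max_chunk_size * ncopies)
 *		stripe_size = max_chunk_size * ncopies / ndevs;
 *	stripe_size = rounddown(stripe_size, BTRFS_STRIPE_LEN);
 *	num_bytes = stripe_size * (num_stripes / ncopies);
 *
 * i.e. four on-disk stripes holding two stripes' worth of user data.
 */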
3480 static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
3481 struct btrfs_root *extent_root,
3482 struct map_lookup *map, u64 chunk_offset,
3483 u64 chunk_size, u64 stripe_size)
3486 struct btrfs_key key;
3487 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
3488 struct btrfs_device *device;
3489 struct btrfs_chunk *chunk;
3490 struct btrfs_stripe *stripe;
3491 size_t item_size = btrfs_chunk_item_size(map->num_stripes);
3495 chunk = kzalloc(item_size, GFP_NOFS);
3500 while (index < map->num_stripes) {
3501 device = map->stripes[index].dev;
3502 device->bytes_used += stripe_size;
3503 ret = btrfs_update_device(trans, device);
3509 spin_lock(&extent_root->fs_info->free_chunk_lock);
3510 extent_root->fs_info->free_chunk_space -= (stripe_size *
3512 spin_unlock(&extent_root->fs_info->free_chunk_lock);
3515 stripe = &chunk->stripe;
3516 while (index < map->num_stripes) {
3517 device = map->stripes[index].dev;
3518 dev_offset = map->stripes[index].physical;
3520 btrfs_set_stack_stripe_devid(stripe, device->devid);
3521 btrfs_set_stack_stripe_offset(stripe, dev_offset);
3522 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
3527 btrfs_set_stack_chunk_length(chunk, chunk_size);
3528 btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
3529 btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
3530 btrfs_set_stack_chunk_type(chunk, map->type);
3531 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
3532 btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
3533 btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
3534 btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
3535 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
3537 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3538 key.type = BTRFS_CHUNK_ITEM_KEY;
3539 key.offset = chunk_offset;
3541 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
3543 if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
3545 * TODO: Cleanup of inserted chunk root in case of
3548 ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
3558 * Chunk allocation falls into two parts. The first part does the
3559 * work that makes the newly allocated chunk usable, but does not do
3560 * any operation that modifies the chunk tree. The second part does
3561 * the work that requires modifying the chunk tree. This division is
3562 * important for the bootstrap process of adding storage to a seed btrfs.
3564 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3565 struct btrfs_root *extent_root, u64 type)
3570 struct map_lookup *map;
3571 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
3574 ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3579 ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
3580 &stripe_size, chunk_offset, type);
3584 ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
3585 chunk_size, stripe_size);
3591 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
3592 struct btrfs_root *root,
3593 struct btrfs_device *device)
3596 u64 sys_chunk_offset;
3600 u64 sys_stripe_size;
3602 struct map_lookup *map;
3603 struct map_lookup *sys_map;
3604 struct btrfs_fs_info *fs_info = root->fs_info;
3605 struct btrfs_root *extent_root = fs_info->extent_root;
3608 ret = find_next_chunk(fs_info->chunk_root,
3609 BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset);
3613 alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
3614 fs_info->avail_metadata_alloc_bits;
3615 alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
3617 ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
3618 &stripe_size, chunk_offset, alloc_profile);
3622 sys_chunk_offset = chunk_offset + chunk_size;
3624 alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM |
3625 fs_info->avail_system_alloc_bits;
3626 alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
3628 ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
3629 &sys_chunk_size, &sys_stripe_size,
3630 sys_chunk_offset, alloc_profile);
3632 btrfs_abort_transaction(trans, root, ret);
3636 ret = btrfs_add_device(trans, fs_info->chunk_root, device);
3638 btrfs_abort_transaction(trans, root, ret);
3643 * Modifying the chunk tree requires allocating new blocks from both
3644 * the system block group and the metadata block group, so we can
3645 * only perform operations that modify the chunk tree after both
3646 * block groups have been created.
3648 ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
3649 chunk_size, stripe_size);
3651 btrfs_abort_transaction(trans, root, ret);
3655 ret = __finish_chunk_alloc(trans, extent_root, sys_map,
3656 sys_chunk_offset, sys_chunk_size,
3659 btrfs_abort_transaction(trans, root, ret);
3666 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
3668 struct extent_map *em;
3669 struct map_lookup *map;
3670 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
3674 read_lock(&map_tree->map_tree.lock);
3675 em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
3676 read_unlock(&map_tree->map_tree.lock);
3680 if (btrfs_test_opt(root, DEGRADED)) {
3681 free_extent_map(em);
3685 map = (struct map_lookup *)em->bdev;
3686 for (i = 0; i < map->num_stripes; i++) {
3687 if (!map->stripes[i].dev->writeable) {
3692 free_extent_map(em);
3696 void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
3698 extent_map_tree_init(&tree->map_tree);
3701 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
3703 struct extent_map *em;
3706 write_lock(&tree->map_tree.lock);
3707 em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
3709 remove_extent_mapping(&tree->map_tree, em);
3710 write_unlock(&tree->map_tree.lock);
3715 free_extent_map(em);
3716 /* once for the tree */
3717 free_extent_map(em);
3721 int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
3723 struct extent_map *em;
3724 struct map_lookup *map;
3725 struct extent_map_tree *em_tree = &map_tree->map_tree;
3728 read_lock(&em_tree->lock);
3729 em = lookup_extent_mapping(em_tree, logical, len);
3730 read_unlock(&em_tree->lock);
3733 BUG_ON(em->start > logical || em->start + em->len < logical);
3734 map = (struct map_lookup *)em->bdev;
3735 if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
3736 ret = map->num_stripes;
3737 else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
3738 ret = map->sub_stripes;
3741 free_extent_map(em);
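/*
 * Resulting copy counts per profile (illustrative summary, not from
 * the original source):
 *
 *	single / RAID0	-> 1 (the fall-through default)
 *	DUP / RAID1	-> map->num_stripes (normally 2)
 *	RAID10		-> map->sub_stripes (2)
 */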
3745 static int find_live_mirror(struct map_lookup *map, int first, int num,
3749 if (map->stripes[optimal].dev->bdev)
3751 for (i = first; i < first + num; i++) {
3752 if (map->stripes[i].dev->bdev)
3755 /* we couldn't find one that doesn't fail. Just return something
3756 * and the io error handling code will clean up eventually
3761 static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
3762 u64 logical, u64 *length,
3763 struct btrfs_bio **bbio_ret,
3766 struct extent_map *em;
3767 struct map_lookup *map;
3768 struct extent_map_tree *em_tree = &map_tree->map_tree;
3771 u64 stripe_end_offset;
3780 struct btrfs_bio *bbio = NULL;
3782 read_lock(&em_tree->lock);
3783 em = lookup_extent_mapping(em_tree, logical, *length);
3784 read_unlock(&em_tree->lock);
3787 printk(KERN_CRIT "btrfs: unable to find logical %llu len %llu\n",
3788 (unsigned long long)logical,
3789 (unsigned long long)*length);
3793 BUG_ON(em->start > logical || em->start + em->len < logical);
3794 map = (struct map_lookup *)em->bdev;
3795 offset = logical - em->start;
3797 if (mirror_num > map->num_stripes)
3802 * stripe_nr counts the total number of stripes we have to stride
3803 * to get to this block
3805 do_div(stripe_nr, map->stripe_len);
3807 stripe_offset = stripe_nr * map->stripe_len;
3808 BUG_ON(offset < stripe_offset);
3810 /* stripe_offset is the offset of this block in its stripe */
3811 stripe_offset = offset - stripe_offset;
3813 if (rw & REQ_DISCARD)
3814 *length = min_t(u64, em->len - offset, *length);
3815 else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
3816 /* we limit the length of each bio to what fits in a stripe */
3817 *length = min_t(u64, em->len - offset,
3818 map->stripe_len - stripe_offset);
3820 *length = em->len - offset;
3828 stripe_nr_orig = stripe_nr;
3829 stripe_nr_end = (offset + *length + map->stripe_len - 1) &
3830 (~(map->stripe_len - 1));
3831 do_div(stripe_nr_end, map->stripe_len);
3832 stripe_end_offset = stripe_nr_end * map->stripe_len -
3834 if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3835 if (rw & REQ_DISCARD)
3836 num_stripes = min_t(u64, map->num_stripes,
3837 stripe_nr_end - stripe_nr_orig);
3838 stripe_index = do_div(stripe_nr, map->num_stripes);
3839 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
3840 if (rw & (REQ_WRITE | REQ_DISCARD))
3841 num_stripes = map->num_stripes;
3842 else if (mirror_num)
3843 stripe_index = mirror_num - 1;
3845 stripe_index = find_live_mirror(map, 0,
3847 current->pid % map->num_stripes);
3848 mirror_num = stripe_index + 1;
3851 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
3852 if (rw & (REQ_WRITE | REQ_DISCARD)) {
3853 num_stripes = map->num_stripes;
3854 } else if (mirror_num) {
3855 stripe_index = mirror_num - 1;
3860 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3861 int factor = map->num_stripes / map->sub_stripes;
3863 stripe_index = do_div(stripe_nr, factor);
3864 stripe_index *= map->sub_stripes;
3867 num_stripes = map->sub_stripes;
3868 else if (rw & REQ_DISCARD)
3869 num_stripes = min_t(u64, map->sub_stripes *
3870 (stripe_nr_end - stripe_nr_orig),
3872 else if (mirror_num)
3873 stripe_index += mirror_num - 1;
3875 int old_stripe_index = stripe_index;
3876 stripe_index = find_live_mirror(map, stripe_index,
3877 map->sub_stripes, stripe_index +
3878 current->pid % map->sub_stripes);
3879 mirror_num = stripe_index - old_stripe_index + 1;
3883 * after this do_div call, stripe_nr is the number of stripes
3884 * on this device we have to walk to find the data, and
3885 * stripe_index is the number of our device in the stripe array
3887 stripe_index = do_div(stripe_nr, map->num_stripes);
3888 mirror_num = stripe_index + 1;
3890 BUG_ON(stripe_index >= map->num_stripes);
3892 bbio = kzalloc(btrfs_bio_size(num_stripes), GFP_NOFS);
3897 atomic_set(&bbio->error, 0);
3899 if (rw & REQ_DISCARD) {
3901 int sub_stripes = 0;
3902 u64 stripes_per_dev = 0;
3903 u32 remaining_stripes = 0;
3904 u32 last_stripe = 0;
3907 (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
3908 if (map->type & BTRFS_BLOCK_GROUP_RAID0)
3911 sub_stripes = map->sub_stripes;
3913 factor = map->num_stripes / sub_stripes;
3914 stripes_per_dev = div_u64_rem(stripe_nr_end -
3917 &remaining_stripes);
3918 div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
3919 last_stripe *= sub_stripes;
3922 for (i = 0; i < num_stripes; i++) {
3923 bbio->stripes[i].physical =
3924 map->stripes[stripe_index].physical +
3925 stripe_offset + stripe_nr * map->stripe_len;
3926 bbio->stripes[i].dev = map->stripes[stripe_index].dev;
3928 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
3929 BTRFS_BLOCK_GROUP_RAID10)) {
3930 bbio->stripes[i].length = stripes_per_dev *
3933 if (i / sub_stripes < remaining_stripes)
3934 bbio->stripes[i].length +=
3938 * Special for the first stripe and the last stripe:
3941 * |-------|...|-------|
 *     |----------|
 *      off     end_off
3945 if (i < sub_stripes)
3946 bbio->stripes[i].length -=
3949 if (stripe_index >= last_stripe &&
3950 stripe_index <= (last_stripe +
3952 bbio->stripes[i].length -=
3955 if (i == sub_stripes - 1)
3958 bbio->stripes[i].length = *length;
3961 if (stripe_index == map->num_stripes) {
3962 /* This could only happen for RAID0/10 */
3968 for (i = 0; i < num_stripes; i++) {
3969 bbio->stripes[i].physical =
3970 map->stripes[stripe_index].physical +
3972 stripe_nr * map->stripe_len;
3973 bbio->stripes[i].dev =
3974 map->stripes[stripe_index].dev;
3979 if (rw & REQ_WRITE) {
3980 if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
3981 BTRFS_BLOCK_GROUP_RAID10 |
3982 BTRFS_BLOCK_GROUP_DUP)) {
3988 bbio->num_stripes = num_stripes;
3989 bbio->max_errors = max_errors;
3990 bbio->mirror_num = mirror_num;
3992 free_extent_map(em);
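/*
 * Worked example of the mapping math above (illustrative numbers, not
 * from the original source): reading at offset = 200K into a RAID0
 * chunk with stripe_len = 64K and num_stripes = 3:
 *
 *	stripe_nr     = 200K / 64K = 3;      // do_div(stripe_nr, 64K)
 *	stripe_offset = 200K - 3 * 64K = 8K; // offset within the stripe
 *	stripe_index  = 3 % 3 = 0;           // do_div() remainder
 *	stripe_nr     = 3 / 3 = 1;           // full rotations to walk
 *
 * so the bio goes to map->stripes[0] at
 * physical + stripe_nr * stripe_len + stripe_offset = physical + 72K.
 */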
3996 int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
3997 u64 logical, u64 *length,
3998 struct btrfs_bio **bbio_ret, int mirror_num)
4000 return __btrfs_map_block(map_tree, rw, logical, length, bbio_ret,
4004 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
4005 u64 chunk_start, u64 physical, u64 devid,
4006 u64 **logical, int *naddrs, int *stripe_len)
4008 struct extent_map_tree *em_tree = &map_tree->map_tree;
4009 struct extent_map *em;
4010 struct map_lookup *map;
4017 read_lock(&em_tree->lock);
4018 em = lookup_extent_mapping(em_tree, chunk_start, 1);
4019 read_unlock(&em_tree->lock);
4021 BUG_ON(!em || em->start != chunk_start);
4022 map = (struct map_lookup *)em->bdev;
4025 if (map->type & BTRFS_BLOCK_GROUP_RAID10)
4026 do_div(length, map->num_stripes / map->sub_stripes);
4027 else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
4028 do_div(length, map->num_stripes);
4030 buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
4031 BUG_ON(!buf); /* -ENOMEM */
4033 for (i = 0; i < map->num_stripes; i++) {
4034 if (devid && map->stripes[i].dev->devid != devid)
4036 if (map->stripes[i].physical > physical ||
4037 map->stripes[i].physical + length <= physical)
4040 stripe_nr = physical - map->stripes[i].physical;
4041 do_div(stripe_nr, map->stripe_len);
4043 if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
4044 stripe_nr = stripe_nr * map->num_stripes + i;
4045 do_div(stripe_nr, map->sub_stripes);
4046 } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
4047 stripe_nr = stripe_nr * map->num_stripes + i;
4049 bytenr = chunk_start + stripe_nr * map->stripe_len;
4050 WARN_ON(nr >= map->num_stripes);
4051 for (j = 0; j < nr; j++) {
4052 if (buf[j] == bytenr)
4056 WARN_ON(nr >= map->num_stripes);
4063 *stripe_len = map->stripe_len;
4065 free_extent_map(em);
4069 static void *merge_stripe_index_into_bio_private(void *bi_private,
4070 unsigned int stripe_index)
4073 * with single, dup, RAID0, RAID1 and RAID10, stripe_index is
 * at most 3, so it fits in the two low bits of the 4-byte aligned
 * bi_private pointer.
4075 * The alternative solution (instead of stealing bits from the
4076 * pointer) would be to allocate an intermediate structure
4077 * that contains the old private pointer plus the stripe_index.
4079 BUG_ON((((uintptr_t)bi_private) & 3) != 0);
4080 BUG_ON(stripe_index > 3);
4081 return (void *)(((uintptr_t)bi_private) | stripe_index);
4084 static struct btrfs_bio *extract_bbio_from_bio_private(void *bi_private)
4086 return (struct btrfs_bio *)(((uintptr_t)bi_private) & ~((uintptr_t)3));
4089 static unsigned int extract_stripe_index_from_bio_private(void *bi_private)
4091 return (unsigned int)((uintptr_t)bi_private) & 3;
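/*
 * Illustrative round trip (not from the original source): bi_private
 * is at least 4-byte aligned, so its two low bits can carry a stripe
 * index in [0, 3]:
 *
 *	void *tagged = merge_stripe_index_into_bio_private(bbio, 2);
 *	// extract_bbio_from_bio_private(tagged)         == bbio
 *	// extract_stripe_index_from_bio_private(tagged) == 2
 */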
4094 static void btrfs_end_bio(struct bio *bio, int err)
4096 struct btrfs_bio *bbio = extract_bbio_from_bio_private(bio->bi_private);
4097 int is_orig_bio = 0;
4100 atomic_inc(&bbio->error);
4101 if (err == -EIO || err == -EREMOTEIO) {
4102 unsigned int stripe_index =
4103 extract_stripe_index_from_bio_private(
4105 struct btrfs_device *dev;
4107 BUG_ON(stripe_index >= bbio->num_stripes);
4108 dev = bbio->stripes[stripe_index].dev;
4110 if (bio->bi_rw & WRITE)
4111 btrfs_dev_stat_inc(dev,
4112 BTRFS_DEV_STAT_WRITE_ERRS);
4114 btrfs_dev_stat_inc(dev,
4115 BTRFS_DEV_STAT_READ_ERRS);
4116 if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
4117 btrfs_dev_stat_inc(dev,
4118 BTRFS_DEV_STAT_FLUSH_ERRS);
4119 btrfs_dev_stat_print_on_error(dev);
4124 if (bio == bbio->orig_bio)
4127 if (atomic_dec_and_test(&bbio->stripes_pending)) {
4130 bio = bbio->orig_bio;
4132 bio->bi_private = bbio->private;
4133 bio->bi_end_io = bbio->end_io;
4134 bio->bi_bdev = (struct block_device *)
4135 (unsigned long)bbio->mirror_num;
4136 /* only send an error to the higher layers if it is
4137 * beyond the tolerance of the multi-bio
4139 if (atomic_read(&bbio->error) > bbio->max_errors) {
4143 * this bio is actually up to date, we didn't
4144 * go over the max number of errors
4146 set_bit(BIO_UPTODATE, &bio->bi_flags);
4151 bio_endio(bio, err);
4152 } else if (!is_orig_bio) {
4157 struct async_sched {
4160 struct btrfs_fs_info *info;
4161 struct btrfs_work work;
4165 * see run_scheduled_bios for a description of why bios are collected for async submit.
4168 * This will add one bio to the pending list for a device and make sure
4169 * the work struct is scheduled.
4171 static noinline void schedule_bio(struct btrfs_root *root,
4172 struct btrfs_device *device,
4173 int rw, struct bio *bio)
4175 int should_queue = 1;
4176 struct btrfs_pending_bios *pending_bios;
4178 /* don't bother with additional async steps for reads, right now */
4179 if (!(rw & REQ_WRITE)) {
4181 btrfsic_submit_bio(rw, bio);
4187 * nr_async_bios allows us to reliably return congestion to the
4188 * higher layers. Otherwise, the async bio makes it appear we have
4189 * made progress against dirty pages when we've really just put it
4190 * on a queue for later
4192 atomic_inc(&root->fs_info->nr_async_bios);
4193 WARN_ON(bio->bi_next);
4194 bio->bi_next = NULL;
4197 spin_lock(&device->io_lock);
4198 if (bio->bi_rw & REQ_SYNC)
4199 pending_bios = &device->pending_sync_bios;
4201 pending_bios = &device->pending_bios;
4203 if (pending_bios->tail)
4204 pending_bios->tail->bi_next = bio;
4206 pending_bios->tail = bio;
4207 if (!pending_bios->head)
4208 pending_bios->head = bio;
4209 if (device->running_pending)
4212 spin_unlock(&device->io_lock);
4215 btrfs_queue_worker(&root->fs_info->submit_workers,
4219 static int bio_size_ok(struct block_device *bdev, struct bio *bio,
4222 struct bio_vec *prev;
4223 struct request_queue *q = bdev_get_queue(bdev);
4224 unsigned short max_sectors = queue_max_sectors(q);
4225 struct bvec_merge_data bvm = {
4227 .bi_sector = sector,
4228 .bi_rw = bio->bi_rw,
4231 if (bio->bi_vcnt == 0) {
4236 prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
4237 if ((bio->bi_size >> 9) > max_sectors)
4240 if (!q->merge_bvec_fn)
4243 bvm.bi_size = bio->bi_size - prev->bv_len;
4244 if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len)
4249 static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
4250 struct bio *bio, u64 physical, int dev_nr,
4253 struct btrfs_device *dev = bbio->stripes[dev_nr].dev;
4255 bio->bi_private = bbio;
4256 bio->bi_private = merge_stripe_index_into_bio_private(
4257 bio->bi_private, (unsigned int)dev_nr);
4258 bio->bi_end_io = btrfs_end_bio;
4259 bio->bi_sector = physical >> 9;
4262 struct rcu_string *name;
4265 name = rcu_dereference(dev->name);
4266 pr_debug("btrfs_map_bio: rw %d, sector=%llu, dev=%lu "
4267 "(%s id %llu), size=%u\n", rw,
4268 (u64)bio->bi_sector, (u_long)dev->bdev->bd_dev,
4269 name->str, dev->devid, bio->bi_size);
4273 bio->bi_bdev = dev->bdev;
4275 schedule_bio(root, dev, rw, bio);
4277 btrfsic_submit_bio(rw, bio);
4280 static int breakup_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
4281 struct bio *first_bio, struct btrfs_device *dev,
4282 int dev_nr, int rw, int async)
4284 struct bio_vec *bvec = first_bio->bi_io_vec;
4286 int nr_vecs = bio_get_nr_vecs(dev->bdev);
4287 u64 physical = bbio->stripes[dev_nr].physical;
4290 bio = btrfs_bio_alloc(dev->bdev, physical >> 9, nr_vecs, GFP_NOFS);
4294 while (bvec <= (first_bio->bi_io_vec + first_bio->bi_vcnt - 1)) {
4295 if (bio_add_page(bio, bvec->bv_page, bvec->bv_len,
4296 bvec->bv_offset) < bvec->bv_len) {
4297 u64 len = bio->bi_size;
4299 atomic_inc(&bbio->stripes_pending);
4300 submit_stripe_bio(root, bbio, bio, physical, dev_nr,
4308 submit_stripe_bio(root, bbio, bio, physical, dev_nr, rw, async);
4312 static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
4314 atomic_inc(&bbio->error);
4315 if (atomic_dec_and_test(&bbio->stripes_pending)) {
4316 bio->bi_private = bbio->private;
4317 bio->bi_end_io = bbio->end_io;
4318 bio->bi_bdev = (struct block_device *)
4319 (unsigned long)bbio->mirror_num;
4320 bio->bi_sector = logical >> 9;
4322 bio_endio(bio, -EIO);
4326 int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
4327 int mirror_num, int async_submit)
4329 struct btrfs_mapping_tree *map_tree;
4330 struct btrfs_device *dev;
4331 struct bio *first_bio = bio;
4332 u64 logical = (u64)bio->bi_sector << 9;
4338 struct btrfs_bio *bbio = NULL;
4340 length = bio->bi_size;
4341 map_tree = &root->fs_info->mapping_tree;
4342 map_length = length;
4344 ret = btrfs_map_block(map_tree, rw, logical, &map_length, &bbio,
4346 if (ret) /* -ENOMEM */
4349 total_devs = bbio->num_stripes;
4350 if (map_length < length) {
4351 printk(KERN_CRIT "btrfs: mapping failed logical %llu bio len %llu "
4352 "len %llu\n", (unsigned long long)logical,
4353 (unsigned long long)length,
4354 (unsigned long long)map_length);
4358 bbio->orig_bio = first_bio;
4359 bbio->private = first_bio->bi_private;
4360 bbio->end_io = first_bio->bi_end_io;
4361 atomic_set(&bbio->stripes_pending, bbio->num_stripes);
4363 while (dev_nr < total_devs) {
4364 dev = bbio->stripes[dev_nr].dev;
4365 if (!dev || !dev->bdev || (rw & WRITE && !dev->writeable)) {
4366 bbio_error(bbio, first_bio, logical);
4372 * Check and see if we're ok with this bio based on its size
4373 * and offset with the given device.
4375 if (!bio_size_ok(dev->bdev, first_bio,
4376 bbio->stripes[dev_nr].physical >> 9)) {
4377 ret = breakup_stripe_bio(root, bbio, first_bio, dev,
4378 dev_nr, rw, async_submit);
4384 if (dev_nr < total_devs - 1) {
4385 bio = bio_clone(first_bio, GFP_NOFS);
4386 BUG_ON(!bio); /* -ENOMEM */
4391 submit_stripe_bio(root, bbio, bio,
4392 bbio->stripes[dev_nr].physical, dev_nr, rw,
4399 struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
4402 struct btrfs_device *device;
4403 struct btrfs_fs_devices *cur_devices;
4405 cur_devices = root->fs_info->fs_devices;
4406 while (cur_devices) {
4408 !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
4409 device = __find_device(&cur_devices->devices,
4414 cur_devices = cur_devices->seed;
4419 static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
4420 u64 devid, u8 *dev_uuid)
4422 struct btrfs_device *device;
4423 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
4425 device = kzalloc(sizeof(*device), GFP_NOFS);
4428 list_add(&device->dev_list,
4429 &fs_devices->devices);
4430 device->dev_root = root->fs_info->dev_root;
4431 device->devid = devid;
4432 device->work.func = pending_bios_fn;
4433 device->fs_devices = fs_devices;
4434 device->missing = 1;
4435 fs_devices->num_devices++;
4436 fs_devices->missing_devices++;
4437 spin_lock_init(&device->io_lock);
4438 INIT_LIST_HEAD(&device->dev_alloc_list);
4439 memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
4443 static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
4444 struct extent_buffer *leaf,
4445 struct btrfs_chunk *chunk)
4447 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
4448 struct map_lookup *map;
4449 struct extent_map *em;
4453 u8 uuid[BTRFS_UUID_SIZE];
4458 logical = key->offset;
4459 length = btrfs_chunk_length(leaf, chunk);
4461 read_lock(&map_tree->map_tree.lock);
4462 em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
4463 read_unlock(&map_tree->map_tree.lock);
4465 /* already mapped? */
4466 if (em && em->start <= logical && em->start + em->len > logical) {
4467 free_extent_map(em);
4470 free_extent_map(em);
4473 em = alloc_extent_map();
4476 num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
4477 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
4479 free_extent_map(em);
4483 em->bdev = (struct block_device *)map;
4484 em->start = logical;
4486 em->block_start = 0;
4487 em->block_len = em->len;
4489 map->num_stripes = num_stripes;
4490 map->io_width = btrfs_chunk_io_width(leaf, chunk);
4491 map->io_align = btrfs_chunk_io_align(leaf, chunk);
4492 map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
4493 map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
4494 map->type = btrfs_chunk_type(leaf, chunk);
4495 map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
4496 for (i = 0; i < num_stripes; i++) {
4497 map->stripes[i].physical =
4498 btrfs_stripe_offset_nr(leaf, chunk, i);
4499 devid = btrfs_stripe_devid_nr(leaf, chunk, i);
4500 read_extent_buffer(leaf, uuid, (unsigned long)
4501 btrfs_stripe_dev_uuid_nr(chunk, i),
4503 map->stripes[i].dev = btrfs_find_device(root, devid, uuid,
4505 if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
4507 free_extent_map(em);
4510 if (!map->stripes[i].dev) {
4511 map->stripes[i].dev =
4512 add_missing_dev(root, devid, uuid);
4513 if (!map->stripes[i].dev) {
4515 free_extent_map(em);
4519 map->stripes[i].dev->in_fs_metadata = 1;
4522 write_lock(&map_tree->map_tree.lock);
4523 ret = add_extent_mapping(&map_tree->map_tree, em);
4524 write_unlock(&map_tree->map_tree.lock);
4525 BUG_ON(ret); /* Tree corruption */
4526 free_extent_map(em);
4531 static void fill_device_from_item(struct extent_buffer *leaf,
4532 struct btrfs_dev_item *dev_item,
4533 struct btrfs_device *device)
4537 device->devid = btrfs_device_id(leaf, dev_item);
4538 device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
4539 device->total_bytes = device->disk_total_bytes;
4540 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
4541 device->type = btrfs_device_type(leaf, dev_item);
4542 device->io_align = btrfs_device_io_align(leaf, dev_item);
4543 device->io_width = btrfs_device_io_width(leaf, dev_item);
4544 device->sector_size = btrfs_device_sector_size(leaf, dev_item);
4546 ptr = (unsigned long)btrfs_device_uuid(dev_item);
4547 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
4550 static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
4552 struct btrfs_fs_devices *fs_devices;
4555 BUG_ON(!mutex_is_locked(&uuid_mutex));
4557 fs_devices = root->fs_info->fs_devices->seed;
4558 while (fs_devices) {
4559 if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
4563 fs_devices = fs_devices->seed;
4566 fs_devices = find_fsid(fsid);
4572 fs_devices = clone_fs_devices(fs_devices);
4573 if (IS_ERR(fs_devices)) {
4574 ret = PTR_ERR(fs_devices);
4578 ret = __btrfs_open_devices(fs_devices, FMODE_READ,
4579 root->fs_info->bdev_holder);
4581 free_fs_devices(fs_devices);
4585 if (!fs_devices->seeding) {
4586 __btrfs_close_devices(fs_devices);
4587 free_fs_devices(fs_devices);
4592 fs_devices->seed = root->fs_info->fs_devices->seed;
4593 root->fs_info->fs_devices->seed = fs_devices;
4598 static int read_one_dev(struct btrfs_root *root,
4599 struct extent_buffer *leaf,
4600 struct btrfs_dev_item *dev_item)
4602 struct btrfs_device *device;
4605 u8 fs_uuid[BTRFS_UUID_SIZE];
4606 u8 dev_uuid[BTRFS_UUID_SIZE];
4608 devid = btrfs_device_id(leaf, dev_item);
4609 read_extent_buffer(leaf, dev_uuid,
4610 (unsigned long)btrfs_device_uuid(dev_item),
4612 read_extent_buffer(leaf, fs_uuid,
4613 (unsigned long)btrfs_device_fsid(dev_item),
4616 if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
4617 ret = open_seed_devices(root, fs_uuid);
4618 if (ret && !btrfs_test_opt(root, DEGRADED))
4622 device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
4623 if (!device || !device->bdev) {
4624 if (!btrfs_test_opt(root, DEGRADED))
4628 printk(KERN_WARNING "warning devid %llu missing\n",
4629 (unsigned long long)devid);
4630 device = add_missing_dev(root, devid, dev_uuid);
4633 } else if (!device->missing) {
4635 * this happens when a device that was properly set up
4636 * in the device info lists suddenly goes bad.
4637 * device->bdev is NULL, and so we have to set
4638 * device->missing to one here
4640 root->fs_info->fs_devices->missing_devices++;
4641 device->missing = 1;
4645 if (device->fs_devices != root->fs_info->fs_devices) {
4646 BUG_ON(device->writeable);
4647 if (device->generation !=
4648 btrfs_device_generation(leaf, dev_item))
4652 fill_device_from_item(leaf, dev_item, device);
4653 device->dev_root = root->fs_info->dev_root;
4654 device->in_fs_metadata = 1;
4655 if (device->writeable) {
4656 device->fs_devices->total_rw_bytes += device->total_bytes;
4657 spin_lock(&root->fs_info->free_chunk_lock);
4658 root->fs_info->free_chunk_space += device->total_bytes -
4660 spin_unlock(&root->fs_info->free_chunk_lock);
4666 int btrfs_read_sys_array(struct btrfs_root *root)
4668 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
4669 struct extent_buffer *sb;
4670 struct btrfs_disk_key *disk_key;
4671 struct btrfs_chunk *chunk;
4673 unsigned long sb_ptr;
4679 struct btrfs_key key;
4681 sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
4682 BTRFS_SUPER_INFO_SIZE);
4685 btrfs_set_buffer_uptodate(sb);
4686 btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
4688 * The sb extent buffer is artificial and just used to read the system array.
4689 * btrfs_set_buffer_uptodate() call does not properly mark all its
4690 * pages up-to-date when the page is larger: extent does not cover the
4691 * whole page and consequently check_page_uptodate does not find all
4692 * the page's extents up-to-date (the hole beyond sb),
4693 * write_extent_buffer then triggers a WARN_ON.
4695 * Regular short extents go through mark_extent_buffer_dirty/writeback cycle,
4696 * but sb spans only this function. Add an explicit SetPageUptodate call
4697 * to silence the warning eg. on PowerPC 64.
4699 if (PAGE_CACHE_SIZE > BTRFS_SUPER_INFO_SIZE)
4700 SetPageUptodate(sb->pages[0]);
4702 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
4703 array_size = btrfs_super_sys_array_size(super_copy);
4705 ptr = super_copy->sys_chunk_array;
4706 sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
4709 while (cur < array_size) {
4710 disk_key = (struct btrfs_disk_key *)ptr;
4711 btrfs_disk_key_to_cpu(&key, disk_key);
4713 len = sizeof(*disk_key); ptr += len;
4717 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
4718 chunk = (struct btrfs_chunk *)sb_ptr;
4719 ret = read_one_chunk(root, &key, sb, chunk);
4722 num_stripes = btrfs_chunk_num_stripes(sb, chunk);
4723 len = btrfs_chunk_item_size(num_stripes);
4732 free_extent_buffer(sb);
4736 int btrfs_read_chunk_tree(struct btrfs_root *root)
4738 struct btrfs_path *path;
4739 struct extent_buffer *leaf;
4740 struct btrfs_key key;
4741 struct btrfs_key found_key;
4745 root = root->fs_info->chunk_root;
4747 path = btrfs_alloc_path();
4751 mutex_lock(&uuid_mutex);
4754 /* first we search for all of the device items, and then we
4755 * read in all of the chunk items. This way we can create chunk
4756 * mappings that reference all of the devices that are found
4758 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
4762 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4766 leaf = path->nodes[0];
4767 slot = path->slots[0];
4768 if (slot >= btrfs_header_nritems(leaf)) {
4769 ret = btrfs_next_leaf(root, path);
4776 btrfs_item_key_to_cpu(leaf, &found_key, slot);
4777 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
4778 if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
4780 if (found_key.type == BTRFS_DEV_ITEM_KEY) {
4781 struct btrfs_dev_item *dev_item;
4782 dev_item = btrfs_item_ptr(leaf, slot,
4783 struct btrfs_dev_item);
4784 ret = read_one_dev(root, leaf, dev_item);
4788 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
4789 struct btrfs_chunk *chunk;
4790 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
4791 ret = read_one_chunk(root, &found_key, leaf, chunk);
4797 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
4799 btrfs_release_path(path);
4804 unlock_chunks(root);
4805 mutex_unlock(&uuid_mutex);
4807 btrfs_free_path(path);
4811 static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
4815 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
4816 btrfs_dev_stat_reset(dev, i);
4819 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
4821 struct btrfs_key key;
4822 struct btrfs_key found_key;
4823 struct btrfs_root *dev_root = fs_info->dev_root;
4824 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
4825 struct extent_buffer *eb;
4828 struct btrfs_device *device;
4829 struct btrfs_path *path = NULL;
4832 path = btrfs_alloc_path();
4838 mutex_lock(&fs_devices->device_list_mutex);
4839 list_for_each_entry(device, &fs_devices->devices, dev_list) {
4841 struct btrfs_dev_stats_item *ptr;
4844 key.type = BTRFS_DEV_STATS_KEY;
4845 key.offset = device->devid;
4846 ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
4848 __btrfs_reset_dev_stats(device);
4849 device->dev_stats_valid = 1;
4850 btrfs_release_path(path);
4853 slot = path->slots[0];
4854 eb = path->nodes[0];
4855 btrfs_item_key_to_cpu(eb, &found_key, slot);
4856 item_size = btrfs_item_size_nr(eb, slot);
4858 ptr = btrfs_item_ptr(eb, slot,
4859 struct btrfs_dev_stats_item);
4861 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
4862 if (item_size >= (1 + i) * sizeof(__le64))
4863 btrfs_dev_stat_set(device, i,
4864 btrfs_dev_stats_value(eb, ptr, i));
4866 btrfs_dev_stat_reset(device, i);
4869 device->dev_stats_valid = 1;
4870 btrfs_dev_stat_print_on_load(device);
4871 btrfs_release_path(path);
4873 mutex_unlock(&fs_devices->device_list_mutex);
4876 btrfs_free_path(path);
4877 return ret < 0 ? ret : 0;
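/*
 * On-disk format note (illustrative, not from the original source):
 * a dev_stats item is simply an array of __le64 counters, one per
 * BTRFS_DEV_STAT_* index.  The item_size check above lets older,
 * shorter items load cleanly; present counters are read and missing
 * ones reset, roughly:
 *
 *	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
 *		stat[i] = (item_size >= (i + 1) * sizeof(__le64)) ?
 *			  btrfs_dev_stats_value(eb, ptr, i) : 0;
 */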
static int update_dev_stat_item(struct btrfs_trans_handle *trans,
				struct btrfs_root *dev_root,
				struct btrfs_device *device)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *eb;
	struct btrfs_dev_stats_item *ptr;
	int ret;
	int i;

	key.objectid = 0;
	key.type = BTRFS_DEV_STATS_KEY;
	key.offset = device->devid;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
	if (ret < 0) {
		printk_in_rcu(KERN_WARNING "btrfs: error %d while searching for dev_stats item for device %s!\n",
			      ret, rcu_str_deref(device->name));
		goto out;
	}

	if (ret == 0 &&
	    btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
		/* need to delete old one and insert a new one */
		ret = btrfs_del_item(trans, dev_root, path);
		if (ret != 0) {
			printk_in_rcu(KERN_WARNING "btrfs: delete too small dev_stats item for device %s failed %d!\n",
				      rcu_str_deref(device->name), ret);
			goto out;
		}
		ret = 1;
	}

	if (ret == 1) {
		/* need to insert a new item */
		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, dev_root, path,
					      &key, sizeof(*ptr));
		if (ret < 0) {
			printk_in_rcu(KERN_WARNING "btrfs: insert dev_stats item for device %s failed %d!\n",
				      rcu_str_deref(device->name), ret);
			goto out;
		}
	}

	eb = path->nodes[0];
	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		btrfs_set_dev_stats_value(eb, ptr, i,
					  btrfs_dev_stat_read(device, i));
	btrfs_mark_buffer_dirty(eb);

out:
	btrfs_free_path(path);
	return ret;
}
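/*
 * Sketch of the btrfs_search_slot() outcomes handled above (editor's
 * summary; cow = 1, and ins_len = -1 prepares the path for a deletion):
 *
 *	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
 *	ret < 0   -> error: log and bail out
 *	ret == 0  -> an item with this key exists; if it is smaller than
 *	             sizeof(struct btrfs_dev_stats_item) it was written by
 *	             an older kernel with fewer counters, so delete it and
 *	             continue with ret = 1 to reinsert at the current size
 *	ret == 1  -> no such item: insert an empty one of sizeof(*ptr)
 *
 * Either way the function ends by copying every in-memory counter into
 * the (possibly fresh) item and marking the leaf dirty.
 */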
/*
 * called from commit_transaction. Writes all changed device stats to disk.
 */
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	int ret = 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		if (!device->dev_stats_valid || !device->dev_stats_dirty)
			continue;
		ret = update_dev_stat_item(trans, dev_root, device);
		if (!ret)
			device->dev_stats_dirty = 0;
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}
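/*
 * Flow sketch (editor's illustration of how the pieces above and below
 * fit together; the error-path caller is an example, not a fixed list):
 *
 *	// somewhere in an I/O completion or verification error path:
 *	btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
 *		// bumps the in-memory counter and marks dev_stats_dirty
 *
 *	// later, during the next transaction commit:
 *	btrfs_run_dev_stats(trans, fs_info);
 *		// update_dev_stat_item() persists the counters and
 *		// dev_stats_dirty is cleared on success
 *
 * Counters are therefore only ever lost between an error and the next
 * successful commit; devices whose stats item could not be read at mount
 * (dev_stats_valid == 0) are skipped entirely.
 */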
void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
{
	btrfs_dev_stat_inc(dev, index);
	btrfs_dev_stat_print_on_error(dev);
}
void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
{
	if (!dev->dev_stats_valid)
		return;
	printk_ratelimited_in_rcu(KERN_ERR
		"btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
		rcu_str_deref(dev->name),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}
static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
{
	int i;

	/* scan for any non-zero counter; stay silent if there is none */
	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		if (btrfs_dev_stat_read(dev, i) != 0)
			break;
	if (i == BTRFS_DEV_STAT_VALUES_MAX)
		return; /* all values == 0, suppress message */

	printk_in_rcu(KERN_INFO "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
		rcu_str_deref(dev->name),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
		btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}
int btrfs_get_dev_stats(struct btrfs_root *root,
			struct btrfs_ioctl_get_dev_stats *stats)
{
	struct btrfs_device *dev;
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	int i;

	mutex_lock(&fs_devices->device_list_mutex);
	dev = btrfs_find_device(root, stats->devid, NULL, NULL);
	mutex_unlock(&fs_devices->device_list_mutex);

	if (!dev) {
		printk(KERN_WARNING
		       "btrfs: get dev_stats failed, device not found\n");
		return -ENODEV;
	} else if (!dev->dev_stats_valid) {
		printk(KERN_WARNING
		       "btrfs: get dev_stats failed, not yet valid\n");
		return -ENODEV;
	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (stats->nr_items > i)
				stats->values[i] =
					btrfs_dev_stat_read_and_reset(dev, i);
			else
				btrfs_dev_stat_reset(dev, i);
		}
	} else {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
			if (stats->nr_items > i)
				stats->values[i] = btrfs_dev_stat_read(dev, i);
	}
	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
	return 0;
}
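/*
 * Userspace usage sketch (editor's example, not part of this file): the
 * stats filled in above are reachable through the BTRFS_IOC_GET_DEV_STATS
 * ioctl.  This assumes a kernel/header combination that exports
 * struct btrfs_ioctl_get_dev_stats via <linux/btrfs.h>; the mount point
 * and devid below are hypothetical.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/btrfs.h>
 *
 *	int main(void)
 *	{
 *		struct btrfs_ioctl_get_dev_stats args;
 *		int fd = open("/mnt/btrfs", O_RDONLY); // any path on the fs
 *		int i;
 *
 *		if (fd < 0)
 *			return 1;
 *		memset(&args, 0, sizeof(args));
 *		args.devid = 1;                        // first device
 *		args.nr_items = BTRFS_DEV_STAT_VALUES_MAX;
 *		if (ioctl(fd, BTRFS_IOC_GET_DEV_STATS, &args) < 0)
 *			return 1;
 *		for (i = 0; i < (int)args.nr_items; i++)
 *			printf("stat %d: %llu\n", i,
 *			       (unsigned long long)args.values[i]);
 *		return 0;
 *	}
 */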