2 * Copyright (C) 2007 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02110-1307, USA.
18 #include <linux/sched.h>
19 #include <linux/bio.h>
20 #include <linux/slab.h>
21 #include <linux/buffer_head.h>
22 #include <linux/blkdev.h>
23 #include <linux/random.h>
24 #include <linux/iocontext.h>
25 #include <linux/capability.h>
26 #include <linux/ratelimit.h>
27 #include <linux/kthread.h>
30 #include "extent_map.h"
32 #include "transaction.h"
33 #include "print-tree.h"
35 #include "async-thread.h"
36 #include "check-integrity.h"
37 #include "rcu-string.h"
40 static int init_first_rw_device(struct btrfs_trans_handle *trans,
41 struct btrfs_root *root,
42 struct btrfs_device *device);
43 static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
44 static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
45 static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
47 static DEFINE_MUTEX(uuid_mutex);
48 static LIST_HEAD(fs_uuids);
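/*
 * fs_uuids is the global list of btrfs_fs_devices structures, one per
 * filesystem UUID that device scanning has seen; uuid_mutex serializes
 * all additions to and walks of this list.
 */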
50 static void lock_chunks(struct btrfs_root *root)
52 mutex_lock(&root->fs_info->chunk_mutex);
55 static void unlock_chunks(struct btrfs_root *root)
57 mutex_unlock(&root->fs_info->chunk_mutex);
60 static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
62 struct btrfs_device *device;
63 WARN_ON(fs_devices->opened);
64 while (!list_empty(&fs_devices->devices)) {
65 device = list_entry(fs_devices->devices.next,
66 struct btrfs_device, dev_list);
67 list_del(&device->dev_list);
68 rcu_string_free(device->name);
74 void btrfs_cleanup_fs_uuids(void)
76 struct btrfs_fs_devices *fs_devices;
78 while (!list_empty(&fs_uuids)) {
79 fs_devices = list_entry(fs_uuids.next,
80 struct btrfs_fs_devices, list);
81 list_del(&fs_devices->list);
82 free_fs_devices(fs_devices);
86 static noinline struct btrfs_device *__find_device(struct list_head *head,
89 struct btrfs_device *dev;
91 list_for_each_entry(dev, head, dev_list) {
92 if (dev->devid == devid &&
93 (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
100 static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
102 struct btrfs_fs_devices *fs_devices;
104 list_for_each_entry(fs_devices, &fs_uuids, list) {
105 if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
111 static void requeue_list(struct btrfs_pending_bios *pending_bios,
112 struct bio *head, struct bio *tail)
115 struct bio *old_head;
117 old_head = pending_bios->head;
118 pending_bios->head = head;
119 if (pending_bios->tail)
120 tail->bi_next = old_head;
122 pending_bios->tail = tail;
126 * we try to collect pending bios for a device so we don't get a large
127 * number of procs sending bios down to the same device. This greatly
128 * improves the scheduler's ability to collect and merge the bios.
130 * But, it also turns into a long list of bios to process and that is sure
131 * to eventually make the worker thread block. The solution here is to
132 * make some progress and then put this work struct back at the end of
133 * the list if the block device is congested. This way, multiple devices
134 * can make progress from a single worker thread.
136 static noinline void run_scheduled_bios(struct btrfs_device *device)
139 struct backing_dev_info *bdi;
140 struct btrfs_fs_info *fs_info;
141 struct btrfs_pending_bios *pending_bios;
145 unsigned long num_run;
146 unsigned long batch_run = 0;
148 unsigned long last_waited = 0;
150 int sync_pending = 0;
151 struct blk_plug plug;
154 * this function runs all the bios we've collected for
155 * a particular device. We don't want to wander off to
156 * another device without first sending all of these down.
157 * So, set up a plug here and finish it off before we return
159 blk_start_plug(&plug);
161 bdi = blk_get_backing_dev_info(device->bdev);
162 fs_info = device->dev_root->fs_info;
163 limit = btrfs_async_submit_limit(fs_info);
164 limit = limit * 2 / 3;
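/*
 * Note that the wakeup check further down runs against this reduced
 * limit: throttled submitters are woken once nr_async_bios falls under
 * two thirds of the async submit limit, presumably so producers can
 * restart a little before the queue fully drains.
 */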
167 spin_lock(&device->io_lock);
172 /* take all the bios off the list at once and process them
173 * later on (without the lock held). But, remember the
174 * tail and other pointers so the bios can be properly reinserted
175 * into the list if we hit congestion
177 if (!force_reg && device->pending_sync_bios.head) {
178 pending_bios = &device->pending_sync_bios;
181 pending_bios = &device->pending_bios;
185 pending = pending_bios->head;
186 tail = pending_bios->tail;
187 WARN_ON(pending && !tail);
190 * if pending was null this time around, no bios need processing
191 * at all and we can stop. Otherwise it'll loop back up again
192 * and do an additional check so no bios are missed.
194 * device->running_pending is used to synchronize with the
195 * schedule_bio code.
197 if (device->pending_sync_bios.head == NULL &&
198 device->pending_bios.head == NULL) {
200 device->running_pending = 0;
203 device->running_pending = 1;
206 pending_bios->head = NULL;
207 pending_bios->tail = NULL;
209 spin_unlock(&device->io_lock);
214 /* we want to work on both lists, but do more bios on the
215 * sync list than the regular list
218 pending_bios != &device->pending_sync_bios &&
219 device->pending_sync_bios.head) ||
220 (num_run > 64 && pending_bios == &device->pending_sync_bios &&
221 device->pending_bios.head)) {
222 spin_lock(&device->io_lock);
223 requeue_list(pending_bios, pending, tail);
228 pending = pending->bi_next;
231 if (atomic_dec_return(&fs_info->nr_async_bios) < limit &&
232 waitqueue_active(&fs_info->async_submit_wait))
233 wake_up(&fs_info->async_submit_wait);
235 BUG_ON(atomic_read(&cur->bi_cnt) == 0);
238 * if we're doing the sync list, record that our
239 * plug has some sync requests on it
241 * If we're doing the regular list and there are
242 * sync requests sitting around, unplug before
243 * we add more
245 if (pending_bios == &device->pending_sync_bios) {
247 } else if (sync_pending) {
248 blk_finish_plug(&plug);
249 blk_start_plug(&plug);
253 btrfsic_submit_bio(cur->bi_rw, cur);
260 * we made progress, there is more work to do and the bdi
261 * is now congested. Back off and let other work structs
262 * run for a while
264 if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
265 fs_info->fs_devices->open_devices > 1) {
266 struct io_context *ioc;
268 ioc = current->io_context;
271 * the main goal here is that we don't want to
272 * block if we're going to be able to submit
273 * more requests without blocking.
275 * This code does two great things: it pokes into
276 * the elevator code from a filesystem _and_
277 * it makes assumptions about how batching works.
279 if (ioc && ioc->nr_batch_requests > 0 &&
280 time_before(jiffies, ioc->last_waited + HZ/50UL) &&
282 ioc->last_waited == last_waited)) {
284 * we want to go through our batch of
285 * requests and stop. So, we copy out
286 * the ioc->last_waited time and test
287 * against it before looping
289 last_waited = ioc->last_waited;
294 spin_lock(&device->io_lock);
295 requeue_list(pending_bios, pending, tail);
296 device->running_pending = 1;
298 spin_unlock(&device->io_lock);
299 btrfs_requeue_work(&device->work);
302 /* unplug every 64 requests just for good measure */
303 if (batch_run % 64 == 0) {
304 blk_finish_plug(&plug);
305 blk_start_plug(&plug);
314 spin_lock(&device->io_lock);
315 if (device->pending_bios.head || device->pending_sync_bios.head)
317 spin_unlock(&device->io_lock);
320 blk_finish_plug(&plug);
323 static void pending_bios_fn(struct btrfs_work *work)
325 struct btrfs_device *device;
327 device = container_of(work, struct btrfs_device, work);
328 run_scheduled_bios(device);
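/*
 * For reference, a minimal sketch of the producer side that feeds
 * run_scheduled_bios() above.  The real submission path in this file
 * (btrfs_schedule_bio) also splits sync from regular bios and does the
 * async bio accounting; this sketch only shows the core pattern:
 * append under io_lock, then kick the worker unless it is already
 * running.  The function name is illustrative, not part of the driver.
 */
static inline void example_queue_bio(struct btrfs_device *device,
				     struct bio *bio)
{
	struct btrfs_pending_bios *pending_bios = &device->pending_bios;
	int should_queue = 1;

	spin_lock(&device->io_lock);
	bio->bi_next = NULL;
	if (pending_bios->tail)
		pending_bios->tail->bi_next = bio;
	pending_bios->tail = bio;
	if (!pending_bios->head)
		pending_bios->head = bio;
	if (device->running_pending)
		should_queue = 0;
	spin_unlock(&device->io_lock);

	if (should_queue)
		btrfs_queue_worker(&device->dev_root->fs_info->submit_workers,
				   &device->work);
}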
331 static noinline int device_list_add(const char *path,
332 struct btrfs_super_block *disk_super,
333 u64 devid, struct btrfs_fs_devices **fs_devices_ret)
335 struct btrfs_device *device;
336 struct btrfs_fs_devices *fs_devices;
337 struct rcu_string *name;
338 u64 found_transid = btrfs_super_generation(disk_super);
340 fs_devices = find_fsid(disk_super->fsid);
342 fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
345 INIT_LIST_HEAD(&fs_devices->devices);
346 INIT_LIST_HEAD(&fs_devices->alloc_list);
347 list_add(&fs_devices->list, &fs_uuids);
348 memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
349 fs_devices->latest_devid = devid;
350 fs_devices->latest_trans = found_transid;
351 mutex_init(&fs_devices->device_list_mutex);
354 device = __find_device(&fs_devices->devices, devid,
355 disk_super->dev_item.uuid);
358 if (fs_devices->opened)
361 device = kzalloc(sizeof(*device), GFP_NOFS);
363 /* we can safely leave the fs_devices entry around */
366 device->devid = devid;
367 device->dev_stats_valid = 0;
368 device->work.func = pending_bios_fn;
369 memcpy(device->uuid, disk_super->dev_item.uuid,
371 spin_lock_init(&device->io_lock);
373 name = rcu_string_strdup(path, GFP_NOFS);
378 rcu_assign_pointer(device->name, name);
379 INIT_LIST_HEAD(&device->dev_alloc_list);
381 /* init readahead state */
382 spin_lock_init(&device->reada_lock);
383 device->reada_curr_zone = NULL;
384 atomic_set(&device->reada_in_flight, 0);
385 device->reada_next = 0;
386 INIT_RADIX_TREE(&device->reada_zones, GFP_NOFS & ~__GFP_WAIT);
387 INIT_RADIX_TREE(&device->reada_extents, GFP_NOFS & ~__GFP_WAIT);
389 mutex_lock(&fs_devices->device_list_mutex);
390 list_add_rcu(&device->dev_list, &fs_devices->devices);
391 mutex_unlock(&fs_devices->device_list_mutex);
393 device->fs_devices = fs_devices;
394 fs_devices->num_devices++;
395 } else if (!device->name || strcmp(device->name->str, path)) {
396 name = rcu_string_strdup(path, GFP_NOFS);
399 rcu_string_free(device->name);
400 rcu_assign_pointer(device->name, name);
401 if (device->missing) {
402 fs_devices->missing_devices--;
407 if (found_transid > fs_devices->latest_trans) {
408 fs_devices->latest_devid = devid;
409 fs_devices->latest_trans = found_transid;
411 *fs_devices_ret = fs_devices;
415 static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
417 struct btrfs_fs_devices *fs_devices;
418 struct btrfs_device *device;
419 struct btrfs_device *orig_dev;
421 fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
423 return ERR_PTR(-ENOMEM);
425 INIT_LIST_HEAD(&fs_devices->devices);
426 INIT_LIST_HEAD(&fs_devices->alloc_list);
427 INIT_LIST_HEAD(&fs_devices->list);
428 mutex_init(&fs_devices->device_list_mutex);
429 fs_devices->latest_devid = orig->latest_devid;
430 fs_devices->latest_trans = orig->latest_trans;
431 fs_devices->total_devices = orig->total_devices;
432 memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));
434 /* We hold the volume lock, so it is safe to get the devices. */
435 list_for_each_entry(orig_dev, &orig->devices, dev_list) {
436 struct rcu_string *name;
438 device = kzalloc(sizeof(*device), GFP_NOFS);
443 * This is ok to do without the rcu read lock held because we hold the
444 * uuid mutex so nothing we touch in here is going to disappear.
446 name = rcu_string_strdup(orig_dev->name->str, GFP_NOFS);
451 rcu_assign_pointer(device->name, name);
453 device->devid = orig_dev->devid;
454 device->work.func = pending_bios_fn;
455 memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
456 spin_lock_init(&device->io_lock);
457 INIT_LIST_HEAD(&device->dev_list);
458 INIT_LIST_HEAD(&device->dev_alloc_list);
460 list_add(&device->dev_list, &fs_devices->devices);
461 device->fs_devices = fs_devices;
462 fs_devices->num_devices++;
466 free_fs_devices(fs_devices);
467 return ERR_PTR(-ENOMEM);
470 void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
472 struct btrfs_device *device, *next;
474 struct block_device *latest_bdev = NULL;
475 u64 latest_devid = 0;
476 u64 latest_transid = 0;
478 mutex_lock(&uuid_mutex);
480 /* This is the initialization path, so it is safe to release the devices. */
481 list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
482 if (device->in_fs_metadata) {
483 if (!latest_transid ||
484 device->generation > latest_transid) {
485 latest_devid = device->devid;
486 latest_transid = device->generation;
487 latest_bdev = device->bdev;
493 blkdev_put(device->bdev, device->mode);
495 fs_devices->open_devices--;
497 if (device->writeable) {
498 list_del_init(&device->dev_alloc_list);
499 device->writeable = 0;
500 fs_devices->rw_devices--;
502 list_del_init(&device->dev_list);
503 fs_devices->num_devices--;
504 rcu_string_free(device->name);
508 if (fs_devices->seed) {
509 fs_devices = fs_devices->seed;
513 fs_devices->latest_bdev = latest_bdev;
514 fs_devices->latest_devid = latest_devid;
515 fs_devices->latest_trans = latest_transid;
517 mutex_unlock(&uuid_mutex);
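/*
 * Freeing a device is a two stage affair: call_rcu() defers to
 * free_device() below once no RCU readers can still see the device,
 * and free_device() in turn punts the final cleanup to a workqueue,
 * because blkdev_put() may sleep and so must not run from the RCU
 * (softirq) callback context.
 */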
520 static void __free_device(struct work_struct *work)
522 struct btrfs_device *device;
524 device = container_of(work, struct btrfs_device, rcu_work);
527 blkdev_put(device->bdev, device->mode);
529 rcu_string_free(device->name);
533 static void free_device(struct rcu_head *head)
535 struct btrfs_device *device;
537 device = container_of(head, struct btrfs_device, rcu);
539 INIT_WORK(&device->rcu_work, __free_device);
540 schedule_work(&device->rcu_work);
543 static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
545 struct btrfs_device *device;
547 if (--fs_devices->opened > 0)
550 mutex_lock(&fs_devices->device_list_mutex);
551 list_for_each_entry(device, &fs_devices->devices, dev_list) {
552 struct btrfs_device *new_device;
553 struct rcu_string *name;
556 fs_devices->open_devices--;
558 if (device->writeable) {
559 list_del_init(&device->dev_alloc_list);
560 fs_devices->rw_devices--;
563 if (device->can_discard)
564 fs_devices->num_can_discard--;
566 new_device = kmalloc(sizeof(*new_device), GFP_NOFS);
567 BUG_ON(!new_device); /* -ENOMEM */
568 memcpy(new_device, device, sizeof(*new_device));
570 /* Safe because we are under uuid_mutex */
572 name = rcu_string_strdup(device->name->str, GFP_NOFS);
573 BUG_ON(device->name && !name); /* -ENOMEM */
574 rcu_assign_pointer(new_device->name, name);
576 new_device->bdev = NULL;
577 new_device->writeable = 0;
578 new_device->in_fs_metadata = 0;
579 new_device->can_discard = 0;
580 list_replace_rcu(&device->dev_list, &new_device->dev_list);
582 call_rcu(&device->rcu, free_device);
584 mutex_unlock(&fs_devices->device_list_mutex);
586 WARN_ON(fs_devices->open_devices);
587 WARN_ON(fs_devices->rw_devices);
588 fs_devices->opened = 0;
589 fs_devices->seeding = 0;
594 int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
596 struct btrfs_fs_devices *seed_devices = NULL;
599 mutex_lock(&uuid_mutex);
600 ret = __btrfs_close_devices(fs_devices);
601 if (!fs_devices->opened) {
602 seed_devices = fs_devices->seed;
603 fs_devices->seed = NULL;
605 mutex_unlock(&uuid_mutex);
607 while (seed_devices) {
608 fs_devices = seed_devices;
609 seed_devices = fs_devices->seed;
610 __btrfs_close_devices(fs_devices);
611 free_fs_devices(fs_devices);
616 static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
617 fmode_t flags, void *holder)
619 struct request_queue *q;
620 struct block_device *bdev;
621 struct list_head *head = &fs_devices->devices;
622 struct btrfs_device *device;
623 struct block_device *latest_bdev = NULL;
624 struct buffer_head *bh;
625 struct btrfs_super_block *disk_super;
626 u64 latest_devid = 0;
627 u64 latest_transid = 0;
634 list_for_each_entry(device, head, dev_list) {
640 bdev = blkdev_get_by_path(device->name->str, flags, holder);
642 printk(KERN_INFO "btrfs: open %s failed\n", device->name->str);
645 filemap_write_and_wait(bdev->bd_inode->i_mapping);
646 invalidate_bdev(bdev);
647 set_blocksize(bdev, 4096);
649 bh = btrfs_read_dev_super(bdev);
653 disk_super = (struct btrfs_super_block *)bh->b_data;
654 devid = btrfs_stack_device_id(&disk_super->dev_item);
655 if (devid != device->devid)
658 if (memcmp(device->uuid, disk_super->dev_item.uuid,
662 device->generation = btrfs_super_generation(disk_super);
663 if (!latest_transid || device->generation > latest_transid) {
664 latest_devid = devid;
665 latest_transid = device->generation;
669 if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
670 device->writeable = 0;
672 device->writeable = !bdev_read_only(bdev);
676 q = bdev_get_queue(bdev);
677 if (blk_queue_discard(q)) {
678 device->can_discard = 1;
679 fs_devices->num_can_discard++;
683 device->in_fs_metadata = 0;
684 device->mode = flags;
686 if (!blk_queue_nonrot(bdev_get_queue(bdev)))
687 fs_devices->rotating = 1;
689 fs_devices->open_devices++;
690 if (device->writeable) {
691 fs_devices->rw_devices++;
692 list_add(&device->dev_alloc_list,
693 &fs_devices->alloc_list);
701 blkdev_put(bdev, flags);
705 if (fs_devices->open_devices == 0) {
709 fs_devices->seeding = seeding;
710 fs_devices->opened = 1;
711 fs_devices->latest_bdev = latest_bdev;
712 fs_devices->latest_devid = latest_devid;
713 fs_devices->latest_trans = latest_transid;
714 fs_devices->total_rw_bytes = 0;
719 int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
720 fmode_t flags, void *holder)
724 mutex_lock(&uuid_mutex);
725 if (fs_devices->opened) {
726 fs_devices->opened++;
729 ret = __btrfs_open_devices(fs_devices, flags, holder);
731 mutex_unlock(&uuid_mutex);
735 int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
736 struct btrfs_fs_devices **fs_devices_ret)
738 struct btrfs_super_block *disk_super;
739 struct block_device *bdev;
740 struct buffer_head *bh;
747 bdev = blkdev_get_by_path(path, flags, holder);
754 mutex_lock(&uuid_mutex);
755 ret = set_blocksize(bdev, 4096);
758 bh = btrfs_read_dev_super(bdev);
763 disk_super = (struct btrfs_super_block *)bh->b_data;
764 devid = btrfs_stack_device_id(&disk_super->dev_item);
765 transid = btrfs_super_generation(disk_super);
766 total_devices = btrfs_super_num_devices(disk_super);
767 if (disk_super->label[0])
768 printk(KERN_INFO "device label %s ", disk_super->label);
770 printk(KERN_INFO "device fsid %pU ", disk_super->fsid);
771 printk(KERN_CONT "devid %llu transid %llu %s\n",
772 (unsigned long long)devid, (unsigned long long)transid, path);
773 ret = device_list_add(path, disk_super, devid, fs_devices_ret);
774 if (!ret && fs_devices_ret)
775 (*fs_devices_ret)->total_devices = total_devices;
778 mutex_unlock(&uuid_mutex);
779 blkdev_put(bdev, flags);
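/*
 * Typical usage at mount/scan time (a sketch; error handling trimmed,
 * "holder" is whatever exclusive-open cookie the caller uses, and the
 * function name is illustrative only): register the device with the
 * in-memory tables, then open the whole set.
 */
static inline int example_scan_and_open(const char *path, void *holder)
{
	struct btrfs_fs_devices *fs_devices;
	int ret;

	ret = btrfs_scan_one_device(path, FMODE_READ, holder, &fs_devices);
	if (ret)
		return ret;
	return btrfs_open_devices(fs_devices, FMODE_READ | FMODE_EXCL, holder);
}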
784 /* helper to account the used device space in the range */
785 int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
786 u64 end, u64 *length)
788 struct btrfs_key key;
789 struct btrfs_root *root = device->dev_root;
790 struct btrfs_dev_extent *dev_extent;
791 struct btrfs_path *path;
795 struct extent_buffer *l;
799 if (start >= device->total_bytes)
802 path = btrfs_alloc_path();
807 key.objectid = device->devid;
809 key.type = BTRFS_DEV_EXTENT_KEY;
811 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
815 ret = btrfs_previous_item(root, path, key.objectid, key.type);
822 slot = path->slots[0];
823 if (slot >= btrfs_header_nritems(l)) {
824 ret = btrfs_next_leaf(root, path);
832 btrfs_item_key_to_cpu(l, &key, slot);
834 if (key.objectid < device->devid)
837 if (key.objectid > device->devid)
840 if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
843 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
844 extent_end = key.offset + btrfs_dev_extent_length(l,
846 if (key.offset <= start && extent_end > end) {
847 *length = end - start + 1;
849 } else if (key.offset <= start && extent_end > start)
850 *length += extent_end - start;
851 else if (key.offset > start && extent_end <= end)
852 *length += extent_end - key.offset;
853 else if (key.offset > start && key.offset <= end) {
854 *length += end - key.offset + 1;
856 } else if (key.offset > end)
864 btrfs_free_path(path);
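/*
 * Example of the overlap accounting above: for a query range
 * [150, 300] (end inclusive), a dev extent covering [100, 200)
 * contributes extent_end - start = 50 bytes, while one covering
 * [250, 400) contributes end - key.offset + 1 = 51 bytes.
 */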
869 * find_free_dev_extent - find free space in the specified device
870 * @device: the device in which we search for the free space
871 * @num_bytes: the size of the free space that we need
872 * @start: store the start of the free space.
873 * @len: the size of the free space that we find, or the size of the max
874 * free space if we don't find suitable free space
876 * this uses a pretty simple search, the expectation is that it is
877 * called very infrequently and that a given device has a small number
878 * of extents
880 * @start is used to store the start of the free space if we find it. But if
881 * we don't find suitable free space, it will be used to store the start
882 * position of the max free space.
884 * @len is used to store the size of the free space that we find.
885 * But if we don't find suitable free space, it is used to store the size of
886 * the max free space.
888 int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
889 u64 *start, u64 *len)
891 struct btrfs_key key;
892 struct btrfs_root *root = device->dev_root;
893 struct btrfs_dev_extent *dev_extent;
894 struct btrfs_path *path;
900 u64 search_end = device->total_bytes;
903 struct extent_buffer *l;
905 /* FIXME use last free of some kind */
907 /* we don't want to overwrite the superblock on the drive,
908 * so we make sure to start at an offset of at least 1MB
910 search_start = max(root->fs_info->alloc_start, 1024ull * 1024);
912 max_hole_start = search_start;
916 if (search_start >= search_end) {
921 path = btrfs_alloc_path();
928 key.objectid = device->devid;
929 key.offset = search_start;
930 key.type = BTRFS_DEV_EXTENT_KEY;
932 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
936 ret = btrfs_previous_item(root, path, key.objectid, key.type);
943 slot = path->slots[0];
944 if (slot >= btrfs_header_nritems(l)) {
945 ret = btrfs_next_leaf(root, path);
953 btrfs_item_key_to_cpu(l, &key, slot);
955 if (key.objectid < device->devid)
958 if (key.objectid > device->devid)
961 if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
964 if (key.offset > search_start) {
965 hole_size = key.offset - search_start;
967 if (hole_size > max_hole_size) {
968 max_hole_start = search_start;
969 max_hole_size = hole_size;
973 * If this free space is greater than what we need,
974 * it must be the max free space that we have found
975 * until now, so max_hole_start must point to the start
976 * of this free space and the length of this free space
977 * is stored in max_hole_size. Thus, we return
978 * max_hole_start and max_hole_size and go back to the
979 * caller.
981 if (hole_size >= num_bytes) {
987 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
988 extent_end = key.offset + btrfs_dev_extent_length(l,
990 if (extent_end > search_start)
991 search_start = extent_end;
998 * At this point, search_start should be the end of
999 * allocated dev extents, and when shrinking the device,
1000 * search_end may be smaller than search_start.
1002 if (search_end > search_start)
1003 hole_size = search_end - search_start;
1005 if (hole_size > max_hole_size) {
1006 max_hole_start = search_start;
1007 max_hole_size = hole_size;
1011 if (hole_size < num_bytes)
1017 btrfs_free_path(path);
1019 *start = max_hole_start;
1021 *len = max_hole_size;
1025 static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
1026 struct btrfs_device *device,
1030 struct btrfs_path *path;
1031 struct btrfs_root *root = device->dev_root;
1032 struct btrfs_key key;
1033 struct btrfs_key found_key;
1034 struct extent_buffer *leaf = NULL;
1035 struct btrfs_dev_extent *extent = NULL;
1037 path = btrfs_alloc_path();
1041 key.objectid = device->devid;
1043 key.type = BTRFS_DEV_EXTENT_KEY;
1045 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1047 ret = btrfs_previous_item(root, path, key.objectid,
1048 BTRFS_DEV_EXTENT_KEY);
1051 leaf = path->nodes[0];
1052 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1053 extent = btrfs_item_ptr(leaf, path->slots[0],
1054 struct btrfs_dev_extent);
1055 BUG_ON(found_key.offset > start || found_key.offset +
1056 btrfs_dev_extent_length(leaf, extent) < start);
1058 btrfs_release_path(path);
1060 } else if (ret == 0) {
1061 leaf = path->nodes[0];
1062 extent = btrfs_item_ptr(leaf, path->slots[0],
1063 struct btrfs_dev_extent);
1065 btrfs_error(root->fs_info, ret, "Slot search failed");
1069 if (device->bytes_used > 0) {
1070 u64 len = btrfs_dev_extent_length(leaf, extent);
1071 device->bytes_used -= len;
1072 spin_lock(&root->fs_info->free_chunk_lock);
1073 root->fs_info->free_chunk_space += len;
1074 spin_unlock(&root->fs_info->free_chunk_lock);
1076 ret = btrfs_del_item(trans, root, path);
1078 btrfs_error(root->fs_info, ret,
1079 "Failed to remove dev extent item");
1082 btrfs_free_path(path);
1086 int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
1087 struct btrfs_device *device,
1088 u64 chunk_tree, u64 chunk_objectid,
1089 u64 chunk_offset, u64 start, u64 num_bytes)
1092 struct btrfs_path *path;
1093 struct btrfs_root *root = device->dev_root;
1094 struct btrfs_dev_extent *extent;
1095 struct extent_buffer *leaf;
1096 struct btrfs_key key;
1098 WARN_ON(!device->in_fs_metadata);
1099 path = btrfs_alloc_path();
1103 key.objectid = device->devid;
1105 key.type = BTRFS_DEV_EXTENT_KEY;
1106 ret = btrfs_insert_empty_item(trans, root, path, &key,
1111 leaf = path->nodes[0];
1112 extent = btrfs_item_ptr(leaf, path->slots[0],
1113 struct btrfs_dev_extent);
1114 btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
1115 btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
1116 btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
1118 write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
1119 (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
1122 btrfs_set_dev_extent_length(leaf, extent, num_bytes);
1123 btrfs_mark_buffer_dirty(leaf);
1125 btrfs_free_path(path);
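/*
 * Worked example for find_free_dev_extent() above: with dev extents at
 * [4M, 8M) and [20M, 24M) on a 32M device and num_bytes = 10M, the
 * holes are [1M, 4M), [8M, 20M) and [24M, 32M); the search stops at
 * the first hole that fits and returns *start = 8M, *len = 12M.
 *
 * Below is a sketch of how the two helpers pair up during chunk
 * allocation (the real caller in this file does this per stripe;
 * transaction setup and chunk bookkeeping are omitted, and the
 * function name is illustrative):
 */
static inline int example_place_dev_extent(struct btrfs_trans_handle *trans,
					   struct btrfs_device *device,
					   u64 chunk_tree, u64 chunk_objectid,
					   u64 chunk_offset, u64 num_bytes)
{
	u64 start;
	u64 len;
	int ret;

	ret = find_free_dev_extent(device, num_bytes, &start, &len);
	if (ret)
		return ret;	/* -ENOSPC if no hole was large enough */
	return btrfs_alloc_dev_extent(trans, device, chunk_tree,
				      chunk_objectid, chunk_offset,
				      start, num_bytes);
}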
1129 static noinline int find_next_chunk(struct btrfs_root *root,
1130 u64 objectid, u64 *offset)
1132 struct btrfs_path *path;
1134 struct btrfs_key key;
1135 struct btrfs_chunk *chunk;
1136 struct btrfs_key found_key;
1138 path = btrfs_alloc_path();
1142 key.objectid = objectid;
1143 key.offset = (u64)-1;
1144 key.type = BTRFS_CHUNK_ITEM_KEY;
1146 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1150 BUG_ON(ret == 0); /* Corruption */
1152 ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
1156 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1158 if (found_key.objectid != objectid)
1161 chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
1162 struct btrfs_chunk);
1163 *offset = found_key.offset +
1164 btrfs_chunk_length(path->nodes[0], chunk);
1169 btrfs_free_path(path);
1173 static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid)
1176 struct btrfs_key key;
1177 struct btrfs_key found_key;
1178 struct btrfs_path *path;
1180 root = root->fs_info->chunk_root;
1182 path = btrfs_alloc_path();
1186 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1187 key.type = BTRFS_DEV_ITEM_KEY;
1188 key.offset = (u64)-1;
1190 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1194 BUG_ON(ret == 0); /* Corruption */
1196 ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
1197 BTRFS_DEV_ITEM_KEY);
1201 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1203 *objectid = found_key.offset + 1;
1207 btrfs_free_path(path);
1212 * the device information is stored in the chunk root
1213 * the btrfs_device struct should be fully filled in
1215 int btrfs_add_device(struct btrfs_trans_handle *trans,
1216 struct btrfs_root *root,
1217 struct btrfs_device *device)
1220 struct btrfs_path *path;
1221 struct btrfs_dev_item *dev_item;
1222 struct extent_buffer *leaf;
1223 struct btrfs_key key;
1226 root = root->fs_info->chunk_root;
1228 path = btrfs_alloc_path();
1232 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1233 key.type = BTRFS_DEV_ITEM_KEY;
1234 key.offset = device->devid;
1236 ret = btrfs_insert_empty_item(trans, root, path, &key,
1241 leaf = path->nodes[0];
1242 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1244 btrfs_set_device_id(leaf, dev_item, device->devid);
1245 btrfs_set_device_generation(leaf, dev_item, 0);
1246 btrfs_set_device_type(leaf, dev_item, device->type);
1247 btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1248 btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1249 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1250 btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
1251 btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
1252 btrfs_set_device_group(leaf, dev_item, 0);
1253 btrfs_set_device_seek_speed(leaf, dev_item, 0);
1254 btrfs_set_device_bandwidth(leaf, dev_item, 0);
1255 btrfs_set_device_start_offset(leaf, dev_item, 0);
1257 ptr = (unsigned long)btrfs_device_uuid(dev_item);
1258 write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
1259 ptr = (unsigned long)btrfs_device_fsid(dev_item);
1260 write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
1261 btrfs_mark_buffer_dirty(leaf);
1265 btrfs_free_path(path);
1269 static int btrfs_rm_dev_item(struct btrfs_root *root,
1270 struct btrfs_device *device)
1273 struct btrfs_path *path;
1274 struct btrfs_key key;
1275 struct btrfs_trans_handle *trans;
1277 root = root->fs_info->chunk_root;
1279 path = btrfs_alloc_path();
1283 trans = btrfs_start_transaction(root, 0);
1284 if (IS_ERR(trans)) {
1285 btrfs_free_path(path);
1286 return PTR_ERR(trans);
1288 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1289 key.type = BTRFS_DEV_ITEM_KEY;
1290 key.offset = device->devid;
1293 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1302 ret = btrfs_del_item(trans, root, path);
1306 btrfs_free_path(path);
1307 unlock_chunks(root);
1308 btrfs_commit_transaction(trans, root);
1312 int btrfs_rm_device(struct btrfs_root *root, char *device_path)
1314 struct btrfs_device *device;
1315 struct btrfs_device *next_device;
1316 struct block_device *bdev;
1317 struct buffer_head *bh = NULL;
1318 struct btrfs_super_block *disk_super;
1319 struct btrfs_fs_devices *cur_devices;
1325 bool clear_super = false;
1327 mutex_lock(&uuid_mutex);
1329 all_avail = root->fs_info->avail_data_alloc_bits |
1330 root->fs_info->avail_system_alloc_bits |
1331 root->fs_info->avail_metadata_alloc_bits;
1333 if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
1334 root->fs_info->fs_devices->num_devices <= 4) {
1335 printk(KERN_ERR "btrfs: unable to go below four devices "
1336 "on raid10\n");
1341 if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
1342 root->fs_info->fs_devices->num_devices <= 2) {
1343 printk(KERN_ERR "btrfs: unable to go below two "
1344 "devices on raid1\n");
1349 if (strcmp(device_path, "missing") == 0) {
1350 struct list_head *devices;
1351 struct btrfs_device *tmp;
1354 devices = &root->fs_info->fs_devices->devices;
1356 * It is safe to read the devices since the volume_mutex
1357 * is held.
1359 list_for_each_entry(tmp, devices, dev_list) {
1360 if (tmp->in_fs_metadata && !tmp->bdev) {
1369 printk(KERN_ERR "btrfs: no missing devices found to "
1370 "remove\n");
1374 bdev = blkdev_get_by_path(device_path, FMODE_READ | FMODE_EXCL,
1375 root->fs_info->bdev_holder);
1377 ret = PTR_ERR(bdev);
1381 set_blocksize(bdev, 4096);
1382 invalidate_bdev(bdev);
1383 bh = btrfs_read_dev_super(bdev);
1388 disk_super = (struct btrfs_super_block *)bh->b_data;
1389 devid = btrfs_stack_device_id(&disk_super->dev_item);
1390 dev_uuid = disk_super->dev_item.uuid;
1391 device = btrfs_find_device(root, devid, dev_uuid,
1399 if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
1400 printk(KERN_ERR "btrfs: unable to remove the only writeable "
1401 "device\n");
1406 if (device->writeable) {
1408 list_del_init(&device->dev_alloc_list);
1409 unlock_chunks(root);
1410 root->fs_info->fs_devices->rw_devices--;
1414 ret = btrfs_shrink_device(device, 0);
1418 ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
1422 spin_lock(&root->fs_info->free_chunk_lock);
1423 root->fs_info->free_chunk_space = device->total_bytes -
1425 spin_unlock(&root->fs_info->free_chunk_lock);
1427 device->in_fs_metadata = 0;
1428 btrfs_scrub_cancel_dev(root, device);
1431 * the device list mutex makes sure that we don't change
1432 * the device list while someone else is writing out all
1433 * the device supers.
1436 cur_devices = device->fs_devices;
1437 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1438 list_del_rcu(&device->dev_list);
1440 device->fs_devices->num_devices--;
1441 device->fs_devices->total_devices--;
1443 if (device->missing)
1444 root->fs_info->fs_devices->missing_devices--;
1446 next_device = list_entry(root->fs_info->fs_devices->devices.next,
1447 struct btrfs_device, dev_list);
1448 if (device->bdev == root->fs_info->sb->s_bdev)
1449 root->fs_info->sb->s_bdev = next_device->bdev;
1450 if (device->bdev == root->fs_info->fs_devices->latest_bdev)
1451 root->fs_info->fs_devices->latest_bdev = next_device->bdev;
1454 device->fs_devices->open_devices--;
1456 call_rcu(&device->rcu, free_device);
1457 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1459 num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
1460 btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);
1462 if (cur_devices->open_devices == 0) {
1463 struct btrfs_fs_devices *fs_devices;
1464 fs_devices = root->fs_info->fs_devices;
1465 while (fs_devices) {
1466 if (fs_devices->seed == cur_devices)
1468 fs_devices = fs_devices->seed;
1470 fs_devices->seed = cur_devices->seed;
1471 cur_devices->seed = NULL;
1473 __btrfs_close_devices(cur_devices);
1474 unlock_chunks(root);
1475 free_fs_devices(cur_devices);
1478 root->fs_info->num_tolerated_disk_barrier_failures =
1479 btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
1482 * at this point, the device is zero sized. We want to
1483 * remove it from the devices list and zero out the old super
1486 /* make sure this device isn't detected as part of
1487 * the FS anymore
1489 memset(&disk_super->magic, 0, sizeof(disk_super->magic));
1490 set_buffer_dirty(bh);
1491 sync_dirty_buffer(bh);
1500 blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
1502 mutex_unlock(&uuid_mutex);
1505 if (device->writeable) {
1507 list_add(&device->dev_alloc_list,
1508 &root->fs_info->fs_devices->alloc_list);
1509 unlock_chunks(root);
1510 root->fs_info->fs_devices->rw_devices++;
1516 * does all the dirty work required for changing the file system's UUID.
1518 static int btrfs_prepare_sprout(struct btrfs_root *root)
1520 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
1521 struct btrfs_fs_devices *old_devices;
1522 struct btrfs_fs_devices *seed_devices;
1523 struct btrfs_super_block *disk_super = root->fs_info->super_copy;
1524 struct btrfs_device *device;
1527 BUG_ON(!mutex_is_locked(&uuid_mutex));
1528 if (!fs_devices->seeding)
1531 seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
1535 old_devices = clone_fs_devices(fs_devices);
1536 if (IS_ERR(old_devices)) {
1537 kfree(seed_devices);
1538 return PTR_ERR(old_devices);
1541 list_add(&old_devices->list, &fs_uuids);
1543 memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
1544 seed_devices->opened = 1;
1545 INIT_LIST_HEAD(&seed_devices->devices);
1546 INIT_LIST_HEAD(&seed_devices->alloc_list);
1547 mutex_init(&seed_devices->device_list_mutex);
1549 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1550 list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
1552 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1554 list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
1555 list_for_each_entry(device, &seed_devices->devices, dev_list) {
1556 device->fs_devices = seed_devices;
1559 fs_devices->seeding = 0;
1560 fs_devices->num_devices = 0;
1561 fs_devices->open_devices = 0;
1562 fs_devices->total_devices = 0;
1563 fs_devices->seed = seed_devices;
1565 generate_random_uuid(fs_devices->fsid);
1566 memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1567 memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1568 super_flags = btrfs_super_flags(disk_super) &
1569 ~BTRFS_SUPER_FLAG_SEEDING;
1570 btrfs_set_super_flags(disk_super, super_flags);
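/*
 * After btrfs_prepare_sprout() the original devices live on a fresh
 * btrfs_fs_devices hung off fs_devices->seed, the mounted fs gets a
 * brand new fsid, and the SEEDING flag is cleared from the superblock
 * so that the sprouted filesystem is writable while the seed devices
 * stay read-only.
 */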
1576 * store the expected generation for seed devices in device items.
1578 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
1579 struct btrfs_root *root)
1581 struct btrfs_path *path;
1582 struct extent_buffer *leaf;
1583 struct btrfs_dev_item *dev_item;
1584 struct btrfs_device *device;
1585 struct btrfs_key key;
1586 u8 fs_uuid[BTRFS_UUID_SIZE];
1587 u8 dev_uuid[BTRFS_UUID_SIZE];
1591 path = btrfs_alloc_path();
1595 root = root->fs_info->chunk_root;
1596 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1598 key.type = BTRFS_DEV_ITEM_KEY;
1601 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1605 leaf = path->nodes[0];
1607 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1608 ret = btrfs_next_leaf(root, path);
1613 leaf = path->nodes[0];
1614 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1615 btrfs_release_path(path);
1619 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1620 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
1621 key.type != BTRFS_DEV_ITEM_KEY)
1624 dev_item = btrfs_item_ptr(leaf, path->slots[0],
1625 struct btrfs_dev_item);
1626 devid = btrfs_device_id(leaf, dev_item);
1627 read_extent_buffer(leaf, dev_uuid,
1628 (unsigned long)btrfs_device_uuid(dev_item),
1630 read_extent_buffer(leaf, fs_uuid,
1631 (unsigned long)btrfs_device_fsid(dev_item),
1633 device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
1634 BUG_ON(!device); /* Logic error */
1636 if (device->fs_devices->seeding) {
1637 btrfs_set_device_generation(leaf, dev_item,
1638 device->generation);
1639 btrfs_mark_buffer_dirty(leaf);
1647 btrfs_free_path(path);
1651 int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
1653 struct request_queue *q;
1654 struct btrfs_trans_handle *trans;
1655 struct btrfs_device *device;
1656 struct block_device *bdev;
1657 struct list_head *devices;
1658 struct super_block *sb = root->fs_info->sb;
1659 struct rcu_string *name;
1661 int seeding_dev = 0;
1664 if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
1667 bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
1668 root->fs_info->bdev_holder);
1670 return PTR_ERR(bdev);
1672 if (root->fs_info->fs_devices->seeding) {
1674 down_write(&sb->s_umount);
1675 mutex_lock(&uuid_mutex);
1678 filemap_write_and_wait(bdev->bd_inode->i_mapping);
1680 devices = &root->fs_info->fs_devices->devices;
1682 * we have the volume lock, so we don't need the extra
1683 * device list mutex while reading the list here.
1685 list_for_each_entry(device, devices, dev_list) {
1686 if (device->bdev == bdev) {
1692 device = kzalloc(sizeof(*device), GFP_NOFS);
1694 /* we can safely leave the fs_devices entry around */
1699 name = rcu_string_strdup(device_path, GFP_NOFS);
1705 rcu_assign_pointer(device->name, name);
1707 ret = find_next_devid(root, &device->devid);
1709 rcu_string_free(device->name);
1714 trans = btrfs_start_transaction(root, 0);
1715 if (IS_ERR(trans)) {
1716 rcu_string_free(device->name);
1718 ret = PTR_ERR(trans);
1724 q = bdev_get_queue(bdev);
1725 if (blk_queue_discard(q))
1726 device->can_discard = 1;
1727 device->writeable = 1;
1728 device->work.func = pending_bios_fn;
1729 generate_random_uuid(device->uuid);
1730 spin_lock_init(&device->io_lock);
1731 device->generation = trans->transid;
1732 device->io_width = root->sectorsize;
1733 device->io_align = root->sectorsize;
1734 device->sector_size = root->sectorsize;
1735 device->total_bytes = i_size_read(bdev->bd_inode);
1736 device->disk_total_bytes = device->total_bytes;
1737 device->dev_root = root->fs_info->dev_root;
1738 device->bdev = bdev;
1739 device->in_fs_metadata = 1;
1740 device->mode = FMODE_EXCL;
1741 set_blocksize(device->bdev, 4096);
1744 sb->s_flags &= ~MS_RDONLY;
1745 ret = btrfs_prepare_sprout(root);
1746 BUG_ON(ret); /* -ENOMEM */
1749 device->fs_devices = root->fs_info->fs_devices;
1751 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1752 list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
1753 list_add(&device->dev_alloc_list,
1754 &root->fs_info->fs_devices->alloc_list);
1755 root->fs_info->fs_devices->num_devices++;
1756 root->fs_info->fs_devices->open_devices++;
1757 root->fs_info->fs_devices->rw_devices++;
1758 root->fs_info->fs_devices->total_devices++;
1759 if (device->can_discard)
1760 root->fs_info->fs_devices->num_can_discard++;
1761 root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;
1763 spin_lock(&root->fs_info->free_chunk_lock);
1764 root->fs_info->free_chunk_space += device->total_bytes;
1765 spin_unlock(&root->fs_info->free_chunk_lock);
1767 if (!blk_queue_nonrot(bdev_get_queue(bdev)))
1768 root->fs_info->fs_devices->rotating = 1;
1770 total_bytes = btrfs_super_total_bytes(root->fs_info->super_copy);
1771 btrfs_set_super_total_bytes(root->fs_info->super_copy,
1772 total_bytes + device->total_bytes);
1774 total_bytes = btrfs_super_num_devices(root->fs_info->super_copy);
1775 btrfs_set_super_num_devices(root->fs_info->super_copy,
1777 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1780 ret = init_first_rw_device(trans, root, device);
1782 btrfs_abort_transaction(trans, root, ret);
1785 ret = btrfs_finish_sprout(trans, root);
1787 btrfs_abort_transaction(trans, root, ret);
1791 ret = btrfs_add_device(trans, root, device);
1793 btrfs_abort_transaction(trans, root, ret);
1799 * we've got more storage, clear any full flags on the space
1800 * infos
1802 btrfs_clear_space_info_full(root->fs_info);
1804 unlock_chunks(root);
1805 root->fs_info->num_tolerated_disk_barrier_failures =
1806 btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
1807 ret = btrfs_commit_transaction(trans, root);
1810 mutex_unlock(&uuid_mutex);
1811 up_write(&sb->s_umount);
1813 if (ret) /* transaction commit */
1816 ret = btrfs_relocate_sys_chunks(root);
1818 btrfs_error(root->fs_info, ret,
1819 "Failed to relocate sys chunks after "
1820 "device initialization. This can be fixed "
1821 "using the \"btrfs balance\" command.");
1822 trans = btrfs_attach_transaction(root);
1823 if (IS_ERR(trans)) {
1824 if (PTR_ERR(trans) == -ENOENT)
1826 return PTR_ERR(trans);
1828 ret = btrfs_commit_transaction(trans, root);
1834 unlock_chunks(root);
1835 btrfs_end_transaction(trans, root);
1836 rcu_string_free(device->name);
1839 blkdev_put(bdev, FMODE_EXCL);
1841 mutex_unlock(&uuid_mutex);
1842 up_write(&sb->s_umount);
1847 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
1848 struct btrfs_device *device)
1851 struct btrfs_path *path;
1852 struct btrfs_root *root;
1853 struct btrfs_dev_item *dev_item;
1854 struct extent_buffer *leaf;
1855 struct btrfs_key key;
1857 root = device->dev_root->fs_info->chunk_root;
1859 path = btrfs_alloc_path();
1863 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1864 key.type = BTRFS_DEV_ITEM_KEY;
1865 key.offset = device->devid;
1867 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1876 leaf = path->nodes[0];
1877 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1879 btrfs_set_device_id(leaf, dev_item, device->devid);
1880 btrfs_set_device_type(leaf, dev_item, device->type);
1881 btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1882 btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1883 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1884 btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes);
1885 btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
1886 btrfs_mark_buffer_dirty(leaf);
1889 btrfs_free_path(path);
1893 static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
1894 struct btrfs_device *device, u64 new_size)
1896 struct btrfs_super_block *super_copy =
1897 device->dev_root->fs_info->super_copy;
1898 u64 old_total = btrfs_super_total_bytes(super_copy);
1899 u64 diff = new_size - device->total_bytes;
1901 if (!device->writeable)
1903 if (new_size <= device->total_bytes)
1906 btrfs_set_super_total_bytes(super_copy, old_total + diff);
1907 device->fs_devices->total_rw_bytes += diff;
1909 device->total_bytes = new_size;
1910 device->disk_total_bytes = new_size;
1911 btrfs_clear_space_info_full(device->dev_root->fs_info);
1913 return btrfs_update_device(trans, device);
1916 int btrfs_grow_device(struct btrfs_trans_handle *trans,
1917 struct btrfs_device *device, u64 new_size)
1920 lock_chunks(device->dev_root);
1921 ret = __btrfs_grow_device(trans, device, new_size);
1922 unlock_chunks(device->dev_root);
1926 static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
1927 struct btrfs_root *root,
1928 u64 chunk_tree, u64 chunk_objectid,
1932 struct btrfs_path *path;
1933 struct btrfs_key key;
1935 root = root->fs_info->chunk_root;
1936 path = btrfs_alloc_path();
1940 key.objectid = chunk_objectid;
1941 key.offset = chunk_offset;
1942 key.type = BTRFS_CHUNK_ITEM_KEY;
1944 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1947 else if (ret > 0) { /* Logic error or corruption */
1948 btrfs_error(root->fs_info, -ENOENT,
1949 "Failed lookup while freeing chunk.");
1954 ret = btrfs_del_item(trans, root, path);
1956 btrfs_error(root->fs_info, ret,
1957 "Failed to delete chunk item.");
1959 btrfs_free_path(path);
1963 static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
1966 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
1967 struct btrfs_disk_key *disk_key;
1968 struct btrfs_chunk *chunk;
1975 struct btrfs_key key;
1977 array_size = btrfs_super_sys_array_size(super_copy);
1979 ptr = super_copy->sys_chunk_array;
1982 while (cur < array_size) {
1983 disk_key = (struct btrfs_disk_key *)ptr;
1984 btrfs_disk_key_to_cpu(&key, disk_key);
1986 len = sizeof(*disk_key);
1988 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
1989 chunk = (struct btrfs_chunk *)(ptr + len);
1990 num_stripes = btrfs_stack_chunk_num_stripes(chunk);
1991 len += btrfs_chunk_item_size(num_stripes);
1996 if (key.objectid == chunk_objectid &&
1997 key.offset == chunk_offset) {
1998 memmove(ptr, ptr + len, array_size - (cur + len));
2000 btrfs_set_super_sys_array_size(super_copy, array_size);
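/*
 * Example of the removal above: if sys_chunk_array holds
 * [key0|chunk0][key1|chunk1] and key0 matches the chunk being freed,
 * the memmove() slides [key1|chunk1] down over it and array_size
 * shrinks by sizeof(*disk_key) + btrfs_chunk_item_size(num_stripes).
 */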
2009 static int btrfs_relocate_chunk(struct btrfs_root *root,
2010 u64 chunk_tree, u64 chunk_objectid,
2013 struct extent_map_tree *em_tree;
2014 struct btrfs_root *extent_root;
2015 struct btrfs_trans_handle *trans;
2016 struct extent_map *em;
2017 struct map_lookup *map;
2021 root = root->fs_info->chunk_root;
2022 extent_root = root->fs_info->extent_root;
2023 em_tree = &root->fs_info->mapping_tree.map_tree;
2025 ret = btrfs_can_relocate(extent_root, chunk_offset);
2029 /* step one, relocate all the extents inside this chunk */
2030 ret = btrfs_relocate_block_group(extent_root, chunk_offset);
2034 trans = btrfs_start_transaction(root, 0);
2035 BUG_ON(IS_ERR(trans));
2040 * step two, delete the device extents and the
2041 * chunk tree entries
2043 read_lock(&em_tree->lock);
2044 em = lookup_extent_mapping(em_tree, chunk_offset, 1);
2045 read_unlock(&em_tree->lock);
2047 BUG_ON(!em || em->start > chunk_offset ||
2048 em->start + em->len < chunk_offset);
2049 map = (struct map_lookup *)em->bdev;
2051 for (i = 0; i < map->num_stripes; i++) {
2052 ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
2053 map->stripes[i].physical);
2056 if (map->stripes[i].dev) {
2057 ret = btrfs_update_device(trans, map->stripes[i].dev);
2061 ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
2066 trace_btrfs_chunk_free(root, map, chunk_offset, em->len);
2068 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
2069 ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
2073 ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
2076 write_lock(&em_tree->lock);
2077 remove_extent_mapping(em_tree, em);
2078 write_unlock(&em_tree->lock);
2083 /* once for the tree */
2084 free_extent_map(em);
2086 free_extent_map(em);
2088 unlock_chunks(root);
2089 btrfs_end_transaction(trans, root);
2093 static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
2095 struct btrfs_root *chunk_root = root->fs_info->chunk_root;
2096 struct btrfs_path *path;
2097 struct extent_buffer *leaf;
2098 struct btrfs_chunk *chunk;
2099 struct btrfs_key key;
2100 struct btrfs_key found_key;
2101 u64 chunk_tree = chunk_root->root_key.objectid;
2103 bool retried = false;
2107 path = btrfs_alloc_path();
2112 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2113 key.offset = (u64)-1;
2114 key.type = BTRFS_CHUNK_ITEM_KEY;
2117 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2120 BUG_ON(ret == 0); /* Corruption */
2122 ret = btrfs_previous_item(chunk_root, path, key.objectid,
2129 leaf = path->nodes[0];
2130 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2132 chunk = btrfs_item_ptr(leaf, path->slots[0],
2133 struct btrfs_chunk);
2134 chunk_type = btrfs_chunk_type(leaf, chunk);
2135 btrfs_release_path(path);
2137 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
2138 ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
2147 if (found_key.offset == 0)
2149 key.offset = found_key.offset - 1;
2152 if (failed && !retried) {
2156 } else if (failed && retried) {
2161 btrfs_free_path(path);
2165 static int insert_balance_item(struct btrfs_root *root,
2166 struct btrfs_balance_control *bctl)
2168 struct btrfs_trans_handle *trans;
2169 struct btrfs_balance_item *item;
2170 struct btrfs_disk_balance_args disk_bargs;
2171 struct btrfs_path *path;
2172 struct extent_buffer *leaf;
2173 struct btrfs_key key;
2176 path = btrfs_alloc_path();
2180 trans = btrfs_start_transaction(root, 0);
2181 if (IS_ERR(trans)) {
2182 btrfs_free_path(path);
2183 return PTR_ERR(trans);
2186 key.objectid = BTRFS_BALANCE_OBJECTID;
2187 key.type = BTRFS_BALANCE_ITEM_KEY;
2190 ret = btrfs_insert_empty_item(trans, root, path, &key,
2195 leaf = path->nodes[0];
2196 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
2198 memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
2200 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
2201 btrfs_set_balance_data(leaf, item, &disk_bargs);
2202 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
2203 btrfs_set_balance_meta(leaf, item, &disk_bargs);
2204 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
2205 btrfs_set_balance_sys(leaf, item, &disk_bargs);
2207 btrfs_set_balance_flags(leaf, item, bctl->flags);
2209 btrfs_mark_buffer_dirty(leaf);
2211 btrfs_free_path(path);
2212 err = btrfs_commit_transaction(trans, root);
2218 static int del_balance_item(struct btrfs_root *root)
2220 struct btrfs_trans_handle *trans;
2221 struct btrfs_path *path;
2222 struct btrfs_key key;
2225 path = btrfs_alloc_path();
2229 trans = btrfs_start_transaction(root, 0);
2230 if (IS_ERR(trans)) {
2231 btrfs_free_path(path);
2232 return PTR_ERR(trans);
2235 key.objectid = BTRFS_BALANCE_OBJECTID;
2236 key.type = BTRFS_BALANCE_ITEM_KEY;
2239 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2247 ret = btrfs_del_item(trans, root, path);
2249 btrfs_free_path(path);
2250 err = btrfs_commit_transaction(trans, root);
2257 * This is a heuristic used to reduce the number of chunks balanced on
2258 * resume after balance was interrupted.
2260 static void update_balance_args(struct btrfs_balance_control *bctl)
2263 * Turn on soft mode for chunk types that were being converted.
2265 if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
2266 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
2267 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
2268 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
2269 if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
2270 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
2273 * Turn on usage filter if it is not already used. The idea is
2274 * that chunks that we have already balanced should be
2275 * reasonably full. Don't do it for chunks that are being
2276 * converted - that will keep us from relocating unconverted
2277 * (albeit full) chunks.
2279 if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2280 !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2281 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
2282 bctl->data.usage = 90;
2284 if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2285 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2286 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
2287 bctl->sys.usage = 90;
2289 if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2290 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2291 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
2292 bctl->meta.usage = 90;
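/*
 * Example: a balance started as -dconvert=raid1 and interrupted
 * resumes as if it were -dconvert=raid1,soft; chunk types that were
 * not being converted additionally get usage=90, so chunks that are
 * already reasonably full are left alone on resume.
 */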
2297 * Should be called with both balance and volume mutexes held to
2298 * serialize other volume operations (add_dev/rm_dev/resize) with
2299 * the restriper. Same goes for unset_balance_control.
2301 static void set_balance_control(struct btrfs_balance_control *bctl)
2303 struct btrfs_fs_info *fs_info = bctl->fs_info;
2305 BUG_ON(fs_info->balance_ctl);
2307 spin_lock(&fs_info->balance_lock);
2308 fs_info->balance_ctl = bctl;
2309 spin_unlock(&fs_info->balance_lock);
2312 static void unset_balance_control(struct btrfs_fs_info *fs_info)
2314 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
2316 BUG_ON(!fs_info->balance_ctl);
2318 spin_lock(&fs_info->balance_lock);
2319 fs_info->balance_ctl = NULL;
2320 spin_unlock(&fs_info->balance_lock);
2326 * Balance filters. Return 1 if chunk should be filtered out
2327 * (should not be balanced).
2329 static int chunk_profiles_filter(u64 chunk_type,
2330 struct btrfs_balance_args *bargs)
2332 chunk_type = chunk_to_extended(chunk_type) &
2333 BTRFS_EXTENDED_PROFILE_MASK;
2335 if (bargs->profiles & chunk_type)
2341 static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
2342 struct btrfs_balance_args *bargs)
2344 struct btrfs_block_group_cache *cache;
2345 u64 chunk_used, user_thresh;
2348 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
2349 chunk_used = btrfs_block_group_used(&cache->item);
2351 user_thresh = div_factor_fine(cache->key.offset, bargs->usage);
2352 if (chunk_used < user_thresh)
2355 btrfs_put_block_group(cache);
2359 static int chunk_devid_filter(struct extent_buffer *leaf,
2360 struct btrfs_chunk *chunk,
2361 struct btrfs_balance_args *bargs)
2363 struct btrfs_stripe *stripe;
2364 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2367 for (i = 0; i < num_stripes; i++) {
2368 stripe = btrfs_stripe_nr(chunk, i);
2369 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
2376 /* [pstart, pend) */
2377 static int chunk_drange_filter(struct extent_buffer *leaf,
2378 struct btrfs_chunk *chunk,
2380 struct btrfs_balance_args *bargs)
2382 struct btrfs_stripe *stripe;
2383 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2389 if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
2392 if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
2393 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10))
2397 factor = num_stripes / factor;
2399 for (i = 0; i < num_stripes; i++) {
2400 stripe = btrfs_stripe_nr(chunk, i);
2401 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
2404 stripe_offset = btrfs_stripe_offset(leaf, stripe);
2405 stripe_length = btrfs_chunk_length(leaf, chunk);
2406 do_div(stripe_length, factor);
2408 if (stripe_offset < bargs->pend &&
2409 stripe_offset + stripe_length > bargs->pstart)
2416 /* [vstart, vend) */
2417 static int chunk_vrange_filter(struct extent_buffer *leaf,
2418 struct btrfs_chunk *chunk,
2420 struct btrfs_balance_args *bargs)
2422 if (chunk_offset < bargs->vend &&
2423 chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
2424 /* at least part of the chunk is inside this vrange */
2430 static int chunk_soft_convert_filter(u64 chunk_type,
2431 struct btrfs_balance_args *bargs)
2433 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
2436 chunk_type = chunk_to_extended(chunk_type) &
2437 BTRFS_EXTENDED_PROFILE_MASK;
2439 if (bargs->target == chunk_type)
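/*
 * A sketch of how a caller might combine the filters above: balance
 * only the chunks that have a stripe on devid 1 and are less than
 * half full.  The fields are real btrfs_balance_args members; the
 * ioctl plumbing around them is omitted and the function name is
 * illustrative.
 */
static inline void example_fill_balance_args(struct btrfs_balance_args *args)
{
	memset(args, 0, sizeof(*args));
	args->devid = 1;
	args->usage = 50;
	args->flags = BTRFS_BALANCE_ARGS_DEVID | BTRFS_BALANCE_ARGS_USAGE;
}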
2445 static int should_balance_chunk(struct btrfs_root *root,
2446 struct extent_buffer *leaf,
2447 struct btrfs_chunk *chunk, u64 chunk_offset)
2449 struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
2450 struct btrfs_balance_args *bargs = NULL;
2451 u64 chunk_type = btrfs_chunk_type(leaf, chunk);
2454 if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
2455 (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
2459 if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
2460 bargs = &bctl->data;
2461 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
2463 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
2464 bargs = &bctl->meta;
2466 /* profiles filter */
2467 if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
2468 chunk_profiles_filter(chunk_type, bargs)) {
2473 if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
2474 chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) {
2479 if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
2480 chunk_devid_filter(leaf, chunk, bargs)) {
2484 /* drange filter, makes sense only with devid filter */
2485 if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
2486 chunk_drange_filter(leaf, chunk, chunk_offset, bargs)) {
2491 if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
2492 chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
2496 /* soft profile changing mode */
2497 if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
2498 chunk_soft_convert_filter(chunk_type, bargs)) {
return 0;
}

return 1;
2505 static int __btrfs_balance(struct btrfs_fs_info *fs_info)
2507 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
2508 struct btrfs_root *chunk_root = fs_info->chunk_root;
2509 struct btrfs_root *dev_root = fs_info->dev_root;
2510 struct list_head *devices;
2511 struct btrfs_device *device;
2514 struct btrfs_chunk *chunk;
2515 struct btrfs_path *path;
2516 struct btrfs_key key;
2517 struct btrfs_key found_key;
2518 struct btrfs_trans_handle *trans;
2519 struct extent_buffer *leaf;
2522 int enospc_errors = 0;
2523 bool counting = true;
2525 /* step one, make some room on all the devices */
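/*
* Shrinking each device by up to 1MB and growing it right back
* relocates whatever sat at its very end, leaving a little free
* space on every writeable device for the chunk relocations that
* step two will trigger.
*/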
2526 devices = &fs_info->fs_devices->devices;
2527 list_for_each_entry(device, devices, dev_list) {
2528 old_size = device->total_bytes;
2529 size_to_free = div_factor(old_size, 1);
2530 size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
2531 if (!device->writeable ||
2532 device->total_bytes - device->bytes_used > size_to_free)
2535 ret = btrfs_shrink_device(device, old_size - size_to_free);
2540 trans = btrfs_start_transaction(dev_root, 0);
2541 BUG_ON(IS_ERR(trans));
2543 ret = btrfs_grow_device(trans, device, old_size);
2546 btrfs_end_transaction(trans, dev_root);
2549 /* step two, relocate all the chunks */
2550 path = btrfs_alloc_path();
2556 /* zero out stat counters */
2557 spin_lock(&fs_info->balance_lock);
2558 memset(&bctl->stat, 0, sizeof(bctl->stat));
2559 spin_unlock(&fs_info->balance_lock);
2561 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2562 key.offset = (u64)-1;
2563 key.type = BTRFS_CHUNK_ITEM_KEY;
2566 if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
2567 atomic_read(&fs_info->balance_cancel_req)) {
2572 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2577 * this shouldn't happen, it means the last relocate
* failed
2581 BUG(); /* FIXME break ? */
2583 ret = btrfs_previous_item(chunk_root, path, 0,
2584 BTRFS_CHUNK_ITEM_KEY);
2590 leaf = path->nodes[0];
2591 slot = path->slots[0];
2592 btrfs_item_key_to_cpu(leaf, &found_key, slot);
2594 if (found_key.objectid != key.objectid)
2597 /* chunk zero is special */
2598 if (found_key.offset == 0)
2601 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
2604 spin_lock(&fs_info->balance_lock);
2605 bctl->stat.considered++;
2606 spin_unlock(&fs_info->balance_lock);
2609 ret = should_balance_chunk(chunk_root, leaf, chunk,
2611 btrfs_release_path(path);
2616 spin_lock(&fs_info->balance_lock);
2617 bctl->stat.expected++;
2618 spin_unlock(&fs_info->balance_lock);
2622 ret = btrfs_relocate_chunk(chunk_root,
2623 chunk_root->root_key.objectid,
2626 if (ret && ret != -ENOSPC)
2628 if (ret == -ENOSPC) {
2631 spin_lock(&fs_info->balance_lock);
2632 bctl->stat.completed++;
2633 spin_unlock(&fs_info->balance_lock);
2636 key.offset = found_key.offset - 1;
2640 btrfs_release_path(path);
2645 btrfs_free_path(path);
2646 if (enospc_errors) {
2647 printk(KERN_INFO "btrfs: %d enospc errors during balance\n",
2657 * alloc_profile_is_valid - see if a given profile is valid and reduced
2658 * @flags: profile to validate
2659 * @extended: if true @flags is treated as an extended profile
2661 static int alloc_profile_is_valid(u64 flags, int extended)
2663 u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
2664 BTRFS_BLOCK_GROUP_PROFILE_MASK);
2666 flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
2668 /* 1) check that all other bits are zeroed */
2672 /* 2) see if profile is reduced */
2674 return !extended; /* "0" is valid for usual profiles */
2676 /* true if exactly one bit set */
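/*
* e.g. RAID1 alone is reduced while RAID1|RAID0 is not:
* flags - 1 clears the lowest set bit, so the AND below is zero
* only when no other profile bit was set
*/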
2677 return (flags & (flags - 1)) == 0;
2680 static inline int balance_need_close(struct btrfs_fs_info *fs_info)
2682 /* cancel requested || normal exit path */
2683 return atomic_read(&fs_info->balance_cancel_req) ||
2684 (atomic_read(&fs_info->balance_pause_req) == 0 &&
2685 atomic_read(&fs_info->balance_cancel_req) == 0);
2688 static void __cancel_balance(struct btrfs_fs_info *fs_info)
2692 unset_balance_control(fs_info);
2693 ret = del_balance_item(fs_info->tree_root);
2697 void update_ioctl_balance_args(struct btrfs_fs_info *fs_info, int lock,
2698 struct btrfs_ioctl_balance_args *bargs);
2701 * Should be called with both balance and volume mutexes held
2703 int btrfs_balance(struct btrfs_balance_control *bctl,
2704 struct btrfs_ioctl_balance_args *bargs)
2706 struct btrfs_fs_info *fs_info = bctl->fs_info;
2711 if (btrfs_fs_closing(fs_info) ||
2712 atomic_read(&fs_info->balance_pause_req) ||
2713 atomic_read(&fs_info->balance_cancel_req)) {
2718 allowed = btrfs_super_incompat_flags(fs_info->super_copy);
2719 if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
2723 * In case of mixed groups both data and meta should be picked,
2724 * and identical options should be given for both of them.
2726 allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
2727 if (mixed && (bctl->flags & allowed)) {
2728 if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
2729 !(bctl->flags & BTRFS_BALANCE_METADATA) ||
2730 memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
2731 printk(KERN_ERR "btrfs: with mixed groups data and "
2732 "metadata balance options must be the same\n");
2738 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
2739 if (fs_info->fs_devices->num_devices == 1)
2740 allowed |= BTRFS_BLOCK_GROUP_DUP;
2741 else if (fs_info->fs_devices->num_devices < 4)
2742 allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
2744 allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
2745 BTRFS_BLOCK_GROUP_RAID10);
2747 if ((bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2748 (!alloc_profile_is_valid(bctl->data.target, 1) ||
2749 (bctl->data.target & ~allowed))) {
2750 printk(KERN_ERR "btrfs: unable to start balance with target "
2751 "data profile %llu\n",
2752 (unsigned long long)bctl->data.target);
2756 if ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2757 (!alloc_profile_is_valid(bctl->meta.target, 1) ||
2758 (bctl->meta.target & ~allowed))) {
2759 printk(KERN_ERR "btrfs: unable to start balance with target "
2760 "metadata profile %llu\n",
2761 (unsigned long long)bctl->meta.target);
2765 if ((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2766 (!alloc_profile_is_valid(bctl->sys.target, 1) ||
2767 (bctl->sys.target & ~allowed))) {
2768 printk(KERN_ERR "btrfs: unable to start balance with target "
2769 "system profile %llu\n",
2770 (unsigned long long)bctl->sys.target);
2775 /* allow dup'ed data chunks only in mixed mode */
2776 if (!mixed && (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2777 (bctl->data.target & BTRFS_BLOCK_GROUP_DUP)) {
2778 printk(KERN_ERR "btrfs: dup for data is not allowed\n");
2783 /* allow to reduce meta or sys integrity only if force set */
2784 allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
2785 BTRFS_BLOCK_GROUP_RAID10;
2786 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2787 (fs_info->avail_system_alloc_bits & allowed) &&
2788 !(bctl->sys.target & allowed)) ||
2789 ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2790 (fs_info->avail_metadata_alloc_bits & allowed) &&
2791 !(bctl->meta.target & allowed))) {
2792 if (bctl->flags & BTRFS_BALANCE_FORCE) {
2793 printk(KERN_INFO "btrfs: force reducing metadata "
"integrity\n");
} else {
2796 printk(KERN_ERR "btrfs: balance will reduce metadata "
2797 "integrity, use force if you want this\n");
2803 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
2804 int num_tolerated_disk_barrier_failures;
2805 u64 target = bctl->sys.target;
2807 num_tolerated_disk_barrier_failures =
2808 btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
2809 if (num_tolerated_disk_barrier_failures > 0 &&
2811 (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
2812 BTRFS_AVAIL_ALLOC_BIT_SINGLE)))
2813 num_tolerated_disk_barrier_failures = 0;
2814 else if (num_tolerated_disk_barrier_failures > 1 &&
2816 (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)))
2817 num_tolerated_disk_barrier_failures = 1;
2819 fs_info->num_tolerated_disk_barrier_failures =
2820 num_tolerated_disk_barrier_failures;
2823 ret = insert_balance_item(fs_info->tree_root, bctl);
2824 if (ret && ret != -EEXIST)
2827 if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
2828 BUG_ON(ret == -EEXIST);
2829 set_balance_control(bctl);
2831 BUG_ON(ret != -EEXIST);
2832 spin_lock(&fs_info->balance_lock);
2833 update_balance_args(bctl);
2834 spin_unlock(&fs_info->balance_lock);
2837 atomic_inc(&fs_info->balance_running);
2838 mutex_unlock(&fs_info->balance_mutex);
2840 ret = __btrfs_balance(fs_info);
2842 mutex_lock(&fs_info->balance_mutex);
2843 atomic_dec(&fs_info->balance_running);
2846 memset(bargs, 0, sizeof(*bargs));
2847 update_ioctl_balance_args(fs_info, 0, bargs);
2850 if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
2851 balance_need_close(fs_info)) {
2852 __cancel_balance(fs_info);
2855 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
2856 fs_info->num_tolerated_disk_barrier_failures =
2857 btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
2860 wake_up(&fs_info->balance_wait_q);
2864 if (bctl->flags & BTRFS_BALANCE_RESUME)
2865 __cancel_balance(fs_info);
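/*
* Kernel thread used to resume an interrupted balance; it simply
* re-enters btrfs_balance() with the balance_ctl that
* btrfs_recover_balance() restored from disk.
*/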
2871 static int balance_kthread(void *data)
2873 struct btrfs_fs_info *fs_info = data;
2876 mutex_lock(&fs_info->volume_mutex);
2877 mutex_lock(&fs_info->balance_mutex);
2879 if (fs_info->balance_ctl) {
2880 printk(KERN_INFO "btrfs: continuing balance\n");
2881 ret = btrfs_balance(fs_info->balance_ctl, NULL);
2884 mutex_unlock(&fs_info->balance_mutex);
2885 mutex_unlock(&fs_info->volume_mutex);
2890 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
2892 struct task_struct *tsk;
2894 spin_lock(&fs_info->balance_lock);
2895 if (!fs_info->balance_ctl) {
2896 spin_unlock(&fs_info->balance_lock);
2899 spin_unlock(&fs_info->balance_lock);
2901 if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
2902 printk(KERN_INFO "btrfs: force skipping balance\n");
2906 tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
2908 return PTR_ERR(tsk);
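/*
* Read the balance item from the tree root at mount time and, if one
* exists, rebuild the in-memory balance control with
* BTRFS_BALANCE_RESUME set so the balance can be continued later.
*/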
2913 int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
2915 struct btrfs_balance_control *bctl;
2916 struct btrfs_balance_item *item;
2917 struct btrfs_disk_balance_args disk_bargs;
2918 struct btrfs_path *path;
2919 struct extent_buffer *leaf;
2920 struct btrfs_key key;
2923 path = btrfs_alloc_path();
2927 key.objectid = BTRFS_BALANCE_OBJECTID;
2928 key.type = BTRFS_BALANCE_ITEM_KEY;
2931 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
2934 if (ret > 0) { /* ret = -ENOENT; */
2939 bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
2945 leaf = path->nodes[0];
2946 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
2948 bctl->fs_info = fs_info;
2949 bctl->flags = btrfs_balance_flags(leaf, item);
2950 bctl->flags |= BTRFS_BALANCE_RESUME;
2952 btrfs_balance_data(leaf, item, &disk_bargs);
2953 btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
2954 btrfs_balance_meta(leaf, item, &disk_bargs);
2955 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
2956 btrfs_balance_sys(leaf, item, &disk_bargs);
2957 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
2959 mutex_lock(&fs_info->volume_mutex);
2960 mutex_lock(&fs_info->balance_mutex);
2962 set_balance_control(bctl);
2964 mutex_unlock(&fs_info->balance_mutex);
2965 mutex_unlock(&fs_info->volume_mutex);
2967 btrfs_free_path(path);
2971 int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
2975 mutex_lock(&fs_info->balance_mutex);
2976 if (!fs_info->balance_ctl) {
2977 mutex_unlock(&fs_info->balance_mutex);
2981 if (atomic_read(&fs_info->balance_running)) {
2982 atomic_inc(&fs_info->balance_pause_req);
2983 mutex_unlock(&fs_info->balance_mutex);
2985 wait_event(fs_info->balance_wait_q,
2986 atomic_read(&fs_info->balance_running) == 0);
2988 mutex_lock(&fs_info->balance_mutex);
2989 /* we are good with balance_ctl ripped off from under us */
2990 BUG_ON(atomic_read(&fs_info->balance_running));
2991 atomic_dec(&fs_info->balance_pause_req);
2996 mutex_unlock(&fs_info->balance_mutex);
3000 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
3002 mutex_lock(&fs_info->balance_mutex);
3003 if (!fs_info->balance_ctl) {
3004 mutex_unlock(&fs_info->balance_mutex);
3008 atomic_inc(&fs_info->balance_cancel_req);
3010 * if we are running just wait and return, balance item is
3011 * deleted in btrfs_balance in this case
3013 if (atomic_read(&fs_info->balance_running)) {
3014 mutex_unlock(&fs_info->balance_mutex);
3015 wait_event(fs_info->balance_wait_q,
3016 atomic_read(&fs_info->balance_running) == 0);
3017 mutex_lock(&fs_info->balance_mutex);
3019 /* __cancel_balance needs volume_mutex */
3020 mutex_unlock(&fs_info->balance_mutex);
3021 mutex_lock(&fs_info->volume_mutex);
3022 mutex_lock(&fs_info->balance_mutex);
3024 if (fs_info->balance_ctl)
3025 __cancel_balance(fs_info);
3027 mutex_unlock(&fs_info->volume_mutex);
3030 BUG_ON(fs_info->balance_ctl || atomic_read(&fs_info->balance_running));
3031 atomic_dec(&fs_info->balance_cancel_req);
3032 mutex_unlock(&fs_info->balance_mutex);
3037 * shrinking a device means finding all of the device extents past
3038 * the new size, and then following the back refs to the chunks.
3039 * The chunk relocation code actually frees the device extent
3041 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
3043 struct btrfs_trans_handle *trans;
3044 struct btrfs_root *root = device->dev_root;
3045 struct btrfs_dev_extent *dev_extent = NULL;
3046 struct btrfs_path *path;
3054 bool retried = false;
3055 struct extent_buffer *l;
3056 struct btrfs_key key;
3057 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3058 u64 old_total = btrfs_super_total_bytes(super_copy);
3059 u64 old_size = device->total_bytes;
3060 u64 diff = device->total_bytes - new_size;
3062 if (new_size >= device->total_bytes)
3065 path = btrfs_alloc_path();
3073 device->total_bytes = new_size;
3074 if (device->writeable) {
3075 device->fs_devices->total_rw_bytes -= diff;
3076 spin_lock(&root->fs_info->free_chunk_lock);
3077 root->fs_info->free_chunk_space -= diff;
3078 spin_unlock(&root->fs_info->free_chunk_lock);
3080 unlock_chunks(root);
3083 key.objectid = device->devid;
3084 key.offset = (u64)-1;
3085 key.type = BTRFS_DEV_EXTENT_KEY;
3088 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3092 ret = btrfs_previous_item(root, path, 0, key.type);
3097 btrfs_release_path(path);
3102 slot = path->slots[0];
3103 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
3105 if (key.objectid != device->devid) {
3106 btrfs_release_path(path);
3110 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
3111 length = btrfs_dev_extent_length(l, dev_extent);
3113 if (key.offset + length <= new_size) {
3114 btrfs_release_path(path);
3118 chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
3119 chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
3120 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3121 btrfs_release_path(path);
3123 ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
3125 if (ret && ret != -ENOSPC)
3129 } while (key.offset-- > 0);
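/*
* Chunks whose relocation hit -ENOSPC were counted in "failed"; go
* over the device extents once more in case earlier relocations
* freed enough space, and give up for good if a second pass still
* fails.
*/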
3131 if (failed && !retried) {
3135 } else if (failed && retried) {
3139 device->total_bytes = old_size;
3140 if (device->writeable)
3141 device->fs_devices->total_rw_bytes += diff;
3142 spin_lock(&root->fs_info->free_chunk_lock);
3143 root->fs_info->free_chunk_space += diff;
3144 spin_unlock(&root->fs_info->free_chunk_lock);
3145 unlock_chunks(root);
3149 /* Shrinking succeeded, else we would be at "done". */
3150 trans = btrfs_start_transaction(root, 0);
3151 if (IS_ERR(trans)) {
3152 ret = PTR_ERR(trans);
3158 device->disk_total_bytes = new_size;
3159 /* Now btrfs_update_device() will change the on-disk size. */
3160 ret = btrfs_update_device(trans, device);
3162 unlock_chunks(root);
3163 btrfs_end_transaction(trans, root);
3166 WARN_ON(diff > old_total);
3167 btrfs_set_super_total_bytes(super_copy, old_total - diff);
3168 unlock_chunks(root);
3169 btrfs_end_transaction(trans, root);
3171 btrfs_free_path(path);
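/*
* Append a (disk key, chunk item) pair to the superblock's
* sys_chunk_array, so that system chunks can be found at mount time
* before the chunk tree itself is readable.
*/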
3175 static int btrfs_add_system_chunk(struct btrfs_root *root,
3176 struct btrfs_key *key,
3177 struct btrfs_chunk *chunk, int item_size)
3179 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3180 struct btrfs_disk_key disk_key;
3184 array_size = btrfs_super_sys_array_size(super_copy);
3185 if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
3188 ptr = super_copy->sys_chunk_array + array_size;
3189 btrfs_cpu_key_to_disk(&disk_key, key);
3190 memcpy(ptr, &disk_key, sizeof(disk_key));
3191 ptr += sizeof(disk_key);
3192 memcpy(ptr, chunk, item_size);
3193 item_size += sizeof(disk_key);
3194 btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
3199 * sort the devices in descending order by max_avail, total_avail
3201 static int btrfs_cmp_device_info(const void *a, const void *b)
3203 const struct btrfs_device_info *di_a = a;
3204 const struct btrfs_device_info *di_b = b;
3206 if (di_a->max_avail > di_b->max_avail)
return -1;
3208 if (di_a->max_avail < di_b->max_avail)
return 1;
3210 if (di_a->total_avail > di_b->total_avail)
return -1;
3212 if (di_a->total_avail < di_b->total_avail)
return 1;
return 0;
3217 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3218 struct btrfs_root *extent_root,
3219 struct map_lookup **map_ret,
3220 u64 *num_bytes_out, u64 *stripe_size_out,
3221 u64 start, u64 type)
3223 struct btrfs_fs_info *info = extent_root->fs_info;
3224 struct btrfs_fs_devices *fs_devices = info->fs_devices;
3225 struct list_head *cur;
3226 struct map_lookup *map = NULL;
3227 struct extent_map_tree *em_tree;
3228 struct extent_map *em;
3229 struct btrfs_device_info *devices_info = NULL;
3231 int num_stripes; /* total number of stripes to allocate */
3232 int sub_stripes; /* sub_stripes info for map */
3233 int dev_stripes; /* stripes per dev */
3234 int devs_max; /* max devs to use */
3235 int devs_min; /* min devs needed */
3236 int devs_increment; /* ndevs has to be a multiple of this */
3237 int ncopies; /* how many copies of the data there are */
3239 u64 max_stripe_size;
3247 BUG_ON(!alloc_profile_is_valid(type, 0));
3249 if (list_empty(&fs_devices->alloc_list))
3256 devs_max = 0; /* 0 == as many as possible */
3260 * define the properties of each RAID type.
3261 * FIXME: move this to a global table and use it in all RAID
* code
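/*
* The (elided) per-profile settings follow the usual pattern: DUP
* keeps both stripes on one device (dev_stripes = 2, devs_max = 1),
* RAID0 just wants devs_min = 2, RAID1 mirrors across exactly two
* devices (ncopies = 2), and RAID10 stripes mirrored pairs
* (sub_stripes = 2, devs_increment = 2, devs_min = 4).
*/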
3264 if (type & (BTRFS_BLOCK_GROUP_DUP)) {
3268 } else if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
3270 } else if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
3275 } else if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
3284 if (type & BTRFS_BLOCK_GROUP_DATA) {
3285 max_stripe_size = 1024 * 1024 * 1024;
3286 max_chunk_size = 10 * max_stripe_size;
3287 } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
3288 /* for larger filesystems, use larger metadata chunks */
3289 if (fs_devices->total_rw_bytes > 50ULL * 1024 * 1024 * 1024)
3290 max_stripe_size = 1024 * 1024 * 1024;
3292 max_stripe_size = 256 * 1024 * 1024;
3293 max_chunk_size = max_stripe_size;
3294 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
3295 max_stripe_size = 32 * 1024 * 1024;
3296 max_chunk_size = 2 * max_stripe_size;
3298 printk(KERN_ERR "btrfs: invalid chunk type 0x%llx requested\n",
3303 /* we don't want a chunk larger than 10% of writeable space */
3304 max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
3307 devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices,
3312 cur = fs_devices->alloc_list.next;
3315 * in the first pass through the devices list, we gather information
3316 * about the available holes on each device.
3319 while (cur != &fs_devices->alloc_list) {
3320 struct btrfs_device *device;
3324 device = list_entry(cur, struct btrfs_device, dev_alloc_list);
3328 if (!device->writeable) {
3330 "btrfs: read-only device in alloc_list\n");
3335 if (!device->in_fs_metadata)
3338 if (device->total_bytes > device->bytes_used)
3339 total_avail = device->total_bytes - device->bytes_used;
3343 /* If there is no space on this device, skip it. */
3344 if (total_avail == 0)
3347 ret = find_free_dev_extent(device,
3348 max_stripe_size * dev_stripes,
3349 &dev_offset, &max_avail);
3350 if (ret && ret != -ENOSPC)
3354 max_avail = max_stripe_size * dev_stripes;
3356 if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
3359 devices_info[ndevs].dev_offset = dev_offset;
3360 devices_info[ndevs].max_avail = max_avail;
3361 devices_info[ndevs].total_avail = total_avail;
3362 devices_info[ndevs].dev = device;
3367 * now sort the devices by hole size / available space
3369 sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
3370 btrfs_cmp_device_info, NULL);
3372 /* round down to number of usable stripes */
3373 ndevs -= ndevs % devs_increment;
3375 if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
3380 if (devs_max && ndevs > devs_max)
3383 * the primary goal is to maximize the number of stripes, so use as many
3384 * devices as possible, even if the stripes are not maximum sized.
3386 stripe_size = devices_info[ndevs-1].max_avail;
3387 num_stripes = ndevs * dev_stripes;
3389 if (stripe_size * ndevs > max_chunk_size * ncopies) {
3390 stripe_size = max_chunk_size * ncopies;
3391 do_div(stripe_size, ndevs);
3394 do_div(stripe_size, dev_stripes);
3396 /* align to BTRFS_STRIPE_LEN */
3397 do_div(stripe_size, BTRFS_STRIPE_LEN);
3398 stripe_size *= BTRFS_STRIPE_LEN;
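/*
* Example: a RAID0 data chunk over 10 devices with ~2GB holes on
* each is clamped by the 10GB max_chunk_size: stripe_size becomes
* 10GB / 10 = 1GB per device, and the chunk exposes
* stripe_size * (num_stripes / ncopies) = 10GB of logical space.
*/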
3400 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
3405 map->num_stripes = num_stripes;
3407 for (i = 0; i < ndevs; ++i) {
3408 for (j = 0; j < dev_stripes; ++j) {
3409 int s = i * dev_stripes + j;
3410 map->stripes[s].dev = devices_info[i].dev;
3411 map->stripes[s].physical = devices_info[i].dev_offset +
3415 map->sector_size = extent_root->sectorsize;
3416 map->stripe_len = BTRFS_STRIPE_LEN;
3417 map->io_align = BTRFS_STRIPE_LEN;
3418 map->io_width = BTRFS_STRIPE_LEN;
3420 map->sub_stripes = sub_stripes;
3423 num_bytes = stripe_size * (num_stripes / ncopies);
3425 *stripe_size_out = stripe_size;
3426 *num_bytes_out = num_bytes;
3428 trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);
3430 em = alloc_extent_map();
3435 em->bdev = (struct block_device *)map;
3437 em->len = num_bytes;
3438 em->block_start = 0;
3439 em->block_len = em->len;
3441 em_tree = &extent_root->fs_info->mapping_tree.map_tree;
3442 write_lock(&em_tree->lock);
3443 ret = add_extent_mapping(em_tree, em);
3444 write_unlock(&em_tree->lock);
3445 free_extent_map(em);
3449 ret = btrfs_make_block_group(trans, extent_root, 0, type,
3450 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3455 for (i = 0; i < map->num_stripes; ++i) {
3456 struct btrfs_device *device;
3459 device = map->stripes[i].dev;
3460 dev_offset = map->stripes[i].physical;
3462 ret = btrfs_alloc_dev_extent(trans, device,
3463 info->chunk_root->root_key.objectid,
3464 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3465 start, dev_offset, stripe_size);
3467 btrfs_abort_transaction(trans, extent_root, ret);
3472 kfree(devices_info);
3477 kfree(devices_info);
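/*
* Second phase of chunk allocation: charge the new stripes to each
* device, build the on-disk chunk item and insert it into the chunk
* tree, and mirror SYSTEM chunks into the superblock's array as
* well.
*/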
3481 static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
3482 struct btrfs_root *extent_root,
3483 struct map_lookup *map, u64 chunk_offset,
3484 u64 chunk_size, u64 stripe_size)
3487 struct btrfs_key key;
3488 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
3489 struct btrfs_device *device;
3490 struct btrfs_chunk *chunk;
3491 struct btrfs_stripe *stripe;
3492 size_t item_size = btrfs_chunk_item_size(map->num_stripes);
3496 chunk = kzalloc(item_size, GFP_NOFS);
3501 while (index < map->num_stripes) {
3502 device = map->stripes[index].dev;
3503 device->bytes_used += stripe_size;
3504 ret = btrfs_update_device(trans, device);
3510 spin_lock(&extent_root->fs_info->free_chunk_lock);
3511 extent_root->fs_info->free_chunk_space -= (stripe_size *
3513 spin_unlock(&extent_root->fs_info->free_chunk_lock);
3516 stripe = &chunk->stripe;
3517 while (index < map->num_stripes) {
3518 device = map->stripes[index].dev;
3519 dev_offset = map->stripes[index].physical;
3521 btrfs_set_stack_stripe_devid(stripe, device->devid);
3522 btrfs_set_stack_stripe_offset(stripe, dev_offset);
3523 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
3528 btrfs_set_stack_chunk_length(chunk, chunk_size);
3529 btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
3530 btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
3531 btrfs_set_stack_chunk_type(chunk, map->type);
3532 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
3533 btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
3534 btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
3535 btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
3536 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
3538 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3539 key.type = BTRFS_CHUNK_ITEM_KEY;
3540 key.offset = chunk_offset;
3542 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
3544 if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
3546 * TODO: Cleanup of inserted chunk root in case of
3549 ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
3559 * Chunk allocation falls into two parts. The first part does the work
3560 * that makes the newly allocated chunk usable, but does not touch the
3561 * chunk tree. The second part does the work that requires modifying
3562 * the chunk tree. This division is important for the bootstrap process
3563 * of adding storage to a seed btrfs.
3565 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3566 struct btrfs_root *extent_root, u64 type)
3571 struct map_lookup *map;
3572 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
3575 ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3580 ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
3581 &stripe_size, chunk_offset, type);
3585 ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
3586 chunk_size, stripe_size);
3592 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
3593 struct btrfs_root *root,
3594 struct btrfs_device *device)
3597 u64 sys_chunk_offset;
3601 u64 sys_stripe_size;
3603 struct map_lookup *map;
3604 struct map_lookup *sys_map;
3605 struct btrfs_fs_info *fs_info = root->fs_info;
3606 struct btrfs_root *extent_root = fs_info->extent_root;
3609 ret = find_next_chunk(fs_info->chunk_root,
3610 BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset);
3614 alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
3615 fs_info->avail_metadata_alloc_bits;
3616 alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
3618 ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
3619 &stripe_size, chunk_offset, alloc_profile);
3623 sys_chunk_offset = chunk_offset + chunk_size;
3625 alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM |
3626 fs_info->avail_system_alloc_bits;
3627 alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
3629 ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
3630 &sys_chunk_size, &sys_stripe_size,
3631 sys_chunk_offset, alloc_profile);
3633 btrfs_abort_transaction(trans, root, ret);
3637 ret = btrfs_add_device(trans, fs_info->chunk_root, device);
3639 btrfs_abort_transaction(trans, root, ret);
3644 * Modifying the chunk tree requires allocating new blocks from both
3645 * the system block group and the metadata block group, so we can
3646 * only perform operations that modify the chunk tree after both
3647 * block groups have been created.
3649 ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
3650 chunk_size, stripe_size);
3652 btrfs_abort_transaction(trans, root, ret);
3656 ret = __finish_chunk_alloc(trans, extent_root, sys_map,
3657 sys_chunk_offset, sys_chunk_size,
3660 btrfs_abort_transaction(trans, root, ret);
3667 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
3669 struct extent_map *em;
3670 struct map_lookup *map;
3671 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
3675 read_lock(&map_tree->map_tree.lock);
3676 em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
3677 read_unlock(&map_tree->map_tree.lock);
3681 if (btrfs_test_opt(root, DEGRADED)) {
3682 free_extent_map(em);
3686 map = (struct map_lookup *)em->bdev;
3687 for (i = 0; i < map->num_stripes; i++) {
3688 if (!map->stripes[i].dev->writeable) {
3693 free_extent_map(em);
3697 void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
3699 extent_map_tree_init(&tree->map_tree);
3702 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
3704 struct extent_map *em;
3707 write_lock(&tree->map_tree.lock);
3708 em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
3710 remove_extent_mapping(&tree->map_tree, em);
3711 write_unlock(&tree->map_tree.lock);
3716 free_extent_map(em);
3717 /* once for the tree */
3718 free_extent_map(em);
3722 int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
3724 struct extent_map *em;
3725 struct map_lookup *map;
3726 struct extent_map_tree *em_tree = &map_tree->map_tree;
3729 read_lock(&em_tree->lock);
3730 em = lookup_extent_mapping(em_tree, logical, len);
3731 read_unlock(&em_tree->lock);
3734 BUG_ON(em->start > logical || em->start + em->len < logical);
3735 map = (struct map_lookup *)em->bdev;
3736 if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
3737 ret = map->num_stripes;
3738 else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
3739 ret = map->sub_stripes;
3742 free_extent_map(em);
3746 static int find_live_mirror(struct map_lookup *map, int first, int num,
int optimal)
3750 if (map->stripes[optimal].dev->bdev)
return optimal;
3752 for (i = first; i < first + num; i++) {
3753 if (map->stripes[i].dev->bdev)
return i;
}
3756 /* we couldn't find one that doesn't fail. Just return something
3757 * and the io error handling code will clean up eventually
*/
return optimal;
3762 static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
3763 u64 logical, u64 *length,
3764 struct btrfs_bio **bbio_ret,
int mirror_num)
3767 struct extent_map *em;
3768 struct map_lookup *map;
3769 struct extent_map_tree *em_tree = &map_tree->map_tree;
3772 u64 stripe_end_offset;
3781 struct btrfs_bio *bbio = NULL;
3783 read_lock(&em_tree->lock);
3784 em = lookup_extent_mapping(em_tree, logical, *length);
3785 read_unlock(&em_tree->lock);
3788 printk(KERN_CRIT "btrfs: unable to find logical %llu len %llu\n",
3789 (unsigned long long)logical,
3790 (unsigned long long)*length);
3794 BUG_ON(em->start > logical || em->start + em->len < logical);
3795 map = (struct map_lookup *)em->bdev;
3796 offset = logical - em->start;
3798 if (mirror_num > map->num_stripes)
3803 * stripe_nr counts the total number of stripes we have to stride
3804 * to get to this block
3806 do_div(stripe_nr, map->stripe_len);
3808 stripe_offset = stripe_nr * map->stripe_len;
3809 BUG_ON(offset < stripe_offset);
3811 /* stripe_offset is the offset of this block in its stripe*/
3812 stripe_offset = offset - stripe_offset;
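/*
* Example: with the 64K BTRFS_STRIPE_LEN, an offset of 200K into
* the chunk yields stripe_nr = 3 and stripe_offset = 8K, i.e. the
* byte lives 8K into the fourth stripe.
*/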
3814 if (rw & REQ_DISCARD)
3815 *length = min_t(u64, em->len - offset, *length);
3816 else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
3817 /* we limit the length of each bio to what fits in a stripe */
3818 *length = min_t(u64, em->len - offset,
3819 map->stripe_len - stripe_offset);
3821 *length = em->len - offset;
3829 stripe_nr_orig = stripe_nr;
3830 stripe_nr_end = (offset + *length + map->stripe_len - 1) &
3831 (~(map->stripe_len - 1));
3832 do_div(stripe_nr_end, map->stripe_len);
3833 stripe_end_offset = stripe_nr_end * map->stripe_len -
3835 if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3836 if (rw & REQ_DISCARD)
3837 num_stripes = min_t(u64, map->num_stripes,
3838 stripe_nr_end - stripe_nr_orig);
3839 stripe_index = do_div(stripe_nr, map->num_stripes);
3840 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
3841 if (rw & (REQ_WRITE | REQ_DISCARD))
3842 num_stripes = map->num_stripes;
3843 else if (mirror_num)
3844 stripe_index = mirror_num - 1;
3846 stripe_index = find_live_mirror(map, 0,
3848 current->pid % map->num_stripes);
3849 mirror_num = stripe_index + 1;
3852 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
3853 if (rw & (REQ_WRITE | REQ_DISCARD)) {
3854 num_stripes = map->num_stripes;
3855 } else if (mirror_num) {
3856 stripe_index = mirror_num - 1;
3861 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3862 int factor = map->num_stripes / map->sub_stripes;
3864 stripe_index = do_div(stripe_nr, factor);
3865 stripe_index *= map->sub_stripes;
3868 num_stripes = map->sub_stripes;
3869 else if (rw & REQ_DISCARD)
3870 num_stripes = min_t(u64, map->sub_stripes *
3871 (stripe_nr_end - stripe_nr_orig),
3873 else if (mirror_num)
3874 stripe_index += mirror_num - 1;
3876 int old_stripe_index = stripe_index;
3877 stripe_index = find_live_mirror(map, stripe_index,
3878 map->sub_stripes, stripe_index +
3879 current->pid % map->sub_stripes);
3880 mirror_num = stripe_index - old_stripe_index + 1;
3884 * after this do_div call, stripe_nr is the number of stripes
3885 * on this device we have to walk to find the data, and
3886 * stripe_index is the number of our device in the stripe array
3888 stripe_index = do_div(stripe_nr, map->num_stripes);
3889 mirror_num = stripe_index + 1;
3891 BUG_ON(stripe_index >= map->num_stripes);
3893 bbio = kzalloc(btrfs_bio_size(num_stripes), GFP_NOFS);
3898 atomic_set(&bbio->error, 0);
3900 if (rw & REQ_DISCARD) {
3902 int sub_stripes = 0;
3903 u64 stripes_per_dev = 0;
3904 u32 remaining_stripes = 0;
3905 u32 last_stripe = 0;
3908 (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
3909 if (map->type & BTRFS_BLOCK_GROUP_RAID0)
3912 sub_stripes = map->sub_stripes;
3914 factor = map->num_stripes / sub_stripes;
3915 stripes_per_dev = div_u64_rem(stripe_nr_end -
3918 &remaining_stripes);
3919 div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
3920 last_stripe *= sub_stripes;
3923 for (i = 0; i < num_stripes; i++) {
3924 bbio->stripes[i].physical =
3925 map->stripes[stripe_index].physical +
3926 stripe_offset + stripe_nr * map->stripe_len;
3927 bbio->stripes[i].dev = map->stripes[stripe_index].dev;
3929 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
3930 BTRFS_BLOCK_GROUP_RAID10)) {
3931 bbio->stripes[i].length = stripes_per_dev *
3934 if (i / sub_stripes < remaining_stripes)
3935 bbio->stripes[i].length +=
3939 * Special for the first stripe and
* the last stripe:
*
3942 * |-------|...|-------|
*     |----------|
*    off     end_off
3946 if (i < sub_stripes)
3947 bbio->stripes[i].length -=
3950 if (stripe_index >= last_stripe &&
3951 stripe_index <= (last_stripe +
3953 bbio->stripes[i].length -=
3956 if (i == sub_stripes - 1)
3959 bbio->stripes[i].length = *length;
3962 if (stripe_index == map->num_stripes) {
3963 /* This could only happen for RAID0/10 */
3969 for (i = 0; i < num_stripes; i++) {
3970 bbio->stripes[i].physical =
3971 map->stripes[stripe_index].physical +
3973 stripe_nr * map->stripe_len;
3974 bbio->stripes[i].dev =
3975 map->stripes[stripe_index].dev;
3980 if (rw & REQ_WRITE) {
3981 if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
3982 BTRFS_BLOCK_GROUP_RAID10 |
3983 BTRFS_BLOCK_GROUP_DUP)) {
3989 bbio->num_stripes = num_stripes;
3990 bbio->max_errors = max_errors;
3991 bbio->mirror_num = mirror_num;
3993 free_extent_map(em);
3997 int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
3998 u64 logical, u64 *length,
3999 struct btrfs_bio **bbio_ret, int mirror_num)
4001 return __btrfs_map_block(map_tree, rw, logical, length, bbio_ret,
mirror_num);
4005 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
4006 u64 chunk_start, u64 physical, u64 devid,
4007 u64 **logical, int *naddrs, int *stripe_len)
4009 struct extent_map_tree *em_tree = &map_tree->map_tree;
4010 struct extent_map *em;
4011 struct map_lookup *map;
4018 read_lock(&em_tree->lock);
4019 em = lookup_extent_mapping(em_tree, chunk_start, 1);
4020 read_unlock(&em_tree->lock);
4022 BUG_ON(!em || em->start != chunk_start);
4023 map = (struct map_lookup *)em->bdev;
4026 if (map->type & BTRFS_BLOCK_GROUP_RAID10)
4027 do_div(length, map->num_stripes / map->sub_stripes);
4028 else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
4029 do_div(length, map->num_stripes);
4031 buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
4032 BUG_ON(!buf); /* -ENOMEM */
4034 for (i = 0; i < map->num_stripes; i++) {
4035 if (devid && map->stripes[i].dev->devid != devid)
4037 if (map->stripes[i].physical > physical ||
4038 map->stripes[i].physical + length <= physical)
4041 stripe_nr = physical - map->stripes[i].physical;
4042 do_div(stripe_nr, map->stripe_len);
4044 if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
4045 stripe_nr = stripe_nr * map->num_stripes + i;
4046 do_div(stripe_nr, map->sub_stripes);
4047 } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
4048 stripe_nr = stripe_nr * map->num_stripes + i;
4050 bytenr = chunk_start + stripe_nr * map->stripe_len;
4051 WARN_ON(nr >= map->num_stripes);
4052 for (j = 0; j < nr; j++) {
4053 if (buf[j] == bytenr)
4057 WARN_ON(nr >= map->num_stripes);
4064 *stripe_len = map->stripe_len;
4066 free_extent_map(em);
4070 static void *merge_stripe_index_into_bio_private(void *bi_private,
4071 unsigned int stripe_index)
4074 * with single, dup, RAID0, RAID1 and RAID10, stripe_index is
* small enough to fit into the two low bits of the 4-byte-aligned
* bi_private pointer.
4076 * The alternative solution (instead of stealing bits from the
4077 * pointer) would be to allocate an intermediate structure
4078 * that contains the old private pointer plus the stripe_index.
4080 BUG_ON((((uintptr_t)bi_private) & 3) != 0);
4081 BUG_ON(stripe_index > 3);
4082 return (void *)(((uintptr_t)bi_private) | stripe_index);
4085 static struct btrfs_bio *extract_bbio_from_bio_private(void *bi_private)
4087 return (struct btrfs_bio *)(((uintptr_t)bi_private) & ~((uintptr_t)3));
4090 static unsigned int extract_stripe_index_from_bio_private(void *bi_private)
4092 return (unsigned int)((uintptr_t)bi_private) & 3;
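/*
* Completion callback for every bio submitted by btrfs_map_bio. On
* error it bumps the per-device statistics; when the last stripe bio
* finishes it restores the original bio's private data and ends it,
* reporting failure only if more stripes failed than the profile
* tolerates (bbio->max_errors).
*/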
4095 static void btrfs_end_bio(struct bio *bio, int err)
4097 struct btrfs_bio *bbio = extract_bbio_from_bio_private(bio->bi_private);
4098 int is_orig_bio = 0;
4101 atomic_inc(&bbio->error);
4102 if (err == -EIO || err == -EREMOTEIO) {
4103 unsigned int stripe_index =
4104 extract_stripe_index_from_bio_private(
4106 struct btrfs_device *dev;
4108 BUG_ON(stripe_index >= bbio->num_stripes);
4109 dev = bbio->stripes[stripe_index].dev;
4111 if (bio->bi_rw & WRITE)
4112 btrfs_dev_stat_inc(dev,
4113 BTRFS_DEV_STAT_WRITE_ERRS);
4115 btrfs_dev_stat_inc(dev,
4116 BTRFS_DEV_STAT_READ_ERRS);
4117 if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
4118 btrfs_dev_stat_inc(dev,
4119 BTRFS_DEV_STAT_FLUSH_ERRS);
4120 btrfs_dev_stat_print_on_error(dev);
4125 if (bio == bbio->orig_bio)
4128 if (atomic_dec_and_test(&bbio->stripes_pending)) {
4131 bio = bbio->orig_bio;
4133 bio->bi_private = bbio->private;
4134 bio->bi_end_io = bbio->end_io;
4135 bio->bi_bdev = (struct block_device *)
4136 (unsigned long)bbio->mirror_num;
4137 /* only send an error to the higher layers if it is
4138 * beyond the tolerance of the multi-bio
4140 if (atomic_read(&bbio->error) > bbio->max_errors) {
4144 * this bio is actually up to date, we didn't
4145 * go over the max number of errors
4147 set_bit(BIO_UPTODATE, &bio->bi_flags);
4152 bio_endio(bio, err);
4153 } else if (!is_orig_bio) {
4158 struct async_sched {
4161 struct btrfs_fs_info *info;
4162 struct btrfs_work work;
4166 * see run_scheduled_bios for a description of why bios are collected for
* async submission.
4169 * This will add one bio to the pending list for a device and make sure
4170 * the work struct is scheduled.
4172 static noinline void schedule_bio(struct btrfs_root *root,
4173 struct btrfs_device *device,
4174 int rw, struct bio *bio)
4176 int should_queue = 1;
4177 struct btrfs_pending_bios *pending_bios;
4179 /* don't bother with additional async steps for reads, right now */
4180 if (!(rw & REQ_WRITE)) {
4182 btrfsic_submit_bio(rw, bio);
4188 * nr_async_bios allows us to reliably return congestion to the
4189 * higher layers. Otherwise, the async bio makes it appear we have
4190 * made progress against dirty pages when we've really just put it
4191 * on a queue for later
4193 atomic_inc(&root->fs_info->nr_async_bios);
4194 WARN_ON(bio->bi_next);
4195 bio->bi_next = NULL;
4198 spin_lock(&device->io_lock);
4199 if (bio->bi_rw & REQ_SYNC)
4200 pending_bios = &device->pending_sync_bios;
4202 pending_bios = &device->pending_bios;
4204 if (pending_bios->tail)
4205 pending_bios->tail->bi_next = bio;
4207 pending_bios->tail = bio;
4208 if (!pending_bios->head)
4209 pending_bios->head = bio;
4210 if (device->running_pending)
4213 spin_unlock(&device->io_lock);
4216 btrfs_queue_worker(&root->fs_info->submit_workers,
&device->work);
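/*
* Top-level bio submission: map the logical address to one or more
* physical stripes, clone the original bio for every stripe but the
* last, tag each clone's bi_private with its stripe index, and
* submit them either directly or via the async helper above.
*/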
4220 int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
4221 int mirror_num, int async_submit)
4223 struct btrfs_mapping_tree *map_tree;
4224 struct btrfs_device *dev;
4225 struct bio *first_bio = bio;
4226 u64 logical = (u64)bio->bi_sector << 9;
4232 struct btrfs_bio *bbio = NULL;
4234 length = bio->bi_size;
4235 map_tree = &root->fs_info->mapping_tree;
4236 map_length = length;
4238 ret = btrfs_map_block(map_tree, rw, logical, &map_length, &bbio,
4240 if (ret) /* -ENOMEM */
4243 total_devs = bbio->num_stripes;
4244 if (map_length < length) {
4245 printk(KERN_CRIT "btrfs: mapping failed logical %llu bio len %llu "
4246 "len %llu\n", (unsigned long long)logical,
4247 (unsigned long long)length,
4248 (unsigned long long)map_length);
4252 bbio->orig_bio = first_bio;
4253 bbio->private = first_bio->bi_private;
4254 bbio->end_io = first_bio->bi_end_io;
4255 atomic_set(&bbio->stripes_pending, bbio->num_stripes);
4257 while (dev_nr < total_devs) {
4258 if (dev_nr < total_devs - 1) {
4259 bio = bio_clone(first_bio, GFP_NOFS);
4260 BUG_ON(!bio); /* -ENOMEM */
4264 bio->bi_private = bbio;
4265 bio->bi_private = merge_stripe_index_into_bio_private(
4266 bio->bi_private, (unsigned int)dev_nr);
4267 bio->bi_end_io = btrfs_end_bio;
4268 bio->bi_sector = bbio->stripes[dev_nr].physical >> 9;
4269 dev = bbio->stripes[dev_nr].dev;
4270 if (dev && dev->bdev && (rw != WRITE || dev->writeable)) {
4272 struct rcu_string *name;
4275 name = rcu_dereference(dev->name);
4276 pr_debug("btrfs_map_bio: rw %d, secor=%llu, dev=%lu "
4277 "(%s id %llu), size=%u\n", rw,
4278 (u64)bio->bi_sector, (u_long)dev->bdev->bd_dev,
4279 name->str, dev->devid, bio->bi_size);
4282 bio->bi_bdev = dev->bdev;
4284 schedule_bio(root, dev, rw, bio);
4286 btrfsic_submit_bio(rw, bio);
4288 bio->bi_bdev = root->fs_info->fs_devices->latest_bdev;
4289 bio->bi_sector = logical >> 9;
4290 bio_endio(bio, -EIO);
4297 struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
4300 struct btrfs_device *device;
4301 struct btrfs_fs_devices *cur_devices;
4303 cur_devices = root->fs_info->fs_devices;
4304 while (cur_devices) {
4306 !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
4307 device = __find_device(&cur_devices->devices,
4312 cur_devices = cur_devices->seed;
4317 static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
4318 u64 devid, u8 *dev_uuid)
4320 struct btrfs_device *device;
4321 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
4323 device = kzalloc(sizeof(*device), GFP_NOFS);
4326 list_add(&device->dev_list,
4327 &fs_devices->devices);
4328 device->dev_root = root->fs_info->dev_root;
4329 device->devid = devid;
4330 device->work.func = pending_bios_fn;
4331 device->fs_devices = fs_devices;
4332 device->missing = 1;
4333 fs_devices->num_devices++;
4334 fs_devices->missing_devices++;
4335 spin_lock_init(&device->io_lock);
4336 INIT_LIST_HEAD(&device->dev_alloc_list);
4337 memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
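/*
* Turn an on-disk chunk item into a map_lookup and insert it into
* the fs-wide mapping tree; stripes on devices that have vanished
* are stubbed out with add_missing_dev() when mounting degraded.
*/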
4341 static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
4342 struct extent_buffer *leaf,
4343 struct btrfs_chunk *chunk)
4345 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
4346 struct map_lookup *map;
4347 struct extent_map *em;
4351 u8 uuid[BTRFS_UUID_SIZE];
4356 logical = key->offset;
4357 length = btrfs_chunk_length(leaf, chunk);
4359 read_lock(&map_tree->map_tree.lock);
4360 em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
4361 read_unlock(&map_tree->map_tree.lock);
4363 /* already mapped? */
4364 if (em && em->start <= logical && em->start + em->len > logical) {
4365 free_extent_map(em);
4368 free_extent_map(em);
4371 em = alloc_extent_map();
4374 num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
4375 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
4377 free_extent_map(em);
4381 em->bdev = (struct block_device *)map;
4382 em->start = logical;
4384 em->block_start = 0;
4385 em->block_len = em->len;
4387 map->num_stripes = num_stripes;
4388 map->io_width = btrfs_chunk_io_width(leaf, chunk);
4389 map->io_align = btrfs_chunk_io_align(leaf, chunk);
4390 map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
4391 map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
4392 map->type = btrfs_chunk_type(leaf, chunk);
4393 map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
4394 for (i = 0; i < num_stripes; i++) {
4395 map->stripes[i].physical =
4396 btrfs_stripe_offset_nr(leaf, chunk, i);
4397 devid = btrfs_stripe_devid_nr(leaf, chunk, i);
4398 read_extent_buffer(leaf, uuid, (unsigned long)
4399 btrfs_stripe_dev_uuid_nr(chunk, i),
4401 map->stripes[i].dev = btrfs_find_device(root, devid, uuid,
4403 if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
4405 free_extent_map(em);
4408 if (!map->stripes[i].dev) {
4409 map->stripes[i].dev =
4410 add_missing_dev(root, devid, uuid);
4411 if (!map->stripes[i].dev) {
4413 free_extent_map(em);
4417 map->stripes[i].dev->in_fs_metadata = 1;
4420 write_lock(&map_tree->map_tree.lock);
4421 ret = add_extent_mapping(&map_tree->map_tree, em);
4422 write_unlock(&map_tree->map_tree.lock);
4423 BUG_ON(ret); /* Tree corruption */
4424 free_extent_map(em);
4429 static void fill_device_from_item(struct extent_buffer *leaf,
4430 struct btrfs_dev_item *dev_item,
4431 struct btrfs_device *device)
4435 device->devid = btrfs_device_id(leaf, dev_item);
4436 device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
4437 device->total_bytes = device->disk_total_bytes;
4438 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
4439 device->type = btrfs_device_type(leaf, dev_item);
4440 device->io_align = btrfs_device_io_align(leaf, dev_item);
4441 device->io_width = btrfs_device_io_width(leaf, dev_item);
4442 device->sector_size = btrfs_device_sector_size(leaf, dev_item);
4444 ptr = (unsigned long)btrfs_device_uuid(dev_item);
4445 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
4448 static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
4450 struct btrfs_fs_devices *fs_devices;
4453 BUG_ON(!mutex_is_locked(&uuid_mutex));
4455 fs_devices = root->fs_info->fs_devices->seed;
4456 while (fs_devices) {
4457 if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
4461 fs_devices = fs_devices->seed;
4464 fs_devices = find_fsid(fsid);
4470 fs_devices = clone_fs_devices(fs_devices);
4471 if (IS_ERR(fs_devices)) {
4472 ret = PTR_ERR(fs_devices);
4476 ret = __btrfs_open_devices(fs_devices, FMODE_READ,
4477 root->fs_info->bdev_holder);
4479 free_fs_devices(fs_devices);
4483 if (!fs_devices->seeding) {
4484 __btrfs_close_devices(fs_devices);
4485 free_fs_devices(fs_devices);
4490 fs_devices->seed = root->fs_info->fs_devices->seed;
4491 root->fs_info->fs_devices->seed = fs_devices;
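/*
* Process one dev item from the chunk tree: a device carrying a
* foreign fsid belongs to a seed filesystem and is opened through
* open_seed_devices(); known devices get their in-memory state
* refreshed from the item, and missing ones are tolerated only when
* mounting degraded.
*/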
4496 static int read_one_dev(struct btrfs_root *root,
4497 struct extent_buffer *leaf,
4498 struct btrfs_dev_item *dev_item)
4500 struct btrfs_device *device;
4503 u8 fs_uuid[BTRFS_UUID_SIZE];
4504 u8 dev_uuid[BTRFS_UUID_SIZE];
4506 devid = btrfs_device_id(leaf, dev_item);
4507 read_extent_buffer(leaf, dev_uuid,
4508 (unsigned long)btrfs_device_uuid(dev_item),
4510 read_extent_buffer(leaf, fs_uuid,
4511 (unsigned long)btrfs_device_fsid(dev_item),
4514 if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
4515 ret = open_seed_devices(root, fs_uuid);
4516 if (ret && !btrfs_test_opt(root, DEGRADED))
4520 device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
4521 if (!device || !device->bdev) {
4522 if (!btrfs_test_opt(root, DEGRADED))
4526 printk(KERN_WARNING "btrfs: devid %llu missing\n",
4527 (unsigned long long)devid);
4528 device = add_missing_dev(root, devid, dev_uuid);
4531 } else if (!device->missing) {
4533 * this happens when a device that was properly setup
4534 * in the device info lists suddenly goes bad.
4535 * device->bdev is NULL, and so we have to set
4536 * device->missing to one here
4538 root->fs_info->fs_devices->missing_devices++;
4539 device->missing = 1;
4543 if (device->fs_devices != root->fs_info->fs_devices) {
4544 BUG_ON(device->writeable);
4545 if (device->generation !=
4546 btrfs_device_generation(leaf, dev_item))
4550 fill_device_from_item(leaf, dev_item, device);
4551 device->dev_root = root->fs_info->dev_root;
4552 device->in_fs_metadata = 1;
4553 if (device->writeable) {
4554 device->fs_devices->total_rw_bytes += device->total_bytes;
4555 spin_lock(&root->fs_info->free_chunk_lock);
4556 root->fs_info->free_chunk_space += device->total_bytes -
4558 spin_unlock(&root->fs_info->free_chunk_lock);
4564 int btrfs_read_sys_array(struct btrfs_root *root)
4566 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
4567 struct extent_buffer *sb;
4568 struct btrfs_disk_key *disk_key;
4569 struct btrfs_chunk *chunk;
4571 unsigned long sb_ptr;
4577 struct btrfs_key key;
4579 sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
4580 BTRFS_SUPER_INFO_SIZE);
4583 btrfs_set_buffer_uptodate(sb);
4584 btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
4586 * The sb extent buffer is artificial and just used to read the system array.
4587 * btrfs_set_buffer_uptodate() call does not properly mark all its
4588 * pages up-to-date when the page is larger: extent does not cover the
4589 * whole page and consequently check_page_uptodate does not find all
4590 * the page's extents up-to-date (the hole beyond sb),
4591 * write_extent_buffer then triggers a WARN_ON.
4593 * Regular short extents go through mark_extent_buffer_dirty/writeback cycle,
4594 * but sb spans only this function. Add an explicit SetPageUptodate call
4595 * to silence the warning eg. on PowerPC 64.
4597 if (PAGE_CACHE_SIZE > BTRFS_SUPER_INFO_SIZE)
4598 SetPageUptodate(sb->pages[0]);
4600 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
4601 array_size = btrfs_super_sys_array_size(super_copy);
4603 ptr = super_copy->sys_chunk_array;
4604 sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
4607 while (cur < array_size) {
4608 disk_key = (struct btrfs_disk_key *)ptr;
4609 btrfs_disk_key_to_cpu(&key, disk_key);
4611 len = sizeof(*disk_key); ptr += len;
4615 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
4616 chunk = (struct btrfs_chunk *)sb_ptr;
4617 ret = read_one_chunk(root, &key, sb, chunk);
4620 num_stripes = btrfs_chunk_num_stripes(sb, chunk);
4621 len = btrfs_chunk_item_size(num_stripes);
4630 free_extent_buffer(sb);
4634 int btrfs_read_chunk_tree(struct btrfs_root *root)
4636 struct btrfs_path *path;
4637 struct extent_buffer *leaf;
4638 struct btrfs_key key;
4639 struct btrfs_key found_key;
4643 root = root->fs_info->chunk_root;
4645 path = btrfs_alloc_path();
4649 mutex_lock(&uuid_mutex);
4652 /* first we search for all of the device items, and then we
4653 * read in all of the chunk items. This way we can create chunk
4654 * mappings that reference all of the devices that are found
4656 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
4660 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4664 leaf = path->nodes[0];
4665 slot = path->slots[0];
4666 if (slot >= btrfs_header_nritems(leaf)) {
4667 ret = btrfs_next_leaf(root, path);
4674 btrfs_item_key_to_cpu(leaf, &found_key, slot);
4675 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
4676 if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
4678 if (found_key.type == BTRFS_DEV_ITEM_KEY) {
4679 struct btrfs_dev_item *dev_item;
4680 dev_item = btrfs_item_ptr(leaf, slot,
4681 struct btrfs_dev_item);
4682 ret = read_one_dev(root, leaf, dev_item);
4686 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
4687 struct btrfs_chunk *chunk;
4688 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
4689 ret = read_one_chunk(root, &found_key, leaf, chunk);
4695 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
4697 btrfs_release_path(path);
4702 unlock_chunks(root);
4703 mutex_unlock(&uuid_mutex);
4705 btrfs_free_path(path);
4709 static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
4713 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
4714 btrfs_dev_stat_reset(dev, i);
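/*
* Device statistics are persisted as an array of __le64 counters in
* a dev_stats item keyed by the devid; the reader below tolerates
* shorter items written by older kernels by resetting any counter
* that lies beyond item_size.
*/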
4717 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
4719 struct btrfs_key key;
4720 struct btrfs_key found_key;
4721 struct btrfs_root *dev_root = fs_info->dev_root;
4722 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
4723 struct extent_buffer *eb;
4726 struct btrfs_device *device;
4727 struct btrfs_path *path = NULL;
4730 path = btrfs_alloc_path();
4736 mutex_lock(&fs_devices->device_list_mutex);
4737 list_for_each_entry(device, &fs_devices->devices, dev_list) {
4739 struct btrfs_dev_stats_item *ptr;
4742 key.type = BTRFS_DEV_STATS_KEY;
4743 key.offset = device->devid;
4744 ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
4746 __btrfs_reset_dev_stats(device);
4747 device->dev_stats_valid = 1;
4748 btrfs_release_path(path);
4751 slot = path->slots[0];
4752 eb = path->nodes[0];
4753 btrfs_item_key_to_cpu(eb, &found_key, slot);
4754 item_size = btrfs_item_size_nr(eb, slot);
4756 ptr = btrfs_item_ptr(eb, slot,
4757 struct btrfs_dev_stats_item);
4759 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
4760 if (item_size >= (1 + i) * sizeof(__le64))
4761 btrfs_dev_stat_set(device, i,
4762 btrfs_dev_stats_value(eb, ptr, i));
4764 btrfs_dev_stat_reset(device, i);
4767 device->dev_stats_valid = 1;
4768 btrfs_dev_stat_print_on_load(device);
4769 btrfs_release_path(path);
4771 mutex_unlock(&fs_devices->device_list_mutex);
4774 btrfs_free_path(path);
4775 return ret < 0 ? ret : 0;
4778 static int update_dev_stat_item(struct btrfs_trans_handle *trans,
4779 struct btrfs_root *dev_root,
4780 struct btrfs_device *device)
4782 struct btrfs_path *path;
4783 struct btrfs_key key;
4784 struct extent_buffer *eb;
4785 struct btrfs_dev_stats_item *ptr;
4790 key.type = BTRFS_DEV_STATS_KEY;
4791 key.offset = device->devid;
4793 path = btrfs_alloc_path();
4795 ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
4797 printk_in_rcu(KERN_WARNING "btrfs: error %d while searching for dev_stats item for device %s!\n",
4798 ret, rcu_str_deref(device->name));
4803 btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
4804 /* need to delete old one and insert a new one */
4805 ret = btrfs_del_item(trans, dev_root, path);
4807 printk_in_rcu(KERN_WARNING "btrfs: delete too small dev_stats item for device %s failed %d!\n",
4808 rcu_str_deref(device->name), ret);
4815 /* need to insert a new item */
4816 btrfs_release_path(path);
4817 ret = btrfs_insert_empty_item(trans, dev_root, path,
4818 &key, sizeof(*ptr));
4820 printk_in_rcu(KERN_WARNING "btrfs: insert dev_stats item for device %s failed %d!\n",
4821 rcu_str_deref(device->name), ret);
4826 eb = path->nodes[0];
4827 ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
4828 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
4829 btrfs_set_dev_stats_value(eb, ptr, i,
4830 btrfs_dev_stat_read(device, i));
4831 btrfs_mark_buffer_dirty(eb);
4834 btrfs_free_path(path);
4839 * called from commit_transaction. Writes all changed device stats to disk.
4841 int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
4842 struct btrfs_fs_info *fs_info)
4844 struct btrfs_root *dev_root = fs_info->dev_root;
4845 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
4846 struct btrfs_device *device;
4849 mutex_lock(&fs_devices->device_list_mutex);
4850 list_for_each_entry(device, &fs_devices->devices, dev_list) {
4851 if (!device->dev_stats_valid || !device->dev_stats_dirty)
4854 ret = update_dev_stat_item(trans, dev_root, device);
4856 device->dev_stats_dirty = 0;
4858 mutex_unlock(&fs_devices->device_list_mutex);
4863 void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
4865 btrfs_dev_stat_inc(dev, index);
4866 btrfs_dev_stat_print_on_error(dev);
4869 void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
4871 if (!dev->dev_stats_valid)
4873 printk_ratelimited_in_rcu(KERN_ERR
4874 "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
4875 rcu_str_deref(dev->name),
4876 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
4877 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
4878 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
4879 btrfs_dev_stat_read(dev,
4880 BTRFS_DEV_STAT_CORRUPTION_ERRS),
4881 btrfs_dev_stat_read(dev,
4882 BTRFS_DEV_STAT_GENERATION_ERRS));
4885 static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
4889 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
4890 if (btrfs_dev_stat_read(dev, i) != 0)
4892 if (i == BTRFS_DEV_STAT_VALUES_MAX)
4893 return; /* all values == 0, suppress message */
4895 printk_in_rcu(KERN_INFO "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
4896 rcu_str_deref(dev->name),
4897 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
4898 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
4899 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
4900 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
4901 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
4904 int btrfs_get_dev_stats(struct btrfs_root *root,
4905 struct btrfs_ioctl_get_dev_stats *stats)
4907 struct btrfs_device *dev;
4908 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
4911 mutex_lock(&fs_devices->device_list_mutex);
4912 dev = btrfs_find_device(root, stats->devid, NULL, NULL);
4913 mutex_unlock(&fs_devices->device_list_mutex);
4917 "btrfs: get dev_stats failed, device not found\n");
4919 } else if (!dev->dev_stats_valid) {
4921 "btrfs: get dev_stats failed, not yet valid\n");
4923 } else if (stats->flags & BTRFS_DEV_STATS_RESET) {
4924 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
4925 if (stats->nr_items > i)
4927 btrfs_dev_stat_read_and_reset(dev, i);
4929 btrfs_dev_stat_reset(dev, i);
4932 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
4933 if (stats->nr_items > i)
4934 stats->values[i] = btrfs_dev_stat_read(dev, i);
4936 if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
4937 stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;