2 * Copyright (C) 2007 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
18 #include <linux/sched.h>
19 #include <linux/bio.h>
20 #include <linux/slab.h>
21 #include <linux/buffer_head.h>
22 #include <linux/blkdev.h>
23 #include <linux/random.h>
24 #include <linux/iocontext.h>
25 #include <linux/capability.h>
26 #include <linux/ratelimit.h>
27 #include <linux/kthread.h>
28 #include <asm/div64.h>
31 #include "extent_map.h"
33 #include "transaction.h"
34 #include "print-tree.h"
36 #include "async-thread.h"
37 #include "check-integrity.h"
38 #include "rcu-string.h"
40 static int init_first_rw_device(struct btrfs_trans_handle *trans,
41 struct btrfs_root *root,
42 struct btrfs_device *device);
43 static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
44 static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
45 static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
47 static DEFINE_MUTEX(uuid_mutex);
48 static LIST_HEAD(fs_uuids);
50 static void lock_chunks(struct btrfs_root *root)
52 mutex_lock(&root->fs_info->chunk_mutex);
55 static void unlock_chunks(struct btrfs_root *root)
57 mutex_unlock(&root->fs_info->chunk_mutex);
60 static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
62 struct btrfs_device *device;
63 WARN_ON(fs_devices->opened);
64 while (!list_empty(&fs_devices->devices)) {
65 device = list_entry(fs_devices->devices.next,
66 struct btrfs_device, dev_list);
67 list_del(&device->dev_list);
68 rcu_string_free(device->name);
74 void btrfs_cleanup_fs_uuids(void)
76 struct btrfs_fs_devices *fs_devices;
78 while (!list_empty(&fs_uuids)) {
79 fs_devices = list_entry(fs_uuids.next,
80 struct btrfs_fs_devices, list);
81 list_del(&fs_devices->list);
82 free_fs_devices(fs_devices);
86 static noinline struct btrfs_device *__find_device(struct list_head *head, u64 devid, u8 *uuid)
89 struct btrfs_device *dev;
91 list_for_each_entry(dev, head, dev_list) {
92 if (dev->devid == devid &&
93 (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
100 static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
102 struct btrfs_fs_devices *fs_devices;
104 list_for_each_entry(fs_devices, &fs_uuids, list) {
105 if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
111 static void requeue_list(struct btrfs_pending_bios *pending_bios,
112 struct bio *head, struct bio *tail)
115 struct bio *old_head;
117 old_head = pending_bios->head;
118 pending_bios->head = head;
119 if (pending_bios->tail)
120 tail->bi_next = old_head;
122 pending_bios->tail = tail;
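/*
 * Note that requeue_list() prepends: the saved run head..tail goes back on
 * the front of the device's pending list (tail->bi_next is pointed at the
 * old head when the list was non-empty), so bios that were pulled off but
 * not submitted keep their position ahead of anything queued in the
 * meantime.
 */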
126 * we try to collect pending bios for a device so we don't get a large
127 * number of procs sending bios down to the same device. This greatly
128 * improves the scheduler's ability to collect and merge the bios.
130 * But, it also turns into a long list of bios to process and that is sure
131 * to eventually make the worker thread block. The solution here is to
132 * make some progress and then put this work struct back at the end of
133 * the list if the block device is congested. This way, multiple devices
134 * can make progress from a single worker thread.
136 static noinline void run_scheduled_bios(struct btrfs_device *device)
139 struct backing_dev_info *bdi;
140 struct btrfs_fs_info *fs_info;
141 struct btrfs_pending_bios *pending_bios;
145 unsigned long num_run;
146 unsigned long batch_run = 0;
148 unsigned long last_waited = 0;
150 int sync_pending = 0;
151 struct blk_plug plug;
154 * this function runs all the bios we've collected for
155 * a particular device. We don't want to wander off to
156 * another device without first sending all of these down.
157 * So, set up a plug here and finish it off before we return
159 blk_start_plug(&plug);
161 bdi = blk_get_backing_dev_info(device->bdev);
162 fs_info = device->dev_root->fs_info;
163 limit = btrfs_async_submit_limit(fs_info);
164 limit = limit * 2 / 3;
167 spin_lock(&device->io_lock);
172 /* take all the bios off the list at once and process them
173 * later on (without the lock held). But, remember the
174 * tail and other pointers so the bios can be properly reinserted
175 * into the list if we hit congestion
177 if (!force_reg && device->pending_sync_bios.head) {
178 pending_bios = &device->pending_sync_bios;
181 pending_bios = &device->pending_bios;
185 pending = pending_bios->head;
186 tail = pending_bios->tail;
187 WARN_ON(pending && !tail);
190 * if pending was null this time around, no bios need processing
191 * at all and we can stop. Otherwise it'll loop back up again
192 * and do an additional check so no bios are missed.
194 * device->running_pending is used to synchronize with the schedule_bio code.
197 if (device->pending_sync_bios.head == NULL &&
198 device->pending_bios.head == NULL) {
200 device->running_pending = 0;
203 device->running_pending = 1;
206 pending_bios->head = NULL;
207 pending_bios->tail = NULL;
209 spin_unlock(&device->io_lock);
214 /* we want to work on both lists, but do more bios on the
215 * sync list than the regular list
218 pending_bios != &device->pending_sync_bios &&
219 device->pending_sync_bios.head) ||
220 (num_run > 64 && pending_bios == &device->pending_sync_bios &&
221 device->pending_bios.head)) {
222 spin_lock(&device->io_lock);
223 requeue_list(pending_bios, pending, tail);
228 pending = pending->bi_next;
230 atomic_dec(&fs_info->nr_async_bios);
232 if (atomic_read(&fs_info->nr_async_bios) < limit &&
233 waitqueue_active(&fs_info->async_submit_wait))
234 wake_up(&fs_info->async_submit_wait);
236 BUG_ON(atomic_read(&cur->bi_cnt) == 0);
239 * if we're doing the sync list, record that our
240 * plug has some sync requests on it
242 * If we're doing the regular list and there are
243 * sync requests sitting around, unplug before we continue
246 if (pending_bios == &device->pending_sync_bios) {
248 } else if (sync_pending) {
249 blk_finish_plug(&plug);
250 blk_start_plug(&plug);
254 btrfsic_submit_bio(cur->bi_rw, cur);
261 * we made progress, there is more work to do and the bdi
262 * is now congested. Back off and let other work structs run instead
265 if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
266 fs_info->fs_devices->open_devices > 1) {
267 struct io_context *ioc;
269 ioc = current->io_context;
272 * the main goal here is that we don't want to
273 * block if we're going to be able to submit
274 * more requests without blocking.
276 * This code does two great things, it pokes into
277 * the elevator code from a filesystem _and_
278 * it makes assumptions about how batching works.
280 if (ioc && ioc->nr_batch_requests > 0 &&
281 time_before(jiffies, ioc->last_waited + HZ/50UL) &&
283 ioc->last_waited == last_waited)) {
285 * we want to go through our batch of
286 * requests and stop. So, we copy out
287 * the ioc->last_waited time and test
288 * against it before looping
290 last_waited = ioc->last_waited;
295 spin_lock(&device->io_lock);
296 requeue_list(pending_bios, pending, tail);
297 device->running_pending = 1;
299 spin_unlock(&device->io_lock);
300 btrfs_requeue_work(&device->work);
303 /* unplug every 64 requests just for good measure */
304 if (batch_run % 64 == 0) {
305 blk_finish_plug(&plug);
306 blk_start_plug(&plug);
315 spin_lock(&device->io_lock);
316 if (device->pending_bios.head || device->pending_sync_bios.head)
318 spin_unlock(&device->io_lock);
321 blk_finish_plug(&plug);
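/*
 * For illustration only: a minimal, hypothetical sketch of the producer
 * side that run_scheduled_bios() drains (the submission path in mainline
 * is btrfs_schedule_bio(), which is not part of this excerpt). It shows
 * the append-and-kick pattern against the same pending lists and work
 * item; the helper name and the is_sync flag are assumptions.
 */
static void example_queue_bio(struct btrfs_device *device, struct bio *bio,
			      int is_sync)
{
	struct btrfs_pending_bios *pending_bios;
	int should_queue = 1;

	spin_lock(&device->io_lock);
	pending_bios = is_sync ? &device->pending_sync_bios :
				 &device->pending_bios;

	/* append at the tail; run_scheduled_bios() consumes from the head */
	bio->bi_next = NULL;
	if (pending_bios->tail)
		pending_bios->tail->bi_next = bio;
	pending_bios->tail = bio;
	if (!pending_bios->head)
		pending_bios->head = bio;

	/* don't kick the worker if it is already running our lists */
	if (device->running_pending)
		should_queue = 0;
	spin_unlock(&device->io_lock);

	if (should_queue)
		btrfs_queue_worker(&device->dev_root->fs_info->submit_workers,
				   &device->work);
}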
324 static void pending_bios_fn(struct btrfs_work *work)
326 struct btrfs_device *device;
328 device = container_of(work, struct btrfs_device, work);
329 run_scheduled_bios(device);
332 static noinline int device_list_add(const char *path,
333 struct btrfs_super_block *disk_super,
334 u64 devid, struct btrfs_fs_devices **fs_devices_ret)
336 struct btrfs_device *device;
337 struct btrfs_fs_devices *fs_devices;
338 struct rcu_string *name;
339 u64 found_transid = btrfs_super_generation(disk_super);
341 fs_devices = find_fsid(disk_super->fsid);
343 fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
346 INIT_LIST_HEAD(&fs_devices->devices);
347 INIT_LIST_HEAD(&fs_devices->alloc_list);
348 list_add(&fs_devices->list, &fs_uuids);
349 memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
350 fs_devices->latest_devid = devid;
351 fs_devices->latest_trans = found_transid;
352 mutex_init(&fs_devices->device_list_mutex);
355 device = __find_device(&fs_devices->devices, devid,
356 disk_super->dev_item.uuid);
359 if (fs_devices->opened)
362 device = kzalloc(sizeof(*device), GFP_NOFS);
364 /* we can safely leave the fs_devices entry around */
367 device->devid = devid;
368 device->dev_stats_valid = 0;
369 device->work.func = pending_bios_fn;
370 memcpy(device->uuid, disk_super->dev_item.uuid,
372 spin_lock_init(&device->io_lock);
374 name = rcu_string_strdup(path, GFP_NOFS);
379 rcu_assign_pointer(device->name, name);
380 INIT_LIST_HEAD(&device->dev_alloc_list);
382 /* init readahead state */
383 spin_lock_init(&device->reada_lock);
384 device->reada_curr_zone = NULL;
385 atomic_set(&device->reada_in_flight, 0);
386 device->reada_next = 0;
387 INIT_RADIX_TREE(&device->reada_zones, GFP_NOFS & ~__GFP_WAIT);
388 INIT_RADIX_TREE(&device->reada_extents, GFP_NOFS & ~__GFP_WAIT);
390 mutex_lock(&fs_devices->device_list_mutex);
391 list_add_rcu(&device->dev_list, &fs_devices->devices);
392 mutex_unlock(&fs_devices->device_list_mutex);
394 device->fs_devices = fs_devices;
395 fs_devices->num_devices++;
396 } else if (!device->name || strcmp(device->name->str, path)) {
397 name = rcu_string_strdup(path, GFP_NOFS);
400 rcu_string_free(device->name);
401 rcu_assign_pointer(device->name, name);
402 if (device->missing) {
403 fs_devices->missing_devices--;
408 if (found_transid > fs_devices->latest_trans) {
409 fs_devices->latest_devid = devid;
410 fs_devices->latest_trans = found_transid;
412 *fs_devices_ret = fs_devices;
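/*
 * To summarize device_list_add(): a scanned super block lands in one of
 * three cases. An unknown fsid allocates a fresh btrfs_fs_devices; a known
 * fsid with an unknown devid/uuid allocates and links a new btrfs_device;
 * a known device whose path changed just swaps in the new name and clears
 * its missing state. In every case the latest devid/transid pair is bumped
 * when the scanned generation is newer.
 */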
416 static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
418 struct btrfs_fs_devices *fs_devices;
419 struct btrfs_device *device;
420 struct btrfs_device *orig_dev;
422 fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
424 return ERR_PTR(-ENOMEM);
426 INIT_LIST_HEAD(&fs_devices->devices);
427 INIT_LIST_HEAD(&fs_devices->alloc_list);
428 INIT_LIST_HEAD(&fs_devices->list);
429 mutex_init(&fs_devices->device_list_mutex);
430 fs_devices->latest_devid = orig->latest_devid;
431 fs_devices->latest_trans = orig->latest_trans;
432 memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));
434 /* We hold the volume lock, so it is safe to get the devices. */
435 list_for_each_entry(orig_dev, &orig->devices, dev_list) {
436 struct rcu_string *name;
438 device = kzalloc(sizeof(*device), GFP_NOFS);
443 * This is ok to do without rcu read locked because we hold the
444 * uuid mutex so nothing we touch in here is going to disappear.
446 name = rcu_string_strdup(orig_dev->name->str, GFP_NOFS);
451 rcu_assign_pointer(device->name, name);
453 device->devid = orig_dev->devid;
454 device->work.func = pending_bios_fn;
455 memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
456 spin_lock_init(&device->io_lock);
457 INIT_LIST_HEAD(&device->dev_list);
458 INIT_LIST_HEAD(&device->dev_alloc_list);
460 list_add(&device->dev_list, &fs_devices->devices);
461 device->fs_devices = fs_devices;
462 fs_devices->num_devices++;
466 free_fs_devices(fs_devices);
467 return ERR_PTR(-ENOMEM);
470 void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
472 struct btrfs_device *device, *next;
474 struct block_device *latest_bdev = NULL;
475 u64 latest_devid = 0;
476 u64 latest_transid = 0;
478 mutex_lock(&uuid_mutex);
480 /* This is the initialized path, it is safe to release the devices. */
481 list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
482 if (device->in_fs_metadata) {
483 if (!latest_transid ||
484 device->generation > latest_transid) {
485 latest_devid = device->devid;
486 latest_transid = device->generation;
487 latest_bdev = device->bdev;
493 blkdev_put(device->bdev, device->mode);
495 fs_devices->open_devices--;
497 if (device->writeable) {
498 list_del_init(&device->dev_alloc_list);
499 device->writeable = 0;
500 fs_devices->rw_devices--;
502 list_del_init(&device->dev_list);
503 fs_devices->num_devices--;
504 rcu_string_free(device->name);
508 if (fs_devices->seed) {
509 fs_devices = fs_devices->seed;
513 fs_devices->latest_bdev = latest_bdev;
514 fs_devices->latest_devid = latest_devid;
515 fs_devices->latest_trans = latest_transid;
517 mutex_unlock(&uuid_mutex);
520 static void __free_device(struct work_struct *work)
522 struct btrfs_device *device;
524 device = container_of(work, struct btrfs_device, rcu_work);
527 blkdev_put(device->bdev, device->mode);
529 rcu_string_free(device->name);
533 static void free_device(struct rcu_head *head)
535 struct btrfs_device *device;
537 device = container_of(head, struct btrfs_device, rcu);
539 INIT_WORK(&device->rcu_work, __free_device);
540 schedule_work(&device->rcu_work);
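/*
 * Freeing a device is a two-stage affair: call_rcu() defers free_device()
 * until readers that might still dereference the dev_list entry are done,
 * and free_device() then bounces the real teardown to a workqueue because
 * __free_device() calls blkdev_put(), which can sleep and therefore must
 * not run in the (atomic) RCU callback context.
 */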
543 static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
545 struct btrfs_device *device;
547 if (--fs_devices->opened > 0)
550 mutex_lock(&fs_devices->device_list_mutex);
551 list_for_each_entry(device, &fs_devices->devices, dev_list) {
552 struct btrfs_device *new_device;
553 struct rcu_string *name;
556 fs_devices->open_devices--;
558 if (device->writeable) {
559 list_del_init(&device->dev_alloc_list);
560 fs_devices->rw_devices--;
563 if (device->can_discard)
564 fs_devices->num_can_discard--;
566 new_device = kmalloc(sizeof(*new_device), GFP_NOFS);
567 BUG_ON(!new_device); /* -ENOMEM */
568 memcpy(new_device, device, sizeof(*new_device));
570 /* Safe because we are under uuid_mutex */
571 name = rcu_string_strdup(device->name->str, GFP_NOFS);
572 BUG_ON(device->name && !name); /* -ENOMEM */
573 rcu_assign_pointer(new_device->name, name);
574 new_device->bdev = NULL;
575 new_device->writeable = 0;
576 new_device->in_fs_metadata = 0;
577 new_device->can_discard = 0;
578 list_replace_rcu(&device->dev_list, &new_device->dev_list);
580 call_rcu(&device->rcu, free_device);
582 mutex_unlock(&fs_devices->device_list_mutex);
584 WARN_ON(fs_devices->open_devices);
585 WARN_ON(fs_devices->rw_devices);
586 fs_devices->opened = 0;
587 fs_devices->seeding = 0;
592 int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
594 struct btrfs_fs_devices *seed_devices = NULL;
597 mutex_lock(&uuid_mutex);
598 ret = __btrfs_close_devices(fs_devices);
599 if (!fs_devices->opened) {
600 seed_devices = fs_devices->seed;
601 fs_devices->seed = NULL;
603 mutex_unlock(&uuid_mutex);
605 while (seed_devices) {
606 fs_devices = seed_devices;
607 seed_devices = fs_devices->seed;
608 __btrfs_close_devices(fs_devices);
609 free_fs_devices(fs_devices);
614 static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
615 fmode_t flags, void *holder)
617 struct request_queue *q;
618 struct block_device *bdev;
619 struct list_head *head = &fs_devices->devices;
620 struct btrfs_device *device;
621 struct block_device *latest_bdev = NULL;
622 struct buffer_head *bh;
623 struct btrfs_super_block *disk_super;
624 u64 latest_devid = 0;
625 u64 latest_transid = 0;
632 list_for_each_entry(device, head, dev_list) {
638 bdev = blkdev_get_by_path(device->name->str, flags, holder);
640 printk(KERN_INFO "open %s failed\n", device->name->str);
643 filemap_write_and_wait(bdev->bd_inode->i_mapping);
644 invalidate_bdev(bdev);
645 set_blocksize(bdev, 4096);
647 bh = btrfs_read_dev_super(bdev);
651 disk_super = (struct btrfs_super_block *)bh->b_data;
652 devid = btrfs_stack_device_id(&disk_super->dev_item);
653 if (devid != device->devid)
656 if (memcmp(device->uuid, disk_super->dev_item.uuid,
660 device->generation = btrfs_super_generation(disk_super);
661 if (!latest_transid || device->generation > latest_transid) {
662 latest_devid = devid;
663 latest_transid = device->generation;
667 if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
668 device->writeable = 0;
670 device->writeable = !bdev_read_only(bdev);
674 q = bdev_get_queue(bdev);
675 if (blk_queue_discard(q)) {
676 device->can_discard = 1;
677 fs_devices->num_can_discard++;
681 device->in_fs_metadata = 0;
682 device->mode = flags;
684 if (!blk_queue_nonrot(bdev_get_queue(bdev)))
685 fs_devices->rotating = 1;
687 fs_devices->open_devices++;
688 if (device->writeable) {
689 fs_devices->rw_devices++;
690 list_add(&device->dev_alloc_list,
691 &fs_devices->alloc_list);
699 blkdev_put(bdev, flags);
703 if (fs_devices->open_devices == 0) {
707 fs_devices->seeding = seeding;
708 fs_devices->opened = 1;
709 fs_devices->latest_bdev = latest_bdev;
710 fs_devices->latest_devid = latest_devid;
711 fs_devices->latest_trans = latest_transid;
712 fs_devices->total_rw_bytes = 0;
717 int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
718 fmode_t flags, void *holder)
722 mutex_lock(&uuid_mutex);
723 if (fs_devices->opened) {
724 fs_devices->opened++;
727 ret = __btrfs_open_devices(fs_devices, flags, holder);
729 mutex_unlock(&uuid_mutex);
733 int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
734 struct btrfs_fs_devices **fs_devices_ret)
736 struct btrfs_super_block *disk_super;
737 struct block_device *bdev;
738 struct buffer_head *bh;
744 bdev = blkdev_get_by_path(path, flags, holder);
751 mutex_lock(&uuid_mutex);
752 ret = set_blocksize(bdev, 4096);
755 bh = btrfs_read_dev_super(bdev);
760 disk_super = (struct btrfs_super_block *)bh->b_data;
761 devid = btrfs_stack_device_id(&disk_super->dev_item);
762 transid = btrfs_super_generation(disk_super);
763 if (disk_super->label[0])
764 printk(KERN_INFO "device label %s ", disk_super->label);
766 printk(KERN_INFO "device fsid %pU ", disk_super->fsid);
767 printk(KERN_CONT "devid %llu transid %llu %s\n",
768 (unsigned long long)devid, (unsigned long long)transid, path);
769 ret = device_list_add(path, disk_super, devid, fs_devices_ret);
773 mutex_unlock(&uuid_mutex);
774 blkdev_put(bdev, flags);
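/*
 * For illustration only: a hypothetical caller of btrfs_scan_one_device(),
 * roughly what the device-scan ioctl does with a user-supplied path. The
 * function name and the FMODE_READ choice are assumptions for this sketch.
 */
static int example_scan_device(const char *path, void *holder)
{
	struct btrfs_fs_devices *fs_devices;
	int ret;

	ret = btrfs_scan_one_device(path, FMODE_READ, holder, &fs_devices);
	if (ret)
		return ret;

	/* fs_devices now tracks every scanned device with this fsid */
	printk(KERN_INFO "btrfs: fsid has %llu known device(s)\n",
	       (unsigned long long)fs_devices->num_devices);
	return 0;
}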
779 /* helper to account the used device space in the range */
780 int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
781 u64 end, u64 *length)
783 struct btrfs_key key;
784 struct btrfs_root *root = device->dev_root;
785 struct btrfs_dev_extent *dev_extent;
786 struct btrfs_path *path;
790 struct extent_buffer *l;
794 if (start >= device->total_bytes)
797 path = btrfs_alloc_path();
802 key.objectid = device->devid;
804 key.type = BTRFS_DEV_EXTENT_KEY;
806 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
810 ret = btrfs_previous_item(root, path, key.objectid, key.type);
817 slot = path->slots[0];
818 if (slot >= btrfs_header_nritems(l)) {
819 ret = btrfs_next_leaf(root, path);
827 btrfs_item_key_to_cpu(l, &key, slot);
829 if (key.objectid < device->devid)
832 if (key.objectid > device->devid)
835 if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
838 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
839 extent_end = key.offset + btrfs_dev_extent_length(l,
841 if (key.offset <= start && extent_end > end) {
842 *length = end - start + 1;
844 } else if (key.offset <= start && extent_end > start)
845 *length += extent_end - start;
846 else if (key.offset > start && extent_end <= end)
847 *length += extent_end - key.offset;
848 else if (key.offset > start && key.offset <= end) {
849 *length += end - key.offset + 1;
851 } else if (key.offset > end)
859 btrfs_free_path(path);
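/*
 * The four overlap cases above are just a clamped interval intersection:
 * for a dev extent [key.offset, extent_end) and a query range
 * [start, end], the bytes counted are
 *   min(extent_end, end + 1) - max(key.offset, start)
 * whenever that difference is positive.
 */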
864 * find_free_dev_extent - find free space in the specified device
865 * @device: the device in which we search for free space
866 * @num_bytes: the size of the free space that we need
867 * @start: store the start of the free space.
868 * @len: the size of the free space that we find, or the size of the max
869 * free space if we don't find suitable free space
871 * this uses a pretty simple search, the expectation is that it is
872 * called very infrequently and that a given device has a small number of extents
875 * @start is used to store the start of the free space if we find it. But if we
876 * don't find suitable free space, it will be used to store the start position
877 * of the max free space.
879 * @len is used to store the size of the free space that we find.
880 * But if we don't find suitable free space, it is used to store the size of
881 * the max free space.
883 int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
884 u64 *start, u64 *len)
886 struct btrfs_key key;
887 struct btrfs_root *root = device->dev_root;
888 struct btrfs_dev_extent *dev_extent;
889 struct btrfs_path *path;
895 u64 search_end = device->total_bytes;
898 struct extent_buffer *l;
900 /* FIXME use last free of some kind */
902 /* we don't want to overwrite the superblock on the drive,
903 * so we make sure to start at an offset of at least 1MB
905 search_start = max(root->fs_info->alloc_start, 1024ull * 1024);
907 max_hole_start = search_start;
911 if (search_start >= search_end) {
916 path = btrfs_alloc_path();
923 key.objectid = device->devid;
924 key.offset = search_start;
925 key.type = BTRFS_DEV_EXTENT_KEY;
927 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
931 ret = btrfs_previous_item(root, path, key.objectid, key.type);
938 slot = path->slots[0];
939 if (slot >= btrfs_header_nritems(l)) {
940 ret = btrfs_next_leaf(root, path);
948 btrfs_item_key_to_cpu(l, &key, slot);
950 if (key.objectid < device->devid)
953 if (key.objectid > device->devid)
956 if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
959 if (key.offset > search_start) {
960 hole_size = key.offset - search_start;
962 if (hole_size > max_hole_size) {
963 max_hole_start = search_start;
964 max_hole_size = hole_size;
968 * If this free space is greater than what we need,
969 * it must be the max free space that we have found
970 * until now, so max_hole_start must point to the start
971 * of this free space and the length of this free space
972 * is stored in max_hole_size. Thus, we return
973 * max_hole_start and max_hole_size and go back to the caller.
976 if (hole_size >= num_bytes) {
982 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
983 extent_end = key.offset + btrfs_dev_extent_length(l,
985 if (extent_end > search_start)
986 search_start = extent_end;
993 * At this point, search_start should be the end of
994 * allocated dev extents, and when shrinking the device,
995 * search_end may be smaller than search_start.
997 if (search_end > search_start)
998 hole_size = search_end - search_start;
1000 if (hole_size > max_hole_size) {
1001 max_hole_start = search_start;
1002 max_hole_size = hole_size;
1006 if (hole_size < num_bytes)
1012 btrfs_free_path(path);
1014 *start = max_hole_start;
1016 *len = max_hole_size;
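/*
 * For illustration only: a hypothetical caller of find_free_dev_extent(),
 * in the style of the chunk allocator placing one stripe on a device.
 * The names below are assumptions for this sketch.
 */
static int example_place_stripe(struct btrfs_device *device, u64 stripe_size)
{
	u64 dev_offset;
	u64 max_avail;
	int ret;

	ret = find_free_dev_extent(device, stripe_size,
				   &dev_offset, &max_avail);
	if (ret) {
		/* on -ENOSPC, max_avail still holds the biggest hole found */
		return ret;
	}

	/* [dev_offset, dev_offset + stripe_size) is free on this device */
	return 0;
}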
1020 static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
1021 struct btrfs_device *device,
1025 struct btrfs_path *path;
1026 struct btrfs_root *root = device->dev_root;
1027 struct btrfs_key key;
1028 struct btrfs_key found_key;
1029 struct extent_buffer *leaf = NULL;
1030 struct btrfs_dev_extent *extent = NULL;
1032 path = btrfs_alloc_path();
1036 key.objectid = device->devid;
1038 key.type = BTRFS_DEV_EXTENT_KEY;
1040 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1042 ret = btrfs_previous_item(root, path, key.objectid,
1043 BTRFS_DEV_EXTENT_KEY);
1046 leaf = path->nodes[0];
1047 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1048 extent = btrfs_item_ptr(leaf, path->slots[0],
1049 struct btrfs_dev_extent);
1050 BUG_ON(found_key.offset > start || found_key.offset +
1051 btrfs_dev_extent_length(leaf, extent) < start);
1053 btrfs_release_path(path);
1055 } else if (ret == 0) {
1056 leaf = path->nodes[0];
1057 extent = btrfs_item_ptr(leaf, path->slots[0],
1058 struct btrfs_dev_extent);
1060 btrfs_error(root->fs_info, ret, "Slot search failed");
1064 if (device->bytes_used > 0) {
1065 u64 len = btrfs_dev_extent_length(leaf, extent);
1066 device->bytes_used -= len;
1067 spin_lock(&root->fs_info->free_chunk_lock);
1068 root->fs_info->free_chunk_space += len;
1069 spin_unlock(&root->fs_info->free_chunk_lock);
1071 ret = btrfs_del_item(trans, root, path);
1073 btrfs_error(root->fs_info, ret,
1074 "Failed to remove dev extent item");
1077 btrfs_free_path(path);
1081 int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
1082 struct btrfs_device *device,
1083 u64 chunk_tree, u64 chunk_objectid,
1084 u64 chunk_offset, u64 start, u64 num_bytes)
1087 struct btrfs_path *path;
1088 struct btrfs_root *root = device->dev_root;
1089 struct btrfs_dev_extent *extent;
1090 struct extent_buffer *leaf;
1091 struct btrfs_key key;
1093 WARN_ON(!device->in_fs_metadata);
1094 path = btrfs_alloc_path();
1098 key.objectid = device->devid;
1100 key.type = BTRFS_DEV_EXTENT_KEY;
1101 ret = btrfs_insert_empty_item(trans, root, path, &key,
1106 leaf = path->nodes[0];
1107 extent = btrfs_item_ptr(leaf, path->slots[0],
1108 struct btrfs_dev_extent);
1109 btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
1110 btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
1111 btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
1113 write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
1114 (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
1117 btrfs_set_dev_extent_length(leaf, extent, num_bytes);
1118 btrfs_mark_buffer_dirty(leaf);
1120 btrfs_free_path(path);
1124 static noinline int find_next_chunk(struct btrfs_root *root,
1125 u64 objectid, u64 *offset)
1127 struct btrfs_path *path;
1129 struct btrfs_key key;
1130 struct btrfs_chunk *chunk;
1131 struct btrfs_key found_key;
1133 path = btrfs_alloc_path();
1137 key.objectid = objectid;
1138 key.offset = (u64)-1;
1139 key.type = BTRFS_CHUNK_ITEM_KEY;
1141 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1145 BUG_ON(ret == 0); /* Corruption */
1147 ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
1151 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1153 if (found_key.objectid != objectid)
1156 chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
1157 struct btrfs_chunk);
1158 *offset = found_key.offset +
1159 btrfs_chunk_length(path->nodes[0], chunk);
1164 btrfs_free_path(path);
1168 static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid)
1171 struct btrfs_key key;
1172 struct btrfs_key found_key;
1173 struct btrfs_path *path;
1175 root = root->fs_info->chunk_root;
1177 path = btrfs_alloc_path();
1181 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1182 key.type = BTRFS_DEV_ITEM_KEY;
1183 key.offset = (u64)-1;
1185 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1189 BUG_ON(ret == 0); /* Corruption */
1191 ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
1192 BTRFS_DEV_ITEM_KEY);
1196 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1198 *objectid = found_key.offset + 1;
1202 btrfs_free_path(path);
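/*
 * Both find_next_chunk() and find_next_devid() use the same trick for
 * "find the highest existing key": search for offset (u64)-1, which can
 * never match, so btrfs_search_slot() returns the insertion point just
 * past the last item, and btrfs_previous_item() then steps back to the
 * real last entry of that objectid/type.
 */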
1207 * the device information is stored in the chunk root
1208 * the btrfs_device struct should be fully filled in
1210 int btrfs_add_device(struct btrfs_trans_handle *trans,
1211 struct btrfs_root *root,
1212 struct btrfs_device *device)
1215 struct btrfs_path *path;
1216 struct btrfs_dev_item *dev_item;
1217 struct extent_buffer *leaf;
1218 struct btrfs_key key;
1221 root = root->fs_info->chunk_root;
1223 path = btrfs_alloc_path();
1227 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1228 key.type = BTRFS_DEV_ITEM_KEY;
1229 key.offset = device->devid;
1231 ret = btrfs_insert_empty_item(trans, root, path, &key,
1236 leaf = path->nodes[0];
1237 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1239 btrfs_set_device_id(leaf, dev_item, device->devid);
1240 btrfs_set_device_generation(leaf, dev_item, 0);
1241 btrfs_set_device_type(leaf, dev_item, device->type);
1242 btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1243 btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1244 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1245 btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
1246 btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
1247 btrfs_set_device_group(leaf, dev_item, 0);
1248 btrfs_set_device_seek_speed(leaf, dev_item, 0);
1249 btrfs_set_device_bandwidth(leaf, dev_item, 0);
1250 btrfs_set_device_start_offset(leaf, dev_item, 0);
1252 ptr = (unsigned long)btrfs_device_uuid(dev_item);
1253 write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
1254 ptr = (unsigned long)btrfs_device_fsid(dev_item);
1255 write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
1256 btrfs_mark_buffer_dirty(leaf);
1260 btrfs_free_path(path);
1264 static int btrfs_rm_dev_item(struct btrfs_root *root,
1265 struct btrfs_device *device)
1268 struct btrfs_path *path;
1269 struct btrfs_key key;
1270 struct btrfs_trans_handle *trans;
1272 root = root->fs_info->chunk_root;
1274 path = btrfs_alloc_path();
1278 trans = btrfs_start_transaction(root, 0);
1279 if (IS_ERR(trans)) {
1280 btrfs_free_path(path);
1281 return PTR_ERR(trans);
1283 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1284 key.type = BTRFS_DEV_ITEM_KEY;
1285 key.offset = device->devid;
1288 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1297 ret = btrfs_del_item(trans, root, path);
1301 btrfs_free_path(path);
1302 unlock_chunks(root);
1303 btrfs_commit_transaction(trans, root);
1307 int btrfs_rm_device(struct btrfs_root *root, char *device_path)
1309 struct btrfs_device *device;
1310 struct btrfs_device *next_device;
1311 struct block_device *bdev;
1312 struct buffer_head *bh = NULL;
1313 struct btrfs_super_block *disk_super;
1314 struct btrfs_fs_devices *cur_devices;
1320 bool clear_super = false;
1322 mutex_lock(&uuid_mutex);
1324 all_avail = root->fs_info->avail_data_alloc_bits |
1325 root->fs_info->avail_system_alloc_bits |
1326 root->fs_info->avail_metadata_alloc_bits;
1328 if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
1329 root->fs_info->fs_devices->num_devices <= 4) {
1330 printk(KERN_ERR "btrfs: unable to go below four devices on raid10\n");
1336 if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
1337 root->fs_info->fs_devices->num_devices <= 2) {
1338 printk(KERN_ERR "btrfs: unable to go below two "
1339 "devices on raid1\n");
1344 if (strcmp(device_path, "missing") == 0) {
1345 struct list_head *devices;
1346 struct btrfs_device *tmp;
1349 devices = &root->fs_info->fs_devices->devices;
1351 * It is safe to read the devices since the volume_mutex is held.
1354 list_for_each_entry(tmp, devices, dev_list) {
1355 if (tmp->in_fs_metadata && !tmp->bdev) {
1364 printk(KERN_ERR "btrfs: no missing devices found to remove\n");
1369 bdev = blkdev_get_by_path(device_path, FMODE_READ | FMODE_EXCL,
1370 root->fs_info->bdev_holder);
1372 ret = PTR_ERR(bdev);
1376 set_blocksize(bdev, 4096);
1377 invalidate_bdev(bdev);
1378 bh = btrfs_read_dev_super(bdev);
1383 disk_super = (struct btrfs_super_block *)bh->b_data;
1384 devid = btrfs_stack_device_id(&disk_super->dev_item);
1385 dev_uuid = disk_super->dev_item.uuid;
1386 device = btrfs_find_device(root, devid, dev_uuid,
1394 if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
1395 printk(KERN_ERR "btrfs: unable to remove the only writeable device\n");
1401 if (device->writeable) {
1403 list_del_init(&device->dev_alloc_list);
1404 unlock_chunks(root);
1405 root->fs_info->fs_devices->rw_devices--;
1409 ret = btrfs_shrink_device(device, 0);
1413 ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
1417 spin_lock(&root->fs_info->free_chunk_lock);
1418 root->fs_info->free_chunk_space = device->total_bytes -
1420 spin_unlock(&root->fs_info->free_chunk_lock);
1422 device->in_fs_metadata = 0;
1423 btrfs_scrub_cancel_dev(root, device);
1426 * the device list mutex makes sure that we don't change
1427 * the device list while someone else is writing out all
1428 * the device supers.
1431 cur_devices = device->fs_devices;
1432 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1433 list_del_rcu(&device->dev_list);
1435 device->fs_devices->num_devices--;
1437 if (device->missing)
1438 root->fs_info->fs_devices->missing_devices--;
1440 next_device = list_entry(root->fs_info->fs_devices->devices.next,
1441 struct btrfs_device, dev_list);
1442 if (device->bdev == root->fs_info->sb->s_bdev)
1443 root->fs_info->sb->s_bdev = next_device->bdev;
1444 if (device->bdev == root->fs_info->fs_devices->latest_bdev)
1445 root->fs_info->fs_devices->latest_bdev = next_device->bdev;
1448 device->fs_devices->open_devices--;
1450 call_rcu(&device->rcu, free_device);
1451 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1453 num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
1454 btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);
1456 if (cur_devices->open_devices == 0) {
1457 struct btrfs_fs_devices *fs_devices;
1458 fs_devices = root->fs_info->fs_devices;
1459 while (fs_devices) {
1460 if (fs_devices->seed == cur_devices)
1462 fs_devices = fs_devices->seed;
1464 fs_devices->seed = cur_devices->seed;
1465 cur_devices->seed = NULL;
1467 __btrfs_close_devices(cur_devices);
1468 unlock_chunks(root);
1469 free_fs_devices(cur_devices);
1473 * at this point, the device is zero sized. We want to
1474 * remove it from the devices list and zero out the old super
1477 /* make sure this device isn't detected as part of the FS anymore */
1480 memset(&disk_super->magic, 0, sizeof(disk_super->magic));
1481 set_buffer_dirty(bh);
1482 sync_dirty_buffer(bh);
1491 blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
1493 mutex_unlock(&uuid_mutex);
1496 if (device->writeable) {
1498 list_add(&device->dev_alloc_list,
1499 &root->fs_info->fs_devices->alloc_list);
1500 unlock_chunks(root);
1501 root->fs_info->fs_devices->rw_devices++;
1507 * does all the dirty work required for changing the file system's UUID.
1509 static int btrfs_prepare_sprout(struct btrfs_root *root)
1511 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
1512 struct btrfs_fs_devices *old_devices;
1513 struct btrfs_fs_devices *seed_devices;
1514 struct btrfs_super_block *disk_super = root->fs_info->super_copy;
1515 struct btrfs_device *device;
1518 BUG_ON(!mutex_is_locked(&uuid_mutex));
1519 if (!fs_devices->seeding)
1522 seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
1526 old_devices = clone_fs_devices(fs_devices);
1527 if (IS_ERR(old_devices)) {
1528 kfree(seed_devices);
1529 return PTR_ERR(old_devices);
1532 list_add(&old_devices->list, &fs_uuids);
1534 memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
1535 seed_devices->opened = 1;
1536 INIT_LIST_HEAD(&seed_devices->devices);
1537 INIT_LIST_HEAD(&seed_devices->alloc_list);
1538 mutex_init(&seed_devices->device_list_mutex);
1540 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1541 list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
1543 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1545 list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
1546 list_for_each_entry(device, &seed_devices->devices, dev_list) {
1547 device->fs_devices = seed_devices;
1550 fs_devices->seeding = 0;
1551 fs_devices->num_devices = 0;
1552 fs_devices->open_devices = 0;
1553 fs_devices->seed = seed_devices;
1555 generate_random_uuid(fs_devices->fsid);
1556 memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1557 memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1558 super_flags = btrfs_super_flags(disk_super) &
1559 ~BTRFS_SUPER_FLAG_SEEDING;
1560 btrfs_set_super_flags(disk_super, super_flags);
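/*
 * After btrfs_prepare_sprout() the old devices live on as a read-only
 * "seed" fs_devices (keeping the original fsid, reachable via fs_uuids
 * and fs_devices->seed), while the mounted fs_devices is emptied, gets a
 * freshly generated fsid and is repopulated by the device add that
 * triggered the sprout.
 */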
1566 * store the expected generation for seed devices in device items.
1568 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
1569 struct btrfs_root *root)
1571 struct btrfs_path *path;
1572 struct extent_buffer *leaf;
1573 struct btrfs_dev_item *dev_item;
1574 struct btrfs_device *device;
1575 struct btrfs_key key;
1576 u8 fs_uuid[BTRFS_UUID_SIZE];
1577 u8 dev_uuid[BTRFS_UUID_SIZE];
1581 path = btrfs_alloc_path();
1585 root = root->fs_info->chunk_root;
1586 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1588 key.type = BTRFS_DEV_ITEM_KEY;
1591 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1595 leaf = path->nodes[0];
1597 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1598 ret = btrfs_next_leaf(root, path);
1603 leaf = path->nodes[0];
1604 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1605 btrfs_release_path(path);
1609 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1610 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
1611 key.type != BTRFS_DEV_ITEM_KEY)
1614 dev_item = btrfs_item_ptr(leaf, path->slots[0],
1615 struct btrfs_dev_item);
1616 devid = btrfs_device_id(leaf, dev_item);
1617 read_extent_buffer(leaf, dev_uuid,
1618 (unsigned long)btrfs_device_uuid(dev_item),
1620 read_extent_buffer(leaf, fs_uuid,
1621 (unsigned long)btrfs_device_fsid(dev_item),
1623 device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
1624 BUG_ON(!device); /* Logic error */
1626 if (device->fs_devices->seeding) {
1627 btrfs_set_device_generation(leaf, dev_item,
1628 device->generation);
1629 btrfs_mark_buffer_dirty(leaf);
1637 btrfs_free_path(path);
1641 int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
1643 struct request_queue *q;
1644 struct btrfs_trans_handle *trans;
1645 struct btrfs_device *device;
1646 struct block_device *bdev;
1647 struct list_head *devices;
1648 struct super_block *sb = root->fs_info->sb;
1649 struct rcu_string *name;
1651 int seeding_dev = 0;
1654 if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
1657 bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
1658 root->fs_info->bdev_holder);
1660 return PTR_ERR(bdev);
1662 if (root->fs_info->fs_devices->seeding) {
1664 down_write(&sb->s_umount);
1665 mutex_lock(&uuid_mutex);
1668 filemap_write_and_wait(bdev->bd_inode->i_mapping);
1670 devices = &root->fs_info->fs_devices->devices;
1672 * we have the volume lock, so we don't need the extra
1673 * device list mutex while reading the list here.
1675 list_for_each_entry(device, devices, dev_list) {
1676 if (device->bdev == bdev) {
1682 device = kzalloc(sizeof(*device), GFP_NOFS);
1684 /* we can safely leave the fs_devices entry around */
1689 name = rcu_string_strdup(device_path, GFP_NOFS);
1695 rcu_assign_pointer(device->name, name);
1697 ret = find_next_devid(root, &device->devid);
1699 rcu_string_free(device->name);
1704 trans = btrfs_start_transaction(root, 0);
1705 if (IS_ERR(trans)) {
1706 rcu_string_free(device->name);
1708 ret = PTR_ERR(trans);
1714 q = bdev_get_queue(bdev);
1715 if (blk_queue_discard(q))
1716 device->can_discard = 1;
1717 device->writeable = 1;
1718 device->work.func = pending_bios_fn;
1719 generate_random_uuid(device->uuid);
1720 spin_lock_init(&device->io_lock);
1721 device->generation = trans->transid;
1722 device->io_width = root->sectorsize;
1723 device->io_align = root->sectorsize;
1724 device->sector_size = root->sectorsize;
1725 device->total_bytes = i_size_read(bdev->bd_inode);
1726 device->disk_total_bytes = device->total_bytes;
1727 device->dev_root = root->fs_info->dev_root;
1728 device->bdev = bdev;
1729 device->in_fs_metadata = 1;
1730 device->mode = FMODE_EXCL;
1731 set_blocksize(device->bdev, 4096);
1734 sb->s_flags &= ~MS_RDONLY;
1735 ret = btrfs_prepare_sprout(root);
1736 BUG_ON(ret); /* -ENOMEM */
1739 device->fs_devices = root->fs_info->fs_devices;
1742 * we don't want write_supers to jump in here with our device half setup
1745 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1746 list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
1747 list_add(&device->dev_alloc_list,
1748 &root->fs_info->fs_devices->alloc_list);
1749 root->fs_info->fs_devices->num_devices++;
1750 root->fs_info->fs_devices->open_devices++;
1751 root->fs_info->fs_devices->rw_devices++;
1752 if (device->can_discard)
1753 root->fs_info->fs_devices->num_can_discard++;
1754 root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;
1756 spin_lock(&root->fs_info->free_chunk_lock);
1757 root->fs_info->free_chunk_space += device->total_bytes;
1758 spin_unlock(&root->fs_info->free_chunk_lock);
1760 if (!blk_queue_nonrot(bdev_get_queue(bdev)))
1761 root->fs_info->fs_devices->rotating = 1;
1763 total_bytes = btrfs_super_total_bytes(root->fs_info->super_copy);
1764 btrfs_set_super_total_bytes(root->fs_info->super_copy,
1765 total_bytes + device->total_bytes);
1767 total_bytes = btrfs_super_num_devices(root->fs_info->super_copy);
1768 btrfs_set_super_num_devices(root->fs_info->super_copy,
1770 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1773 ret = init_first_rw_device(trans, root, device);
1776 ret = btrfs_finish_sprout(trans, root);
1780 ret = btrfs_add_device(trans, root, device);
1786 * we've got more storage, clear any full flags on the space infos
1789 btrfs_clear_space_info_full(root->fs_info);
1791 unlock_chunks(root);
1792 ret = btrfs_commit_transaction(trans, root);
1795 mutex_unlock(&uuid_mutex);
1796 up_write(&sb->s_umount);
1798 if (ret) /* transaction commit */
1801 ret = btrfs_relocate_sys_chunks(root);
1803 btrfs_error(root->fs_info, ret,
1804 "Failed to relocate sys chunks after "
1805 "device initialization. This can be fixed "
1806 "using the \"btrfs balance\" command.");
1812 unlock_chunks(root);
1813 btrfs_abort_transaction(trans, root, ret);
1814 btrfs_end_transaction(trans, root);
1815 rcu_string_free(device->name);
1818 blkdev_put(bdev, FMODE_EXCL);
1820 mutex_unlock(&uuid_mutex);
1821 up_write(&sb->s_umount);
1826 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
1827 struct btrfs_device *device)
1830 struct btrfs_path *path;
1831 struct btrfs_root *root;
1832 struct btrfs_dev_item *dev_item;
1833 struct extent_buffer *leaf;
1834 struct btrfs_key key;
1836 root = device->dev_root->fs_info->chunk_root;
1838 path = btrfs_alloc_path();
1842 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1843 key.type = BTRFS_DEV_ITEM_KEY;
1844 key.offset = device->devid;
1846 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1855 leaf = path->nodes[0];
1856 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1858 btrfs_set_device_id(leaf, dev_item, device->devid);
1859 btrfs_set_device_type(leaf, dev_item, device->type);
1860 btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1861 btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1862 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1863 btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes);
1864 btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
1865 btrfs_mark_buffer_dirty(leaf);
1868 btrfs_free_path(path);
1872 static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
1873 struct btrfs_device *device, u64 new_size)
1875 struct btrfs_super_block *super_copy =
1876 device->dev_root->fs_info->super_copy;
1877 u64 old_total = btrfs_super_total_bytes(super_copy);
1878 u64 diff = new_size - device->total_bytes;
1880 if (!device->writeable)
1882 if (new_size <= device->total_bytes)
1885 btrfs_set_super_total_bytes(super_copy, old_total + diff);
1886 device->fs_devices->total_rw_bytes += diff;
1888 device->total_bytes = new_size;
1889 device->disk_total_bytes = new_size;
1890 btrfs_clear_space_info_full(device->dev_root->fs_info);
1892 return btrfs_update_device(trans, device);
1895 int btrfs_grow_device(struct btrfs_trans_handle *trans,
1896 struct btrfs_device *device, u64 new_size)
1899 lock_chunks(device->dev_root);
1900 ret = __btrfs_grow_device(trans, device, new_size);
1901 unlock_chunks(device->dev_root);
1905 static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
1906 struct btrfs_root *root,
1907 u64 chunk_tree, u64 chunk_objectid,
1911 struct btrfs_path *path;
1912 struct btrfs_key key;
1914 root = root->fs_info->chunk_root;
1915 path = btrfs_alloc_path();
1919 key.objectid = chunk_objectid;
1920 key.offset = chunk_offset;
1921 key.type = BTRFS_CHUNK_ITEM_KEY;
1923 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1926 else if (ret > 0) { /* Logic error or corruption */
1927 btrfs_error(root->fs_info, -ENOENT,
1928 "Failed lookup while freeing chunk.");
1933 ret = btrfs_del_item(trans, root, path);
1935 btrfs_error(root->fs_info, ret,
1936 "Failed to delete chunk item.");
1938 btrfs_free_path(path);
1942 static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64 chunk_offset)
1945 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
1946 struct btrfs_disk_key *disk_key;
1947 struct btrfs_chunk *chunk;
1954 struct btrfs_key key;
1956 array_size = btrfs_super_sys_array_size(super_copy);
1958 ptr = super_copy->sys_chunk_array;
1961 while (cur < array_size) {
1962 disk_key = (struct btrfs_disk_key *)ptr;
1963 btrfs_disk_key_to_cpu(&key, disk_key);
1965 len = sizeof(*disk_key);
1967 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
1968 chunk = (struct btrfs_chunk *)(ptr + len);
1969 num_stripes = btrfs_stack_chunk_num_stripes(chunk);
1970 len += btrfs_chunk_item_size(num_stripes);
1975 if (key.objectid == chunk_objectid &&
1976 key.offset == chunk_offset) {
1977 memmove(ptr, ptr + len, array_size - (cur + len));
1979 btrfs_set_super_sys_array_size(super_copy, array_size);
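/*
 * The memmove() deletes the key + chunk pair in place: the
 * array_size - (cur + len) bytes that follow the entry slide down over it
 * and the recorded sys_array size shrinks by len, i.e. by
 * sizeof(struct btrfs_disk_key) + btrfs_chunk_item_size(num_stripes)
 * for the chunk that matched.
 */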
1988 static int btrfs_relocate_chunk(struct btrfs_root *root,
1989 u64 chunk_tree, u64 chunk_objectid,
1992 struct extent_map_tree *em_tree;
1993 struct btrfs_root *extent_root;
1994 struct btrfs_trans_handle *trans;
1995 struct extent_map *em;
1996 struct map_lookup *map;
2000 root = root->fs_info->chunk_root;
2001 extent_root = root->fs_info->extent_root;
2002 em_tree = &root->fs_info->mapping_tree.map_tree;
2004 ret = btrfs_can_relocate(extent_root, chunk_offset);
2008 /* step one, relocate all the extents inside this chunk */
2009 ret = btrfs_relocate_block_group(extent_root, chunk_offset);
2013 trans = btrfs_start_transaction(root, 0);
2014 BUG_ON(IS_ERR(trans));
2019 * step two, delete the device extents and the
2020 * chunk tree entries
2022 read_lock(&em_tree->lock);
2023 em = lookup_extent_mapping(em_tree, chunk_offset, 1);
2024 read_unlock(&em_tree->lock);
2026 BUG_ON(!em || em->start > chunk_offset ||
2027 em->start + em->len < chunk_offset);
2028 map = (struct map_lookup *)em->bdev;
2030 for (i = 0; i < map->num_stripes; i++) {
2031 ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
2032 map->stripes[i].physical);
2035 if (map->stripes[i].dev) {
2036 ret = btrfs_update_device(trans, map->stripes[i].dev);
2040 ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
2045 trace_btrfs_chunk_free(root, map, chunk_offset, em->len);
2047 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
2048 ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
2052 ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
2055 write_lock(&em_tree->lock);
2056 remove_extent_mapping(em_tree, em);
2057 write_unlock(&em_tree->lock);
2062 /* once for the tree */
2063 free_extent_map(em);
2065 free_extent_map(em);
2067 unlock_chunks(root);
2068 btrfs_end_transaction(trans, root);
2072 static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
2074 struct btrfs_root *chunk_root = root->fs_info->chunk_root;
2075 struct btrfs_path *path;
2076 struct extent_buffer *leaf;
2077 struct btrfs_chunk *chunk;
2078 struct btrfs_key key;
2079 struct btrfs_key found_key;
2080 u64 chunk_tree = chunk_root->root_key.objectid;
2082 bool retried = false;
2086 path = btrfs_alloc_path();
2091 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2092 key.offset = (u64)-1;
2093 key.type = BTRFS_CHUNK_ITEM_KEY;
2096 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2099 BUG_ON(ret == 0); /* Corruption */
2101 ret = btrfs_previous_item(chunk_root, path, key.objectid,
2108 leaf = path->nodes[0];
2109 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2111 chunk = btrfs_item_ptr(leaf, path->slots[0],
2112 struct btrfs_chunk);
2113 chunk_type = btrfs_chunk_type(leaf, chunk);
2114 btrfs_release_path(path);
2116 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
2117 ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
2126 if (found_key.offset == 0)
2128 key.offset = found_key.offset - 1;
2131 if (failed && !retried) {
2135 } else if (failed && retried) {
2140 btrfs_free_path(path);
2144 static int insert_balance_item(struct btrfs_root *root,
2145 struct btrfs_balance_control *bctl)
2147 struct btrfs_trans_handle *trans;
2148 struct btrfs_balance_item *item;
2149 struct btrfs_disk_balance_args disk_bargs;
2150 struct btrfs_path *path;
2151 struct extent_buffer *leaf;
2152 struct btrfs_key key;
2155 path = btrfs_alloc_path();
2159 trans = btrfs_start_transaction(root, 0);
2160 if (IS_ERR(trans)) {
2161 btrfs_free_path(path);
2162 return PTR_ERR(trans);
2165 key.objectid = BTRFS_BALANCE_OBJECTID;
2166 key.type = BTRFS_BALANCE_ITEM_KEY;
2169 ret = btrfs_insert_empty_item(trans, root, path, &key,
2174 leaf = path->nodes[0];
2175 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
2177 memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
2179 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
2180 btrfs_set_balance_data(leaf, item, &disk_bargs);
2181 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
2182 btrfs_set_balance_meta(leaf, item, &disk_bargs);
2183 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
2184 btrfs_set_balance_sys(leaf, item, &disk_bargs);
2186 btrfs_set_balance_flags(leaf, item, bctl->flags);
2188 btrfs_mark_buffer_dirty(leaf);
2190 btrfs_free_path(path);
2191 err = btrfs_commit_transaction(trans, root);
2197 static int del_balance_item(struct btrfs_root *root)
2199 struct btrfs_trans_handle *trans;
2200 struct btrfs_path *path;
2201 struct btrfs_key key;
2204 path = btrfs_alloc_path();
2208 trans = btrfs_start_transaction(root, 0);
2209 if (IS_ERR(trans)) {
2210 btrfs_free_path(path);
2211 return PTR_ERR(trans);
2214 key.objectid = BTRFS_BALANCE_OBJECTID;
2215 key.type = BTRFS_BALANCE_ITEM_KEY;
2218 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2226 ret = btrfs_del_item(trans, root, path);
2228 btrfs_free_path(path);
2229 err = btrfs_commit_transaction(trans, root);
2236 * This is a heuristic used to reduce the number of chunks balanced on
2237 * resume after balance was interrupted.
2239 static void update_balance_args(struct btrfs_balance_control *bctl)
2242 * Turn on soft mode for chunk types that were being converted.
2244 if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
2245 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
2246 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
2247 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
2248 if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
2249 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
2252 * Turn on the usage filter if it is not already used. The idea is
2253 * that chunks that we have already balanced should be
2254 * reasonably full. Don't do it for chunks that are being
2255 * converted - that will keep us from relocating unconverted
2256 * (albeit full) chunks.
2258 if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2259 !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2260 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
2261 bctl->data.usage = 90;
2263 if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2264 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2265 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
2266 bctl->sys.usage = 90;
2268 if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2269 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2270 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
2271 bctl->meta.usage = 90;
2276 * Should be called with both balance and volume mutexes held to
2277 * serialize other volume operations (add_dev/rm_dev/resize) with
2278 * restriper. Same goes for unset_balance_control.
2280 static void set_balance_control(struct btrfs_balance_control *bctl)
2282 struct btrfs_fs_info *fs_info = bctl->fs_info;
2284 BUG_ON(fs_info->balance_ctl);
2286 spin_lock(&fs_info->balance_lock);
2287 fs_info->balance_ctl = bctl;
2288 spin_unlock(&fs_info->balance_lock);
2291 static void unset_balance_control(struct btrfs_fs_info *fs_info)
2293 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
2295 BUG_ON(!fs_info->balance_ctl);
2297 spin_lock(&fs_info->balance_lock);
2298 fs_info->balance_ctl = NULL;
2299 spin_unlock(&fs_info->balance_lock);
2305 * Balance filters. Return 1 if chunk should be filtered out
2306 * (should not be balanced).
2308 static int chunk_profiles_filter(u64 chunk_type,
2309 struct btrfs_balance_args *bargs)
2311 chunk_type = chunk_to_extended(chunk_type) &
2312 BTRFS_EXTENDED_PROFILE_MASK;
2314 if (bargs->profiles & chunk_type)
2320 static u64 div_factor_fine(u64 num, int factor)
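/* div_factor_fine() scales by a percentage: it returns num * factor / 100,
 * which lets chunk_usage_filter() below turn bargs->usage (given in
 * percent) into a byte threshold against the block group size.
 */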
2332 static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
2333 struct btrfs_balance_args *bargs)
2335 struct btrfs_block_group_cache *cache;
2336 u64 chunk_used, user_thresh;
2339 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
2340 chunk_used = btrfs_block_group_used(&cache->item);
2342 user_thresh = div_factor_fine(cache->key.offset, bargs->usage);
2343 if (chunk_used < user_thresh)
2346 btrfs_put_block_group(cache);
2350 static int chunk_devid_filter(struct extent_buffer *leaf,
2351 struct btrfs_chunk *chunk,
2352 struct btrfs_balance_args *bargs)
2354 struct btrfs_stripe *stripe;
2355 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2358 for (i = 0; i < num_stripes; i++) {
2359 stripe = btrfs_stripe_nr(chunk, i);
2360 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
2367 /* [pstart, pend) */
2368 static int chunk_drange_filter(struct extent_buffer *leaf,
2369 struct btrfs_chunk *chunk,
2371 struct btrfs_balance_args *bargs)
2373 struct btrfs_stripe *stripe;
2374 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2380 if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
2383 if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
2384 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10))
2388 factor = num_stripes / factor;
2390 for (i = 0; i < num_stripes; i++) {
2391 stripe = btrfs_stripe_nr(chunk, i);
2392 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
2395 stripe_offset = btrfs_stripe_offset(leaf, stripe);
2396 stripe_length = btrfs_chunk_length(leaf, chunk);
2397 do_div(stripe_length, factor);
2399 if (stripe_offset < bargs->pend &&
2400 stripe_offset + stripe_length > bargs->pstart)
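/*
 * This is the usual half-open interval overlap test: the stripe
 * [stripe_offset, stripe_offset + stripe_length) intersects
 * [bargs->pstart, bargs->pend) iff each start is below the other's end.
 * stripe_length is the per-device length here: the chunk length divided
 * by the effective stripe count derived from factor above.
 */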
2407 /* [vstart, vend) */
2408 static int chunk_vrange_filter(struct extent_buffer *leaf,
2409 struct btrfs_chunk *chunk,
2411 struct btrfs_balance_args *bargs)
2413 if (chunk_offset < bargs->vend &&
2414 chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
2415 /* at least part of the chunk is inside this vrange */
2421 static int chunk_soft_convert_filter(u64 chunk_type,
2422 struct btrfs_balance_args *bargs)
2424 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
2427 chunk_type = chunk_to_extended(chunk_type) &
2428 BTRFS_EXTENDED_PROFILE_MASK;
2430 if (bargs->target == chunk_type)
2436 static int should_balance_chunk(struct btrfs_root *root,
2437 struct extent_buffer *leaf,
2438 struct btrfs_chunk *chunk, u64 chunk_offset)
2440 struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
2441 struct btrfs_balance_args *bargs = NULL;
2442 u64 chunk_type = btrfs_chunk_type(leaf, chunk);
2445 if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
2446 (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
2450 if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
2451 bargs = &bctl->data;
2452 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
2454 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
2455 bargs = &bctl->meta;
2457 /* profiles filter */
2458 if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
2459 chunk_profiles_filter(chunk_type, bargs)) {
2464 if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
2465 chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) {
2470 if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
2471 chunk_devid_filter(leaf, chunk, bargs)) {
2475 /* drange filter, makes sense only with devid filter */
2476 if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
2477 chunk_drange_filter(leaf, chunk, chunk_offset, bargs)) {
2482 if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
2483 chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
2487 /* soft profile changing mode */
2488 if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
2489 chunk_soft_convert_filter(chunk_type, bargs)) {
2496 static u64 div_factor(u64 num, int factor)
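/* div_factor() scales by tenths (num * factor / 10); __btrfs_balance()
 * below uses a factor of 1 to ask each device to free roughly 10% of its
 * size (further capped at 1MB) before relocating chunks.
 */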
2505 static int __btrfs_balance(struct btrfs_fs_info *fs_info)
2507 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
2508 struct btrfs_root *chunk_root = fs_info->chunk_root;
2509 struct btrfs_root *dev_root = fs_info->dev_root;
2510 struct list_head *devices;
2511 struct btrfs_device *device;
2514 struct btrfs_chunk *chunk;
2515 struct btrfs_path *path;
2516 struct btrfs_key key;
2517 struct btrfs_key found_key;
2518 struct btrfs_trans_handle *trans;
2519 struct extent_buffer *leaf;
2522 int enospc_errors = 0;
2523 bool counting = true;
2525 /* step one: make some room on all the devices */
2526 devices = &fs_info->fs_devices->devices;
2527 list_for_each_entry(device, devices, dev_list) {
2528 old_size = device->total_bytes;
2529 size_to_free = div_factor(old_size, 1);
2530 size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
2531 if (!device->writeable ||
2532 device->total_bytes - device->bytes_used > size_to_free)
2535 ret = btrfs_shrink_device(device, old_size - size_to_free);
2540 trans = btrfs_start_transaction(dev_root, 0);
2541 BUG_ON(IS_ERR(trans));
2543 ret = btrfs_grow_device(trans, device, old_size);
2546 btrfs_end_transaction(trans, dev_root);
2549 /* step two: relocate all the chunks */
2550 path = btrfs_alloc_path();
2556 /* zero out stat counters */
2557 spin_lock(&fs_info->balance_lock);
2558 memset(&bctl->stat, 0, sizeof(bctl->stat));
2559 spin_unlock(&fs_info->balance_lock);
2561 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2562 key.offset = (u64)-1;
2563 key.type = BTRFS_CHUNK_ITEM_KEY;
2566 if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
2567 atomic_read(&fs_info->balance_cancel_req)) {
2572 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2577 * this shouldn't happen, it means the last relocate
2578 * failed
2581 BUG(); /* FIXME break ? */
2583 ret = btrfs_previous_item(chunk_root, path, 0,
2584 BTRFS_CHUNK_ITEM_KEY);
2590 leaf = path->nodes[0];
2591 slot = path->slots[0];
2592 btrfs_item_key_to_cpu(leaf, &found_key, slot);
2594 if (found_key.objectid != key.objectid)
2597 /* chunk zero is special */
2598 if (found_key.offset == 0)
2601 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
2604 spin_lock(&fs_info->balance_lock);
2605 bctl->stat.considered++;
2606 spin_unlock(&fs_info->balance_lock);
2609 ret = should_balance_chunk(chunk_root, leaf, chunk,
2611 btrfs_release_path(path);
2616 spin_lock(&fs_info->balance_lock);
2617 bctl->stat.expected++;
2618 spin_unlock(&fs_info->balance_lock);
2622 ret = btrfs_relocate_chunk(chunk_root,
2623 chunk_root->root_key.objectid,
2626 if (ret && ret != -ENOSPC)
2628 if (ret == -ENOSPC) {
2631 spin_lock(&fs_info->balance_lock);
2632 bctl->stat.completed++;
2633 spin_unlock(&fs_info->balance_lock);
2636 key.offset = found_key.offset - 1;
2640 btrfs_release_path(path);
2645 btrfs_free_path(path);
2646 if (enospc_errors) {
2647 printk(KERN_INFO "btrfs: %d enospc errors during balance\n",
2657 * alloc_profile_is_valid - see if a given profile is valid and reduced
2658 * @flags: profile to validate
2659 * @extended: if true @flags is treated as an extended profile
2661 static int alloc_profile_is_valid(u64 flags, int extended)
2663 u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
2664 BTRFS_BLOCK_GROUP_PROFILE_MASK);
2666 flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
2668 /* 1) check that all other bits are zeroed */
2672 /* 2) see if profile is reduced */
2674 return !extended; /* "0" is valid for usual profiles */
2676 /* true if exactly one bit set */
2677 return (flags & (flags - 1)) == 0;
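/*
 * The (flags & (flags - 1)) trick: subtracting 1 flips the lowest set bit
 * and every bit below it, so the AND clears exactly that bit; the result
 * is zero iff at most one bit was set. With illustrative bit positions:
 *
 *	RAID1          = 0b0100 -> 0b0100 & 0b0011 = 0b0000  (reduced)
 *	RAID1 | RAID10 = 0b0110 -> 0b0110 & 0b0101 = 0b0100  (not reduced)
 */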
2680 static inline int balance_need_close(struct btrfs_fs_info *fs_info)
2682 /* cancel requested || normal exit path */
2683 return atomic_read(&fs_info->balance_cancel_req) ||
2684 (atomic_read(&fs_info->balance_pause_req) == 0 &&
2685 atomic_read(&fs_info->balance_cancel_req) == 0);
2688 static void __cancel_balance(struct btrfs_fs_info *fs_info)
2692 unset_balance_control(fs_info);
2693 ret = del_balance_item(fs_info->tree_root);
2697 void update_ioctl_balance_args(struct btrfs_fs_info *fs_info, int lock,
2698 struct btrfs_ioctl_balance_args *bargs);
2701 * Should be called with both balance and volume mutexes held
2703 int btrfs_balance(struct btrfs_balance_control *bctl,
2704 struct btrfs_ioctl_balance_args *bargs)
2706 struct btrfs_fs_info *fs_info = bctl->fs_info;
2711 if (btrfs_fs_closing(fs_info) ||
2712 atomic_read(&fs_info->balance_pause_req) ||
2713 atomic_read(&fs_info->balance_cancel_req)) {
2718 allowed = btrfs_super_incompat_flags(fs_info->super_copy);
2719 if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
2723 * In the case of mixed groups, both data and metadata should be picked,
2724 * and identical options should be given for both of them.
2726 allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
2727 if (mixed && (bctl->flags & allowed)) {
2728 if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
2729 !(bctl->flags & BTRFS_BALANCE_METADATA) ||
2730 memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
2731 printk(KERN_ERR "btrfs: with mixed groups data and "
2732 "metadata balance options must be the same\n");
2738 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
2739 if (fs_info->fs_devices->num_devices == 1)
2740 allowed |= BTRFS_BLOCK_GROUP_DUP;
2741 else if (fs_info->fs_devices->num_devices < 4)
2742 allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
2744 allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
2745 BTRFS_BLOCK_GROUP_RAID10);
2747 if ((bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2748 (!alloc_profile_is_valid(bctl->data.target, 1) ||
2749 (bctl->data.target & ~allowed))) {
2750 printk(KERN_ERR "btrfs: unable to start balance with target "
2751 "data profile %llu\n",
2752 (unsigned long long)bctl->data.target);
2756 if ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2757 (!alloc_profile_is_valid(bctl->meta.target, 1) ||
2758 (bctl->meta.target & ~allowed))) {
2759 printk(KERN_ERR "btrfs: unable to start balance with target "
2760 "metadata profile %llu\n",
2761 (unsigned long long)bctl->meta.target);
2765 if ((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2766 (!alloc_profile_is_valid(bctl->sys.target, 1) ||
2767 (bctl->sys.target & ~allowed))) {
2768 printk(KERN_ERR "btrfs: unable to start balance with target "
2769 "system profile %llu\n",
2770 (unsigned long long)bctl->sys.target);
2775 /* allow dup'ed data chunks only in mixed mode */
2776 if (!mixed && (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2777 (bctl->data.target & BTRFS_BLOCK_GROUP_DUP)) {
2778 printk(KERN_ERR "btrfs: dup for data is not allowed\n");
2783 /* allow reducing metadata or system integrity only if force is set */
2784 allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
2785 BTRFS_BLOCK_GROUP_RAID10;
2786 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2787 (fs_info->avail_system_alloc_bits & allowed) &&
2788 !(bctl->sys.target & allowed)) ||
2789 ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2790 (fs_info->avail_metadata_alloc_bits & allowed) &&
2791 !(bctl->meta.target & allowed))) {
2792 if (bctl->flags & BTRFS_BALANCE_FORCE) {
2793 printk(KERN_INFO "btrfs: force reducing metadata "
2796 printk(KERN_ERR "btrfs: balance will reduce metadata "
2797 "integrity, use force if you want this\n");
2803 ret = insert_balance_item(fs_info->tree_root, bctl);
2804 if (ret && ret != -EEXIST)
2807 if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
2808 BUG_ON(ret == -EEXIST);
2809 set_balance_control(bctl);
2811 BUG_ON(ret != -EEXIST);
2812 spin_lock(&fs_info->balance_lock);
2813 update_balance_args(bctl);
2814 spin_unlock(&fs_info->balance_lock);
2817 atomic_inc(&fs_info->balance_running);
2818 mutex_unlock(&fs_info->balance_mutex);
2820 ret = __btrfs_balance(fs_info);
2822 mutex_lock(&fs_info->balance_mutex);
2823 atomic_dec(&fs_info->balance_running);
2826 memset(bargs, 0, sizeof(*bargs));
2827 update_ioctl_balance_args(fs_info, 0, bargs);
2830 if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
2831 balance_need_close(fs_info)) {
2832 __cancel_balance(fs_info);
2835 wake_up(&fs_info->balance_wait_q);
2839 if (bctl->flags & BTRFS_BALANCE_RESUME)
2840 __cancel_balance(fs_info);
2846 static int balance_kthread(void *data)
2848 struct btrfs_balance_control *bctl =
2849 (struct btrfs_balance_control *)data;
2850 struct btrfs_fs_info *fs_info = bctl->fs_info;
2853 mutex_lock(&fs_info->volume_mutex);
2854 mutex_lock(&fs_info->balance_mutex);
2856 set_balance_control(bctl);
2858 if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
2859 printk(KERN_INFO "btrfs: force skipping balance\n");
2861 printk(KERN_INFO "btrfs: continuing balance\n");
2862 ret = btrfs_balance(bctl, NULL);
2865 mutex_unlock(&fs_info->balance_mutex);
2866 mutex_unlock(&fs_info->volume_mutex);
2870 int btrfs_recover_balance(struct btrfs_root *tree_root)
2872 struct task_struct *tsk;
2873 struct btrfs_balance_control *bctl;
2874 struct btrfs_balance_item *item;
2875 struct btrfs_disk_balance_args disk_bargs;
2876 struct btrfs_path *path;
2877 struct extent_buffer *leaf;
2878 struct btrfs_key key;
2881 path = btrfs_alloc_path();
2885 bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
2891 key.objectid = BTRFS_BALANCE_OBJECTID;
2892 key.type = BTRFS_BALANCE_ITEM_KEY;
2895 ret = btrfs_search_slot(NULL, tree_root, &key, path, 0, 0);
2898 if (ret > 0) { /* ret = -ENOENT; */
2903 leaf = path->nodes[0];
2904 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
2906 bctl->fs_info = tree_root->fs_info;
2907 bctl->flags = btrfs_balance_flags(leaf, item) | BTRFS_BALANCE_RESUME;
2909 btrfs_balance_data(leaf, item, &disk_bargs);
2910 btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
2911 btrfs_balance_meta(leaf, item, &disk_bargs);
2912 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
2913 btrfs_balance_sys(leaf, item, &disk_bargs);
2914 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
2916 tsk = kthread_run(balance_kthread, bctl, "btrfs-balance");
2925 btrfs_free_path(path);
2929 int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
2933 mutex_lock(&fs_info->balance_mutex);
2934 if (!fs_info->balance_ctl) {
2935 mutex_unlock(&fs_info->balance_mutex);
2939 if (atomic_read(&fs_info->balance_running)) {
2940 atomic_inc(&fs_info->balance_pause_req);
2941 mutex_unlock(&fs_info->balance_mutex);
2943 wait_event(fs_info->balance_wait_q,
2944 atomic_read(&fs_info->balance_running) == 0);
2946 mutex_lock(&fs_info->balance_mutex);
2947 /* we are good with balance_ctl ripped off from under us */
2948 BUG_ON(atomic_read(&fs_info->balance_running));
2949 atomic_dec(&fs_info->balance_pause_req);
2954 mutex_unlock(&fs_info->balance_mutex);
2958 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
2960 mutex_lock(&fs_info->balance_mutex);
2961 if (!fs_info->balance_ctl) {
2962 mutex_unlock(&fs_info->balance_mutex);
2966 atomic_inc(&fs_info->balance_cancel_req);
2968 * if we are running, just wait and return; the balance item is
2969 * deleted in btrfs_balance in this case
2971 if (atomic_read(&fs_info->balance_running)) {
2972 mutex_unlock(&fs_info->balance_mutex);
2973 wait_event(fs_info->balance_wait_q,
2974 atomic_read(&fs_info->balance_running) == 0);
2975 mutex_lock(&fs_info->balance_mutex);
2977 /* __cancel_balance needs volume_mutex */
2978 mutex_unlock(&fs_info->balance_mutex);
2979 mutex_lock(&fs_info->volume_mutex);
2980 mutex_lock(&fs_info->balance_mutex);
2982 if (fs_info->balance_ctl)
2983 __cancel_balance(fs_info);
2985 mutex_unlock(&fs_info->volume_mutex);
2988 BUG_ON(fs_info->balance_ctl || atomic_read(&fs_info->balance_running));
2989 atomic_dec(&fs_info->balance_cancel_req);
2990 mutex_unlock(&fs_info->balance_mutex);
2995 * shrinking a device means finding all of the device extents past
2996 * the new size, and then following the back refs to the chunks.
2997 * The chunk relocation code actually frees the device extent
2999 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
3001 struct btrfs_trans_handle *trans;
3002 struct btrfs_root *root = device->dev_root;
3003 struct btrfs_dev_extent *dev_extent = NULL;
3004 struct btrfs_path *path;
3012 bool retried = false;
3013 struct extent_buffer *l;
3014 struct btrfs_key key;
3015 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3016 u64 old_total = btrfs_super_total_bytes(super_copy);
3017 u64 old_size = device->total_bytes;
3018 u64 diff = device->total_bytes - new_size;
3020 if (new_size >= device->total_bytes)
3023 path = btrfs_alloc_path();
3031 device->total_bytes = new_size;
3032 if (device->writeable) {
3033 device->fs_devices->total_rw_bytes -= diff;
3034 spin_lock(&root->fs_info->free_chunk_lock);
3035 root->fs_info->free_chunk_space -= diff;
3036 spin_unlock(&root->fs_info->free_chunk_lock);
3038 unlock_chunks(root);
3041 key.objectid = device->devid;
3042 key.offset = (u64)-1;
3043 key.type = BTRFS_DEV_EXTENT_KEY;
3046 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3050 ret = btrfs_previous_item(root, path, 0, key.type);
3055 btrfs_release_path(path);
3060 slot = path->slots[0];
3061 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
3063 if (key.objectid != device->devid) {
3064 btrfs_release_path(path);
3068 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
3069 length = btrfs_dev_extent_length(l, dev_extent);
3071 if (key.offset + length <= new_size) {
3072 btrfs_release_path(path);
3076 chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
3077 chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
3078 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3079 btrfs_release_path(path);
3081 ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
3083 if (ret && ret != -ENOSPC)
3087 } while (key.offset-- > 0);
3089 if (failed && !retried) {
3093 } else if (failed && retried) {
3097 device->total_bytes = old_size;
3098 if (device->writeable)
3099 device->fs_devices->total_rw_bytes += diff;
3100 spin_lock(&root->fs_info->free_chunk_lock);
3101 root->fs_info->free_chunk_space += diff;
3102 spin_unlock(&root->fs_info->free_chunk_lock);
3103 unlock_chunks(root);
3107 /* Shrinking succeeded, else we would be at "done". */
3108 trans = btrfs_start_transaction(root, 0);
3109 if (IS_ERR(trans)) {
3110 ret = PTR_ERR(trans);
3116 device->disk_total_bytes = new_size;
3117 /* Now btrfs_update_device() will change the on-disk size. */
3118 ret = btrfs_update_device(trans, device);
3120 unlock_chunks(root);
3121 btrfs_end_transaction(trans, root);
3124 WARN_ON(diff > old_total);
3125 btrfs_set_super_total_bytes(super_copy, old_total - diff);
3126 unlock_chunks(root);
3127 btrfs_end_transaction(trans, root);
3129 btrfs_free_path(path);
3133 static int btrfs_add_system_chunk(struct btrfs_root *root,
3134 struct btrfs_key *key,
3135 struct btrfs_chunk *chunk, int item_size)
3137 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3138 struct btrfs_disk_key disk_key;
3142 array_size = btrfs_super_sys_array_size(super_copy);
3143 if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
3146 ptr = super_copy->sys_chunk_array + array_size;
3147 btrfs_cpu_key_to_disk(&disk_key, key);
3148 memcpy(ptr, &disk_key, sizeof(disk_key));
3149 ptr += sizeof(disk_key);
3150 memcpy(ptr, chunk, item_size);
3151 item_size += sizeof(disk_key);
3152 btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
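/*
 * Resulting layout of super_copy->sys_chunk_array, as a sketch: (key,
 * chunk) records are appended back to back, and readers recover the
 * record boundaries from btrfs_chunk_item_size(num_stripes):
 *
 *	+----------+--------------------+----------+--------------------+--
 *	| disk_key | chunk (item_size)  | disk_key | chunk (item_size)  |
 *	+----------+--------------------+----------+--------------------+--
 *
 * btrfs_read_sys_array() below walks the array with exactly this stride.
 */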
3157 * sort the devices in descending order by max_avail, total_avail
3159 static int btrfs_cmp_device_info(const void *a, const void *b)
3161 const struct btrfs_device_info *di_a = a;
3162 const struct btrfs_device_info *di_b = b;
3164 if (di_a->max_avail > di_b->max_avail)
3166 if (di_a->max_avail < di_b->max_avail)
3168 if (di_a->total_avail > di_b->total_avail)
3170 if (di_a->total_avail < di_b->total_avail)
3175 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3176 struct btrfs_root *extent_root,
3177 struct map_lookup **map_ret,
3178 u64 *num_bytes_out, u64 *stripe_size_out,
3179 u64 start, u64 type)
3181 struct btrfs_fs_info *info = extent_root->fs_info;
3182 struct btrfs_fs_devices *fs_devices = info->fs_devices;
3183 struct list_head *cur;
3184 struct map_lookup *map = NULL;
3185 struct extent_map_tree *em_tree;
3186 struct extent_map *em;
3187 struct btrfs_device_info *devices_info = NULL;
3189 int num_stripes; /* total number of stripes to allocate */
3190 int sub_stripes; /* sub_stripes info for map */
3191 int dev_stripes; /* stripes per dev */
3192 int devs_max; /* max devs to use */
3193 int devs_min; /* min devs needed */
3194 int devs_increment; /* ndevs has to be a multiple of this */
3195 int ncopies; /* how many copies of the data we keep */
3197 u64 max_stripe_size;
3205 BUG_ON(!alloc_profile_is_valid(type, 0));
3207 if (list_empty(&fs_devices->alloc_list))
3214 devs_max = 0; /* 0 == as many as possible */
3218 * define the properties of each RAID type.
3219 * FIXME: move this to a global table and use it in all RAID
3220 * code
3222 if (type & (BTRFS_BLOCK_GROUP_DUP)) {
3226 } else if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
3228 } else if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
3233 } else if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
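/*
 * Summary of the per-profile parameters chosen above (a reconstruction
 * from the defaults and the later checks; treat the table as illustrative):
 *
 *	profile  dev_stripes  devs_min  devs_max  devs_increment  sub_stripes  ncopies
 *	single        1           1         0            1              1          1
 *	DUP           2           1         1            1              1          2
 *	RAID0         1           2         0            1              1          1
 *	RAID1         1           2         2            1              1          2
 *	RAID10        1           4         0            2              2          2
 */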
3242 if (type & BTRFS_BLOCK_GROUP_DATA) {
3243 max_stripe_size = 1024 * 1024 * 1024;
3244 max_chunk_size = 10 * max_stripe_size;
3245 } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
3246 /* for larger filesystems, use larger metadata chunks */
3247 if (fs_devices->total_rw_bytes > 50ULL * 1024 * 1024 * 1024)
3248 max_stripe_size = 1024 * 1024 * 1024;
3250 max_stripe_size = 256 * 1024 * 1024;
3251 max_chunk_size = max_stripe_size;
3252 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
3253 max_stripe_size = 32 * 1024 * 1024;
3254 max_chunk_size = 2 * max_stripe_size;
3256 printk(KERN_ERR "btrfs: invalid chunk type 0x%llx requested\n",
3261 /* we don't want a chunk larger than 10% of writeable space */
3262 max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
3265 devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices,
3270 cur = fs_devices->alloc_list.next;
3273 * in the first pass through the devices list, we gather information
3274 * about the available holes on each device.
3277 while (cur != &fs_devices->alloc_list) {
3278 struct btrfs_device *device;
3282 device = list_entry(cur, struct btrfs_device, dev_alloc_list);
3286 if (!device->writeable) {
3288 "btrfs: read-only device in alloc_list\n");
3293 if (!device->in_fs_metadata)
3296 if (device->total_bytes > device->bytes_used)
3297 total_avail = device->total_bytes - device->bytes_used;
3301 /* If there is no space on this device, skip it. */
3302 if (total_avail == 0)
3305 ret = find_free_dev_extent(device,
3306 max_stripe_size * dev_stripes,
3307 &dev_offset, &max_avail);
3308 if (ret && ret != -ENOSPC)
3312 max_avail = max_stripe_size * dev_stripes;
3314 if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
3317 devices_info[ndevs].dev_offset = dev_offset;
3318 devices_info[ndevs].max_avail = max_avail;
3319 devices_info[ndevs].total_avail = total_avail;
3320 devices_info[ndevs].dev = device;
3325 * now sort the devices by hole size / available space
3327 sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
3328 btrfs_cmp_device_info, NULL);
3330 /* round down to number of usable stripes */
3331 ndevs -= ndevs % devs_increment;
3333 if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
3338 if (devs_max && ndevs > devs_max)
3341 * the primary goal is to maximize the number of stripes, so use as many
3342 * devices as possible, even if the stripes are not maximum sized.
3344 stripe_size = devices_info[ndevs-1].max_avail;
3345 num_stripes = ndevs * dev_stripes;
3347 if (stripe_size * ndevs > max_chunk_size * ncopies) {
3348 stripe_size = max_chunk_size * ncopies;
3349 do_div(stripe_size, ndevs);
3352 do_div(stripe_size, dev_stripes);
3354 /* align to BTRFS_STRIPE_LEN */
3355 do_div(stripe_size, BTRFS_STRIPE_LEN);
3356 stripe_size *= BTRFS_STRIPE_LEN;
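/*
 * Worked example of the round-down above, assuming the usual 64KiB
 * BTRFS_STRIPE_LEN: for stripe_size = 1000000,
 *
 *	do_div(stripe_size, 65536);	-> stripe_size = 15, remainder dropped
 *	stripe_size *= 65536;		-> stripe_size = 983040
 *
 * i.e. stripe_size is truncated to a whole number of 64KiB stripe units.
 */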
3358 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
3363 map->num_stripes = num_stripes;
3365 for (i = 0; i < ndevs; ++i) {
3366 for (j = 0; j < dev_stripes; ++j) {
3367 int s = i * dev_stripes + j;
3368 map->stripes[s].dev = devices_info[i].dev;
3369 map->stripes[s].physical = devices_info[i].dev_offset +
3373 map->sector_size = extent_root->sectorsize;
3374 map->stripe_len = BTRFS_STRIPE_LEN;
3375 map->io_align = BTRFS_STRIPE_LEN;
3376 map->io_width = BTRFS_STRIPE_LEN;
3378 map->sub_stripes = sub_stripes;
3381 num_bytes = stripe_size * (num_stripes / ncopies);
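/*
 * Example of the size accounting above: with RAID1 on two devices
 * (ncopies = 2), num_stripes = 2 and num_bytes = stripe_size, so the
 * chunk exposes one copy's worth of logical space; with RAID0 on four
 * devices (ncopies = 1), num_bytes = 4 * stripe_size.
 */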
3383 *stripe_size_out = stripe_size;
3384 *num_bytes_out = num_bytes;
3386 trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);
3388 em = alloc_extent_map();
3393 em->bdev = (struct block_device *)map;
3395 em->len = num_bytes;
3396 em->block_start = 0;
3397 em->block_len = em->len;
3399 em_tree = &extent_root->fs_info->mapping_tree.map_tree;
3400 write_lock(&em_tree->lock);
3401 ret = add_extent_mapping(em_tree, em);
3402 write_unlock(&em_tree->lock);
3403 free_extent_map(em);
3407 ret = btrfs_make_block_group(trans, extent_root, 0, type,
3408 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3413 for (i = 0; i < map->num_stripes; ++i) {
3414 struct btrfs_device *device;
3417 device = map->stripes[i].dev;
3418 dev_offset = map->stripes[i].physical;
3420 ret = btrfs_alloc_dev_extent(trans, device,
3421 info->chunk_root->root_key.objectid,
3422 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3423 start, dev_offset, stripe_size);
3425 btrfs_abort_transaction(trans, extent_root, ret);
3430 kfree(devices_info);
3435 kfree(devices_info);
3439 static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
3440 struct btrfs_root *extent_root,
3441 struct map_lookup *map, u64 chunk_offset,
3442 u64 chunk_size, u64 stripe_size)
3445 struct btrfs_key key;
3446 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
3447 struct btrfs_device *device;
3448 struct btrfs_chunk *chunk;
3449 struct btrfs_stripe *stripe;
3450 size_t item_size = btrfs_chunk_item_size(map->num_stripes);
3454 chunk = kzalloc(item_size, GFP_NOFS);
3459 while (index < map->num_stripes) {
3460 device = map->stripes[index].dev;
3461 device->bytes_used += stripe_size;
3462 ret = btrfs_update_device(trans, device);
3468 spin_lock(&extent_root->fs_info->free_chunk_lock);
3469 extent_root->fs_info->free_chunk_space -= (stripe_size *
3471 spin_unlock(&extent_root->fs_info->free_chunk_lock);
3474 stripe = &chunk->stripe;
3475 while (index < map->num_stripes) {
3476 device = map->stripes[index].dev;
3477 dev_offset = map->stripes[index].physical;
3479 btrfs_set_stack_stripe_devid(stripe, device->devid);
3480 btrfs_set_stack_stripe_offset(stripe, dev_offset);
3481 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
3486 btrfs_set_stack_chunk_length(chunk, chunk_size);
3487 btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
3488 btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
3489 btrfs_set_stack_chunk_type(chunk, map->type);
3490 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
3491 btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
3492 btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
3493 btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
3494 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
3496 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3497 key.type = BTRFS_CHUNK_ITEM_KEY;
3498 key.offset = chunk_offset;
3500 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
3502 if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
3504 * TODO: Cleanup of inserted chunk root in case of
3505 * failure.
3507 ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
3517 * Chunk allocation falls into two parts. The first part does the work
3518 * that makes the newly allocated chunk usable, but does not do any
3519 * operation that modifies the chunk tree. The second part does the work
3520 * that requires modifying the chunk tree. This division is important for
3521 * the bootstrap process of adding storage to a seed btrfs.
3523 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3524 struct btrfs_root *extent_root, u64 type)
3529 struct map_lookup *map;
3530 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
3533 ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3538 ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
3539 &stripe_size, chunk_offset, type);
3543 ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
3544 chunk_size, stripe_size);
3550 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
3551 struct btrfs_root *root,
3552 struct btrfs_device *device)
3555 u64 sys_chunk_offset;
3559 u64 sys_stripe_size;
3561 struct map_lookup *map;
3562 struct map_lookup *sys_map;
3563 struct btrfs_fs_info *fs_info = root->fs_info;
3564 struct btrfs_root *extent_root = fs_info->extent_root;
3567 ret = find_next_chunk(fs_info->chunk_root,
3568 BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset);
3572 alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
3573 fs_info->avail_metadata_alloc_bits;
3574 alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
3576 ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
3577 &stripe_size, chunk_offset, alloc_profile);
3581 sys_chunk_offset = chunk_offset + chunk_size;
3583 alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM |
3584 fs_info->avail_system_alloc_bits;
3585 alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
3587 ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
3588 &sys_chunk_size, &sys_stripe_size,
3589 sys_chunk_offset, alloc_profile);
3593 ret = btrfs_add_device(trans, fs_info->chunk_root, device);
3598 * Modifying the chunk tree requires allocating new blocks from both the
3599 * system block group and the metadata block group, so we can only do
3600 * operations that modify the chunk tree after both block groups have
3601 * been created.
3603 ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
3604 chunk_size, stripe_size);
3608 ret = __finish_chunk_alloc(trans, extent_root, sys_map,
3609 sys_chunk_offset, sys_chunk_size,
3617 btrfs_abort_transaction(trans, root, ret);
3621 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
3623 struct extent_map *em;
3624 struct map_lookup *map;
3625 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
3629 read_lock(&map_tree->map_tree.lock);
3630 em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
3631 read_unlock(&map_tree->map_tree.lock);
3635 if (btrfs_test_opt(root, DEGRADED)) {
3636 free_extent_map(em);
3640 map = (struct map_lookup *)em->bdev;
3641 for (i = 0; i < map->num_stripes; i++) {
3642 if (!map->stripes[i].dev->writeable) {
3647 free_extent_map(em);
3651 void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
3653 extent_map_tree_init(&tree->map_tree);
3656 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
3658 struct extent_map *em;
3661 write_lock(&tree->map_tree.lock);
3662 em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
3664 remove_extent_mapping(&tree->map_tree, em);
3665 write_unlock(&tree->map_tree.lock);
3670 free_extent_map(em);
3671 /* once for the tree */
3672 free_extent_map(em);
3676 int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
3678 struct extent_map *em;
3679 struct map_lookup *map;
3680 struct extent_map_tree *em_tree = &map_tree->map_tree;
3683 read_lock(&em_tree->lock);
3684 em = lookup_extent_mapping(em_tree, logical, len);
3685 read_unlock(&em_tree->lock);
3688 BUG_ON(em->start > logical || em->start + em->len < logical);
3689 map = (struct map_lookup *)em->bdev;
3690 if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
3691 ret = map->num_stripes;
3692 else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
3693 ret = map->sub_stripes;
3696 free_extent_map(em);
3700 static int find_live_mirror(struct map_lookup *map, int first, int num,
3704 if (map->stripes[optimal].dev->bdev)
3706 for (i = first; i < first + num; i++) {
3707 if (map->stripes[i].dev->bdev)
3710 /* we couldn't find one that doesn't fail. Just return something
3711 * and the io error handling code will clean up eventually
3716 static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
3717 u64 logical, u64 *length,
3718 struct btrfs_bio **bbio_ret,
3721 struct extent_map *em;
3722 struct map_lookup *map;
3723 struct extent_map_tree *em_tree = &map_tree->map_tree;
3726 u64 stripe_end_offset;
3735 struct btrfs_bio *bbio = NULL;
3737 read_lock(&em_tree->lock);
3738 em = lookup_extent_mapping(em_tree, logical, *length);
3739 read_unlock(&em_tree->lock);
3742 printk(KERN_CRIT "unable to find logical %llu len %llu\n",
3743 (unsigned long long)logical,
3744 (unsigned long long)*length);
3748 BUG_ON(em->start > logical || em->start + em->len < logical);
3749 map = (struct map_lookup *)em->bdev;
3750 offset = logical - em->start;
3752 if (mirror_num > map->num_stripes)
3757 * stripe_nr counts the total number of stripes we have to stride
3758 * to get to this block
3760 do_div(stripe_nr, map->stripe_len);
3762 stripe_offset = stripe_nr * map->stripe_len;
3763 BUG_ON(offset < stripe_offset);
3765 /* stripe_offset is the offset of this block in its stripe */
3766 stripe_offset = offset - stripe_offset;
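/*
 * Worked example of the stripe arithmetic above, assuming a 64KiB
 * stripe_len: for offset = 200KiB,
 *
 *	stripe_nr     = 200KiB / 64KiB       = 3	(do_div rounds down)
 *	stripe_offset = 200KiB - 3 * 64KiB   = 8KiB
 *
 * so the block starts 8KiB into the fourth stripe of the chunk.
 */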
3768 if (rw & REQ_DISCARD)
3769 *length = min_t(u64, em->len - offset, *length);
3770 else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
3771 /* we limit the length of each bio to what fits in a stripe */
3772 *length = min_t(u64, em->len - offset,
3773 map->stripe_len - stripe_offset);
3775 *length = em->len - offset;
3783 stripe_nr_orig = stripe_nr;
3784 stripe_nr_end = (offset + *length + map->stripe_len - 1) &
3785 (~(map->stripe_len - 1));
3786 do_div(stripe_nr_end, map->stripe_len);
3787 stripe_end_offset = stripe_nr_end * map->stripe_len -
3789 if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3790 if (rw & REQ_DISCARD)
3791 num_stripes = min_t(u64, map->num_stripes,
3792 stripe_nr_end - stripe_nr_orig);
3793 stripe_index = do_div(stripe_nr, map->num_stripes);
3794 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
3795 if (rw & (REQ_WRITE | REQ_DISCARD))
3796 num_stripes = map->num_stripes;
3797 else if (mirror_num)
3798 stripe_index = mirror_num - 1;
3800 stripe_index = find_live_mirror(map, 0,
3802 current->pid % map->num_stripes);
3803 mirror_num = stripe_index + 1;
3806 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
3807 if (rw & (REQ_WRITE | REQ_DISCARD)) {
3808 num_stripes = map->num_stripes;
3809 } else if (mirror_num) {
3810 stripe_index = mirror_num - 1;
3815 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3816 int factor = map->num_stripes / map->sub_stripes;
3818 stripe_index = do_div(stripe_nr, factor);
3819 stripe_index *= map->sub_stripes;
3822 num_stripes = map->sub_stripes;
3823 else if (rw & REQ_DISCARD)
3824 num_stripes = min_t(u64, map->sub_stripes *
3825 (stripe_nr_end - stripe_nr_orig),
3827 else if (mirror_num)
3828 stripe_index += mirror_num - 1;
3830 int old_stripe_index = stripe_index;
3831 stripe_index = find_live_mirror(map, stripe_index,
3832 map->sub_stripes, stripe_index +
3833 current->pid % map->sub_stripes);
3834 mirror_num = stripe_index - old_stripe_index + 1;
3838 * after this do_div call, stripe_nr is the number of stripes
3839 * on this device we have to walk to find the data, and
3840 * stripe_index is the number of our device in the stripe array
3842 stripe_index = do_div(stripe_nr, map->num_stripes);
3843 mirror_num = stripe_index + 1;
3845 BUG_ON(stripe_index >= map->num_stripes);
3847 bbio = kzalloc(btrfs_bio_size(num_stripes), GFP_NOFS);
3852 atomic_set(&bbio->error, 0);
3854 if (rw & REQ_DISCARD) {
3856 int sub_stripes = 0;
3857 u64 stripes_per_dev = 0;
3858 u32 remaining_stripes = 0;
3859 u32 last_stripe = 0;
3862 (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
3863 if (map->type & BTRFS_BLOCK_GROUP_RAID0)
3866 sub_stripes = map->sub_stripes;
3868 factor = map->num_stripes / sub_stripes;
3869 stripes_per_dev = div_u64_rem(stripe_nr_end -
3872 &remaining_stripes);
3873 div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
3874 last_stripe *= sub_stripes;
3877 for (i = 0; i < num_stripes; i++) {
3878 bbio->stripes[i].physical =
3879 map->stripes[stripe_index].physical +
3880 stripe_offset + stripe_nr * map->stripe_len;
3881 bbio->stripes[i].dev = map->stripes[stripe_index].dev;
3883 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
3884 BTRFS_BLOCK_GROUP_RAID10)) {
3885 bbio->stripes[i].length = stripes_per_dev *
3888 if (i / sub_stripes < remaining_stripes)
3889 bbio->stripes[i].length +=
3893 * Special for the first stripe and
3894 * the last stripe:
3895 *
3896 * |-------|...|-------|
3897 *     |----------|
3898 *    off     end_off
3900 if (i < sub_stripes)
3901 bbio->stripes[i].length -=
3904 if (stripe_index >= last_stripe &&
3905 stripe_index <= (last_stripe +
3907 bbio->stripes[i].length -=
3910 if (i == sub_stripes - 1)
3913 bbio->stripes[i].length = *length;
3916 if (stripe_index == map->num_stripes) {
3917 /* This could only happen for RAID0/10 */
3923 for (i = 0; i < num_stripes; i++) {
3924 bbio->stripes[i].physical =
3925 map->stripes[stripe_index].physical +
3927 stripe_nr * map->stripe_len;
3928 bbio->stripes[i].dev =
3929 map->stripes[stripe_index].dev;
3934 if (rw & REQ_WRITE) {
3935 if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
3936 BTRFS_BLOCK_GROUP_RAID10 |
3937 BTRFS_BLOCK_GROUP_DUP)) {
3943 bbio->num_stripes = num_stripes;
3944 bbio->max_errors = max_errors;
3945 bbio->mirror_num = mirror_num;
3947 free_extent_map(em);
3951 int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
3952 u64 logical, u64 *length,
3953 struct btrfs_bio **bbio_ret, int mirror_num)
3955 return __btrfs_map_block(map_tree, rw, logical, length, bbio_ret,
3959 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
3960 u64 chunk_start, u64 physical, u64 devid,
3961 u64 **logical, int *naddrs, int *stripe_len)
3963 struct extent_map_tree *em_tree = &map_tree->map_tree;
3964 struct extent_map *em;
3965 struct map_lookup *map;
3972 read_lock(&em_tree->lock);
3973 em = lookup_extent_mapping(em_tree, chunk_start, 1);
3974 read_unlock(&em_tree->lock);
3976 BUG_ON(!em || em->start != chunk_start);
3977 map = (struct map_lookup *)em->bdev;
3980 if (map->type & BTRFS_BLOCK_GROUP_RAID10)
3981 do_div(length, map->num_stripes / map->sub_stripes);
3982 else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
3983 do_div(length, map->num_stripes);
3985 buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
3986 BUG_ON(!buf); /* -ENOMEM */
3988 for (i = 0; i < map->num_stripes; i++) {
3989 if (devid && map->stripes[i].dev->devid != devid)
3991 if (map->stripes[i].physical > physical ||
3992 map->stripes[i].physical + length <= physical)
3995 stripe_nr = physical - map->stripes[i].physical;
3996 do_div(stripe_nr, map->stripe_len);
3998 if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3999 stripe_nr = stripe_nr * map->num_stripes + i;
4000 do_div(stripe_nr, map->sub_stripes);
4001 } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
4002 stripe_nr = stripe_nr * map->num_stripes + i;
4004 bytenr = chunk_start + stripe_nr * map->stripe_len;
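/*
 * Reverse-mapping example, assuming 64KiB stripes and RAID0 over two
 * devices: for physical = map->stripes[1].physical + 128KiB, stripe_nr
 * becomes 2 on that device, then 2 * num_stripes + i = 2 * 2 + 1 = 5, so
 * bytenr = chunk_start + 5 * 64KiB, which matches the forward striping
 * order (logical stripe 5 -> device 5 % 2 = 1, device stripe 5 / 2 = 2).
 */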
4005 WARN_ON(nr >= map->num_stripes);
4006 for (j = 0; j < nr; j++) {
4007 if (buf[j] == bytenr)
4011 WARN_ON(nr >= map->num_stripes);
4018 *stripe_len = map->stripe_len;
4020 free_extent_map(em);
4024 static void *merge_stripe_index_into_bio_private(void *bi_private,
4025 unsigned int stripe_index)
4028 * with single, dup, RAID0, RAID1 and RAID10, stripe_index is
4029 * at most 3 and fits into the two low bits of the aligned pointer.
4030 * The alternative solution (instead of stealing bits from the
4031 * pointer) would be to allocate an intermediate structure
4032 * that contains the old private pointer plus the stripe_index.
4034 BUG_ON((((uintptr_t)bi_private) & 3) != 0);
4035 BUG_ON(stripe_index > 3);
4036 return (void *)(((uintptr_t)bi_private) | stripe_index);
4039 static struct btrfs_bio *extract_bbio_from_bio_private(void *bi_private)
4041 return (struct btrfs_bio *)(((uintptr_t)bi_private) & ~((uintptr_t)3));
4044 static unsigned int extract_stripe_index_from_bio_private(void *bi_private)
4046 return (unsigned int)((uintptr_t)bi_private) & 3;
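/*
 * Round-trip sketch of the bit-stealing scheme above: bi_private is at
 * least 4-byte aligned, so its two low bits are free to carry the stripe
 * index:
 *
 *	struct btrfs_bio *bbio = ...;	// low two bits of the pointer are 0
 *	void *m = merge_stripe_index_into_bio_private(bbio, 2);
 *
 *	extract_bbio_from_bio_private(m)	 == bbio
 *	extract_stripe_index_from_bio_private(m) == 2
 */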
4049 static void btrfs_end_bio(struct bio *bio, int err)
4051 struct btrfs_bio *bbio = extract_bbio_from_bio_private(bio->bi_private);
4052 int is_orig_bio = 0;
4055 atomic_inc(&bbio->error);
4056 if (err == -EIO || err == -EREMOTEIO) {
4057 unsigned int stripe_index =
4058 extract_stripe_index_from_bio_private(
4060 struct btrfs_device *dev;
4062 BUG_ON(stripe_index >= bbio->num_stripes);
4063 dev = bbio->stripes[stripe_index].dev;
4064 if (bio->bi_rw & WRITE)
4065 btrfs_dev_stat_inc(dev,
4066 BTRFS_DEV_STAT_WRITE_ERRS);
4068 btrfs_dev_stat_inc(dev,
4069 BTRFS_DEV_STAT_READ_ERRS);
4070 if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
4071 btrfs_dev_stat_inc(dev,
4072 BTRFS_DEV_STAT_FLUSH_ERRS);
4073 btrfs_dev_stat_print_on_error(dev);
4077 if (bio == bbio->orig_bio)
4080 if (atomic_dec_and_test(&bbio->stripes_pending)) {
4083 bio = bbio->orig_bio;
4085 bio->bi_private = bbio->private;
4086 bio->bi_end_io = bbio->end_io;
4087 bio->bi_bdev = (struct block_device *)
4088 (unsigned long)bbio->mirror_num;
4089 /* only send an error to the higher layers if it is
4090 * beyond the tolerance of the multi-bio
4092 if (atomic_read(&bbio->error) > bbio->max_errors) {
4096 * this bio is actually up to date, we didn't
4097 * go over the max number of errors
4099 set_bit(BIO_UPTODATE, &bio->bi_flags);
4104 bio_endio(bio, err);
4105 } else if (!is_orig_bio) {
4110 struct async_sched {
4113 struct btrfs_fs_info *info;
4114 struct btrfs_work work;
4118 * see run_scheduled_bios for a description of why bios are collected for
4119 * async submit.
4121 * This will add one bio to the pending list for a device and make sure
4122 * the work struct is scheduled.
4124 static noinline void schedule_bio(struct btrfs_root *root,
4125 struct btrfs_device *device,
4126 int rw, struct bio *bio)
4128 int should_queue = 1;
4129 struct btrfs_pending_bios *pending_bios;
4131 /* don't bother with additional async steps for reads, right now */
4132 if (!(rw & REQ_WRITE)) {
4134 btrfsic_submit_bio(rw, bio);
4140 * nr_async_bios allows us to reliably return congestion to the
4141 * higher layers. Otherwise, the async bio makes it appear we have
4142 * made progress against dirty pages when we've really just put it
4143 * on a queue for later
4145 atomic_inc(&root->fs_info->nr_async_bios);
4146 WARN_ON(bio->bi_next);
4147 bio->bi_next = NULL;
4150 spin_lock(&device->io_lock);
4151 if (bio->bi_rw & REQ_SYNC)
4152 pending_bios = &device->pending_sync_bios;
4154 pending_bios = &device->pending_bios;
4156 if (pending_bios->tail)
4157 pending_bios->tail->bi_next = bio;
4159 pending_bios->tail = bio;
4160 if (!pending_bios->head)
4161 pending_bios->head = bio;
4162 if (device->running_pending)
4165 spin_unlock(&device->io_lock);
4168 btrfs_queue_worker(&root->fs_info->submit_workers,
4172 int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
4173 int mirror_num, int async_submit)
4175 struct btrfs_mapping_tree *map_tree;
4176 struct btrfs_device *dev;
4177 struct bio *first_bio = bio;
4178 u64 logical = (u64)bio->bi_sector << 9;
4184 struct btrfs_bio *bbio = NULL;
4186 length = bio->bi_size;
4187 map_tree = &root->fs_info->mapping_tree;
4188 map_length = length;
4190 ret = btrfs_map_block(map_tree, rw, logical, &map_length, &bbio,
4192 if (ret) /* -ENOMEM */
4195 total_devs = bbio->num_stripes;
4196 if (map_length < length) {
4197 printk(KERN_CRIT "mapping failed logical %llu bio len %llu "
4198 "len %llu\n", (unsigned long long)logical,
4199 (unsigned long long)length,
4200 (unsigned long long)map_length);
4204 bbio->orig_bio = first_bio;
4205 bbio->private = first_bio->bi_private;
4206 bbio->end_io = first_bio->bi_end_io;
4207 atomic_set(&bbio->stripes_pending, bbio->num_stripes);
4209 while (dev_nr < total_devs) {
4210 if (dev_nr < total_devs - 1) {
4211 bio = bio_clone(first_bio, GFP_NOFS);
4212 BUG_ON(!bio); /* -ENOMEM */
4216 bio->bi_private = bbio;
4217 bio->bi_private = merge_stripe_index_into_bio_private(
4218 bio->bi_private, (unsigned int)dev_nr);
4219 bio->bi_end_io = btrfs_end_bio;
4220 bio->bi_sector = bbio->stripes[dev_nr].physical >> 9;
4221 dev = bbio->stripes[dev_nr].dev;
4222 if (dev && dev->bdev && (rw != WRITE || dev->writeable)) {
4224 struct rcu_string *name;
4227 name = rcu_dereference(dev->name);
4228 pr_debug("btrfs_map_bio: rw %d, sector=%llu, dev=%lu "
4229 "(%s id %llu), size=%u\n", rw,
4230 (u64)bio->bi_sector, (u_long)dev->bdev->bd_dev,
4231 name->str, dev->devid, bio->bi_size);
4234 bio->bi_bdev = dev->bdev;
4236 schedule_bio(root, dev, rw, bio);
4238 btrfsic_submit_bio(rw, bio);
4240 bio->bi_bdev = root->fs_info->fs_devices->latest_bdev;
4241 bio->bi_sector = logical >> 9;
4242 bio_endio(bio, -EIO);
4249 struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
4252 struct btrfs_device *device;
4253 struct btrfs_fs_devices *cur_devices;
4255 cur_devices = root->fs_info->fs_devices;
4256 while (cur_devices) {
4258 !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
4259 device = __find_device(&cur_devices->devices,
4264 cur_devices = cur_devices->seed;
4269 static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
4270 u64 devid, u8 *dev_uuid)
4272 struct btrfs_device *device;
4273 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
4275 device = kzalloc(sizeof(*device), GFP_NOFS);
4278 list_add(&device->dev_list,
4279 &fs_devices->devices);
4280 device->dev_root = root->fs_info->dev_root;
4281 device->devid = devid;
4282 device->work.func = pending_bios_fn;
4283 device->fs_devices = fs_devices;
4284 device->missing = 1;
4285 fs_devices->num_devices++;
4286 fs_devices->missing_devices++;
4287 spin_lock_init(&device->io_lock);
4288 INIT_LIST_HEAD(&device->dev_alloc_list);
4289 memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
4293 static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
4294 struct extent_buffer *leaf,
4295 struct btrfs_chunk *chunk)
4297 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
4298 struct map_lookup *map;
4299 struct extent_map *em;
4303 u8 uuid[BTRFS_UUID_SIZE];
4308 logical = key->offset;
4309 length = btrfs_chunk_length(leaf, chunk);
4311 read_lock(&map_tree->map_tree.lock);
4312 em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
4313 read_unlock(&map_tree->map_tree.lock);
4315 /* already mapped? */
4316 if (em && em->start <= logical && em->start + em->len > logical) {
4317 free_extent_map(em);
4320 free_extent_map(em);
4323 em = alloc_extent_map();
4326 num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
4327 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
4329 free_extent_map(em);
4333 em->bdev = (struct block_device *)map;
4334 em->start = logical;
4336 em->block_start = 0;
4337 em->block_len = em->len;
4339 map->num_stripes = num_stripes;
4340 map->io_width = btrfs_chunk_io_width(leaf, chunk);
4341 map->io_align = btrfs_chunk_io_align(leaf, chunk);
4342 map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
4343 map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
4344 map->type = btrfs_chunk_type(leaf, chunk);
4345 map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
4346 for (i = 0; i < num_stripes; i++) {
4347 map->stripes[i].physical =
4348 btrfs_stripe_offset_nr(leaf, chunk, i);
4349 devid = btrfs_stripe_devid_nr(leaf, chunk, i);
4350 read_extent_buffer(leaf, uuid, (unsigned long)
4351 btrfs_stripe_dev_uuid_nr(chunk, i),
4353 map->stripes[i].dev = btrfs_find_device(root, devid, uuid,
4355 if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
4357 free_extent_map(em);
4360 if (!map->stripes[i].dev) {
4361 map->stripes[i].dev =
4362 add_missing_dev(root, devid, uuid);
4363 if (!map->stripes[i].dev) {
4365 free_extent_map(em);
4369 map->stripes[i].dev->in_fs_metadata = 1;
4372 write_lock(&map_tree->map_tree.lock);
4373 ret = add_extent_mapping(&map_tree->map_tree, em);
4374 write_unlock(&map_tree->map_tree.lock);
4375 BUG_ON(ret); /* Tree corruption */
4376 free_extent_map(em);
4381 static void fill_device_from_item(struct extent_buffer *leaf,
4382 struct btrfs_dev_item *dev_item,
4383 struct btrfs_device *device)
4387 device->devid = btrfs_device_id(leaf, dev_item);
4388 device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
4389 device->total_bytes = device->disk_total_bytes;
4390 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
4391 device->type = btrfs_device_type(leaf, dev_item);
4392 device->io_align = btrfs_device_io_align(leaf, dev_item);
4393 device->io_width = btrfs_device_io_width(leaf, dev_item);
4394 device->sector_size = btrfs_device_sector_size(leaf, dev_item);
4396 ptr = (unsigned long)btrfs_device_uuid(dev_item);
4397 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
4400 static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
4402 struct btrfs_fs_devices *fs_devices;
4405 BUG_ON(!mutex_is_locked(&uuid_mutex));
4407 fs_devices = root->fs_info->fs_devices->seed;
4408 while (fs_devices) {
4409 if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
4413 fs_devices = fs_devices->seed;
4416 fs_devices = find_fsid(fsid);
4422 fs_devices = clone_fs_devices(fs_devices);
4423 if (IS_ERR(fs_devices)) {
4424 ret = PTR_ERR(fs_devices);
4428 ret = __btrfs_open_devices(fs_devices, FMODE_READ,
4429 root->fs_info->bdev_holder);
4431 free_fs_devices(fs_devices);
4435 if (!fs_devices->seeding) {
4436 __btrfs_close_devices(fs_devices);
4437 free_fs_devices(fs_devices);
4442 fs_devices->seed = root->fs_info->fs_devices->seed;
4443 root->fs_info->fs_devices->seed = fs_devices;
4448 static int read_one_dev(struct btrfs_root *root,
4449 struct extent_buffer *leaf,
4450 struct btrfs_dev_item *dev_item)
4452 struct btrfs_device *device;
4455 u8 fs_uuid[BTRFS_UUID_SIZE];
4456 u8 dev_uuid[BTRFS_UUID_SIZE];
4458 devid = btrfs_device_id(leaf, dev_item);
4459 read_extent_buffer(leaf, dev_uuid,
4460 (unsigned long)btrfs_device_uuid(dev_item),
4462 read_extent_buffer(leaf, fs_uuid,
4463 (unsigned long)btrfs_device_fsid(dev_item),
4466 if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
4467 ret = open_seed_devices(root, fs_uuid);
4468 if (ret && !btrfs_test_opt(root, DEGRADED))
4472 device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
4473 if (!device || !device->bdev) {
4474 if (!btrfs_test_opt(root, DEGRADED))
4478 printk(KERN_WARNING "warning: devid %llu missing\n",
4479 (unsigned long long)devid);
4480 device = add_missing_dev(root, devid, dev_uuid);
4483 } else if (!device->missing) {
4485 * this happens when a device that was properly set up
4486 * in the device info lists suddenly goes bad.
4487 * device->bdev is NULL, so we have to set
4488 * device->missing to 1 here
4490 root->fs_info->fs_devices->missing_devices++;
4491 device->missing = 1;
4495 if (device->fs_devices != root->fs_info->fs_devices) {
4496 BUG_ON(device->writeable);
4497 if (device->generation !=
4498 btrfs_device_generation(leaf, dev_item))
4502 fill_device_from_item(leaf, dev_item, device);
4503 device->dev_root = root->fs_info->dev_root;
4504 device->in_fs_metadata = 1;
4505 if (device->writeable) {
4506 device->fs_devices->total_rw_bytes += device->total_bytes;
4507 spin_lock(&root->fs_info->free_chunk_lock);
4508 root->fs_info->free_chunk_space += device->total_bytes -
4510 spin_unlock(&root->fs_info->free_chunk_lock);
4516 int btrfs_read_sys_array(struct btrfs_root *root)
4518 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
4519 struct extent_buffer *sb;
4520 struct btrfs_disk_key *disk_key;
4521 struct btrfs_chunk *chunk;
4523 unsigned long sb_ptr;
4529 struct btrfs_key key;
4531 sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
4532 BTRFS_SUPER_INFO_SIZE);
4535 btrfs_set_buffer_uptodate(sb);
4536 btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
4538 * The sb extent buffer is artificial and just used to read the system array.
4539 * The btrfs_set_buffer_uptodate() call does not properly mark all its
4540 * pages up-to-date when the page is larger: extent does not cover the
4541 * whole page and consequently check_page_uptodate does not find all
4542 * the page's extents up-to-date (the hole beyond sb),
4543 * write_extent_buffer then triggers a WARN_ON.
4545 * Regular short extents go through mark_extent_buffer_dirty/writeback cycle,
4546 * but sb spans only this function. Add an explicit SetPageUptodate call
4547 * to silence the warning, e.g. on PowerPC 64.
4549 if (PAGE_CACHE_SIZE > BTRFS_SUPER_INFO_SIZE)
4550 SetPageUptodate(sb->pages[0]);
4552 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
4553 array_size = btrfs_super_sys_array_size(super_copy);
4555 ptr = super_copy->sys_chunk_array;
4556 sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
4559 while (cur < array_size) {
4560 disk_key = (struct btrfs_disk_key *)ptr;
4561 btrfs_disk_key_to_cpu(&key, disk_key);
4563 len = sizeof(*disk_key); ptr += len;
4567 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
4568 chunk = (struct btrfs_chunk *)sb_ptr;
4569 ret = read_one_chunk(root, &key, sb, chunk);
4572 num_stripes = btrfs_chunk_num_stripes(sb, chunk);
4573 len = btrfs_chunk_item_size(num_stripes);
4582 free_extent_buffer(sb);
4586 struct btrfs_device *btrfs_find_device_for_logical(struct btrfs_root *root,
4587 u64 logical, int mirror_num)
4589 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
4592 struct btrfs_bio *bbio = NULL;
4593 struct btrfs_device *device;
4595 BUG_ON(mirror_num == 0);
4596 ret = btrfs_map_block(map_tree, WRITE, logical, &map_length, &bbio,
4599 BUG_ON(bbio != NULL);
4602 BUG_ON(mirror_num != bbio->mirror_num);
4603 device = bbio->stripes[mirror_num - 1].dev;
4608 int btrfs_read_chunk_tree(struct btrfs_root *root)
4610 struct btrfs_path *path;
4611 struct extent_buffer *leaf;
4612 struct btrfs_key key;
4613 struct btrfs_key found_key;
4617 root = root->fs_info->chunk_root;
4619 path = btrfs_alloc_path();
4623 mutex_lock(&uuid_mutex);
4626 /* first we search for all of the device items, and then we
4627 * read in all of the chunk items. This way we can create chunk
4628 * mappings that reference all of the devices that are found
4630 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
4634 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4638 leaf = path->nodes[0];
4639 slot = path->slots[0];
4640 if (slot >= btrfs_header_nritems(leaf)) {
4641 ret = btrfs_next_leaf(root, path);
4648 btrfs_item_key_to_cpu(leaf, &found_key, slot);
4649 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
4650 if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
4652 if (found_key.type == BTRFS_DEV_ITEM_KEY) {
4653 struct btrfs_dev_item *dev_item;
4654 dev_item = btrfs_item_ptr(leaf, slot,
4655 struct btrfs_dev_item);
4656 ret = read_one_dev(root, leaf, dev_item);
4660 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
4661 struct btrfs_chunk *chunk;
4662 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
4663 ret = read_one_chunk(root, &found_key, leaf, chunk);
4669 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
4671 btrfs_release_path(path);
4676 unlock_chunks(root);
4677 mutex_unlock(&uuid_mutex);
4679 btrfs_free_path(path);
4683 static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
4687 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
4688 btrfs_dev_stat_reset(dev, i);
4691 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
4693 struct btrfs_key key;
4694 struct btrfs_key found_key;
4695 struct btrfs_root *dev_root = fs_info->dev_root;
4696 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
4697 struct extent_buffer *eb;
4700 struct btrfs_device *device;
4701 struct btrfs_path *path = NULL;
4704 path = btrfs_alloc_path();
4710 mutex_lock(&fs_devices->device_list_mutex);
4711 list_for_each_entry(device, &fs_devices->devices, dev_list) {
4713 struct btrfs_dev_stats_item *ptr;
4716 key.type = BTRFS_DEV_STATS_KEY;
4717 key.offset = device->devid;
4718 ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
4720 printk_in_rcu(KERN_WARNING "btrfs: no dev_stats entry found for device %s (devid %llu) (OK on first mount after mkfs)\n",
4721 rcu_str_deref(device->name),
4722 (unsigned long long)device->devid);
4723 __btrfs_reset_dev_stats(device);
4724 device->dev_stats_valid = 1;
4725 btrfs_release_path(path);
4728 slot = path->slots[0];
4729 eb = path->nodes[0];
4730 btrfs_item_key_to_cpu(eb, &found_key, slot);
4731 item_size = btrfs_item_size_nr(eb, slot);
4733 ptr = btrfs_item_ptr(eb, slot,
4734 struct btrfs_dev_stats_item);
4736 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
4737 if (item_size >= (1 + i) * sizeof(__le64))
4738 btrfs_dev_stat_set(device, i,
4739 btrfs_dev_stats_value(eb, ptr, i));
4741 btrfs_dev_stat_reset(device, i);
4744 device->dev_stats_valid = 1;
4745 btrfs_dev_stat_print_on_load(device);
4746 btrfs_release_path(path);
4748 mutex_unlock(&fs_devices->device_list_mutex);
4751 btrfs_free_path(path);
4752 return ret < 0 ? ret : 0;
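/*
 * On-disk format of the dev_stats item read above, as a sketch: the item
 * body is simply an array of __le64 counters, one per BTRFS_DEV_STAT_*
 * index. Readers key off the item size rather than a version field, so a
 * shorter (older) item yields zero for the trailing counters and a longer
 * (newer) one has its extra values ignored.
 */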
4755 static int update_dev_stat_item(struct btrfs_trans_handle *trans,
4756 struct btrfs_root *dev_root,
4757 struct btrfs_device *device)
4759 struct btrfs_path *path;
4760 struct btrfs_key key;
4761 struct extent_buffer *eb;
4762 struct btrfs_dev_stats_item *ptr;
4767 key.type = BTRFS_DEV_STATS_KEY;
4768 key.offset = device->devid;
4770 path = btrfs_alloc_path();
4772 ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
4774 printk_in_rcu(KERN_WARNING "btrfs: error %d while searching for dev_stats item for device %s!\n",
4775 ret, rcu_str_deref(device->name));
4780 btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
4781 /* need to delete old one and insert a new one */
4782 ret = btrfs_del_item(trans, dev_root, path);
4784 printk_in_rcu(KERN_WARNING "btrfs: delete too small dev_stats item for device %s failed %d!\n",
4785 rcu_str_deref(device->name), ret);
4792 /* need to insert a new item */
4793 btrfs_release_path(path);
4794 ret = btrfs_insert_empty_item(trans, dev_root, path,
4795 &key, sizeof(*ptr));
4797 printk_in_rcu(KERN_WARNING "btrfs: insert dev_stats item for device %s failed %d!\n",
4798 rcu_str_deref(device->name), ret);
4803 eb = path->nodes[0];
4804 ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
4805 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
4806 btrfs_set_dev_stats_value(eb, ptr, i,
4807 btrfs_dev_stat_read(device, i));
4808 btrfs_mark_buffer_dirty(eb);
4811 btrfs_free_path(path);
4816 * called from commit_transaction. Writes all changed device stats to disk.
4818 int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
4819 struct btrfs_fs_info *fs_info)
4821 struct btrfs_root *dev_root = fs_info->dev_root;
4822 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
4823 struct btrfs_device *device;
4826 mutex_lock(&fs_devices->device_list_mutex);
4827 list_for_each_entry(device, &fs_devices->devices, dev_list) {
4828 if (!device->dev_stats_valid || !device->dev_stats_dirty)
4831 ret = update_dev_stat_item(trans, dev_root, device);
4833 device->dev_stats_dirty = 0;
4835 mutex_unlock(&fs_devices->device_list_mutex);
4840 void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
4842 btrfs_dev_stat_inc(dev, index);
4843 btrfs_dev_stat_print_on_error(dev);
4846 void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
4848 if (!dev->dev_stats_valid)
4850 printk_ratelimited_in_rcu(KERN_ERR
4851 "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
4852 rcu_str_deref(dev->name),
4853 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
4854 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
4855 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
4856 btrfs_dev_stat_read(dev,
4857 BTRFS_DEV_STAT_CORRUPTION_ERRS),
4858 btrfs_dev_stat_read(dev,
4859 BTRFS_DEV_STAT_GENERATION_ERRS));
4862 static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
4864 printk_in_rcu(KERN_INFO "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
4865 rcu_str_deref(dev->name),
4866 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
4867 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
4868 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
4869 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
4870 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
4873 int btrfs_get_dev_stats(struct btrfs_root *root,
4874 struct btrfs_ioctl_get_dev_stats *stats,
4875 int reset_after_read)
4877 struct btrfs_device *dev;
4878 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
4881 mutex_lock(&fs_devices->device_list_mutex);
4882 dev = btrfs_find_device(root, stats->devid, NULL, NULL);
4883 mutex_unlock(&fs_devices->device_list_mutex);
4887 "btrfs: get dev_stats failed, device not found\n");
4889 } else if (!dev->dev_stats_valid) {
4891 "btrfs: get dev_stats failed, not yet valid\n");
4893 } else if (reset_after_read) {
4894 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
4895 if (stats->nr_items > i)
4897 btrfs_dev_stat_read_and_reset(dev, i);
4899 btrfs_dev_stat_reset(dev, i);
4902 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
4903 if (stats->nr_items > i)
4904 stats->values[i] = btrfs_dev_stat_read(dev, i);
4906 if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
4907 stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;