/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <asm/div64.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "dev-replace.h"

static int init_first_rw_device(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_device *device);
static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);

static DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);

static void lock_chunks(struct btrfs_root *root)
{
	mutex_lock(&root->fs_info->chunk_mutex);
}

static void unlock_chunks(struct btrfs_root *root)
{
	mutex_unlock(&root->fs_info->chunk_mutex);
}

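/*
 * Illustrative sketch (not part of the original file): callers wrap any
 * chunk-tree mutation in the helpers above, e.g.
 *
 *	lock_chunks(root);
 *	ret = __btrfs_grow_device(trans, device, new_size);
 *	unlock_chunks(root);
 *
 * which is exactly the pattern btrfs_grow_device() later in this file
 * follows.
 */
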
static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		rcu_string_free(device->name);
		kfree(device);
	}
	kfree(fs_devices);
}

static void btrfs_kobject_uevent(struct block_device *bdev,
				 enum kobject_action action)
{
	int ret;

	ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, action);
	if (ret)
		pr_warn("Sending event '%d' to kobject: '%s' (%p): failed\n",
			action,
			kobject_name(&disk_to_dev(bdev->bd_disk)->kobj),
			&disk_to_dev(bdev->bd_disk)->kobj);
}

void btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, list);
		list_del(&fs_devices->list);
		free_fs_devices(fs_devices);
	}
}

static noinline struct btrfs_device *__find_device(struct list_head *head,
						   u64 devid, u8 *uuid)
{
	struct btrfs_device *dev;

	list_for_each_entry(dev, head, dev_list) {
		if (dev->devid == devid &&
		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
			return dev;
		}
	}
	return NULL;
}

static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, list) {
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}

static int
btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
		      int flush, struct block_device **bdev,
		      struct buffer_head **bh)
{
	int ret;

	*bdev = blkdev_get_by_path(device_path, flags, holder);
	if (IS_ERR(*bdev)) {
		ret = PTR_ERR(*bdev);
		printk(KERN_INFO "btrfs: open %s failed\n", device_path);
		goto error;
	}

	if (flush)
		filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
	ret = set_blocksize(*bdev, 4096);
	if (ret) {
		blkdev_put(*bdev, flags);
		goto error;
	}
	invalidate_bdev(*bdev);
	*bh = btrfs_read_dev_super(*bdev);
	if (!*bh) {
		ret = -EINVAL;
		blkdev_put(*bdev, flags);
		goto error;
	}

	return 0;

error:
	*bdev = NULL;
	*bh = NULL;
	return ret;
}

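/*
 * Usage sketch (illustrative, not from the original file): open a device
 * and read its super block, then release both handles:
 *
 *	ret = btrfs_get_bdev_and_sb(path, FMODE_READ, holder, 0, &bdev, &bh);
 *	if (!ret) {
 *		disk_super = (struct btrfs_super_block *)bh->b_data;
 *		...
 *		brelse(bh);
 *		blkdev_put(bdev, FMODE_READ);
 *	}
 *
 * btrfs_find_device_by_path() later in this file follows this pattern.
 */
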
static void requeue_list(struct btrfs_pending_bios *pending_bios,
			 struct bio *head, struct bio *tail)
{
	struct bio *old_head;

	old_head = pending_bios->head;
	pending_bios->head = head;
	if (pending_bios->tail)
		tail->bi_next = old_head;
	else
		pending_bios->tail = tail;
}

/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device.  This greatly
 * improves the scheduler's ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block.  The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested.  This way, multiple devices
 * can make progress from a single worker thread.
 */
static noinline void run_scheduled_bios(struct btrfs_device *device)
{
	struct bio *pending;
	struct backing_dev_info *bdi;
	struct btrfs_fs_info *fs_info;
	struct btrfs_pending_bios *pending_bios;
	struct bio *tail;
	struct bio *cur;
	int again = 0;
	unsigned long num_run;
	unsigned long batch_run = 0;
	unsigned long limit;
	unsigned long last_waited = 0;
	int force_reg = 0;
	int sync_pending = 0;
	struct blk_plug plug;

	/*
	 * this function runs all the bios we've collected for
	 * a particular device.  We don't want to wander off to
	 * another device without first sending all of these down.
	 * So, setup a plug here and finish it off before we return
	 */
	blk_start_plug(&plug);

	bdi = blk_get_backing_dev_info(device->bdev);
	fs_info = device->dev_root->fs_info;
	limit = btrfs_async_submit_limit(fs_info);
	limit = limit * 2 / 3;

loop:
	spin_lock(&device->io_lock);

loop_lock:
	num_run = 0;

	/* take all the bios off the list at once and process them
	 * later on (without the lock held).  But, remember the
	 * tail and other pointers so the bios can be properly reinserted
	 * into the list if we hit congestion
	 */
	if (!force_reg && device->pending_sync_bios.head) {
		pending_bios = &device->pending_sync_bios;
		force_reg = 1;
	} else {
		pending_bios = &device->pending_bios;
		force_reg = 0;
	}

	pending = pending_bios->head;
	tail = pending_bios->tail;
	WARN_ON(pending && !tail);

	/*
	 * if pending was null this time around, no bios need processing
	 * at all and we can stop.  Otherwise it'll loop back up again
	 * and do an additional check so no bios are missed.
	 *
	 * device->running_pending is used to synchronize with the
	 * schedule_bio code.
	 */
	if (device->pending_sync_bios.head == NULL &&
	    device->pending_bios.head == NULL) {
		again = 0;
		device->running_pending = 0;
	} else {
		again = 1;
		device->running_pending = 1;
	}

	pending_bios->head = NULL;
	pending_bios->tail = NULL;

	spin_unlock(&device->io_lock);

	while (pending) {

		rmb();
		/* we want to work on both lists, but do more bios on the
		 * sync list than the regular list
		 */
		if ((num_run > 32 &&
		    pending_bios != &device->pending_sync_bios &&
		    device->pending_sync_bios.head) ||
		   (num_run > 64 && pending_bios == &device->pending_sync_bios &&
		    device->pending_bios.head)) {
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			goto loop_lock;
		}

		cur = pending;
		pending = pending->bi_next;
		cur->bi_next = NULL;

		if (atomic_dec_return(&fs_info->nr_async_bios) < limit &&
		    waitqueue_active(&fs_info->async_submit_wait))
			wake_up(&fs_info->async_submit_wait);

		BUG_ON(atomic_read(&cur->bi_cnt) == 0);

		/*
		 * if we're doing the sync list, record that our
		 * plug has some sync requests on it
		 *
		 * If we're doing the regular list and there are
		 * sync requests sitting around, unplug before
		 * we add more
		 */
		if (pending_bios == &device->pending_sync_bios) {
			sync_pending = 1;
		} else if (sync_pending) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}

		btrfsic_submit_bio(cur->bi_rw, cur);
		num_run++;
		batch_run++;
		if (need_resched())
			cond_resched();

		/*
		 * we made progress, there is more work to do and the bdi
		 * is now congested.  Back off and let other work structs
		 * run instead
		 */
		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
		    fs_info->fs_devices->open_devices > 1) {
			struct io_context *ioc;

			ioc = current->io_context;

			/*
			 * the main goal here is that we don't want to
			 * block if we're going to be able to submit
			 * more requests without blocking.
			 *
			 * This code does two great things, it pokes into
			 * the elevator code from a filesystem _and_
			 * it makes assumptions about how batching works.
			 */
			if (ioc && ioc->nr_batch_requests > 0 &&
			    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
			    (last_waited == 0 ||
			     ioc->last_waited == last_waited)) {
				/*
				 * we want to go through our batch of
				 * requests and stop.  So, we copy out
				 * the ioc->last_waited time and test
				 * against it before looping
				 */
				last_waited = ioc->last_waited;
				if (need_resched())
					cond_resched();
				continue;
			}
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			device->running_pending = 1;

			spin_unlock(&device->io_lock);
			btrfs_requeue_work(&device->work);
			goto done;
		}
		/* unplug every 64 requests just for good measure */
		if (batch_run % 64 == 0) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}
	}

	cond_resched();
	if (again)
		goto loop;

	spin_lock(&device->io_lock);
	if (device->pending_bios.head || device->pending_sync_bios.head)
		goto loop_lock;
	spin_unlock(&device->io_lock);

done:
	blk_finish_plug(&plug);
}

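/*
 * Illustrative note (not from the original file): the producer side of
 * these lists is schedule_bio(), which appends a bio to
 * device->pending_bios or device->pending_sync_bios under io_lock and
 * queues device->work.  pending_bios_fn() below is the worker callback
 * that drains both lists via run_scheduled_bios().
 */
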
static void pending_bios_fn(struct btrfs_work *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, work);
	run_scheduled_bios(device);
}

static noinline int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	struct rcu_string *name;
	u64 found_transid = btrfs_super_generation(disk_super);

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
		if (!fs_devices)
			return -ENOMEM;
		INIT_LIST_HEAD(&fs_devices->devices);
		INIT_LIST_HEAD(&fs_devices->alloc_list);
		list_add(&fs_devices->list, &fs_uuids);
		memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
		mutex_init(&fs_devices->device_list_mutex);
		device = NULL;
	} else {
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
	}
	if (!device) {
		if (fs_devices->opened)
			return -EBUSY;

		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device) {
			/* we can safely leave the fs_devices entry around */
			return -ENOMEM;
		}
		device->devid = devid;
		device->dev_stats_valid = 0;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, disk_super->dev_item.uuid,
		       BTRFS_UUID_SIZE);
		spin_lock_init(&device->io_lock);

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			kfree(device);
			return -ENOMEM;
		}
		rcu_assign_pointer(device->name, name);
		INIT_LIST_HEAD(&device->dev_alloc_list);

		/* init readahead state */
		spin_lock_init(&device->reada_lock);
		device->reada_curr_zone = NULL;
		atomic_set(&device->reada_in_flight, 0);
		device->reada_next = 0;
		INIT_RADIX_TREE(&device->reada_zones, GFP_NOFS & ~__GFP_WAIT);
		INIT_RADIX_TREE(&device->reada_extents, GFP_NOFS & ~__GFP_WAIT);

		mutex_lock(&fs_devices->device_list_mutex);
		list_add_rcu(&device->dev_list, &fs_devices->devices);
		mutex_unlock(&fs_devices->device_list_mutex);

		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	} else if (!device->name || strcmp(device->name->str, path)) {
		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name)
			return -ENOMEM;
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
		if (device->missing) {
			fs_devices->missing_devices--;
			device->missing = 0;
		}
	}

	if (found_transid > fs_devices->latest_trans) {
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
	}
	*fs_devices_ret = fs_devices;
	return 0;
}

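/*
 * Illustrative note (not from the original file): device_list_add() is
 * the registration step behind "btrfs device scan";
 * btrfs_scan_one_device() below calls it for every super block it finds.
 * Returning -EBUSY for an already-opened fs_devices prevents a mounted
 * filesystem from being re-registered under a new device path.
 */
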
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;

	fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
	if (!fs_devices)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&fs_devices->devices);
	INIT_LIST_HEAD(&fs_devices->alloc_list);
	INIT_LIST_HEAD(&fs_devices->list);
	mutex_init(&fs_devices->device_list_mutex);
	fs_devices->latest_devid = orig->latest_devid;
	fs_devices->latest_trans = orig->latest_trans;
	fs_devices->total_devices = orig->total_devices;
	memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));

	/* We have held the volume lock, it is safe to get the devices. */
	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		struct rcu_string *name;

		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device)
			goto error;

		/*
		 * This is ok to do without rcu read locked because we hold the
		 * uuid mutex so nothing we touch in here is going to disappear.
		 */
		name = rcu_string_strdup(orig_dev->name->str, GFP_NOFS);
		if (!name) {
			kfree(device);
			goto error;
		}
		rcu_assign_pointer(device->name, name);

		device->devid = orig_dev->devid;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
		spin_lock_init(&device->io_lock);
		INIT_LIST_HEAD(&device->dev_list);
		INIT_LIST_HEAD(&device->dev_alloc_list);

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	return fs_devices;
error:
	free_fs_devices(fs_devices);
	return ERR_PTR(-ENOMEM);
}

void btrfs_close_extra_devices(struct btrfs_fs_info *fs_info,
			       struct btrfs_fs_devices *fs_devices, int step)
{
	struct btrfs_device *device, *next;

	struct block_device *latest_bdev = NULL;
	u64 latest_devid = 0;
	u64 latest_transid = 0;

	mutex_lock(&uuid_mutex);
again:
	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (device->in_fs_metadata) {
			if (!device->is_tgtdev_for_dev_replace &&
			    (!latest_transid ||
			     device->generation > latest_transid)) {
				latest_devid = device->devid;
				latest_transid = device->generation;
				latest_bdev = device->bdev;
			}
			continue;
		}

		if (device->devid == BTRFS_DEV_REPLACE_DEVID) {
			/*
			 * In the first step, keep the device which has
			 * the correct fsid and the devid that is used
			 * for the dev_replace procedure.
			 * In the second step, the dev_replace state is
			 * read from the device tree and it is known
			 * whether the procedure is really active or
			 * not, which means whether this device is
			 * used or whether it should be removed.
			 */
			if (step == 0 || device->is_tgtdev_for_dev_replace)
				continue;
		}
		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			device->writeable = 0;
			if (!device->is_tgtdev_for_dev_replace)
				fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		rcu_string_free(device->name);
		kfree(device);
	}

	if (fs_devices->seed) {
		fs_devices = fs_devices->seed;
		goto again;
	}

	fs_devices->latest_bdev = latest_bdev;
	fs_devices->latest_devid = latest_devid;
	fs_devices->latest_trans = latest_transid;

	mutex_unlock(&uuid_mutex);
}

static void __free_device(struct work_struct *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, rcu_work);

	if (device->bdev)
		blkdev_put(device->bdev, device->mode);

	rcu_string_free(device->name);
	kfree(device);
}

static void free_device(struct rcu_head *head)
{
	struct btrfs_device *device;

	device = container_of(head, struct btrfs_device, rcu);

	INIT_WORK(&device->rcu_work, __free_device);
	schedule_work(&device->rcu_work);
}

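/*
 * Illustrative note (not from the original file): the free happens in two
 * hops because blkdev_put() may sleep while RCU callbacks run in a
 * context that must not.  A typical caller does:
 *
 *	list_del_rcu(&device->dev_list);
 *	call_rcu(&device->rcu, free_device);
 *
 * free_device() then bounces the actual teardown to a workqueue, where
 * __free_device() may safely sleep in blkdev_put().
 */
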
static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	if (--fs_devices->opened > 0)
		return 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		struct btrfs_device *new_device;
		struct rcu_string *name;

		if (device->bdev)
			fs_devices->open_devices--;

		if (device->writeable && !device->is_tgtdev_for_dev_replace) {
			list_del_init(&device->dev_alloc_list);
			fs_devices->rw_devices--;
		}

		if (device->can_discard)
			fs_devices->num_can_discard--;

		new_device = kmalloc(sizeof(*new_device), GFP_NOFS);
		BUG_ON(!new_device); /* -ENOMEM */
		memcpy(new_device, device, sizeof(*new_device));

		/* Safe because we are under uuid_mutex */
		if (device->name) {
			name = rcu_string_strdup(device->name->str, GFP_NOFS);
			BUG_ON(device->name && !name); /* -ENOMEM */
			rcu_assign_pointer(new_device->name, name);
		}
		new_device->bdev = NULL;
		new_device->writeable = 0;
		new_device->in_fs_metadata = 0;
		new_device->can_discard = 0;
		spin_lock_init(&new_device->io_lock);
		list_replace_rcu(&device->dev_list, &new_device->dev_list);

		call_rcu(&device->rcu, free_device);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = 0;

	return 0;
}

int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_fs_devices *seed_devices = NULL;
	int ret;

	mutex_lock(&uuid_mutex);
	ret = __btrfs_close_devices(fs_devices);
	if (!fs_devices->opened) {
		seed_devices = fs_devices->seed;
		fs_devices->seed = NULL;
	}
	mutex_unlock(&uuid_mutex);

	while (seed_devices) {
		fs_devices = seed_devices;
		seed_devices = fs_devices->seed;
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
	/*
	 * Wait for rcu kworkers under __btrfs_close_devices
	 * to finish all blkdev_puts so device is really
	 * free when umount is done.
	 */
	rcu_barrier();
	return ret;
}

static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
				fmode_t flags, void *holder)
{
	struct request_queue *q;
	struct block_device *bdev;
	struct list_head *head = &fs_devices->devices;
	struct btrfs_device *device;
	struct block_device *latest_bdev = NULL;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	u64 latest_devid = 0;
	u64 latest_transid = 0;
	u64 devid;
	int seeding = 1;
	int ret = 0;

	flags |= FMODE_EXCL;

	list_for_each_entry(device, head, dev_list) {
		if (device->bdev)
			continue;
		if (!device->name)
			continue;

		/* Just open everything we can; ignore failures here */
		if (btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
					  &bdev, &bh))
			continue;

		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		if (devid != device->devid)
			goto error_brelse;

		if (memcmp(device->uuid, disk_super->dev_item.uuid,
			   BTRFS_UUID_SIZE))
			goto error_brelse;

		device->generation = btrfs_super_generation(disk_super);
		if (!latest_transid || device->generation > latest_transid) {
			latest_devid = devid;
			latest_transid = device->generation;
			latest_bdev = bdev;
		}

		if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
			device->writeable = 0;
		} else {
			device->writeable = !bdev_read_only(bdev);
			seeding = 0;
		}

		q = bdev_get_queue(bdev);
		if (blk_queue_discard(q)) {
			device->can_discard = 1;
			fs_devices->num_can_discard++;
		}

		device->bdev = bdev;
		device->in_fs_metadata = 0;
		device->mode = flags;

		if (!blk_queue_nonrot(bdev_get_queue(bdev)))
			fs_devices->rotating = 1;

		fs_devices->open_devices++;
		if (device->writeable && !device->is_tgtdev_for_dev_replace) {
			fs_devices->rw_devices++;
			list_add(&device->dev_alloc_list,
				 &fs_devices->alloc_list);
		}
		brelse(bh);
		continue;

error_brelse:
		brelse(bh);
		blkdev_put(bdev, flags);
		continue;
	}
	if (fs_devices->open_devices == 0) {
		ret = -EINVAL;
		goto out;
	}
	fs_devices->seeding = seeding;
	fs_devices->opened = 1;
	fs_devices->latest_bdev = latest_bdev;
	fs_devices->latest_devid = latest_devid;
	fs_devices->latest_trans = latest_transid;
	fs_devices->total_rw_bytes = 0;
out:
	return ret;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	mutex_lock(&uuid_mutex);
	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		ret = __btrfs_open_devices(fs_devices, flags, holder);
	}
	mutex_unlock(&uuid_mutex);
	return ret;
}

/*
 * Look for a btrfs signature on a device.  This may be called out of the
 * mount path and we are not allowed to call set_blocksize during the scan.
 * The superblock is read via the pagecache.
 */
int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
			  struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_super_block *disk_super;
	struct block_device *bdev;
	struct page *page;
	void *p;
	int ret = -EINVAL;
	u64 devid;
	u64 transid;
	u64 total_devices;
	u64 bytenr;
	pgoff_t index;

	/*
	 * we would like to check all the supers, but that would make
	 * a btrfs mount succeed after a mkfs from a different FS.
	 * So, we need to add a special mount option to scan for
	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
	 */
	bytenr = btrfs_sb_offset(0);
	flags |= FMODE_EXCL;
	mutex_lock(&uuid_mutex);

	bdev = blkdev_get_by_path(path, flags, holder);

	if (IS_ERR(bdev)) {
		ret = PTR_ERR(bdev);
		goto error;
	}

	/* make sure our super fits in the device */
	if (bytenr + PAGE_CACHE_SIZE >= i_size_read(bdev->bd_inode))
		goto error_bdev_put;

	/* make sure our super fits in the page */
	if (sizeof(*disk_super) > PAGE_CACHE_SIZE)
		goto error_bdev_put;

	/* make sure our super doesn't straddle pages on disk */
	index = bytenr >> PAGE_CACHE_SHIFT;
	if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_CACHE_SHIFT != index)
		goto error_bdev_put;

	/* pull in the page with our super */
	page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
				   index, GFP_NOFS);

	if (IS_ERR_OR_NULL(page))
		goto error_bdev_put;

	p = kmap(page);

	/* align our pointer to the offset of the super block */
	disk_super = p + (bytenr & ~PAGE_CACHE_MASK);

	if (btrfs_super_bytenr(disk_super) != bytenr ||
	    disk_super->magic != cpu_to_le64(BTRFS_MAGIC))
		goto error_unmap;

	devid = btrfs_stack_device_id(&disk_super->dev_item);
	transid = btrfs_super_generation(disk_super);
	total_devices = btrfs_super_num_devices(disk_super);

	if (disk_super->label[0]) {
		if (disk_super->label[BTRFS_LABEL_SIZE - 1])
			disk_super->label[BTRFS_LABEL_SIZE - 1] = '\0';
		printk(KERN_INFO "device label %s ", disk_super->label);
	} else {
		printk(KERN_INFO "device fsid %pU ", disk_super->fsid);
	}

	printk(KERN_CONT "devid %llu transid %llu %s\n",
	       (unsigned long long)devid, (unsigned long long)transid, path);

	ret = device_list_add(path, disk_super, devid, fs_devices_ret);
	if (!ret && fs_devices_ret)
		(*fs_devices_ret)->total_devices = total_devices;

error_unmap:
	kunmap(page);
	page_cache_release(page);

error_bdev_put:
	blkdev_put(bdev, flags);
error:
	mutex_unlock(&uuid_mutex);
	return ret;
}

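/*
 * Worked example (illustrative, assuming 4KiB pages): btrfs_sb_offset(0)
 * is 64KiB, so the primary super lives at page index 16.  Because the
 * super block structure is itself one page in size, the "doesn't
 * straddle pages" check above reduces to requiring a page-aligned
 * bytenr, which 64KiB satisfies.
 */
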
/* helper to account the used device space in the range */
int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
				   u64 end, u64 *length)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 extent_end;
	int ret;
	int slot;
	struct extent_buffer *l;

	*length = 0;

	if (start >= device->total_bytes || device->is_tgtdev_for_dev_replace)
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = 2;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
			goto next;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (key.offset <= start && extent_end > end) {
			*length = end - start + 1;
			break;
		} else if (key.offset <= start && extent_end > start)
			*length += extent_end - start;
		else if (key.offset > start && extent_end <= end)
			*length += extent_end - key.offset;
		else if (key.offset > start && key.offset <= end) {
			*length += end - key.offset + 1;
			break;
		} else if (key.offset > end)
			break;

next:
		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

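/*
 * Worked example (illustrative, not from the original file): querying
 * start=100, end=199 against a dev extent covering [150, 250) hits the
 * "key.offset > start && key.offset <= end" case above, which adds
 * end - key.offset + 1 = 50 bytes: exactly the portion of the extent
 * that lies inside the queried range.
 */
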
/*
 * find_free_dev_extent - find free space in the specified device
 * @device:	the device which we search the free space in
 * @num_bytes:	the size of the free space that we need
 * @start:	store the start of the free space.
 * @len:	the size of the free space that we find, or the size of the
 *		max free space if we don't find suitable free space
 *
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 *
 * @start is used to store the start of the free space if we find it.  But if
 * we don't find suitable free space, it will be used to store the start
 * position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 */
int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_start;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;

	/* FIXME use last free of some kind */

	/* we don't want to overwrite the superblock on the drive,
	 * so we make sure to start at an offset of at least 1MB
	 */
	search_start = max(root->fs_info->alloc_start, 1024ull * 1024);

	max_hole_start = search_start;
	max_hole_size = 0;
	hole_size = 0;

	if (search_start >= search_end || device->is_tgtdev_for_dev_replace) {
		ret = -ENOSPC;
		goto error;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto error;
	}
	path->reada = 2;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than what we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size.  Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start)
		hole_size = search_end - search_start;

	if (hole_size > max_hole_size) {
		max_hole_start = search_start;
		max_hole_size = hole_size;
	}

	if (hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

out:
	btrfs_free_path(path);
error:
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}

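/*
 * Usage sketch (illustrative; dev_offset and max_avail are hypothetical
 * local names): the chunk allocator asks each device for its largest
 * hole, e.g.
 *
 *	ret = find_free_dev_extent(device, num_bytes, &dev_offset, &max_avail);
 *	if (ret == -ENOSPC)
 *		... no hole of num_bytes; max_avail still reports the
 *		... biggest hole so the caller can shrink its request
 */
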
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_device *device,
				 u64 start)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	} else {
		btrfs_error(root->fs_info, ret, "Slot search failed");
		goto out;
	}

	if (device->bytes_used > 0) {
		u64 len = btrfs_dev_extent_length(leaf, extent);
		device->bytes_used -= len;
		spin_lock(&root->fs_info->free_chunk_lock);
		root->fs_info->free_chunk_space += len;
		spin_unlock(&root->fs_info->free_chunk_lock);
	}
	ret = btrfs_del_item(trans, root, path);
	if (ret)
		btrfs_error(root->fs_info, ret,
			    "Failed to remove dev extent item");
out:
	btrfs_free_path(path);
	return ret;
}

static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
				  struct btrfs_device *device,
				  u64 chunk_tree, u64 chunk_objectid,
				  u64 chunk_offset, u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	WARN_ON(!device->in_fs_metadata);
	WARN_ON(device->is_tgtdev_for_dev_replace);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
		    BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}

static noinline int find_next_chunk(struct btrfs_root *root,
				    u64 objectid, u64 *offset)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_key found_key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0); /* Corruption */

	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
	if (ret) {
		*offset = 0;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != objectid)
			*offset = 0;
		else {
			chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
					       struct btrfs_chunk);
			*offset = found_key.offset +
				btrfs_chunk_length(path->nodes[0], chunk);
		}
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0); /* Corruption */

	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*objectid = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*objectid = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

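/*
 * Illustrative note (not from the original file): both finders above use
 * the same b-tree idiom: search for (objectid, type, (u64)-1), a key
 * that can never match exactly, then step back with
 * btrfs_previous_item() to land on the highest existing key of that
 * type.  The next free devid/offset is then found_key.offset + 1 (or
 * key.offset plus the chunk length).
 */
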
/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
static int btrfs_add_device(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = (unsigned long)btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Function to update ctime/mtime for a given device path.
 * Mainly used for ctime/mtime based probes like libblkid.
 */
static void update_dev_time(char *path_name)
{
	struct file *filp;

	filp = filp_open(path_name, O_RDWR, 0);
	if (IS_ERR(filp))
		return;
	file_update_time(filp);
	filp_close(filp, NULL);
}

static int btrfs_rm_dev_item(struct btrfs_root *root,
			     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;
	lock_chunks(root);

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret)
		goto out;
out:
	btrfs_free_path(path);
	unlock_chunks(root);
	btrfs_commit_transaction(trans, root);
	return ret;
}

int btrfs_rm_device(struct btrfs_root *root, char *device_path)
{
	struct btrfs_device *device;
	struct btrfs_device *next_device;
	struct block_device *bdev;
	struct buffer_head *bh = NULL;
	struct btrfs_super_block *disk_super;
	struct btrfs_fs_devices *cur_devices;
	u64 all_avail;
	u64 devid;
	u64 num_devices;
	u8 *dev_uuid;
	unsigned seq;
	int ret = 0;
	bool clear_super = false;

	mutex_lock(&uuid_mutex);

	do {
		seq = read_seqbegin(&root->fs_info->profiles_lock);

		all_avail = root->fs_info->avail_data_alloc_bits |
			    root->fs_info->avail_system_alloc_bits |
			    root->fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&root->fs_info->profiles_lock, seq));

	num_devices = root->fs_info->fs_devices->num_devices;
	btrfs_dev_replace_lock(&root->fs_info->dev_replace);
	if (btrfs_dev_replace_is_ongoing(&root->fs_info->dev_replace)) {
		WARN_ON(num_devices < 1);
		num_devices--;
	}
	btrfs_dev_replace_unlock(&root->fs_info->dev_replace);

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) && num_devices <= 4) {
		printk(KERN_ERR "btrfs: unable to go below four devices "
		       "on raid10\n");
		ret = -EINVAL;
		goto out;
	}

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) && num_devices <= 2) {
		printk(KERN_ERR "btrfs: unable to go below two "
		       "devices on raid1\n");
		ret = -EINVAL;
		goto out;
	}

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID5) &&
	    root->fs_info->fs_devices->rw_devices <= 2) {
		printk(KERN_ERR "btrfs: unable to go below two "
		       "devices on raid5\n");
		ret = -EINVAL;
		goto out;
	}
	if ((all_avail & BTRFS_BLOCK_GROUP_RAID6) &&
	    root->fs_info->fs_devices->rw_devices <= 3) {
		printk(KERN_ERR "btrfs: unable to go below three "
		       "devices on raid6\n");
		ret = -EINVAL;
		goto out;
	}

	if (strcmp(device_path, "missing") == 0) {
		struct list_head *devices;
		struct btrfs_device *tmp;

		device = NULL;
		devices = &root->fs_info->fs_devices->devices;
		/*
		 * It is safe to read the devices since the volume_mutex
		 * is held.
		 */
		list_for_each_entry(tmp, devices, dev_list) {
			if (tmp->in_fs_metadata &&
			    !tmp->is_tgtdev_for_dev_replace &&
			    !tmp->bdev) {
				device = tmp;
				break;
			}
		}
		bdev = NULL;
		bh = NULL;
		disk_super = NULL;
		if (!device) {
			printk(KERN_ERR "btrfs: no missing devices found to "
			       "remove\n");
			goto out;
		}
	} else {
		ret = btrfs_get_bdev_and_sb(device_path,
					    FMODE_WRITE | FMODE_EXCL,
					    root->fs_info->bdev_holder, 0,
					    &bdev, &bh);
		if (ret)
			goto out;
		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		dev_uuid = disk_super->dev_item.uuid;
		device = btrfs_find_device(root->fs_info, devid, dev_uuid,
					   disk_super->fsid);
		if (!device) {
			ret = -ENOENT;
			goto error_brelse;
		}
	}

	if (device->is_tgtdev_for_dev_replace) {
		pr_err("btrfs: unable to remove the dev_replace target dev\n");
		ret = -EINVAL;
		goto error_brelse;
	}

	if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
		printk(KERN_ERR "btrfs: unable to remove the only writeable "
		       "device\n");
		ret = -EINVAL;
		goto error_brelse;
	}

	if (device->writeable) {
		lock_chunks(root);
		list_del_init(&device->dev_alloc_list);
		unlock_chunks(root);
		root->fs_info->fs_devices->rw_devices--;
		clear_super = true;
	}

	mutex_unlock(&uuid_mutex);
	ret = btrfs_shrink_device(device, 0);
	mutex_lock(&uuid_mutex);
	if (ret)
		goto error_undo;

	/*
	 * TODO: the superblock still includes this device in its num_devices
	 * counter although write_all_supers() is not locked out. This
	 * could give a filesystem state which requires a degraded mount.
	 */
	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
	if (ret)
		goto error_undo;

	spin_lock(&root->fs_info->free_chunk_lock);
	root->fs_info->free_chunk_space = device->total_bytes -
		device->bytes_used;
	spin_unlock(&root->fs_info->free_chunk_lock);

	device->in_fs_metadata = 0;
	btrfs_scrub_cancel_dev(root->fs_info, device);

	/*
	 * the device list mutex makes sure that we don't change
	 * the device list while someone else is writing out all
	 * the device supers.
	 */

	cur_devices = device->fs_devices;
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_del_rcu(&device->dev_list);

	device->fs_devices->num_devices--;
	device->fs_devices->total_devices--;

	if (device->missing)
		root->fs_info->fs_devices->missing_devices--;

	next_device = list_entry(root->fs_info->fs_devices->devices.next,
				 struct btrfs_device, dev_list);
	if (device->bdev == root->fs_info->sb->s_bdev)
		root->fs_info->sb->s_bdev = next_device->bdev;
	if (device->bdev == root->fs_info->fs_devices->latest_bdev)
		root->fs_info->fs_devices->latest_bdev = next_device->bdev;

	if (device->bdev)
		device->fs_devices->open_devices--;

	call_rcu(&device->rcu, free_device);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
	btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);

	if (cur_devices->open_devices == 0) {
		struct btrfs_fs_devices *fs_devices;
		fs_devices = root->fs_info->fs_devices;
		while (fs_devices) {
			if (fs_devices->seed == cur_devices) {
				fs_devices->seed = cur_devices->seed;
				break;
			}
			fs_devices = fs_devices->seed;
		}
		cur_devices->seed = NULL;
		lock_chunks(root);
		__btrfs_close_devices(cur_devices);
		unlock_chunks(root);
		free_fs_devices(cur_devices);
	}

	root->fs_info->num_tolerated_disk_barrier_failures =
		btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);

	/*
	 * at this point, the device is zero sized.  We want to
	 * remove it from the devices list and zero out the old super
	 */
	if (clear_super && disk_super) {
		/* make sure this device isn't detected as part of
		 * the FS anymore
		 */
		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
		set_buffer_dirty(bh);
		sync_dirty_buffer(bh);
	}

	ret = 0;

	if (bdev) {
		/* Notify udev that device has changed */
		btrfs_kobject_uevent(bdev, KOBJ_CHANGE);

		/* Update ctime/mtime for device path for libblkid */
		update_dev_time(device_path);
	}

error_brelse:
	brelse(bh);
	if (bdev)
		blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
out:
	mutex_unlock(&uuid_mutex);
	return ret;
error_undo:
	if (device->writeable) {
		lock_chunks(root);
		list_add(&device->dev_alloc_list,
			 &root->fs_info->fs_devices->alloc_list);
		unlock_chunks(root);
		root->fs_info->fs_devices->rw_devices++;
	}
	goto error_brelse;
}

void btrfs_rm_dev_replace_srcdev(struct btrfs_fs_info *fs_info,
				 struct btrfs_device *srcdev)
{
	WARN_ON(!mutex_is_locked(&fs_info->fs_devices->device_list_mutex));
	list_del_rcu(&srcdev->dev_list);
	list_del_rcu(&srcdev->dev_alloc_list);
	fs_info->fs_devices->num_devices--;
	if (srcdev->missing) {
		fs_info->fs_devices->missing_devices--;
		fs_info->fs_devices->rw_devices++;
	}
	if (srcdev->can_discard)
		fs_info->fs_devices->num_can_discard--;
	if (srcdev->bdev)
		fs_info->fs_devices->open_devices--;

	call_rcu(&srcdev->rcu, free_device);
}

void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
				      struct btrfs_device *tgtdev)
{
	struct btrfs_device *next_device;

	WARN_ON(!tgtdev);
	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	if (tgtdev->bdev) {
		btrfs_scratch_superblock(tgtdev);
		fs_info->fs_devices->open_devices--;
	}
	fs_info->fs_devices->num_devices--;
	/* the target device is going away; drop it from the discard count */
	if (tgtdev->can_discard)
		fs_info->fs_devices->num_can_discard--;

	next_device = list_entry(fs_info->fs_devices->devices.next,
				 struct btrfs_device, dev_list);
	if (tgtdev->bdev == fs_info->sb->s_bdev)
		fs_info->sb->s_bdev = next_device->bdev;
	if (tgtdev->bdev == fs_info->fs_devices->latest_bdev)
		fs_info->fs_devices->latest_bdev = next_device->bdev;
	list_del_rcu(&tgtdev->dev_list);

	call_rcu(&tgtdev->rcu, free_device);

	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
}

static int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path,
				     struct btrfs_device **device)
{
	int ret = 0;
	struct btrfs_super_block *disk_super;
	u64 devid;
	u8 *dev_uuid;
	struct block_device *bdev;
	struct buffer_head *bh;

	*device = NULL;
	ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
				    root->fs_info->bdev_holder, 0, &bdev, &bh);
	if (ret)
		return ret;
	disk_super = (struct btrfs_super_block *)bh->b_data;
	devid = btrfs_stack_device_id(&disk_super->dev_item);
	dev_uuid = disk_super->dev_item.uuid;
	*device = btrfs_find_device(root->fs_info, devid, dev_uuid,
				    disk_super->fsid);
	brelse(bh);
	if (!*device)
		ret = -ENOENT;
	blkdev_put(bdev, FMODE_READ);
	return ret;
}

int btrfs_find_device_missing_or_by_path(struct btrfs_root *root,
					 char *device_path,
					 struct btrfs_device **device)
{
	*device = NULL;
	if (strcmp(device_path, "missing") == 0) {
		struct list_head *devices;
		struct btrfs_device *tmp;

		devices = &root->fs_info->fs_devices->devices;
		/*
		 * It is safe to read the devices since the volume_mutex
		 * is held by the caller.
		 */
		list_for_each_entry(tmp, devices, dev_list) {
			if (tmp->in_fs_metadata && !tmp->bdev) {
				*device = tmp;
				break;
			}
		}

		if (!*device) {
			pr_err("btrfs: no missing device found\n");
			return -ENOENT;
		}

		return 0;
	} else {
		return btrfs_find_device_by_path(root, device_path, device);
	}
}

/*
 * does all the dirty work required for changing file system's UUID.
 */
static int btrfs_prepare_sprout(struct btrfs_root *root)
{
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	struct btrfs_fs_devices *old_devices;
	struct btrfs_fs_devices *seed_devices;
	struct btrfs_super_block *disk_super = root->fs_info->super_copy;
	struct btrfs_device *device;
	u64 super_flags;

	BUG_ON(!mutex_is_locked(&uuid_mutex));
	if (!fs_devices->seeding)
		return -EINVAL;

	seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
	if (!seed_devices)
		return -ENOMEM;

	old_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(old_devices)) {
		kfree(seed_devices);
		return PTR_ERR(old_devices);
	}

	list_add(&old_devices->list, &fs_uuids);

	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
	seed_devices->opened = 1;
	INIT_LIST_HEAD(&seed_devices->devices);
	INIT_LIST_HEAD(&seed_devices->alloc_list);
	mutex_init(&seed_devices->device_list_mutex);

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
			      synchronize_rcu);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
	list_for_each_entry(device, &seed_devices->devices, dev_list) {
		device->fs_devices = seed_devices;
	}

	fs_devices->seeding = 0;
	fs_devices->num_devices = 0;
	fs_devices->open_devices = 0;
	fs_devices->seed = seed_devices;

	generate_random_uuid(fs_devices->fsid);
	memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	super_flags = btrfs_super_flags(disk_super) &
		      ~BTRFS_SUPER_FLAG_SEEDING;
	btrfs_set_super_flags(disk_super, super_flags);

	return 0;
}

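/*
 * Illustrative note (not from the original file): after this call the
 * mounted filesystem owns a fresh fsid while the original read-only
 * devices live on as fs_devices->seed.  btrfs_init_new_device() below
 * drives this path when a writable device is added to a seeding
 * filesystem, then records the seed generations via btrfs_finish_sprout().
 */
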
/*
 * store the expected generation for seed devices in device items.
 */
static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dev_item *dev_item;
	struct btrfs_device *device;
	struct btrfs_key key;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];
	u64 devid;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	root = root->fs_info->chunk_root;
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = BTRFS_DEV_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
		if (ret < 0)
			goto error;

		leaf = path->nodes[0];
next_slot:
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret > 0)
				break;
			if (ret < 0)
				goto error;
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
			btrfs_release_path(path);
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
		    key.type != BTRFS_DEV_ITEM_KEY)
			break;

		dev_item = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_dev_item);
		devid = btrfs_device_id(leaf, dev_item);
		read_extent_buffer(leaf, dev_uuid,
				   (unsigned long)btrfs_device_uuid(dev_item),
				   BTRFS_UUID_SIZE);
		read_extent_buffer(leaf, fs_uuid,
				   (unsigned long)btrfs_device_fsid(dev_item),
				   BTRFS_UUID_SIZE);
		device = btrfs_find_device(root->fs_info, devid, dev_uuid,
					   fs_uuid);
		BUG_ON(!device); /* Logic error */

		if (device->fs_devices->seeding) {
			btrfs_set_device_generation(leaf, dev_item,
						    device->generation);
			btrfs_mark_buffer_dirty(leaf);
		}

		path->slots[0]++;
		goto next_slot;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
{
	struct request_queue *q;
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device;
	struct block_device *bdev;
	struct list_head *devices;
	struct super_block *sb = root->fs_info->sb;
	struct rcu_string *name;
	u64 total_bytes;
	int seeding_dev = 0;
	int ret = 0;

	if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
		return -EROFS;

	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
				  root->fs_info->bdev_holder);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	if (root->fs_info->fs_devices->seeding) {
		seeding_dev = 1;
		down_write(&sb->s_umount);
		mutex_lock(&uuid_mutex);
	}

	filemap_write_and_wait(bdev->bd_inode->i_mapping);

	devices = &root->fs_info->fs_devices->devices;

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_for_each_entry(device, devices, dev_list) {
		if (device->bdev == bdev) {
			ret = -EEXIST;
			mutex_unlock(
				&root->fs_info->fs_devices->device_list_mutex);
			goto error;
		}
	}
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	device = kzalloc(sizeof(*device), GFP_NOFS);
	if (!device) {
		/* we can safely leave the fs_devices entry around */
		ret = -ENOMEM;
		goto error;
	}

	name = rcu_string_strdup(device_path, GFP_NOFS);
	if (!name) {
		kfree(device);
		ret = -ENOMEM;
		goto error;
	}
	rcu_assign_pointer(device->name, name);

	ret = find_next_devid(root, &device->devid);
	if (ret) {
		rcu_string_free(device->name);
		kfree(device);
		goto error;
	}

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		rcu_string_free(device->name);
		kfree(device);
		ret = PTR_ERR(trans);
		goto error;
	}

	lock_chunks(root);

	q = bdev_get_queue(bdev);
	if (blk_queue_discard(q))
		device->can_discard = 1;
	device->writeable = 1;
	device->work.func = pending_bios_fn;
	generate_random_uuid(device->uuid);
	spin_lock_init(&device->io_lock);
	device->generation = trans->transid;
	device->io_width = root->sectorsize;
	device->io_align = root->sectorsize;
	device->sector_size = root->sectorsize;
	device->total_bytes = i_size_read(bdev->bd_inode);
	device->disk_total_bytes = device->total_bytes;
	device->dev_root = root->fs_info->dev_root;
	device->bdev = bdev;
	device->in_fs_metadata = 1;
	device->is_tgtdev_for_dev_replace = 0;
	device->mode = FMODE_EXCL;
	set_blocksize(device->bdev, 4096);

	if (seeding_dev) {
		sb->s_flags &= ~MS_RDONLY;
		ret = btrfs_prepare_sprout(root);
		BUG_ON(ret); /* -ENOMEM */
	}

	device->fs_devices = root->fs_info->fs_devices;

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
	list_add(&device->dev_alloc_list,
		 &root->fs_info->fs_devices->alloc_list);
	root->fs_info->fs_devices->num_devices++;
	root->fs_info->fs_devices->open_devices++;
	root->fs_info->fs_devices->rw_devices++;
	root->fs_info->fs_devices->total_devices++;
	if (device->can_discard)
		root->fs_info->fs_devices->num_can_discard++;
	root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;

	spin_lock(&root->fs_info->free_chunk_lock);
	root->fs_info->free_chunk_space += device->total_bytes;
	spin_unlock(&root->fs_info->free_chunk_lock);

	if (!blk_queue_nonrot(bdev_get_queue(bdev)))
		root->fs_info->fs_devices->rotating = 1;

	total_bytes = btrfs_super_total_bytes(root->fs_info->super_copy);
	btrfs_set_super_total_bytes(root->fs_info->super_copy,
				    total_bytes + device->total_bytes);

	total_bytes = btrfs_super_num_devices(root->fs_info->super_copy);
	btrfs_set_super_num_devices(root->fs_info->super_copy,
				    total_bytes + 1);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	if (seeding_dev) {
		ret = init_first_rw_device(trans, root, device);
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			goto error_trans;
		}
		ret = btrfs_finish_sprout(trans, root);
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			goto error_trans;
		}
	} else {
		ret = btrfs_add_device(trans, root, device);
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			goto error_trans;
		}
	}

	/*
	 * we've got more storage, clear any full flags on the space
	 * infos
	 */
	btrfs_clear_space_info_full(root->fs_info);

	unlock_chunks(root);
	root->fs_info->num_tolerated_disk_barrier_failures =
		btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
	ret = btrfs_commit_transaction(trans, root);

	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);

		if (ret) /* transaction commit */
			return ret;

		ret = btrfs_relocate_sys_chunks(root);
		if (ret < 0)
			btrfs_error(root->fs_info, ret,
				    "Failed to relocate sys chunks after "
				    "device initialization. This can be fixed "
				    "using the \"btrfs balance\" command.");
		trans = btrfs_attach_transaction(root);
		if (IS_ERR(trans)) {
			if (PTR_ERR(trans) == -ENOENT)
				return 0;
			return PTR_ERR(trans);
		}
		ret = btrfs_commit_transaction(trans, root);
	}

	/* Update ctime/mtime for libblkid */
	update_dev_time(device_path);
	return ret;

error_trans:
	unlock_chunks(root);
	btrfs_end_transaction(trans, root);
	rcu_string_free(device->name);
	kfree(device);
error:
	blkdev_put(bdev, FMODE_EXCL);
	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);
	}
	return ret;
}

int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path,
				  struct btrfs_device **device_out)
{
	struct request_queue *q;
	struct btrfs_device *device;
	struct block_device *bdev;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct list_head *devices;
	struct rcu_string *name;
	int ret = 0;

	*device_out = NULL;
	if (fs_info->fs_devices->seeding)
		return -EINVAL;

	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
				  fs_info->bdev_holder);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	filemap_write_and_wait(bdev->bd_inode->i_mapping);

	devices = &fs_info->fs_devices->devices;
	list_for_each_entry(device, devices, dev_list) {
		if (device->bdev == bdev) {
			ret = -EEXIST;
			goto error;
		}
	}

	device = kzalloc(sizeof(*device), GFP_NOFS);
	if (!device) {
		ret = -ENOMEM;
		goto error;
	}

	name = rcu_string_strdup(device_path, GFP_NOFS);
	if (!name) {
		kfree(device);
		ret = -ENOMEM;
		goto error;
	}
	rcu_assign_pointer(device->name, name);

	q = bdev_get_queue(bdev);
	if (blk_queue_discard(q))
		device->can_discard = 1;
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	device->writeable = 1;
	device->work.func = pending_bios_fn;
	generate_random_uuid(device->uuid);
	device->devid = BTRFS_DEV_REPLACE_DEVID;
	spin_lock_init(&device->io_lock);
	device->generation = 0;
	device->io_width = root->sectorsize;
	device->io_align = root->sectorsize;
	device->sector_size = root->sectorsize;
	device->total_bytes = i_size_read(bdev->bd_inode);
	device->disk_total_bytes = device->total_bytes;
	device->dev_root = fs_info->dev_root;
	device->bdev = bdev;
	device->in_fs_metadata = 1;
	device->is_tgtdev_for_dev_replace = 1;
	device->mode = FMODE_EXCL;
	set_blocksize(device->bdev, 4096);
	device->fs_devices = fs_info->fs_devices;
	list_add(&device->dev_list, &fs_info->fs_devices->devices);
	fs_info->fs_devices->num_devices++;
	fs_info->fs_devices->open_devices++;
	if (device->can_discard)
		fs_info->fs_devices->num_can_discard++;
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	*device_out = device;
	return ret;

error:
	blkdev_put(bdev, FMODE_EXCL);
	return ret;
}

void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info,
					      struct btrfs_device *tgtdev)
{
	WARN_ON(fs_info->fs_devices->rw_devices == 0);
	tgtdev->io_width = fs_info->dev_root->sectorsize;
	tgtdev->io_align = fs_info->dev_root->sectorsize;
	tgtdev->sector_size = fs_info->dev_root->sectorsize;
	tgtdev->dev_root = fs_info->dev_root;
	tgtdev->in_fs_metadata = 1;
}

static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
					struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = device->dev_root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_mark_buffer_dirty(leaf);

out:
	btrfs_free_path(path);
	return ret;
}

static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
			       struct btrfs_device *device, u64 new_size)
{
	struct btrfs_super_block *super_copy =
		device->dev_root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 diff = new_size - device->total_bytes;

	if (!device->writeable)
		return -EACCES;
	if (new_size <= device->total_bytes ||
	    device->is_tgtdev_for_dev_replace)
		return -EINVAL;

	btrfs_set_super_total_bytes(super_copy, old_total + diff);
	device->fs_devices->total_rw_bytes += diff;

	device->total_bytes = new_size;
	device->disk_total_bytes = new_size;
	btrfs_clear_space_info_full(device->dev_root->fs_info);

	return btrfs_update_device(trans, device);
}

int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size)
{
	int ret;
	lock_chunks(device->dev_root);
	ret = __btrfs_grow_device(trans, device, new_size);
	unlock_chunks(device->dev_root);
	return ret;
}

static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    u64 chunk_tree, u64 chunk_objectid,
			    u64 chunk_offset)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	root = root->fs_info->chunk_root;
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = chunk_objectid;
	key.offset = chunk_offset;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	else if (ret > 0) { /* Logic error or corruption */
		btrfs_error(root->fs_info, -ENOENT,
			    "Failed lookup while freeing chunk.");
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret < 0)
		btrfs_error(root->fs_info, ret,
			    "Failed to delete chunk item.");
out:
	btrfs_free_path(path);
	return ret;
}

static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
			       chunk_offset)
{
	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)(ptr + len);
			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
			len += btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
		if (key.objectid == chunk_objectid &&
		    key.offset == chunk_offset) {
			memmove(ptr, ptr + len, array_size - (cur + len));
			array_size -= len;
			btrfs_set_super_sys_array_size(super_copy, array_size);
		} else {
			ptr += len;
			cur += len;
		}
	}
	return ret;
}

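/*
 * Worked example (illustrative, not from the original file):
 * sys_chunk_array is a packed sequence of (struct btrfs_disk_key,
 * struct btrfs_chunk plus its stripes) pairs.  A single-stripe entry
 * occupies sizeof(struct btrfs_disk_key) + btrfs_chunk_item_size(1)
 * bytes; the memmove above slides everything after the matched entry
 * down by exactly that length and shrinks array_size to match.
 */
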
2385 static int btrfs_relocate_chunk(struct btrfs_root *root,
2386 u64 chunk_tree, u64 chunk_objectid,
2389 struct extent_map_tree *em_tree;
2390 struct btrfs_root *extent_root;
2391 struct btrfs_trans_handle *trans;
2392 struct extent_map *em;
2393 struct map_lookup *map;
2397 root = root->fs_info->chunk_root;
2398 extent_root = root->fs_info->extent_root;
2399 em_tree = &root->fs_info->mapping_tree.map_tree;
2401 ret = btrfs_can_relocate(extent_root, chunk_offset);
2405 /* step one, relocate all the extents inside this chunk */
2406 ret = btrfs_relocate_block_group(extent_root, chunk_offset);
2410 trans = btrfs_start_transaction(root, 0);
2411 if (IS_ERR(trans)) {
2412 ret = PTR_ERR(trans);
2413 btrfs_std_error(root->fs_info, ret);
2420 * step two, delete the device extents and the
2421 * chunk tree entries
2423 read_lock(&em_tree->lock);
2424 em = lookup_extent_mapping(em_tree, chunk_offset, 1);
2425 read_unlock(&em_tree->lock);
2427 BUG_ON(!em || em->start > chunk_offset ||
2428 em->start + em->len < chunk_offset);
2429 map = (struct map_lookup *)em->bdev;
2431 for (i = 0; i < map->num_stripes; i++) {
2432 ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
2433 map->stripes[i].physical);
2436 if (map->stripes[i].dev) {
2437 ret = btrfs_update_device(trans, map->stripes[i].dev);
2441 ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
2446 trace_btrfs_chunk_free(root, map, chunk_offset, em->len);
2448 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
2449 ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
2453 ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
2456 write_lock(&em_tree->lock);
2457 remove_extent_mapping(em_tree, em);
2458 write_unlock(&em_tree->lock);
2463 /* once for the tree */
2464 free_extent_map(em);
2465 /* once for us */
2466 free_extent_map(em);
2468 unlock_chunks(root);
2469 btrfs_end_transaction(trans, root);
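/*
 * Walk the chunk tree from the highest key downwards and relocate
 * every SYSTEM chunk found.  ENOSPC failures are counted and the scan
 * is retried once in full before giving up.
 */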
2473 static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
2475 struct btrfs_root *chunk_root = root->fs_info->chunk_root;
2476 struct btrfs_path *path;
2477 struct extent_buffer *leaf;
2478 struct btrfs_chunk *chunk;
2479 struct btrfs_key key;
2480 struct btrfs_key found_key;
2481 u64 chunk_tree = chunk_root->root_key.objectid;
2483 bool retried = false;
2487 path = btrfs_alloc_path();
2492 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2493 key.offset = (u64)-1;
2494 key.type = BTRFS_CHUNK_ITEM_KEY;
2497 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2500 BUG_ON(ret == 0); /* Corruption */
2502 ret = btrfs_previous_item(chunk_root, path, key.objectid,
2509 leaf = path->nodes[0];
2510 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2512 chunk = btrfs_item_ptr(leaf, path->slots[0],
2513 struct btrfs_chunk);
2514 chunk_type = btrfs_chunk_type(leaf, chunk);
2515 btrfs_release_path(path);
2517 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
2518 ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
2527 if (found_key.offset == 0)
2529 key.offset = found_key.offset - 1;
2532 if (failed && !retried) {
2536 } else if (failed && retried) {
2541 btrfs_free_path(path);
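/*
 * Persist the balance parameters as the BTRFS_BALANCE_ITEM_KEY item in
 * the tree root so that an interrupted balance can be detected and
 * resumed after a crash or remount (see btrfs_recover_balance()).
 */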
2545 static int insert_balance_item(struct btrfs_root *root,
2546 struct btrfs_balance_control *bctl)
2548 struct btrfs_trans_handle *trans;
2549 struct btrfs_balance_item *item;
2550 struct btrfs_disk_balance_args disk_bargs;
2551 struct btrfs_path *path;
2552 struct extent_buffer *leaf;
2553 struct btrfs_key key;
2556 path = btrfs_alloc_path();
2560 trans = btrfs_start_transaction(root, 0);
2561 if (IS_ERR(trans)) {
2562 btrfs_free_path(path);
2563 return PTR_ERR(trans);
2566 key.objectid = BTRFS_BALANCE_OBJECTID;
2567 key.type = BTRFS_BALANCE_ITEM_KEY;
2570 ret = btrfs_insert_empty_item(trans, root, path, &key,
2575 leaf = path->nodes[0];
2576 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
2578 memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
2580 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
2581 btrfs_set_balance_data(leaf, item, &disk_bargs);
2582 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
2583 btrfs_set_balance_meta(leaf, item, &disk_bargs);
2584 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
2585 btrfs_set_balance_sys(leaf, item, &disk_bargs);
2587 btrfs_set_balance_flags(leaf, item, bctl->flags);
2589 btrfs_mark_buffer_dirty(leaf);
2591 btrfs_free_path(path);
2592 err = btrfs_commit_transaction(trans, root);
2598 static int del_balance_item(struct btrfs_root *root)
2600 struct btrfs_trans_handle *trans;
2601 struct btrfs_path *path;
2602 struct btrfs_key key;
2605 path = btrfs_alloc_path();
2609 trans = btrfs_start_transaction(root, 0);
2610 if (IS_ERR(trans)) {
2611 btrfs_free_path(path);
2612 return PTR_ERR(trans);
2615 key.objectid = BTRFS_BALANCE_OBJECTID;
2616 key.type = BTRFS_BALANCE_ITEM_KEY;
2619 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2627 ret = btrfs_del_item(trans, root, path);
2629 btrfs_free_path(path);
2630 err = btrfs_commit_transaction(trans, root);
2637 * This is a heuristic used to reduce the number of chunks balanced on
2638 * resume after balance was interrupted.
2640 static void update_balance_args(struct btrfs_balance_control *bctl)
2643 * Turn on soft mode for chunk types that were being converted.
2645 if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
2646 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
2647 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
2648 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
2649 if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
2650 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
2653 * Turn on usage filter if it is not already in use. The idea is
2654 * that chunks that we have already balanced should be
2655 * reasonably full. Don't do it for chunks that are being
2656 * converted - that will keep us from relocating unconverted
2657 * (albeit full) chunks.
2659 if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2660 !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2661 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
2662 bctl->data.usage = 90;
2664 if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2665 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2666 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
2667 bctl->sys.usage = 90;
2669 if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2670 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2671 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
2672 bctl->meta.usage = 90;
2677 * Should be called with both balance and volume mutexes held to
2678 * serialize other volume operations (add_dev/rm_dev/resize) with
2679 * restriper. Same goes for unset_balance_control.
2681 static void set_balance_control(struct btrfs_balance_control *bctl)
2683 struct btrfs_fs_info *fs_info = bctl->fs_info;
2685 BUG_ON(fs_info->balance_ctl);
2687 spin_lock(&fs_info->balance_lock);
2688 fs_info->balance_ctl = bctl;
2689 spin_unlock(&fs_info->balance_lock);
2692 static void unset_balance_control(struct btrfs_fs_info *fs_info)
2694 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
2696 BUG_ON(!fs_info->balance_ctl);
2698 spin_lock(&fs_info->balance_lock);
2699 fs_info->balance_ctl = NULL;
2700 spin_unlock(&fs_info->balance_lock);
2706 * Balance filters. Return 1 if chunk should be filtered out
2707 * (should not be balanced).
2709 static int chunk_profiles_filter(u64 chunk_type,
2710 struct btrfs_balance_args *bargs)
2712 chunk_type = chunk_to_extended(chunk_type) &
2713 BTRFS_EXTENDED_PROFILE_MASK;
2715 if (bargs->profiles & chunk_type)
2721 static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
2722 struct btrfs_balance_args *bargs)
2724 struct btrfs_block_group_cache *cache;
2725 u64 chunk_used, user_thresh;
2728 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
2729 chunk_used = btrfs_block_group_used(&cache->item);
2731 if (bargs->usage == 0)
2733 else if (bargs->usage > 100)
2734 user_thresh = cache->key.offset;
2736 user_thresh = div_factor_fine(cache->key.offset,
2739 if (chunk_used < user_thresh)
2742 btrfs_put_block_group(cache);
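/*
 * Example: the userspace option -dusage=90 sets bargs->usage to 90, so
 * a data chunk is relocated only if its used bytes are below 90% of
 * the chunk size; values above 100 act like 100 (the whole chunk).
 */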
2746 static int chunk_devid_filter(struct extent_buffer *leaf,
2747 struct btrfs_chunk *chunk,
2748 struct btrfs_balance_args *bargs)
2750 struct btrfs_stripe *stripe;
2751 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2754 for (i = 0; i < num_stripes; i++) {
2755 stripe = btrfs_stripe_nr(chunk, i);
2756 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
2763 /* [pstart, pend) */
2764 static int chunk_drange_filter(struct extent_buffer *leaf,
2765 struct btrfs_chunk *chunk,
2767 struct btrfs_balance_args *bargs)
2769 struct btrfs_stripe *stripe;
2770 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2776 if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
2779 if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
2780 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)) {
2781 factor = num_stripes / 2;
2782 } else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID5) {
2783 factor = num_stripes - 1;
2784 } else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID6) {
2785 factor = num_stripes - 2;
2787 factor = num_stripes;
2790 for (i = 0; i < num_stripes; i++) {
2791 stripe = btrfs_stripe_nr(chunk, i);
2792 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
2795 stripe_offset = btrfs_stripe_offset(leaf, stripe);
2796 stripe_length = btrfs_chunk_length(leaf, chunk);
2797 do_div(stripe_length, factor);
2799 if (stripe_offset < bargs->pend &&
2800 stripe_offset + stripe_length > bargs->pstart)
2807 /* [vstart, vend) */
2808 static int chunk_vrange_filter(struct extent_buffer *leaf,
2809 struct btrfs_chunk *chunk,
2811 struct btrfs_balance_args *bargs)
2813 if (chunk_offset < bargs->vend &&
2814 chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
2815 /* at least part of the chunk is inside this vrange */
2821 static int chunk_soft_convert_filter(u64 chunk_type,
2822 struct btrfs_balance_args *bargs)
2824 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
2827 chunk_type = chunk_to_extended(chunk_type) &
2828 BTRFS_EXTENDED_PROFILE_MASK;
2830 if (bargs->target == chunk_type)
2836 static int should_balance_chunk(struct btrfs_root *root,
2837 struct extent_buffer *leaf,
2838 struct btrfs_chunk *chunk, u64 chunk_offset)
2840 struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
2841 struct btrfs_balance_args *bargs = NULL;
2842 u64 chunk_type = btrfs_chunk_type(leaf, chunk);
2845 if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
2846 (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
2850 if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
2851 bargs = &bctl->data;
2852 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
2854 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
2855 bargs = &bctl->meta;
2857 /* profiles filter */
2858 if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
2859 chunk_profiles_filter(chunk_type, bargs)) {
2864 if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
2865 chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) {
2870 if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
2871 chunk_devid_filter(leaf, chunk, bargs)) {
2875 /* drange filter, makes sense only with devid filter */
2876 if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
2877 chunk_drange_filter(leaf, chunk, chunk_offset, bargs)) {
2882 if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
2883 chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
2887 /* soft profile changing mode */
2888 if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
2889 chunk_soft_convert_filter(chunk_type, bargs)) {
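/*
 * The main balance loop.  Step one shrinks and regrows devices that
 * are short on free space to shake some room loose.  Step two walks
 * the chunk tree backwards from the highest key, runs every chunk
 * through the filter chain above (a counting pass for the statistics
 * first, then the real pass) and relocates whatever
 * should_balance_chunk() accepts, tallying ENOSPC errors.
 */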
2896 static int __btrfs_balance(struct btrfs_fs_info *fs_info)
2898 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
2899 struct btrfs_root *chunk_root = fs_info->chunk_root;
2900 struct btrfs_root *dev_root = fs_info->dev_root;
2901 struct list_head *devices;
2902 struct btrfs_device *device;
2905 struct btrfs_chunk *chunk;
2906 struct btrfs_path *path;
2907 struct btrfs_key key;
2908 struct btrfs_key found_key;
2909 struct btrfs_trans_handle *trans;
2910 struct extent_buffer *leaf;
2913 int enospc_errors = 0;
2914 bool counting = true;
2916 /* step one make some room on all the devices */
2917 devices = &fs_info->fs_devices->devices;
2918 list_for_each_entry(device, devices, dev_list) {
2919 old_size = device->total_bytes;
2920 size_to_free = div_factor(old_size, 1);
2921 size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
2922 if (!device->writeable ||
2923 device->total_bytes - device->bytes_used > size_to_free ||
2924 device->is_tgtdev_for_dev_replace)
2927 ret = btrfs_shrink_device(device, old_size - size_to_free);
2932 trans = btrfs_start_transaction(dev_root, 0);
2933 BUG_ON(IS_ERR(trans));
2935 ret = btrfs_grow_device(trans, device, old_size);
2938 btrfs_end_transaction(trans, dev_root);
2941 /* step two, relocate all the chunks */
2942 path = btrfs_alloc_path();
2948 /* zero out stat counters */
2949 spin_lock(&fs_info->balance_lock);
2950 memset(&bctl->stat, 0, sizeof(bctl->stat));
2951 spin_unlock(&fs_info->balance_lock);
2953 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2954 key.offset = (u64)-1;
2955 key.type = BTRFS_CHUNK_ITEM_KEY;
2958 if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
2959 atomic_read(&fs_info->balance_cancel_req)) {
2964 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2969 * this shouldn't happen, it means the last relocate failed
2973 BUG(); /* FIXME break ? */
2975 ret = btrfs_previous_item(chunk_root, path, 0,
2976 BTRFS_CHUNK_ITEM_KEY);
2982 leaf = path->nodes[0];
2983 slot = path->slots[0];
2984 btrfs_item_key_to_cpu(leaf, &found_key, slot);
2986 if (found_key.objectid != key.objectid)
2989 /* chunk zero is special */
2990 if (found_key.offset == 0)
2993 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
2996 spin_lock(&fs_info->balance_lock);
2997 bctl->stat.considered++;
2998 spin_unlock(&fs_info->balance_lock);
3001 ret = should_balance_chunk(chunk_root, leaf, chunk,
3003 btrfs_release_path(path);
3008 spin_lock(&fs_info->balance_lock);
3009 bctl->stat.expected++;
3010 spin_unlock(&fs_info->balance_lock);
3014 ret = btrfs_relocate_chunk(chunk_root,
3015 chunk_root->root_key.objectid,
3018 if (ret && ret != -ENOSPC)
3020 if (ret == -ENOSPC) {
3023 spin_lock(&fs_info->balance_lock);
3024 bctl->stat.completed++;
3025 spin_unlock(&fs_info->balance_lock);
3028 key.offset = found_key.offset - 1;
3032 btrfs_release_path(path);
3037 btrfs_free_path(path);
3038 if (enospc_errors) {
3039 printk(KERN_INFO "btrfs: %d enospc errors during balance\n",
3049 * alloc_profile_is_valid - see if a given profile is valid and reduced
3050 * @flags: profile to validate
3051 * @extended: if true @flags is treated as an extended profile
3053 static int alloc_profile_is_valid(u64 flags, int extended)
3055 u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
3056 BTRFS_BLOCK_GROUP_PROFILE_MASK);
3058 flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
3060 /* 1) check that all other bits are zeroed */
3064 /* 2) see if profile is reduced */
3066 return !extended; /* "0" is valid for usual profiles */
3068 /* true if exactly one bit set */
3069 return (flags & (flags - 1)) == 0;
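/*
 * Example: BTRFS_BLOCK_GROUP_RAID1 alone is a valid reduced profile
 * (exactly one profile bit set), RAID0|RAID1 is not; zero is only
 * valid for non-extended profiles, where it means "single".
 */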
3072 static inline int balance_need_close(struct btrfs_fs_info *fs_info)
3074 /* cancel requested || normal exit path */
3075 return atomic_read(&fs_info->balance_cancel_req) ||
3076 (atomic_read(&fs_info->balance_pause_req) == 0 &&
3077 atomic_read(&fs_info->balance_cancel_req) == 0);
3080 static void __cancel_balance(struct btrfs_fs_info *fs_info)
3084 unset_balance_control(fs_info);
3085 ret = del_balance_item(fs_info->tree_root);
3087 btrfs_std_error(fs_info, ret);
3089 atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
3092 void update_ioctl_balance_args(struct btrfs_fs_info *fs_info, int lock,
3093 struct btrfs_ioctl_balance_args *bargs);
3096 * Should be called with both balance and volume mutexes held
3098 int btrfs_balance(struct btrfs_balance_control *bctl,
3099 struct btrfs_ioctl_balance_args *bargs)
3101 struct btrfs_fs_info *fs_info = bctl->fs_info;
3108 if (btrfs_fs_closing(fs_info) ||
3109 atomic_read(&fs_info->balance_pause_req) ||
3110 atomic_read(&fs_info->balance_cancel_req)) {
3115 allowed = btrfs_super_incompat_flags(fs_info->super_copy);
3116 if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
3120 * In case of mixed groups both data and meta should be picked,
3121 * and identical options should be given for both of them.
3123 allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
3124 if (mixed && (bctl->flags & allowed)) {
3125 if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
3126 !(bctl->flags & BTRFS_BALANCE_METADATA) ||
3127 memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
3128 printk(KERN_ERR "btrfs: with mixed groups data and "
3129 "metadata balance options must be the same\n");
3135 num_devices = fs_info->fs_devices->num_devices;
3136 btrfs_dev_replace_lock(&fs_info->dev_replace);
3137 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
3138 BUG_ON(num_devices < 1);
3141 btrfs_dev_replace_unlock(&fs_info->dev_replace);
3142 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
3143 if (num_devices == 1)
3144 allowed |= BTRFS_BLOCK_GROUP_DUP;
3145 else if (num_devices > 1)
3146 allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
3147 if (num_devices > 2)
3148 allowed |= BTRFS_BLOCK_GROUP_RAID5;
3149 if (num_devices > 3)
3150 allowed |= (BTRFS_BLOCK_GROUP_RAID10 |
3151 BTRFS_BLOCK_GROUP_RAID6);
3152 if ((bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3153 (!alloc_profile_is_valid(bctl->data.target, 1) ||
3154 (bctl->data.target & ~allowed))) {
3155 printk(KERN_ERR "btrfs: unable to start balance with target "
3156 "data profile %llu\n",
3157 (unsigned long long)bctl->data.target);
3161 if ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3162 (!alloc_profile_is_valid(bctl->meta.target, 1) ||
3163 (bctl->meta.target & ~allowed))) {
3164 printk(KERN_ERR "btrfs: unable to start balance with target "
3165 "metadata profile %llu\n",
3166 (unsigned long long)bctl->meta.target);
3170 if ((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3171 (!alloc_profile_is_valid(bctl->sys.target, 1) ||
3172 (bctl->sys.target & ~allowed))) {
3173 printk(KERN_ERR "btrfs: unable to start balance with target "
3174 "system profile %llu\n",
3175 (unsigned long long)bctl->sys.target);
3180 /* allow dup'ed data chunks only in mixed mode */
3181 if (!mixed && (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3182 (bctl->data.target & BTRFS_BLOCK_GROUP_DUP)) {
3183 printk(KERN_ERR "btrfs: dup for data is not allowed\n");
3188 /* allow reducing meta or sys integrity only if force is set */
3189 allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3190 BTRFS_BLOCK_GROUP_RAID10 |
3191 BTRFS_BLOCK_GROUP_RAID5 |
3192 BTRFS_BLOCK_GROUP_RAID6;
3194 seq = read_seqbegin(&fs_info->profiles_lock);
3196 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3197 (fs_info->avail_system_alloc_bits & allowed) &&
3198 !(bctl->sys.target & allowed)) ||
3199 ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3200 (fs_info->avail_metadata_alloc_bits & allowed) &&
3201 !(bctl->meta.target & allowed))) {
3202 if (bctl->flags & BTRFS_BALANCE_FORCE) {
3203 printk(KERN_INFO "btrfs: force reducing metadata "
3206 printk(KERN_ERR "btrfs: balance will reduce metadata "
3207 "integrity, use force if you want this\n");
3212 } while (read_seqretry(&fs_info->profiles_lock, seq));
3214 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3215 int num_tolerated_disk_barrier_failures;
3216 u64 target = bctl->sys.target;
3218 num_tolerated_disk_barrier_failures =
3219 btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
3220 if (num_tolerated_disk_barrier_failures > 0 &&
3222 (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
3223 BTRFS_AVAIL_ALLOC_BIT_SINGLE)))
3224 num_tolerated_disk_barrier_failures = 0;
3225 else if (num_tolerated_disk_barrier_failures > 1 &&
3227 (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)))
3228 num_tolerated_disk_barrier_failures = 1;
3230 fs_info->num_tolerated_disk_barrier_failures =
3231 num_tolerated_disk_barrier_failures;
3234 ret = insert_balance_item(fs_info->tree_root, bctl);
3235 if (ret && ret != -EEXIST)
3238 if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
3239 BUG_ON(ret == -EEXIST);
3240 set_balance_control(bctl);
3242 BUG_ON(ret != -EEXIST);
3243 spin_lock(&fs_info->balance_lock);
3244 update_balance_args(bctl);
3245 spin_unlock(&fs_info->balance_lock);
3248 atomic_inc(&fs_info->balance_running);
3249 mutex_unlock(&fs_info->balance_mutex);
3251 ret = __btrfs_balance(fs_info);
3253 mutex_lock(&fs_info->balance_mutex);
3254 atomic_dec(&fs_info->balance_running);
3256 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3257 fs_info->num_tolerated_disk_barrier_failures =
3258 btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
3262 memset(bargs, 0, sizeof(*bargs));
3263 update_ioctl_balance_args(fs_info, 0, bargs);
3266 if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
3267 balance_need_close(fs_info)) {
3268 __cancel_balance(fs_info);
3271 wake_up(&fs_info->balance_wait_q);
3275 if (bctl->flags & BTRFS_BALANCE_RESUME)
3276 __cancel_balance(fs_info);
3279 atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
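/*
 * Resume path: btrfs_recover_balance() reloads the balance item at
 * mount time, btrfs_resume_balance_async() spawns this kthread, and
 * the kthread simply re-enters btrfs_balance() with the saved control
 * structure.
 */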
3284 static int balance_kthread(void *data)
3286 struct btrfs_fs_info *fs_info = data;
3289 mutex_lock(&fs_info->volume_mutex);
3290 mutex_lock(&fs_info->balance_mutex);
3292 if (fs_info->balance_ctl) {
3293 printk(KERN_INFO "btrfs: continuing balance\n");
3294 ret = btrfs_balance(fs_info->balance_ctl, NULL);
3297 mutex_unlock(&fs_info->balance_mutex);
3298 mutex_unlock(&fs_info->volume_mutex);
3303 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
3305 struct task_struct *tsk;
3307 spin_lock(&fs_info->balance_lock);
3308 if (!fs_info->balance_ctl) {
3309 spin_unlock(&fs_info->balance_lock);
3312 spin_unlock(&fs_info->balance_lock);
3314 if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
3315 printk(KERN_INFO "btrfs: force skipping balance\n");
3319 tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
3321 return PTR_ERR(tsk);
3326 int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
3328 struct btrfs_balance_control *bctl;
3329 struct btrfs_balance_item *item;
3330 struct btrfs_disk_balance_args disk_bargs;
3331 struct btrfs_path *path;
3332 struct extent_buffer *leaf;
3333 struct btrfs_key key;
3336 path = btrfs_alloc_path();
3340 key.objectid = BTRFS_BALANCE_OBJECTID;
3341 key.type = BTRFS_BALANCE_ITEM_KEY;
3344 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
3347 if (ret > 0) { /* ret = -ENOENT; */
3352 bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
3358 leaf = path->nodes[0];
3359 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
3361 bctl->fs_info = fs_info;
3362 bctl->flags = btrfs_balance_flags(leaf, item);
3363 bctl->flags |= BTRFS_BALANCE_RESUME;
3365 btrfs_balance_data(leaf, item, &disk_bargs);
3366 btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
3367 btrfs_balance_meta(leaf, item, &disk_bargs);
3368 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
3369 btrfs_balance_sys(leaf, item, &disk_bargs);
3370 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
3372 WARN_ON(atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1));
3374 mutex_lock(&fs_info->volume_mutex);
3375 mutex_lock(&fs_info->balance_mutex);
3377 set_balance_control(bctl);
3379 mutex_unlock(&fs_info->balance_mutex);
3380 mutex_unlock(&fs_info->volume_mutex);
3382 btrfs_free_path(path);
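/*
 * Pausing is cooperative: bump balance_pause_req and wait until
 * balance_running drops to zero.  The balance item stays on disk, so
 * the operation can be resumed later; only cancelling deletes it.
 */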
3386 int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
3390 mutex_lock(&fs_info->balance_mutex);
3391 if (!fs_info->balance_ctl) {
3392 mutex_unlock(&fs_info->balance_mutex);
3396 if (atomic_read(&fs_info->balance_running)) {
3397 atomic_inc(&fs_info->balance_pause_req);
3398 mutex_unlock(&fs_info->balance_mutex);
3400 wait_event(fs_info->balance_wait_q,
3401 atomic_read(&fs_info->balance_running) == 0);
3403 mutex_lock(&fs_info->balance_mutex);
3404 /* we are good with balance_ctl ripped off from under us */
3405 BUG_ON(atomic_read(&fs_info->balance_running));
3406 atomic_dec(&fs_info->balance_pause_req);
3411 mutex_unlock(&fs_info->balance_mutex);
3415 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
3417 mutex_lock(&fs_info->balance_mutex);
3418 if (!fs_info->balance_ctl) {
3419 mutex_unlock(&fs_info->balance_mutex);
3423 atomic_inc(&fs_info->balance_cancel_req);
3425 * if we are running, just wait and return; the balance item is
3426 * deleted in btrfs_balance() in this case
3428 if (atomic_read(&fs_info->balance_running)) {
3429 mutex_unlock(&fs_info->balance_mutex);
3430 wait_event(fs_info->balance_wait_q,
3431 atomic_read(&fs_info->balance_running) == 0);
3432 mutex_lock(&fs_info->balance_mutex);
3434 /* __cancel_balance needs volume_mutex */
3435 mutex_unlock(&fs_info->balance_mutex);
3436 mutex_lock(&fs_info->volume_mutex);
3437 mutex_lock(&fs_info->balance_mutex);
3439 if (fs_info->balance_ctl)
3440 __cancel_balance(fs_info);
3442 mutex_unlock(&fs_info->volume_mutex);
3445 BUG_ON(fs_info->balance_ctl || atomic_read(&fs_info->balance_running));
3446 atomic_dec(&fs_info->balance_cancel_req);
3447 mutex_unlock(&fs_info->balance_mutex);
3452 * shrinking a device means finding all of the device extents past
3453 * the new size, and then following the back refs to the chunks.
3454 * The chunk relocation code actually frees the device extent
3456 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
3458 struct btrfs_trans_handle *trans;
3459 struct btrfs_root *root = device->dev_root;
3460 struct btrfs_dev_extent *dev_extent = NULL;
3461 struct btrfs_path *path;
3469 bool retried = false;
3470 struct extent_buffer *l;
3471 struct btrfs_key key;
3472 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3473 u64 old_total = btrfs_super_total_bytes(super_copy);
3474 u64 old_size = device->total_bytes;
3475 u64 diff = device->total_bytes - new_size;
3477 if (device->is_tgtdev_for_dev_replace)
3480 path = btrfs_alloc_path();
3488 device->total_bytes = new_size;
3489 if (device->writeable) {
3490 device->fs_devices->total_rw_bytes -= diff;
3491 spin_lock(&root->fs_info->free_chunk_lock);
3492 root->fs_info->free_chunk_space -= diff;
3493 spin_unlock(&root->fs_info->free_chunk_lock);
3495 unlock_chunks(root);
3498 key.objectid = device->devid;
3499 key.offset = (u64)-1;
3500 key.type = BTRFS_DEV_EXTENT_KEY;
3503 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3507 ret = btrfs_previous_item(root, path, 0, key.type);
3512 btrfs_release_path(path);
3517 slot = path->slots[0];
3518 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
3520 if (key.objectid != device->devid) {
3521 btrfs_release_path(path);
3525 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
3526 length = btrfs_dev_extent_length(l, dev_extent);
3528 if (key.offset + length <= new_size) {
3529 btrfs_release_path(path);
3533 chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
3534 chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
3535 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3536 btrfs_release_path(path);
3538 ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
3540 if (ret && ret != -ENOSPC)
3544 } while (key.offset-- > 0);
3546 if (failed && !retried) {
3550 } else if (failed && retried) {
3554 device->total_bytes = old_size;
3555 if (device->writeable)
3556 device->fs_devices->total_rw_bytes += diff;
3557 spin_lock(&root->fs_info->free_chunk_lock);
3558 root->fs_info->free_chunk_space += diff;
3559 spin_unlock(&root->fs_info->free_chunk_lock);
3560 unlock_chunks(root);
3564 /* Shrinking succeeded, else we would be at "done". */
3565 trans = btrfs_start_transaction(root, 0);
3566 if (IS_ERR(trans)) {
3567 ret = PTR_ERR(trans);
3573 device->disk_total_bytes = new_size;
3574 /* Now btrfs_update_device() will change the on-disk size. */
3575 ret = btrfs_update_device(trans, device);
3577 unlock_chunks(root);
3578 btrfs_end_transaction(trans, root);
3581 WARN_ON(diff > old_total);
3582 btrfs_set_super_total_bytes(super_copy, old_total - diff);
3583 unlock_chunks(root);
3584 btrfs_end_transaction(trans, root);
3586 btrfs_free_path(path);
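/*
 * Append a (disk_key, chunk item) pair to the superblock's
 * sys_chunk_array, failing with -EFBIG once the fixed-size array is
 * full.
 */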
3590 static int btrfs_add_system_chunk(struct btrfs_root *root,
3591 struct btrfs_key *key,
3592 struct btrfs_chunk *chunk, int item_size)
3594 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3595 struct btrfs_disk_key disk_key;
3599 array_size = btrfs_super_sys_array_size(super_copy);
3600 if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
3603 ptr = super_copy->sys_chunk_array + array_size;
3604 btrfs_cpu_key_to_disk(&disk_key, key);
3605 memcpy(ptr, &disk_key, sizeof(disk_key));
3606 ptr += sizeof(disk_key);
3607 memcpy(ptr, chunk, item_size);
3608 item_size += sizeof(disk_key);
3609 btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
3614 * sort the devices in descending order by max_avail, total_avail
3616 static int btrfs_cmp_device_info(const void *a, const void *b)
3618 const struct btrfs_device_info *di_a = a;
3619 const struct btrfs_device_info *di_b = b;
3621 if (di_a->max_avail > di_b->max_avail)
3623 if (di_a->max_avail < di_b->max_avail)
3625 if (di_a->total_avail > di_b->total_avail)
3627 if (di_a->total_avail < di_b->total_avail)
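/*
 * Per-profile allocation parameters: sub_stripes (RAID10 mirror
 * pairing), dev_stripes (stripes placed on each device, e.g. 2 for
 * DUP), devs_max/devs_min bounds, devs_increment (the granularity the
 * device count must be a multiple of) and ncopies (how many devices
 * hold a copy of each byte).
 */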
3632 static struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
3633 [BTRFS_RAID_RAID10] = {
3636 .devs_max = 0, /* 0 == as many as possible */
3638 .devs_increment = 2,
3641 [BTRFS_RAID_RAID1] = {
3646 .devs_increment = 2,
3649 [BTRFS_RAID_DUP] = {
3654 .devs_increment = 1,
3657 [BTRFS_RAID_RAID0] = {
3662 .devs_increment = 1,
3665 [BTRFS_RAID_SINGLE] = {
3670 .devs_increment = 1,
3673 [BTRFS_RAID_RAID5] = {
3678 .devs_increment = 1,
3681 [BTRFS_RAID_RAID6] = {
3686 .devs_increment = 1,
3691 static u32 find_raid56_stripe_len(u32 data_devices, u32 dev_stripe_target)
3693 /* TODO allow them to set a preferred stripe size */
3697 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
3699 if (!(type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)))
3702 btrfs_set_fs_incompat(info, RAID56);
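/*
 * Core chunk allocator: collect the largest free hole on every
 * writable device, sort the candidates by available space, then pick
 * ndevs and a stripe_size that maximize the number of stripes within
 * the per-type size limits computed below.  On success the new
 * mapping is inserted into the extent map tree and device extents are
 * reserved on each chosen device.
 */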
3705 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3706 struct btrfs_root *extent_root,
3707 struct map_lookup **map_ret,
3708 u64 *num_bytes_out, u64 *stripe_size_out,
3709 u64 start, u64 type)
3711 struct btrfs_fs_info *info = extent_root->fs_info;
3712 struct btrfs_fs_devices *fs_devices = info->fs_devices;
3713 struct list_head *cur;
3714 struct map_lookup *map = NULL;
3715 struct extent_map_tree *em_tree;
3716 struct extent_map *em;
3717 struct btrfs_device_info *devices_info = NULL;
3719 int num_stripes; /* total number of stripes to allocate */
3720 int data_stripes; /* number of stripes that count for
3722 int sub_stripes; /* sub_stripes info for map */
3723 int dev_stripes; /* stripes per dev */
3724 int devs_max; /* max devs to use */
3725 int devs_min; /* min devs needed */
3726 int devs_increment; /* ndevs has to be a multiple of this */
3727 int ncopies; /* how many copies of the data we have */
3729 u64 max_stripe_size;
3733 u64 raid_stripe_len = BTRFS_STRIPE_LEN;
3739 BUG_ON(!alloc_profile_is_valid(type, 0));
3741 if (list_empty(&fs_devices->alloc_list))
3744 index = __get_raid_index(type);
3746 sub_stripes = btrfs_raid_array[index].sub_stripes;
3747 dev_stripes = btrfs_raid_array[index].dev_stripes;
3748 devs_max = btrfs_raid_array[index].devs_max;
3749 devs_min = btrfs_raid_array[index].devs_min;
3750 devs_increment = btrfs_raid_array[index].devs_increment;
3751 ncopies = btrfs_raid_array[index].ncopies;
3753 if (type & BTRFS_BLOCK_GROUP_DATA) {
3754 max_stripe_size = 1024 * 1024 * 1024;
3755 max_chunk_size = 10 * max_stripe_size;
3756 } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
3757 /* for larger filesystems, use larger metadata chunks */
3758 if (fs_devices->total_rw_bytes > 50ULL * 1024 * 1024 * 1024)
3759 max_stripe_size = 1024 * 1024 * 1024;
3761 max_stripe_size = 256 * 1024 * 1024;
3762 max_chunk_size = max_stripe_size;
3763 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
3764 max_stripe_size = 32 * 1024 * 1024;
3765 max_chunk_size = 2 * max_stripe_size;
3767 printk(KERN_ERR "btrfs: invalid chunk type 0x%llx requested\n",
3772 /* we don't want a chunk larger than 10% of writeable space */
3773 max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
3776 devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices,
3781 cur = fs_devices->alloc_list.next;
3784 * in the first pass through the devices list, we gather information
3785 * about the available holes on each device.
3788 while (cur != &fs_devices->alloc_list) {
3789 struct btrfs_device *device;
3793 device = list_entry(cur, struct btrfs_device, dev_alloc_list);
3797 if (!device->writeable) {
3799 "btrfs: read-only device in alloc_list\n");
3803 if (!device->in_fs_metadata ||
3804 device->is_tgtdev_for_dev_replace)
3807 if (device->total_bytes > device->bytes_used)
3808 total_avail = device->total_bytes - device->bytes_used;
3812 /* If there is no space on this device, skip it. */
3813 if (total_avail == 0)
3816 ret = find_free_dev_extent(device,
3817 max_stripe_size * dev_stripes,
3818 &dev_offset, &max_avail);
3819 if (ret && ret != -ENOSPC)
3823 max_avail = max_stripe_size * dev_stripes;
3825 if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
3828 if (ndevs == fs_devices->rw_devices) {
3829 WARN(1, "%s: found more than %llu devices\n",
3830 __func__, fs_devices->rw_devices);
3833 devices_info[ndevs].dev_offset = dev_offset;
3834 devices_info[ndevs].max_avail = max_avail;
3835 devices_info[ndevs].total_avail = total_avail;
3836 devices_info[ndevs].dev = device;
3841 * now sort the devices by hole size / available space
3843 sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
3844 btrfs_cmp_device_info, NULL);
3846 /* round down to number of usable stripes */
3847 ndevs -= ndevs % devs_increment;
3849 if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
3854 if (devs_max && ndevs > devs_max)
3857 * the primary goal is to maximize the number of stripes, so use as many
3858 * devices as possible, even if the stripes are not maximum sized.
3860 stripe_size = devices_info[ndevs-1].max_avail;
3861 num_stripes = ndevs * dev_stripes;
3864 * this will have to be fixed for RAID1 and RAID10 over more drives
3867 data_stripes = num_stripes / ncopies;
3869 if (type & BTRFS_BLOCK_GROUP_RAID5) {
3870 raid_stripe_len = find_raid56_stripe_len(ndevs - 1,
3871 btrfs_super_stripesize(info->super_copy));
3872 data_stripes = num_stripes - 1;
3874 if (type & BTRFS_BLOCK_GROUP_RAID6) {
3875 raid_stripe_len = find_raid56_stripe_len(ndevs - 2,
3876 btrfs_super_stripesize(info->super_copy));
3877 data_stripes = num_stripes - 2;
3881 * Use the number of data stripes to figure out how big this chunk
3882 * is really going to be in terms of logical address space,
3883 * and compare that answer with the max chunk size
3885 if (stripe_size * data_stripes > max_chunk_size) {
3886 u64 mask = (1ULL << 24) - 1;
3887 stripe_size = max_chunk_size;
3888 do_div(stripe_size, data_stripes);
3890 /* bump the answer up to a 16MB boundary */
3891 stripe_size = (stripe_size + mask) & ~mask;
3893 /* but don't go higher than the limits we found
3894 * while searching for free extents
3896 if (stripe_size > devices_info[ndevs-1].max_avail)
3897 stripe_size = devices_info[ndevs-1].max_avail;
3900 do_div(stripe_size, dev_stripes);
3902 /* align to BTRFS_STRIPE_LEN */
3903 do_div(stripe_size, raid_stripe_len);
3904 stripe_size *= raid_stripe_len;
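/*
 * Worked example with hypothetical numbers: a RAID0 data chunk over
 * four devices that each have a 1GB hole gives max_stripe_size = 1GB,
 * max_chunk_size = 10GB, ndevs = 4, dev_stripes = 1 and ncopies = 1,
 * so num_stripes = data_stripes = 4.  stripe_size starts at 1GB and
 * 4 * 1GB <= 10GB needs no clamping, so the chunk covers 4GB of
 * logical address space, 1GB per device.
 */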
3906 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
3911 map->num_stripes = num_stripes;
3913 for (i = 0; i < ndevs; ++i) {
3914 for (j = 0; j < dev_stripes; ++j) {
3915 int s = i * dev_stripes + j;
3916 map->stripes[s].dev = devices_info[i].dev;
3917 map->stripes[s].physical = devices_info[i].dev_offset +
3921 map->sector_size = extent_root->sectorsize;
3922 map->stripe_len = raid_stripe_len;
3923 map->io_align = raid_stripe_len;
3924 map->io_width = raid_stripe_len;
3926 map->sub_stripes = sub_stripes;
3929 num_bytes = stripe_size * data_stripes;
3931 *stripe_size_out = stripe_size;
3932 *num_bytes_out = num_bytes;
3934 trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);
3936 em = alloc_extent_map();
3941 em->bdev = (struct block_device *)map;
3943 em->len = num_bytes;
3944 em->block_start = 0;
3945 em->block_len = em->len;
3947 em_tree = &extent_root->fs_info->mapping_tree.map_tree;
3948 write_lock(&em_tree->lock);
3949 ret = add_extent_mapping(em_tree, em, 0);
3950 write_unlock(&em_tree->lock);
3952 free_extent_map(em);
3956 for (i = 0; i < map->num_stripes; ++i) {
3957 struct btrfs_device *device;
3960 device = map->stripes[i].dev;
3961 dev_offset = map->stripes[i].physical;
3963 ret = btrfs_alloc_dev_extent(trans, device,
3964 info->chunk_root->root_key.objectid,
3965 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3966 start, dev_offset, stripe_size);
3968 goto error_dev_extent;
3971 ret = btrfs_make_block_group(trans, extent_root, 0, type,
3972 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3975 i = map->num_stripes - 1;
3976 goto error_dev_extent;
3979 free_extent_map(em);
3980 check_raid56_incompat_flag(extent_root->fs_info, type);
3982 kfree(devices_info);
3986 for (; i >= 0; i--) {
3987 struct btrfs_device *device;
3990 device = map->stripes[i].dev;
3991 err = btrfs_free_dev_extent(trans, device, start);
3993 btrfs_abort_transaction(trans, extent_root, err);
3997 write_lock(&em_tree->lock);
3998 remove_extent_mapping(em_tree, em);
3999 write_unlock(&em_tree->lock);
4001 /* One for our allocation */
4002 free_extent_map(em);
4003 /* One for the tree reference */
4004 free_extent_map(em);
4007 kfree(devices_info);
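/*
 * Second half of chunk allocation: charge stripe_size to each
 * stripe's device, build the on-disk chunk item from the map_lookup
 * and insert it into the chunk tree; SYSTEM chunks are additionally
 * mirrored into the superblock's sys_chunk_array.
 */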
4011 static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
4012 struct btrfs_root *extent_root,
4013 struct map_lookup *map, u64 chunk_offset,
4014 u64 chunk_size, u64 stripe_size)
4017 struct btrfs_key key;
4018 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
4019 struct btrfs_device *device;
4020 struct btrfs_chunk *chunk;
4021 struct btrfs_stripe *stripe;
4022 size_t item_size = btrfs_chunk_item_size(map->num_stripes);
4026 chunk = kzalloc(item_size, GFP_NOFS);
4031 while (index < map->num_stripes) {
4032 device = map->stripes[index].dev;
4033 device->bytes_used += stripe_size;
4034 ret = btrfs_update_device(trans, device);
4040 spin_lock(&extent_root->fs_info->free_chunk_lock);
4041 extent_root->fs_info->free_chunk_space -= (stripe_size *
4043 spin_unlock(&extent_root->fs_info->free_chunk_lock);
4046 stripe = &chunk->stripe;
4047 while (index < map->num_stripes) {
4048 device = map->stripes[index].dev;
4049 dev_offset = map->stripes[index].physical;
4051 btrfs_set_stack_stripe_devid(stripe, device->devid);
4052 btrfs_set_stack_stripe_offset(stripe, dev_offset);
4053 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
4058 btrfs_set_stack_chunk_length(chunk, chunk_size);
4059 btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
4060 btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
4061 btrfs_set_stack_chunk_type(chunk, map->type);
4062 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
4063 btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
4064 btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
4065 btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
4066 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
4068 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
4069 key.type = BTRFS_CHUNK_ITEM_KEY;
4070 key.offset = chunk_offset;
4072 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
4074 if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
4076 * TODO: Cleanup of inserted chunk root in case of failure.
4079 ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
4089 * Chunk allocation falls into two parts. The first part does the work
4090 * that makes the newly allocated chunk usable, but does not do any
4091 * operation that modifies the chunk tree. The second part does the work
4092 * that requires modifying the chunk tree. This division is important for the
4093 * bootstrap process of adding storage to a seed btrfs.
4095 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
4096 struct btrfs_root *extent_root, u64 type)
4101 struct map_lookup *map;
4102 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
4105 ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
4110 ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
4111 &stripe_size, chunk_offset, type);
4115 ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
4116 chunk_size, stripe_size);
4122 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
4123 struct btrfs_root *root,
4124 struct btrfs_device *device)
4127 u64 sys_chunk_offset;
4131 u64 sys_stripe_size;
4133 struct map_lookup *map;
4134 struct map_lookup *sys_map;
4135 struct btrfs_fs_info *fs_info = root->fs_info;
4136 struct btrfs_root *extent_root = fs_info->extent_root;
4139 ret = find_next_chunk(fs_info->chunk_root,
4140 BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset);
4144 alloc_profile = btrfs_get_alloc_profile(extent_root, 0);
4145 ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
4146 &stripe_size, chunk_offset, alloc_profile);
4150 sys_chunk_offset = chunk_offset + chunk_size;
4152 alloc_profile = btrfs_get_alloc_profile(fs_info->chunk_root, 0);
4153 ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
4154 &sys_chunk_size, &sys_stripe_size,
4155 sys_chunk_offset, alloc_profile);
4157 btrfs_abort_transaction(trans, root, ret);
4161 ret = btrfs_add_device(trans, fs_info->chunk_root, device);
4163 btrfs_abort_transaction(trans, root, ret);
4168 * Modifying the chunk tree means allocating new blocks from both the
4169 * system block group and the metadata block group, so we can only
4170 * perform operations that modify the chunk tree after both block
4171 * groups have been created.
4173 ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
4174 chunk_size, stripe_size);
4176 btrfs_abort_transaction(trans, root, ret);
4180 ret = __finish_chunk_alloc(trans, extent_root, sys_map,
4181 sys_chunk_offset, sys_chunk_size,
4184 btrfs_abort_transaction(trans, root, ret);
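/*
 * A chunk is treated as read-only if any of its stripes sits on a
 * device that is not writeable; mounting with -o degraded overrides
 * the check.
 */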
4191 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
4193 struct extent_map *em;
4194 struct map_lookup *map;
4195 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
4199 read_lock(&map_tree->map_tree.lock);
4200 em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
4201 read_unlock(&map_tree->map_tree.lock);
4205 if (btrfs_test_opt(root, DEGRADED)) {
4206 free_extent_map(em);
4210 map = (struct map_lookup *)em->bdev;
4211 for (i = 0; i < map->num_stripes; i++) {
4212 if (!map->stripes[i].dev->writeable) {
4217 free_extent_map(em);
4221 void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
4223 extent_map_tree_init(&tree->map_tree);
4226 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
4228 struct extent_map *em;
4231 write_lock(&tree->map_tree.lock);
4232 em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
4234 remove_extent_mapping(&tree->map_tree, em);
4235 write_unlock(&tree->map_tree.lock);
4239 /* once for us */
4240 free_extent_map(em);
4241 /* once for the tree */
4242 free_extent_map(em);
4246 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
4248 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
4249 struct extent_map *em;
4250 struct map_lookup *map;
4251 struct extent_map_tree *em_tree = &map_tree->map_tree;
4254 read_lock(&em_tree->lock);
4255 em = lookup_extent_mapping(em_tree, logical, len);
4256 read_unlock(&em_tree->lock);
4259 * We could return errors for these cases, but that could get ugly and
4260 * we'd probably end up doing the same thing anyway (nothing else and
4261 * exit), so return 1 so the callers don't try to use other copies.
4264 btrfs_emerg(fs_info, "No mapping for %Lu-%Lu\n", logical,
4269 if (em->start > logical || em->start + em->len < logical) {
4270 btrfs_emerg(fs_info, "Invalid mapping for %Lu-%Lu, got "
4271 "%Lu-%Lu\n", logical, logical+len, em->start,
4272 em->start + em->len);
4273 free_extent_map(em);
4277 map = (struct map_lookup *)em->bdev;
4278 if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
4279 ret = map->num_stripes;
4280 else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
4281 ret = map->sub_stripes;
4282 else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
4284 else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
4288 free_extent_map(em);
4290 btrfs_dev_replace_lock(&fs_info->dev_replace);
4291 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))
4293 btrfs_dev_replace_unlock(&fs_info->dev_replace);
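/*
 * Summary of the mapping above: RAID1 and DUP can satisfy a read from
 * num_stripes devices, RAID10 from sub_stripes, RAID5 from 2 and
 * RAID6 from 3 (counting parity reconstruction), everything else from
 * 1; an ongoing device replace adds one more candidate.
 */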
4298 unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
4299 struct btrfs_mapping_tree *map_tree,
4302 struct extent_map *em;
4303 struct map_lookup *map;
4304 struct extent_map_tree *em_tree = &map_tree->map_tree;
4305 unsigned long len = root->sectorsize;
4307 read_lock(&em_tree->lock);
4308 em = lookup_extent_mapping(em_tree, logical, len);
4309 read_unlock(&em_tree->lock);
4312 BUG_ON(em->start > logical || em->start + em->len < logical);
4313 map = (struct map_lookup *)em->bdev;
4314 if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
4315 BTRFS_BLOCK_GROUP_RAID6)) {
4316 len = map->stripe_len * nr_data_stripes(map);
4318 free_extent_map(em);
4322 int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,
4323 u64 logical, u64 len, int mirror_num)
4325 struct extent_map *em;
4326 struct map_lookup *map;
4327 struct extent_map_tree *em_tree = &map_tree->map_tree;
4330 read_lock(&em_tree->lock);
4331 em = lookup_extent_mapping(em_tree, logical, len);
4332 read_unlock(&em_tree->lock);
4335 BUG_ON(em->start > logical || em->start + em->len < logical);
4336 map = (struct map_lookup *)em->bdev;
4337 if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
4338 BTRFS_BLOCK_GROUP_RAID6))
4340 free_extent_map(em);
4344 static int find_live_mirror(struct btrfs_fs_info *fs_info,
4345 struct map_lookup *map, int first, int num,
4346 int optimal, int dev_replace_is_ongoing)
4350 struct btrfs_device *srcdev;
4352 if (dev_replace_is_ongoing &&
4353 fs_info->dev_replace.cont_reading_from_srcdev_mode ==
4354 BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
4355 srcdev = fs_info->dev_replace.srcdev;
4360 * try to avoid the drive that is the source drive for a
4361 * dev-replace procedure, only choose it if no other non-missing
4362 * mirror is available
4364 for (tolerance = 0; tolerance < 2; tolerance++) {
4365 if (map->stripes[optimal].dev->bdev &&
4366 (tolerance || map->stripes[optimal].dev != srcdev))
4368 for (i = first; i < first + num; i++) {
4369 if (map->stripes[i].dev->bdev &&
4370 (tolerance || map->stripes[i].dev != srcdev))
4375 /* we couldn't find one that doesn't fail. Just return something
4376 * and the io error handling code will clean up eventually
4381 static inline int parity_smaller(u64 a, u64 b)
4386 /* Bubble-sort the stripe set to put the parity/syndrome stripes last */
4387 static void sort_parity_stripes(struct btrfs_bio *bbio, u64 *raid_map)
4389 struct btrfs_bio_stripe s;
4396 for (i = 0; i < bbio->num_stripes - 1; i++) {
4397 if (parity_smaller(raid_map[i], raid_map[i+1])) {
4398 s = bbio->stripes[i];
4400 bbio->stripes[i] = bbio->stripes[i+1];
4401 raid_map[i] = raid_map[i+1];
4402 bbio->stripes[i+1] = s;
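/*
 * The central mapping routine: translate a logical range into
 * physical stripes.  Derives stripe_nr and stripe_offset from the
 * chunk geometry, picks mirrors according to the profile and the
 * request type (read, write, discard, get-read-mirrors), handles the
 * dev-replace special cases and returns a btrfs_bio stripe array,
 * plus a raid_map for RAID5/6 full-stripe operations.
 */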
4410 static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
4411 u64 logical, u64 *length,
4412 struct btrfs_bio **bbio_ret,
4413 int mirror_num, u64 **raid_map_ret)
4415 struct extent_map *em;
4416 struct map_lookup *map;
4417 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
4418 struct extent_map_tree *em_tree = &map_tree->map_tree;
4421 u64 stripe_end_offset;
4426 u64 *raid_map = NULL;
4432 struct btrfs_bio *bbio = NULL;
4433 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
4434 int dev_replace_is_ongoing = 0;
4435 int num_alloc_stripes;
4436 int patch_the_first_stripe_for_dev_replace = 0;
4437 u64 physical_to_patch_in_first_stripe = 0;
4438 u64 raid56_full_stripe_start = (u64)-1;
4440 read_lock(&em_tree->lock);
4441 em = lookup_extent_mapping(em_tree, logical, *length);
4442 read_unlock(&em_tree->lock);
4445 btrfs_crit(fs_info, "unable to find logical %llu len %llu",
4446 (unsigned long long)logical,
4447 (unsigned long long)*length);
4451 if (em->start > logical || em->start + em->len < logical) {
4452 btrfs_crit(fs_info, "found a bad mapping, wanted %Lu, "
4453 "found %Lu-%Lu\n", logical, em->start,
4454 em->start + em->len);
4455 free_extent_map(em);
4459 map = (struct map_lookup *)em->bdev;
4460 offset = logical - em->start;
4462 if (mirror_num > map->num_stripes)
4465 stripe_len = map->stripe_len;
4468 * stripe_nr counts the total number of stripes we have to stride
4469 * to get to this block
4471 do_div(stripe_nr, stripe_len);
4473 stripe_offset = stripe_nr * stripe_len;
4474 BUG_ON(offset < stripe_offset);
4476 /* stripe_offset is the offset of this block in its stripe */
4477 stripe_offset = offset - stripe_offset;
4479 /* if we're here for raid56, we need to know the stripe aligned start */
4480 if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) {
4481 unsigned long full_stripe_len = stripe_len * nr_data_stripes(map);
4482 raid56_full_stripe_start = offset;
4484 /* allow a write of a full stripe, but make sure we don't
4485 * allow straddling of stripes
4487 do_div(raid56_full_stripe_start, full_stripe_len);
4488 raid56_full_stripe_start *= full_stripe_len;
4491 if (rw & REQ_DISCARD) {
4492 /* we don't discard raid56 yet */
4494 (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) {
4498 *length = min_t(u64, em->len - offset, *length);
4499 } else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
4501 /* For writes to RAID[56], allow a full stripeset across all disks.
4502 For other RAID types and for RAID[56] reads, just allow a single
4503 stripe (on a single disk). */
4504 if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6) &&
4506 max_len = stripe_len * nr_data_stripes(map) -
4507 (offset - raid56_full_stripe_start);
4509 /* we limit the length of each bio to what fits in a stripe */
4510 max_len = stripe_len - stripe_offset;
4512 *length = min_t(u64, em->len - offset, max_len);
4514 *length = em->len - offset;
4517 /* This is for when we're called from btrfs_merge_bio_hook() and all
4518 it cares about is the length */
4522 btrfs_dev_replace_lock(dev_replace);
4523 dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
4524 if (!dev_replace_is_ongoing)
4525 btrfs_dev_replace_unlock(dev_replace);
4527 if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
4528 !(rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) &&
4529 dev_replace->tgtdev != NULL) {
4531 * in dev-replace case, for repair case (that's the only
4532 * case where the mirror is selected explicitly when
4533 * calling btrfs_map_block), blocks left of the left cursor
4534 * can also be read from the target drive.
4535 * For REQ_GET_READ_MIRRORS, the target drive is added as
4536 * the last one to the array of stripes. For READ, it also
4537 * needs to be supported using the same mirror number.
4538 * If the requested block is not left of the left cursor,
4539 * EIO is returned. This can happen because btrfs_num_copies()
4540 * returns one more in the dev-replace case.
4542 u64 tmp_length = *length;
4543 struct btrfs_bio *tmp_bbio = NULL;
4544 int tmp_num_stripes;
4545 u64 srcdev_devid = dev_replace->srcdev->devid;
4546 int index_srcdev = 0;
4548 u64 physical_of_found = 0;
4550 ret = __btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS,
4551 logical, &tmp_length, &tmp_bbio, 0, NULL);
4553 WARN_ON(tmp_bbio != NULL);
4557 tmp_num_stripes = tmp_bbio->num_stripes;
4558 if (mirror_num > tmp_num_stripes) {
4560 * REQ_GET_READ_MIRRORS does not contain this
4561 * mirror, that means that the requested area
4562 * is not left of the left cursor
4570 * process the rest of the function using the mirror_num
4571 * of the source drive. Therefore look it up first.
4572 * At the end, patch the device pointer to the one of the target drive.
4575 for (i = 0; i < tmp_num_stripes; i++) {
4576 if (tmp_bbio->stripes[i].dev->devid == srcdev_devid) {
4578 * In case of DUP, in order to keep it
4579 * simple, only add the mirror with the
4580 * lowest physical address
4583 physical_of_found <=
4584 tmp_bbio->stripes[i].physical)
4589 tmp_bbio->stripes[i].physical;
4594 mirror_num = index_srcdev + 1;
4595 patch_the_first_stripe_for_dev_replace = 1;
4596 physical_to_patch_in_first_stripe = physical_of_found;
4605 } else if (mirror_num > map->num_stripes) {
4611 stripe_nr_orig = stripe_nr;
4612 stripe_nr_end = ALIGN(offset + *length, map->stripe_len);
4613 do_div(stripe_nr_end, map->stripe_len);
4614 stripe_end_offset = stripe_nr_end * map->stripe_len -
4617 if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
4618 if (rw & REQ_DISCARD)
4619 num_stripes = min_t(u64, map->num_stripes,
4620 stripe_nr_end - stripe_nr_orig);
4621 stripe_index = do_div(stripe_nr, map->num_stripes);
4622 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
4623 if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS))
4624 num_stripes = map->num_stripes;
4625 else if (mirror_num)
4626 stripe_index = mirror_num - 1;
4628 stripe_index = find_live_mirror(fs_info, map, 0,
4630 current->pid % map->num_stripes,
4631 dev_replace_is_ongoing);
4632 mirror_num = stripe_index + 1;
4635 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
4636 if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) {
4637 num_stripes = map->num_stripes;
4638 } else if (mirror_num) {
4639 stripe_index = mirror_num - 1;
4644 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
4645 int factor = map->num_stripes / map->sub_stripes;
4647 stripe_index = do_div(stripe_nr, factor);
4648 stripe_index *= map->sub_stripes;
4650 if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
4651 num_stripes = map->sub_stripes;
4652 else if (rw & REQ_DISCARD)
4653 num_stripes = min_t(u64, map->sub_stripes *
4654 (stripe_nr_end - stripe_nr_orig),
4656 else if (mirror_num)
4657 stripe_index += mirror_num - 1;
4659 int old_stripe_index = stripe_index;
4660 stripe_index = find_live_mirror(fs_info, map,
4662 map->sub_stripes, stripe_index +
4663 current->pid % map->sub_stripes,
4664 dev_replace_is_ongoing);
4665 mirror_num = stripe_index - old_stripe_index + 1;
4668 } else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
4669 BTRFS_BLOCK_GROUP_RAID6)) {
4672 if (bbio_ret && ((rw & REQ_WRITE) || mirror_num > 1)
4676 /* push stripe_nr back to the start of the full stripe */
4677 stripe_nr = raid56_full_stripe_start;
4678 do_div(stripe_nr, stripe_len);
4680 stripe_index = do_div(stripe_nr, nr_data_stripes(map));
4682 /* RAID[56] write or recovery. Return all stripes */
4683 num_stripes = map->num_stripes;
4684 max_errors = nr_parity_stripes(map);
4686 raid_map = kmalloc(sizeof(u64) * num_stripes,
4693 /* Work out the disk rotation on this stripe-set */
4695 rot = do_div(tmp, num_stripes);
4697 /* Fill in the logical address of each stripe */
4698 tmp = stripe_nr * nr_data_stripes(map);
4699 for (i = 0; i < nr_data_stripes(map); i++)
4700 raid_map[(i+rot) % num_stripes] =
4701 em->start + (tmp + i) * map->stripe_len;
4703 raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE;
4704 if (map->type & BTRFS_BLOCK_GROUP_RAID6)
4705 raid_map[(i+rot+1) % num_stripes] =
4708 *length = map->stripe_len;
4713 * Mirror #0 or #1 means the original data block.
4714 * Mirror #2 is RAID5 parity block.
4715 * Mirror #3 is RAID6 Q block.
4717 stripe_index = do_div(stripe_nr, nr_data_stripes(map));
4719 stripe_index = nr_data_stripes(map) +
4722 /* We distribute the parity blocks across stripes */
4723 tmp = stripe_nr + stripe_index;
4724 stripe_index = do_div(tmp, map->num_stripes);
4728 * after this do_div call, stripe_nr is the number of stripes
4729 * on this device we have to walk to find the data, and
4730 * stripe_index is the number of our device in the stripe array
4732 stripe_index = do_div(stripe_nr, map->num_stripes);
4733 mirror_num = stripe_index + 1;
4735 BUG_ON(stripe_index >= map->num_stripes);
4737 num_alloc_stripes = num_stripes;
4738 if (dev_replace_is_ongoing) {
4739 if (rw & (REQ_WRITE | REQ_DISCARD))
4740 num_alloc_stripes <<= 1;
4741 if (rw & REQ_GET_READ_MIRRORS)
4742 num_alloc_stripes++;
4744 bbio = kzalloc(btrfs_bio_size(num_alloc_stripes), GFP_NOFS);
4749 atomic_set(&bbio->error, 0);
4751 if (rw & REQ_DISCARD) {
4753 int sub_stripes = 0;
4754 u64 stripes_per_dev = 0;
4755 u32 remaining_stripes = 0;
4756 u32 last_stripe = 0;
4759 (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
4760 if (map->type & BTRFS_BLOCK_GROUP_RAID0)
4763 sub_stripes = map->sub_stripes;
4765 factor = map->num_stripes / sub_stripes;
4766 stripes_per_dev = div_u64_rem(stripe_nr_end -
4769 &remaining_stripes);
4770 div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
4771 last_stripe *= sub_stripes;
4774 for (i = 0; i < num_stripes; i++) {
4775 bbio->stripes[i].physical =
4776 map->stripes[stripe_index].physical +
4777 stripe_offset + stripe_nr * map->stripe_len;
4778 bbio->stripes[i].dev = map->stripes[stripe_index].dev;
4780 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
4781 BTRFS_BLOCK_GROUP_RAID10)) {
4782 bbio->stripes[i].length = stripes_per_dev *
4785 if (i / sub_stripes < remaining_stripes)
4786 bbio->stripes[i].length +=
4790 * Special for the first stripe and
4791 * the last stripe:
4793 * |-------|...|-------|
4794 *     |----------|
4795 *    off     end_off
4797 if (i < sub_stripes)
4798 bbio->stripes[i].length -=
4801 if (stripe_index >= last_stripe &&
4802 stripe_index <= (last_stripe +
4804 bbio->stripes[i].length -=
4807 if (i == sub_stripes - 1)
4810 bbio->stripes[i].length = *length;
4813 if (stripe_index == map->num_stripes) {
4814 /* This could only happen for RAID0/10 */
4820 for (i = 0; i < num_stripes; i++) {
4821 bbio->stripes[i].physical =
4822 map->stripes[stripe_index].physical +
4824 stripe_nr * map->stripe_len;
4825 bbio->stripes[i].dev =
4826 map->stripes[stripe_index].dev;
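	/*
	 * Illustrative example of the discard split above: a discard
	 * covering stripes 4..6 of a two-device RAID0 (factor == 2,
	 * sub_stripes == 1) gives stripes_per_dev == 1 with
	 * remaining_stripes == 1. Device 0 holds stripes 4 and 6 and gets
	 * 2 * stripe_len minus the head (stripe_offset) and tail
	 * (stripe_end_offset) trims; device 1 holds stripe 5 and gets one
	 * full stripe_len.
	 */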
	if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) {
		if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
				 BTRFS_BLOCK_GROUP_RAID10 |
				 BTRFS_BLOCK_GROUP_RAID5 |
				 BTRFS_BLOCK_GROUP_DUP)) {
			max_errors = 1;
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID6) {
			max_errors = 2;
		}
	}
	if (dev_replace_is_ongoing && (rw & (REQ_WRITE | REQ_DISCARD)) &&
	    dev_replace->tgtdev != NULL) {
		int index_where_to_add;
		u64 srcdev_devid = dev_replace->srcdev->devid;

		/*
		 * duplicate the write operations while the dev replace
		 * procedure is running. Since the copying of the old disk
		 * to the new disk takes place at run time while the
		 * filesystem is mounted writable, the regular write
		 * operations to the old disk have to be duplicated to go
		 * to the new disk as well.
		 * Note that device->missing is handled by the caller, and
		 * that the write to the old disk is already set up in the
		 * stripes array.
		 */
		index_where_to_add = num_stripes;
		for (i = 0; i < num_stripes; i++) {
			if (bbio->stripes[i].dev->devid == srcdev_devid) {
				/* write to new disk, too */
				struct btrfs_bio_stripe *new =
					bbio->stripes + index_where_to_add;
				struct btrfs_bio_stripe *old =
					bbio->stripes + i;

				new->physical = old->physical;
				new->length = old->length;
				new->dev = dev_replace->tgtdev;
				index_where_to_add++;
			}
		}
		num_stripes = index_where_to_add;
	} else if (dev_replace_is_ongoing && (rw & REQ_GET_READ_MIRRORS) &&
		   dev_replace->tgtdev != NULL) {
		u64 srcdev_devid = dev_replace->srcdev->devid;
		int index_srcdev = 0;
		int found = 0;
		u64 physical_of_found = 0;

		/*
		 * During the dev-replace procedure, the target drive can
		 * also be used to read data in case it is needed to repair
		 * a corrupt block elsewhere. This is possible if the
		 * requested area is left of the left cursor. In this area,
		 * the target drive is a full copy of the source drive.
		 */
		for (i = 0; i < num_stripes; i++) {
			if (bbio->stripes[i].dev->devid == srcdev_devid) {
				/*
				 * In case of DUP, in order to keep it
				 * simple, only add the mirror with the
				 * lowest physical address
				 */
				if (found &&
				    physical_of_found <=
				     bbio->stripes[i].physical)
					continue;
				index_srcdev = i;
				found = 1;
				physical_of_found = bbio->stripes[i].physical;
			}
		}
		if (found) {
			u64 length = map->stripe_len;

			if (physical_of_found + length <=
			    dev_replace->cursor_left) {
				struct btrfs_bio_stripe *tgtdev_stripe =
					bbio->stripes + num_stripes;

				tgtdev_stripe->physical = physical_of_found;
				tgtdev_stripe->length =
					bbio->stripes[index_srcdev].length;
				tgtdev_stripe->dev = dev_replace->tgtdev;

				num_stripes++;
			}
		}
	}

	*bbio_ret = bbio;
	bbio->num_stripes = num_stripes;
	bbio->max_errors = max_errors;
	bbio->mirror_num = mirror_num;

	/*
	 * this is the case that REQ_READ && dev_replace_is_ongoing &&
	 * mirror_num == num_stripes + 1 && dev_replace target drive is
	 * available as a mirror
	 */
	if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
		WARN_ON(num_stripes > 1);
		bbio->stripes[0].dev = dev_replace->tgtdev;
		bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
		bbio->mirror_num = map->num_stripes + 1;
	}
	if (raid_map) {
		sort_parity_stripes(bbio, raid_map);
		*raid_map_ret = raid_map;
	}
out:
	if (dev_replace_is_ongoing)
		btrfs_dev_replace_unlock(dev_replace);
	free_extent_map(em);
	return ret;
}
int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
		    u64 logical, u64 *length,
		    struct btrfs_bio **bbio_ret, int mirror_num)
{
	return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
				 mirror_num, NULL);
}
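/*
 * Reverse mapping: find the logical chunk addresses that map to the given
 * physical offset on one device, used e.g. by exclude_super_stripes() to
 * keep super block copies out of the free space accounting.
 */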
int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
		     u64 chunk_start, u64 physical, u64 devid,
		     u64 **logical, int *naddrs, int *stripe_len)
{
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	struct extent_map *em;
	struct map_lookup *map;
	u64 *buf;
	u64 bytenr;
	u64 length;
	u64 stripe_nr;
	u64 rmap_len;
	int i, j, nr = 0;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_start, 1);
	read_unlock(&em_tree->lock);

	if (!em) {
		printk(KERN_ERR "btrfs: couldn't find em for chunk %Lu\n",
		       chunk_start);
		return -EIO;
	}

	if (em->start != chunk_start) {
		printk(KERN_ERR "btrfs: bad chunk start, em=%Lu, wanted=%Lu\n",
		       em->start, chunk_start);
		free_extent_map(em);
		return -EIO;
	}
	map = (struct map_lookup *)em->bdev;

	length = em->len;
	rmap_len = map->stripe_len;

	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		do_div(length, map->num_stripes / map->sub_stripes);
	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
		do_div(length, map->num_stripes);
	else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
			      BTRFS_BLOCK_GROUP_RAID6)) {
		do_div(length, nr_data_stripes(map));
		rmap_len = map->stripe_len * nr_data_stripes(map);
	}

	buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
	BUG_ON(!buf); /* -ENOMEM */

	for (i = 0; i < map->num_stripes; i++) {
		if (devid && map->stripes[i].dev->devid != devid)
			continue;
		if (map->stripes[i].physical > physical ||
		    map->stripes[i].physical + length <= physical)
			continue;

		stripe_nr = physical - map->stripes[i].physical;
		do_div(stripe_nr, map->stripe_len);

		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripe_nr = stripe_nr * map->num_stripes + i;
			do_div(stripe_nr, map->sub_stripes);
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
			stripe_nr = stripe_nr * map->num_stripes + i;
		} /* else if RAID[56], multiply by nr_data_stripes().
		   * Alternatively, just use rmap_len below instead of
		   * map->stripe_len */

		bytenr = chunk_start + stripe_nr * rmap_len;
		WARN_ON(nr >= map->num_stripes);
		for (j = 0; j < nr; j++) {
			if (buf[j] == bytenr)
				break;
		}
		if (j == nr) {
			WARN_ON(nr >= map->num_stripes);
			buf[nr++] = bytenr;
		}
	}

	*logical = buf;
	*naddrs = nr;
	*stripe_len = rmap_len;

	free_extent_map(em);
	return 0;
}
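/*
 * Per-stripe bio completion. Errors are counted in the shared btrfs_bio;
 * only the final completion (stripes_pending reaching zero) ends the
 * original bio, and the overall result is success as long as no more than
 * max_errors stripes failed.
 */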
static void btrfs_end_bio(struct bio *bio, int err)
{
	struct btrfs_bio *bbio = bio->bi_private;
	int is_orig_bio = 0;

	if (err) {
		atomic_inc(&bbio->error);
		if (err == -EIO || err == -EREMOTEIO) {
			unsigned int stripe_index =
				btrfs_io_bio(bio)->stripe_index;
			struct btrfs_device *dev;

			BUG_ON(stripe_index >= bbio->num_stripes);
			dev = bbio->stripes[stripe_index].dev;
			if (dev->bdev) {
				if (bio->bi_rw & WRITE)
					btrfs_dev_stat_inc(dev,
						BTRFS_DEV_STAT_WRITE_ERRS);
				else
					btrfs_dev_stat_inc(dev,
						BTRFS_DEV_STAT_READ_ERRS);
				if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
					btrfs_dev_stat_inc(dev,
						BTRFS_DEV_STAT_FLUSH_ERRS);
				btrfs_dev_stat_print_on_error(dev);
			}
		}
	}

	if (bio == bbio->orig_bio)
		is_orig_bio = 1;

	if (atomic_dec_and_test(&bbio->stripes_pending)) {
		if (!is_orig_bio) {
			bio_put(bio);
			bio = bbio->orig_bio;
		}
		bio->bi_private = bbio->private;
		bio->bi_end_io = bbio->end_io;
		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
		/* only send an error to the higher layers if it is
		 * beyond the tolerance of the btrfs bio
		 */
		if (atomic_read(&bbio->error) > bbio->max_errors) {
			err = -EIO;
		} else {
			/*
			 * this bio is actually up to date, we didn't
			 * go over the max number of errors
			 */
			set_bit(BIO_UPTODATE, &bio->bi_flags);
			err = 0;
		}
		kfree(bbio);

		bio_endio(bio, err);
	} else if (!is_orig_bio) {
		bio_put(bio);
	}
}
struct async_sched {
	struct bio *bio;
	int rw;
	struct btrfs_fs_info *info;
	struct btrfs_work work;
};

/*
 * see run_scheduled_bios for a description of why bios are collected for
 * async submit.
 *
 * This will add one bio to the pending list for a device and make sure
 * the work struct is scheduled.
 */
static noinline void btrfs_schedule_bio(struct btrfs_root *root,
					struct btrfs_device *device,
					int rw, struct bio *bio)
{
	int should_queue = 1;
	struct btrfs_pending_bios *pending_bios;

	if (device->missing || !device->bdev) {
		bio_endio(bio, -EIO);
		return;
	}

	/* don't bother with additional async steps for reads, right now */
	if (!(rw & REQ_WRITE)) {
		bio_get(bio);
		btrfsic_submit_bio(rw, bio);
		bio_put(bio);
		return;
	}

	/*
	 * nr_async_bios allows us to reliably return congestion to the
	 * higher layers. Otherwise, the async bio makes it appear we have
	 * made progress against dirty pages when we've really just put it
	 * on a queue for later
	 */
	atomic_inc(&root->fs_info->nr_async_bios);
	WARN_ON(bio->bi_next);
	bio->bi_next = NULL;
	bio->bi_rw |= rw;

	spin_lock(&device->io_lock);
	if (bio->bi_rw & REQ_SYNC)
		pending_bios = &device->pending_sync_bios;
	else
		pending_bios = &device->pending_bios;

	if (pending_bios->tail)
		pending_bios->tail->bi_next = bio;

	pending_bios->tail = bio;
	if (!pending_bios->head)
		pending_bios->head = bio;
	if (device->running_pending)
		should_queue = 0;

	spin_unlock(&device->io_lock);

	if (should_queue)
		btrfs_queue_worker(&root->fs_info->submit_workers,
				   &device->work);
}
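/*
 * Check whether @bio, retargeted to @sector on @bdev, would still be
 * accepted in one piece by the queue's limits (max_sectors and an
 * optional merge_bvec_fn). If not, btrfs_map_bio() falls back to
 * breakup_stripe_bio().
 */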
static int bio_size_ok(struct block_device *bdev, struct bio *bio,
		       u64 sector)
{
	struct bio_vec *prev;
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned short max_sectors = queue_max_sectors(q);
	struct bvec_merge_data bvm = {
		.bi_bdev = bdev,
		.bi_sector = sector,
		.bi_rw = bio->bi_rw,
	};

	if (bio->bi_vcnt == 0) {
		WARN_ON(1);
		return 1;
	}

	prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
	if (bio_sectors(bio) > max_sectors)
		return 0;

	if (!q->merge_bvec_fn)
		return 1;

	bvm.bi_size = bio->bi_size - prev->bv_len;
	if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len)
		return 0;
	return 1;
}
static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
			      struct bio *bio, u64 physical, int dev_nr,
			      int rw, int async)
{
	struct btrfs_device *dev = bbio->stripes[dev_nr].dev;

	bio->bi_private = bbio;
	btrfs_io_bio(bio)->stripe_index = dev_nr;
	bio->bi_end_io = btrfs_end_bio;
	bio->bi_sector = physical >> 9;
#ifdef DEBUG
	{
		struct rcu_string *name;

		rcu_read_lock();
		name = rcu_dereference(dev->name);
		pr_debug("btrfs_map_bio: rw %d, sector=%llu, dev=%lu "
			 "(%s id %llu), size=%u\n", rw,
			 (u64)bio->bi_sector, (u_long)dev->bdev->bd_dev,
			 name->str, dev->devid, bio->bi_size);
		rcu_read_unlock();
	}
#endif
	bio->bi_bdev = dev->bdev;
	if (async)
		btrfs_schedule_bio(root, dev, rw, bio);
	else
		btrfsic_submit_bio(rw, bio);
}
static int breakup_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
			      struct bio *first_bio, struct btrfs_device *dev,
			      int dev_nr, int rw, int async)
{
	struct bio_vec *bvec = first_bio->bi_io_vec;
	struct bio *bio;
	int nr_vecs = bio_get_nr_vecs(dev->bdev);
	u64 physical = bbio->stripes[dev_nr].physical;

again:
	bio = btrfs_bio_alloc(dev->bdev, physical >> 9, nr_vecs, GFP_NOFS);
	if (!bio)
		return -ENOMEM;

	while (bvec <= (first_bio->bi_io_vec + first_bio->bi_vcnt - 1)) {
		if (bio_add_page(bio, bvec->bv_page, bvec->bv_len,
				 bvec->bv_offset) < bvec->bv_len) {
			u64 len = bio->bi_size;

			atomic_inc(&bbio->stripes_pending);
			submit_stripe_bio(root, bbio, bio, physical, dev_nr,
					  rw, async);
			physical += len;
			goto again;
		}
		bvec++;
	}

	submit_stripe_bio(root, bbio, bio, physical, dev_nr, rw, async);
	return 0;
}
static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
{
	atomic_inc(&bbio->error);
	if (atomic_dec_and_test(&bbio->stripes_pending)) {
		bio->bi_private = bbio->private;
		bio->bi_end_io = bbio->end_io;
		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
		bio->bi_sector = logical >> 9;
		kfree(bbio);
		bio_endio(bio, -EIO);
	}
}
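/*
 * Map @bio's logical range and submit one bio per stripe: RAID5/6 full
 * stripe writes and recovery reads are handed to the raid56 helpers,
 * everything else is cloned per stripe and submitted directly or via the
 * async scheduler.
 */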
int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
		  int mirror_num, int async_submit)
{
	struct btrfs_device *dev;
	struct bio *first_bio = bio;
	u64 logical = (u64)bio->bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	u64 *raid_map = NULL;
	int ret;
	int dev_nr = 0;
	int total_devs = 1;
	struct btrfs_bio *bbio = NULL;

	length = bio->bi_size;
	map_length = length;

	ret = __btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
			      mirror_num, &raid_map);
	if (ret) /* -ENOMEM */
		return ret;

	total_devs = bbio->num_stripes;
	bbio->orig_bio = first_bio;
	bbio->private = first_bio->bi_private;
	bbio->end_io = first_bio->bi_end_io;
	atomic_set(&bbio->stripes_pending, bbio->num_stripes);

	if (raid_map) {
		/* In this case, map_length has been set to the length of
		   a single stripe; not the whole write */
		if (rw & WRITE) {
			return raid56_parity_write(root, bio, bbio,
						   raid_map, map_length);
		} else {
			return raid56_parity_recover(root, bio, bbio,
						     raid_map, map_length,
						     mirror_num);
		}
	}

	if (map_length < length) {
		btrfs_crit(root->fs_info, "mapping failed logical %llu bio len %llu len %llu",
			(unsigned long long)logical,
			(unsigned long long)length,
			(unsigned long long)map_length);
		BUG();
	}

	while (dev_nr < total_devs) {
		dev = bbio->stripes[dev_nr].dev;
		if (!dev || !dev->bdev || (rw & WRITE && !dev->writeable)) {
			bbio_error(bbio, first_bio, logical);
			dev_nr++;
			continue;
		}

		/*
		 * Check and see if we're ok with this bio based on its size
		 * and offset with the given device.
		 */
		if (!bio_size_ok(dev->bdev, first_bio,
				 bbio->stripes[dev_nr].physical >> 9)) {
			ret = breakup_stripe_bio(root, bbio, first_bio, dev,
						 dev_nr, rw, async_submit);
			BUG_ON(ret);
			dev_nr++;
			continue;
		}

		if (dev_nr < total_devs - 1) {
			bio = btrfs_bio_clone(first_bio, GFP_NOFS);
			BUG_ON(!bio); /* -ENOMEM */
		} else {
			bio = first_bio;
		}

		submit_stripe_bio(root, bbio, bio,
				  bbio->stripes[dev_nr].physical, dev_nr, rw,
				  async_submit);
		dev_nr++;
	}
	return 0;
}
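/*
 * Look up a device by devid (and optional uuid) in the current filesystem
 * and all of its seed filesystems; a NULL fsid matches any of them.
 */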
struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
				       u8 *uuid, u8 *fsid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *cur_devices;

	cur_devices = fs_info->fs_devices;
	while (cur_devices) {
		if (!fsid ||
		    !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
			device = __find_device(&cur_devices->devices,
					       devid, uuid);
			if (device)
				return device;
		}
		cur_devices = cur_devices->seed;
	}
	return NULL;
}
static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
					    u64 devid, u8 *dev_uuid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;

	device = kzalloc(sizeof(*device), GFP_NOFS);
	if (!device)
		return NULL;
	list_add(&device->dev_list,
		 &fs_devices->devices);
	device->dev_root = root->fs_info->dev_root;
	device->devid = devid;
	device->work.func = pending_bios_fn;
	device->fs_devices = fs_devices;
	device->missing = 1;
	fs_devices->num_devices++;
	fs_devices->missing_devices++;
	spin_lock_init(&device->io_lock);
	INIT_LIST_HEAD(&device->dev_alloc_list);
	memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
	return device;
}
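/*
 * Turn one chunk item into a map_lookup and insert it as an extent_map
 * into the logical->physical mapping tree. Stripe devices that cannot be
 * found are fatal, unless the filesystem is mounted with -o degraded, in
 * which case add_missing_dev() placeholders stand in for them.
 */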
static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
			  struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk)
{
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	u64 logical;
	u64 length;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	int num_stripes;
	int ret;
	int i;

	logical = key->offset;
	length = btrfs_chunk_length(leaf, chunk);

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
	read_unlock(&map_tree->map_tree.lock);

	/* already mapped? */
	if (em && em->start <= logical && em->start + em->len > logical) {
		free_extent_map(em);
		return 0;
	} else if (em) {
		free_extent_map(em);
	}

	em = alloc_extent_map();
	if (!em)
		return -ENOMEM;
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		free_extent_map(em);
		return -ENOMEM;
	}

	em->bdev = (struct block_device *)map;
	em->start = logical;
	em->len = length;
	em->orig_start = 0;
	em->block_start = 0;
	em->block_len = em->len;

	map->num_stripes = num_stripes;
	map->io_width = btrfs_chunk_io_width(leaf, chunk);
	map->io_align = btrfs_chunk_io_align(leaf, chunk);
	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	map->type = btrfs_chunk_type(leaf, chunk);
	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
	for (i = 0; i < num_stripes; i++) {
		map->stripes[i].physical =
			btrfs_stripe_offset_nr(leaf, chunk, i);
		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
		read_extent_buffer(leaf, uuid, (unsigned long)
				   btrfs_stripe_dev_uuid_nr(chunk, i),
				   BTRFS_UUID_SIZE);
		map->stripes[i].dev = btrfs_find_device(root->fs_info, devid,
							uuid, NULL);
		if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
			kfree(map);
			free_extent_map(em);
			return -EIO;
		}
		if (!map->stripes[i].dev) {
			map->stripes[i].dev =
				add_missing_dev(root, devid, uuid);
			if (!map->stripes[i].dev) {
				kfree(map);
				free_extent_map(em);
				return -EIO;
			}
		}
		map->stripes[i].dev->in_fs_metadata = 1;
	}

	write_lock(&map_tree->map_tree.lock);
	ret = add_extent_mapping(&map_tree->map_tree, em, 0);
	write_unlock(&map_tree->map_tree.lock);
	BUG_ON(ret); /* Tree corruption */
	free_extent_map(em);

	return 0;
}
static void fill_device_from_item(struct extent_buffer *leaf,
				  struct btrfs_dev_item *dev_item,
				  struct btrfs_device *device)
{
	unsigned long ptr;

	device->devid = btrfs_device_id(leaf, dev_item);
	device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->total_bytes = device->disk_total_bytes;
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);
	WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
	device->is_tgtdev_for_dev_replace = 0;

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
}
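/*
 * Ensure the seed filesystem with the given fsid is opened (read-only)
 * and linked into this filesystem's fs_devices->seed chain, so chunks
 * that still live on the seed devices can be mapped.
 */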
static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;
	int ret;

	BUG_ON(!mutex_is_locked(&uuid_mutex));

	fs_devices = root->fs_info->fs_devices->seed;
	while (fs_devices) {
		if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
			ret = 0;
			goto out;
		}
		fs_devices = fs_devices->seed;
	}

	fs_devices = find_fsid(fsid);
	if (!fs_devices) {
		ret = -ENOENT;
		goto out;
	}

	fs_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(fs_devices)) {
		ret = PTR_ERR(fs_devices);
		goto out;
	}

	ret = __btrfs_open_devices(fs_devices, FMODE_READ,
				   root->fs_info->bdev_holder);
	if (ret) {
		free_fs_devices(fs_devices);
		goto out;
	}

	if (!fs_devices->seeding) {
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
		ret = -EINVAL;
		goto out;
	}

	fs_devices->seed = root->fs_info->fs_devices->seed;
	root->fs_info->fs_devices->seed = fs_devices;
out:
	return ret;
}
static int read_one_dev(struct btrfs_root *root,
			struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
	struct btrfs_device *device;
	u64 devid;
	int ret;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];

	devid = btrfs_device_id(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid,
			   (unsigned long)btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	read_extent_buffer(leaf, fs_uuid,
			   (unsigned long)btrfs_device_fsid(dev_item),
			   BTRFS_UUID_SIZE);

	if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
		ret = open_seed_devices(root, fs_uuid);
		if (ret && !btrfs_test_opt(root, DEGRADED))
			return ret;
	}

	device = btrfs_find_device(root->fs_info, devid, dev_uuid, fs_uuid);
	if (!device || !device->bdev) {
		if (!btrfs_test_opt(root, DEGRADED))
			return -EIO;

		if (!device) {
			btrfs_warn(root->fs_info, "devid %llu missing",
				   (unsigned long long)devid);
			device = add_missing_dev(root, devid, dev_uuid);
			if (!device)
				return -ENOMEM;
		} else if (!device->missing) {
			/*
			 * this happens when a device that was properly setup
			 * in the device info lists suddenly goes bad.
			 * device->bdev is NULL, and so we have to set
			 * device->missing to one here
			 */
			root->fs_info->fs_devices->missing_devices++;
			device->missing = 1;
		}
	}

	if (device->fs_devices != root->fs_info->fs_devices) {
		BUG_ON(device->writeable);
		if (device->generation !=
		    btrfs_device_generation(leaf, dev_item))
			return -EINVAL;
	}

	fill_device_from_item(leaf, dev_item, device);
	device->dev_root = root->fs_info->dev_root;
	device->in_fs_metadata = 1;
	if (device->writeable && !device->is_tgtdev_for_dev_replace) {
		device->fs_devices->total_rw_bytes += device->total_bytes;
		spin_lock(&root->fs_info->free_chunk_lock);
		root->fs_info->free_chunk_space += device->total_bytes -
			device->bytes_used;
		spin_unlock(&root->fs_info->free_chunk_lock);
	}
	ret = 0;
	return ret;
}
int btrfs_read_sys_array(struct btrfs_root *root)
{
	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
	struct extent_buffer *sb;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	unsigned long sb_ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
					  BTRFS_SUPER_INFO_SIZE);
	if (!sb)
		return -ENOMEM;
	btrfs_set_buffer_uptodate(sb);
	btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
	/*
	 * The sb extent buffer is artificial and just used to read the system array.
	 * The btrfs_set_buffer_uptodate() call does not properly mark all its
	 * pages up-to-date when the page is larger: extent does not cover the
	 * whole page and consequently check_page_uptodate does not find all
	 * the page's extents up-to-date (the hole beyond sb),
	 * write_extent_buffer then triggers a WARN_ON.
	 *
	 * Regular short extents go through mark_extent_buffer_dirty/writeback cycle,
	 * but sb spans only this function. Add an explicit SetPageUptodate call
	 * to silence the warning eg. on PowerPC 64.
	 */
	if (PAGE_CACHE_SIZE > BTRFS_SUPER_INFO_SIZE)
		SetPageUptodate(sb->pages[0]);

	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
	cur = 0;
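	/*
	 * sys_chunk_array holds a packed sequence of pairs:
	 *
	 *	| disk_key | chunk (with its stripes) | disk_key | ...
	 *
	 * array_size counts the used bytes, so each iteration advances by
	 * sizeof(disk_key) plus the variable btrfs_chunk_item_size().
	 */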
	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key); ptr += len;
		sb_ptr += len;
		cur += len;

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)sb_ptr;
			ret = read_one_chunk(root, &key, sb, chunk);
			if (ret)
				break;
			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
			len = btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
		ptr += len;
		sb_ptr += len;
		cur += len;
	}
	free_extent_buffer(sb);
	return ret;
}
int btrfs_read_chunk_tree(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	mutex_lock(&uuid_mutex);
	lock_chunks(root);

	/* first we search for all of the device items, and then we
	 * read in all of the chunk items. This way we can create chunk
	 * mappings that reference all of the devices that are found
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
again:
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
			if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
				break;
			if (found_key.type == BTRFS_DEV_ITEM_KEY) {
				struct btrfs_dev_item *dev_item;
				dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
				ret = read_one_dev(root, leaf, dev_item);
				if (ret)
					goto error;
			}
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(root, &found_key, leaf, chunk);
			if (ret)
				goto error;
		}
		path->slots[0]++;
	}
	if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
		key.objectid = 0;
		btrfs_release_path(path);
		goto again;
	}
	ret = 0;
error:
	unlock_chunks(root);
	mutex_unlock(&uuid_mutex);

	btrfs_free_path(path);
	return ret;
}

static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
{
	int i;

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		btrfs_dev_stat_reset(dev, i);
}
int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
{
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct extent_buffer *eb;
	int slot;
	int ret = 0;
	struct btrfs_device *device;
	struct btrfs_path *path = NULL;
	int i;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		int item_size;
		struct btrfs_dev_stats_item *ptr;

		key.objectid = 0;
		key.type = BTRFS_DEV_STATS_KEY;
		key.offset = device->devid;
		ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
		if (ret) {
			__btrfs_reset_dev_stats(device);
			device->dev_stats_valid = 1;
			btrfs_release_path(path);
			continue;
		}
		slot = path->slots[0];
		eb = path->nodes[0];
		btrfs_item_key_to_cpu(eb, &found_key, slot);
		item_size = btrfs_item_size_nr(eb, slot);

		ptr = btrfs_item_ptr(eb, slot,
				     struct btrfs_dev_stats_item);

		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (item_size >= (1 + i) * sizeof(__le64))
				btrfs_dev_stat_set(device, i,
					btrfs_dev_stats_value(eb, ptr, i));
			else
				btrfs_dev_stat_reset(device, i);
		}

		device->dev_stats_valid = 1;
		btrfs_dev_stat_print_on_load(device);
		btrfs_release_path(path);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

out:
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}
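/*
 * The on-disk dev_stats item is a plain array of __le64 counters. Items
 * written by older kernels may be shorter than expected, which is why the
 * reader above checks item_size and the writer below deletes and replaces
 * any undersized item before filling in all current values.
 */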
static int update_dev_stat_item(struct btrfs_trans_handle *trans,
				struct btrfs_root *dev_root,
				struct btrfs_device *device)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *eb;
	struct btrfs_dev_stats_item *ptr;
	int ret;
	int i;

	key.objectid = 0;
	key.type = BTRFS_DEV_STATS_KEY;
	key.offset = device->devid;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
	if (ret < 0) {
		printk_in_rcu(KERN_WARNING "btrfs: error %d while searching for dev_stats item for device %s!\n",
			      ret, rcu_str_deref(device->name));
		goto out;
	}

	if (ret == 0 &&
	    btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
		/* need to delete old one and insert a new one */
		ret = btrfs_del_item(trans, dev_root, path);
		if (ret != 0) {
			printk_in_rcu(KERN_WARNING "btrfs: delete too small dev_stats item for device %s failed %d!\n",
				      rcu_str_deref(device->name), ret);
			goto out;
		}
		ret = 1;
	}

	if (ret == 1) {
		/* need to insert a new item */
		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, dev_root, path,
					      &key, sizeof(*ptr));
		if (ret < 0) {
			printk_in_rcu(KERN_WARNING "btrfs: insert dev_stats item for device %s failed %d!\n",
				      rcu_str_deref(device->name), ret);
			goto out;
		}
	}

	eb = path->nodes[0];
	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		btrfs_set_dev_stats_value(eb, ptr, i,
					  btrfs_dev_stat_read(device, i));
	btrfs_mark_buffer_dirty(eb);

out:
	btrfs_free_path(path);
	return ret;
}
 * called from commit_transaction. Writes all changed device stats to disk.
 */
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	int ret = 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		if (!device->dev_stats_valid || !device->dev_stats_dirty)
			continue;

		ret = update_dev_stat_item(trans, dev_root, device);
		if (!ret)
			device->dev_stats_dirty = 0;
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}

void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
{
	btrfs_dev_stat_inc(dev, index);
	btrfs_dev_stat_print_on_error(dev);
}
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
{
	if (!dev->dev_stats_valid)
		return;
	printk_ratelimited_in_rcu(KERN_ERR
			   "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
			   rcu_str_deref(dev->name),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
			   btrfs_dev_stat_read(dev,
					       BTRFS_DEV_STAT_CORRUPTION_ERRS),
			   btrfs_dev_stat_read(dev,
					       BTRFS_DEV_STAT_GENERATION_ERRS));
}
static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
{
	int i;

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		if (btrfs_dev_stat_read(dev, i) != 0)
			break;
	if (i == BTRFS_DEV_STAT_VALUES_MAX)
		return; /* all values == 0, suppress message */

	printk_in_rcu(KERN_INFO "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
	       rcu_str_deref(dev->name),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}
int btrfs_get_dev_stats(struct btrfs_root *root,
			struct btrfs_ioctl_get_dev_stats *stats)
{
	struct btrfs_device *dev;
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	int i;

	mutex_lock(&fs_devices->device_list_mutex);
	dev = btrfs_find_device(root->fs_info, stats->devid, NULL, NULL);
	mutex_unlock(&fs_devices->device_list_mutex);

	if (!dev) {
		printk(KERN_WARNING
		       "btrfs: get dev_stats failed, device not found\n");
		return -ENODEV;
	} else if (!dev->dev_stats_valid) {
		printk(KERN_WARNING
		       "btrfs: get dev_stats failed, not yet valid\n");
		return -ENODEV;
	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (stats->nr_items > i)
				stats->values[i] =
					btrfs_dev_stat_read_and_reset(dev, i);
			else
				btrfs_dev_stat_reset(dev, i);
		}
	} else {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
			if (stats->nr_items > i)
				stats->values[i] = btrfs_dev_stat_read(dev, i);
	}
	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
	return 0;
}
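/*
 * Userspace reaches btrfs_get_dev_stats() through the
 * BTRFS_IOC_GET_DEV_STATS ioctl; an illustrative (not normative) caller
 * sketch:
 *
 *	struct btrfs_ioctl_get_dev_stats s = {
 *		.devid = devid,
 *		.nr_items = BTRFS_DEV_STAT_VALUES_MAX,
 *	};
 *	ret = ioctl(fs_fd, BTRFS_IOC_GET_DEV_STATS, &s);
 *
 * On return, nr_items is clamped to the number of values the kernel
 * actually filled in.
 */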
int btrfs_scratch_superblock(struct btrfs_device *device)
{
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;

	bh = btrfs_read_dev_super(device->bdev);
	if (!bh)
		return -EINVAL;
	disk_super = (struct btrfs_super_block *)bh->b_data;

	memset(&disk_super->magic, 0, sizeof(disk_super->magic));
	set_buffer_dirty(bh);
	sync_dirty_buffer(bh);
	brelse(bh);

	return 0;
}